1 /*
2 * Copyright (c) 1999-2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
/*
 * Kernel Control domain - allows clients to open control connections
 * to kernel controllers and to read/write data over them.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */
37
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/syslog.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/protosw.h>
45 #include <sys/domain.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/sys_domain.h>
49 #include <sys/kern_event.h>
50 #include <sys/kern_control.h>
51 #include <sys/kauth.h>
52 #include <sys/sysctl.h>
53 #include <sys/proc_info.h>
54 #include <net/if_var.h>
55
56 #include <mach/vm_types.h>
57
58 #include <kern/thread.h>
59
60 #include <net/sockaddr_utils.h>
61
extern const int copysize_limit_panic;

/*
 * A registered kernel controller.  One per registered control; all
 * instances are chained off ctl_head and protected by ctl_mtx.
 */
struct kctl {
	TAILQ_ENTRY(kctl) next;         /* controller chain */
	kern_ctl_ref kctlref;           /* opaque reference (see kctl_make_ref) */

	/* controller information provided when registering */
	char name[MAX_KCTL_NAME];       /* unique identifier */
	u_int32_t id;
	u_int32_t reg_unit;

	/* misc communication information */
	u_int32_t flags;                /* support flags */
	u_int32_t recvbufsize;          /* request more than the default buffer size */
	u_int32_t sendbufsize;          /* request more than the default buffer size */

	/* Dispatch functions */
	ctl_setup_func setup;           /* Setup contact */
	ctl_bind_func bind;             /* Prepare contact */
	ctl_connect_func connect;       /* Make contact */
	ctl_disconnect_func disconnect; /* Break contact */
	ctl_send_func send;             /* Send data to nke */
	ctl_send_list_func send_list;   /* Send list of packets */
	ctl_setopt_func setopt;         /* set kctl configuration */
	ctl_getopt_func getopt;         /* get kctl configuration */
	ctl_rcvd_func rcvd;             /* Notify nke when client reads data */

	TAILQ_HEAD(, ctl_cb) kcb_head;  /* attached control blocks, sorted by unit */
	u_int32_t lastunit;             /* NOTE(review): not referenced in this view — presumably a unit allocation hint */
};
92
#if DEVELOPMENT || DEBUG
/* Per-kcb connection state, tracked for debug assertions only. */
enum ctl_status {
	KCTL_DISCONNECTED = 0,
	KCTL_CONNECTING = 1,
	KCTL_CONNECTED = 2
};
#endif /* DEVELOPMENT || DEBUG */
100
/*
 * Per-socket control block for a kernel control socket; hangs off
 * so->so_pcb and links the socket to its controller.
 */
struct ctl_cb {
	TAILQ_ENTRY(ctl_cb) next;       /* controller chain */
	lck_mtx_t mtx;                  /* per-socket mutex */
	struct socket *so;              /* controlling socket */
	struct kctl *kctl;              /* back pointer to controller */
	void *userdata;                 /* controller's per-unit cookie (set by setup/bind/connect) */
	struct sockaddr_ctl sac;        /* id/unit this kcb is attached as */
	uint32_t usecount;              /* waited on in ctl_disconnect before teardown */
	uint32_t kcb_usecount;          /* active callers (see ctl_kcb_increment_use_count) */
	uint32_t require_clearing_count; /* callers wanting exclusive access */
#if DEVELOPMENT || DEBUG
	enum ctl_status status;         /* debug-only connection state */
#endif /* DEVELOPMENT || DEBUG */
};
115
#ifndef ROUNDUP64
/* Round x up to the next multiple of 8 bytes. */
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif

#ifndef ADVANCE64
/* Advance pointer p past n bytes, keeping 64-bit alignment. */
#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif

/*
 * Default socket buffer sizes; a controller can request larger
 * buffers at registration time (see kctl sendbufsize/recvbufsize).
 */

#define CTL_SENDSIZE (2 * 1024) /* default buffer size */
#define CTL_RECVSIZE (8 * 1024) /* default buffer size */

/*
 * Global state for the kernel control family.
 */

const u_int32_t ctl_maxunit = 65536;    /* upper bound on per-controller unit numbers */
static LCK_ATTR_DECLARE(ctl_lck_attr, 0, 0);
static LCK_GRP_DECLARE(ctl_lck_grp, "Kernel Control Protocol");
/* ctl_mtx protects ctl_head, each kctl's kcb list, and kctlstat */
static LCK_MTX_DECLARE_ATTR(ctl_mtx, &ctl_lck_grp, &ctl_lck_attr);

/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head = TAILQ_HEAD_INITIALIZER(ctl_head);
142
/* pr_usrreqs handlers and lookup helpers implemented in this file */
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd,
    caddr_t __sized_by(IOCPARM_LEN(cmd)) data,
    struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_send_list(struct socket *, struct mbuf *, u_int *, int);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
static int ctl_usr_rcvd(struct socket *so, int flags);

/* Controller lookups; callers hold ctl_mtx where required */
static struct kctl *ctl_find_by_name(const char *__null_terminated);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
    u_int32_t *);
static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_int32_t event_code, u_int32_t id);

/* Socket locking glue wired into the protosw below */
static int ctl_lock(struct socket *, int, void *);
static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);
170
/* User-request dispatch table shared by both kctl protosw entries. */
static struct pr_usrreqs ctl_usrreqs = {
	.pru_attach =           ctl_attach,
	.pru_bind =             ctl_bind,
	.pru_connect =          ctl_connect,
	.pru_control =          ctl_ioctl,
	.pru_detach =           ctl_detach,
	.pru_disconnect =       ctl_disconnect,
	.pru_peeraddr =         ctl_peeraddr,
	.pru_rcvd =             ctl_usr_rcvd,
	.pru_send =             ctl_send,
	.pru_send_list =        ctl_send_list,
	.pru_sosend =           sosend,
	.pru_sosend_list =      sosend_list,
	.pru_soreceive =        soreceive,
};
186
/*
 * Two protosw entries for SYSPROTO_CONTROL: one datagram (PR_ATOMIC),
 * one stream.  Both use the same usrreqs and per-socket locking hooks.
 */
static struct protosw kctlsw[] = {
	{
		.pr_type =      SOCK_DGRAM,
		.pr_protocol =  SYSPROTO_CONTROL,
		.pr_flags =     PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
		.pr_ctloutput = ctl_ctloutput,
		.pr_usrreqs =   &ctl_usrreqs,
		.pr_lock =      ctl_lock,
		.pr_unlock =    ctl_unlock,
		.pr_getlock =   ctl_getlock,
	},
	{
		.pr_type =      SOCK_STREAM,
		.pr_protocol =  SYSPROTO_CONTROL,
		.pr_flags =     PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
		.pr_ctloutput = ctl_ctloutput,
		.pr_usrreqs =   &ctl_usrreqs,
		.pr_lock =      ctl_lock,
		.pr_unlock =    ctl_unlock,
		.pr_getlock =   ctl_getlock,
	}
};
209
/* sysctl handler prototypes (bodies elsewhere in this file) */
__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;


SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");

/* Family-wide statistics, exported below as net.systm.kctl.stats */
struct kctlstat kctlstat;
SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_getstat, "S,kctlstat", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_reg_list, "S,xkctl_reg", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_pcblist, "S,xkctlpcb", "");

/* Cap for auto-grown receive buffers (see ctl_rcvbspace) */
u_int32_t ctl_autorcvbuf_max = 256 * 1024;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");

/* High-water mark reached by auto-grown receive buffers */
u_int32_t ctl_autorcvbuf_high = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");

/* Non-zero enables verbose printf debugging throughout this file */
u_int32_t ctl_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");

#if DEVELOPMENT || DEBUG
/* Non-zero turns double-connect detection into a panic (see ctl_connect) */
u_int32_t ctl_panic_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
#endif /* DEVELOPMENT || DEBUG */

SYSCTL_UINT(_net_systm_kctl, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
    (unsigned int *)&kctlstat.kcs_pcbcount, 0, "");

/* The kctl reference table grows in increments of this many slots */
#define KCTL_TBL_INC 16

static uintptr_t kctl_tbl_size = 0;
static u_int32_t kctl_tbl_growing = 0;          /* a grow is in progress */
static u_int32_t kctl_tbl_growing_waiting = 0;  /* threads waiting on the grow */
static uintptr_t kctl_tbl_count = 0;
static struct kctl **__counted_by_or_null(kctl_tbl_size) kctl_table = NULL;
static uintptr_t kctl_ref_gencnt = 0;

static void kctl_tbl_grow(void);
static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
static void kctl_delete_ref(kern_ctl_ref);
static struct kctl *kctl_from_ref(kern_ctl_ref);
265
266 /*
267 * Install the protosw's for the Kernel Control manager.
268 */
269 __private_extern__ void
kern_control_init(struct domain * dp)270 kern_control_init(struct domain *dp)
271 {
272 struct protosw *pr;
273 int i;
274 int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));
275
276 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
277 VERIFY(dp == systemdomain);
278
279 for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
280 net_add_proto(pr, dp, 1);
281 }
282 }
283
284 static void
kcb_delete(struct ctl_cb * kcb)285 kcb_delete(struct ctl_cb *kcb)
286 {
287 if (kcb != 0) {
288 lck_mtx_destroy(&kcb->mtx, &ctl_lck_grp);
289 kfree_type(struct ctl_cb, kcb);
290 }
291 }
292
293 /*
294 * Kernel Controller user-request functions
295 * attach function must exist and succeed
296 * detach not necessary
297 * we need a pcb for the per socket mutex
298 */
299 static int
ctl_attach(struct socket * so,int proto,struct proc * p)300 ctl_attach(struct socket *so, int proto, struct proc *p)
301 {
302 #pragma unused(proto, p)
303 struct ctl_cb *__single kcb = 0;
304
305 kcb = kalloc_type(struct ctl_cb, Z_WAITOK | Z_ZERO | Z_NOFAIL);
306
307 lck_mtx_init(&kcb->mtx, &ctl_lck_grp, &ctl_lck_attr);
308 kcb->so = so;
309 so->so_pcb = (caddr_t)kcb;
310
311 /*
312 * For datagram, use character count for sbspace as its value
313 * may be use for packetization and we do not want to
314 * drop packets based on the sbspace hint that was just provided
315 */
316 if (SOCK_CHECK_TYPE(so, SOCK_DGRAM)) {
317 so->so_rcv.sb_flags |= SB_KCTL;
318 so->so_snd.sb_flags |= SB_KCTL;
319 }
320 return 0;
321 }
322
/*
 * Last-reference teardown: unhook the kcb from its controller (under
 * ctl_mtx), free it, then let the socket layer finish the free.
 */
static int
ctl_sofreelastref(struct socket *so)
{
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;

	so->so_pcb = 0;

	if (kcb != 0) {
		struct kctl *__single kctl;
		if ((kctl = kcb->kctl) != 0) {
			/* Still attached: remove from controller list and fix stats */
			lck_mtx_lock(&ctl_mtx);
			TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
			kctlstat.kcs_pcbcount--;
			kctlstat.kcs_gencnt++;
			lck_mtx_unlock(&ctl_mtx);
		}
		kcb_delete(kcb);
	}
	sofreelastref(so, 1);
	return 0;
}
344
/*
 * Use this function and ctl_kcb_require_clearing to serialize
 * critical calls into the kctl subsystem
 */
static void
ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
{
	LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
	/* Block while an exclusive ("clearing") caller is pending or active */
	while (kcb->require_clearing_count > 0) {
		msleep(&kcb->require_clearing_count, mutex_held, PSOCK | PCATCH, "kcb_require_clearing", NULL);
	}
	kcb->kcb_usecount++;
}
358
/*
 * Upgrade a shared use (taken via ctl_kcb_increment_use_count) to an
 * exclusive one: announce the request, drop our shared count, wait for
 * all other users to drain, then hold the single remaining use.
 * Pair with ctl_kcb_done_clearing.
 */
static void
ctl_kcb_require_clearing(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
{
	assert(kcb->kcb_usecount != 0);
	kcb->require_clearing_count++;
	kcb->kcb_usecount--;
	while (kcb->kcb_usecount > 0) { // we need to wait until no one else is running
		msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL);
	}
	kcb->kcb_usecount++;
}
370
/*
 * End an exclusive section started by ctl_kcb_require_clearing and wake
 * shared users blocked in ctl_kcb_increment_use_count.
 */
static void
ctl_kcb_done_clearing(struct ctl_cb *kcb)
{
	assert(kcb->require_clearing_count != 0);
	kcb->require_clearing_count--;
	wakeup((caddr_t)&kcb->require_clearing_count);
}
378
/*
 * Drop a shared use; if an exclusive caller is waiting for the count to
 * drain, wake it.
 */
static void
ctl_kcb_decrement_use_count(struct ctl_cb *kcb)
{
	assert(kcb->kcb_usecount != 0);
	kcb->kcb_usecount--;
	if (kcb->require_clearing_count != 0) {
		wakeup((caddr_t)&kcb->kcb_usecount);
	}
}
388
/*
 * pru_detach: notify a bound-but-never-connected controller, mark the
 * socket disconnected, and flag the pcb for clearing.
 */
static int
ctl_detach(struct socket *so)
{
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == 0) {
		return 0;
	}

	/* Take exclusive use of the kcb for the teardown */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);
	ctl_kcb_require_clearing(kcb, mtx_held);

	if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
	    kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
		// The unit was bound, but not connected
		// Invoke the disconnected call to cleanup
		if (kcb->kctl->disconnect != NULL) {
			/* Controller callback runs without the socket lock held */
			socket_unlock(so, 0);
			(*kcb->kctl->disconnect)(kcb->kctl->kctlref,
			    kcb->sac.sc_unit, kcb->userdata);
			socket_lock(so, 0);
		}
	}

	soisdisconnected(so);
#if DEVELOPMENT || DEBUG
	kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
	so->so_flags |= SOF_PCBCLEARING;
	ctl_kcb_done_clearing(kcb);
	ctl_kcb_decrement_use_count(kcb);
	return 0;
}
423
/*
 * Shared by ctl_bind and ctl_connect: look up the controller named by
 * the sockaddr_ctl, pick or validate a unit number, attach this
 * socket's kcb to the controller, and size the socket buffers.
 * Returns 0 on success (or if already set up); EINVAL, ENOENT,
 * EPROTOTYPE, EPERM or EBUSY on the various failures, undoing the
 * attach on the soreserve error path.
 */
static int
ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct kctl *__single kctl = NULL;
	int error = 0;
	struct sockaddr_ctl sa;
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;
	struct ctl_cb *__single kcb_next = NULL;

	if (kcb == 0) {
		panic("ctl_setup_kctl so_pcb null");
	}

	if (kcb->kctl != NULL) {
		// Already set up, skip
		return 0;
	}

	if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
		return EINVAL;
	}

	/* Local copy so sc_unit can be rewritten below */
	SOCKADDR_COPY(nam, &sa, sizeof(struct sockaddr_ctl));

	lck_mtx_lock(&ctl_mtx);
	kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
	if (kctl == NULL) {
		lck_mtx_unlock(&ctl_mtx);
		return ENOENT;
	}

	/* The socket type must match what the controller registered for */
	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_STREAM)) ||
	    (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_DGRAM))) {
		lck_mtx_unlock(&ctl_mtx);
		return EPROTOTYPE;
	}

	/* Privileged controllers require a superuser credential */
	if (kctl->flags & CTL_FLAG_PRIVILEGED) {
		if (p == 0) {
			lck_mtx_unlock(&ctl_mtx);
			return EINVAL;
		}
		if (kauth_cred_issuser(kauth_cred_get()) == 0) {
			lck_mtx_unlock(&ctl_mtx);
			return EPERM;
		}
	}

	if (kctl->setup != NULL) {
		/* The controller chooses/validates the unit itself */
		error = (*kctl->setup)(&sa.sc_unit, &kcb->userdata);
		if (error != 0) {
			lck_mtx_unlock(&ctl_mtx);
			return error;
		}
	} else if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		/* A specific unit was requested: it must be free */
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(&ctl_mtx);
			return EBUSY;
		}
	} else {
		/* Find an unused ID, assumes control IDs are in order */
		u_int32_t unit = 1;

		TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
			if (kcb_next->sac.sc_unit > unit) {
				/* Found a gap, lets fill it in */
				break;
			}
			unit = kcb_next->sac.sc_unit + 1;
			if (unit == ctl_maxunit) {
				break;
			}
		}

		if (unit == ctl_maxunit) {
			lck_mtx_unlock(&ctl_mtx);
			return EBUSY;
		}

		sa.sc_unit = unit;
	}

	bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
	kcb->kctl = kctl;
	/* Keep the kcb list sorted by unit: insert before the gap, else append */
	if (kcb_next != NULL) {
		TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
	} else {
		TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	}
	kctlstat.kcs_pcbcount++;
	kctlstat.kcs_gencnt++;
	kctlstat.kcs_connections++;
	lck_mtx_unlock(&ctl_mtx);

	/* Size the socket buffers as requested at registration */
	error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
	if (error) {
#if (DEBUG || DEVELOPMENT)
		if (ctl_debug) {
			printf("%s - soreserve(%llu, %u, %u) error %d\n",
			    __func__, so->so_gencnt,
			    kctl->sendbufsize, kctl->recvbufsize, error);
		}
#endif /* (DEBUG || DEVELOPMENT) */
		goto done;
	}

done:
	if (error) {
		/* Undo the attach performed above */
		soisdisconnected(so);
#if DEVELOPMENT || DEBUG
		kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
		lck_mtx_lock(&ctl_mtx);
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kcb->kctl = NULL;
		kcb->sac.sc_unit = 0;
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(&ctl_mtx);
	}
	return error;
}
549
/*
 * pru_bind: attach the socket to a controller unit without connecting.
 * Fails with EINVAL if the controller registered no bind callback.
 */
static int
ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == NULL) {
		panic("ctl_bind so_pcb null");
	}

	/* Take exclusive use of the kcb for the duration of the bind */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);
	ctl_kcb_require_clearing(kcb, mtx_held);

	error = ctl_setup_kctl(so, nam, p);
	if (error) {
		goto out;
	}

	if (kcb->kctl == NULL) {
		panic("ctl_bind kctl null");
	}

	if (kcb->kctl->bind == NULL) {
		error = EINVAL;
		goto out;
	}

	/* Controller callback runs without the socket lock held */
	socket_unlock(so, 0);
	error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
	socket_lock(so, 0);

out:
	ctl_kcb_done_clearing(kcb);
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
587
/*
 * pru_connect: attach to the controller unit (via ctl_setup_kctl) and
 * invoke the controller's connect callback.  On callback failure the
 * controller is notified via disconnect and the attach is rolled back.
 */
static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == NULL) {
		panic("ctl_connect so_pcb null");
	}

	/* Take exclusive use of the kcb for the duration of the connect */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);
	ctl_kcb_require_clearing(kcb, mtx_held);

#if DEVELOPMENT || DEBUG
	if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
		panic("kctl already connecting/connected");
	}
	kcb->status = KCTL_CONNECTING;
#endif /* DEVELOPMENT || DEBUG */

	error = ctl_setup_kctl(so, nam, p);
	if (error) {
		goto out;
	}

	if (kcb->kctl == NULL) {
		panic("ctl_connect kctl null");
	}

	soisconnecting(so);
	/* Controller callback runs without the socket lock held */
	socket_unlock(so, 0);
	error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
	socket_lock(so, 0);
	if (error) {
		goto end;
	}
	soisconnected(so);
#if DEVELOPMENT || DEBUG
	kcb->status = KCTL_CONNECTED;
#endif /* DEVELOPMENT || DEBUG */

end:
	if (error && kcb->kctl->disconnect) {
		/*
		 * XXX Make sure we Don't check the return value
		 * of disconnect here.
		 * ipsec/utun_ctl_disconnect will return error when
		 * disconnect gets called after connect failure.
		 * However if we decide to check for disconnect return
		 * value here. Please make sure to revisit
		 * ipsec/utun_ctl_disconnect.
		 */
		socket_unlock(so, 0);
		(*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
		socket_lock(so, 0);
	}
	if (error) {
		/* Roll back the attach done by ctl_setup_kctl */
		soisdisconnected(so);
#if DEVELOPMENT || DEBUG
		kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
		lck_mtx_lock(&ctl_mtx);
		TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
		kcb->kctl = NULL;
		kcb->sac.sc_unit = 0;
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(&ctl_mtx);
	}
out:
	ctl_kcb_done_clearing(kcb);
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
664
/*
 * pru_disconnect: notify the controller, mark the socket disconnected,
 * then detach the kcb from the controller under ctl_mtx (dropping the
 * socket lock to respect lock ordering).
 */
static int
ctl_disconnect(struct socket *so)
{
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;

	/* NOTE: kcb is re-read here; the initializer above is redundant */
	if ((kcb = (struct ctl_cb *)so->so_pcb)) {
		lck_mtx_t *__single mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
		ctl_kcb_increment_use_count(kcb, mtx_held);
		ctl_kcb_require_clearing(kcb, mtx_held);
		struct kctl *__single kctl = kcb->kctl;

		if (kctl && kctl->disconnect) {
			/* Controller callback runs without the socket lock held */
			socket_unlock(so, 0);
			(*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata);
			socket_lock(so, 0);
		}

		soisdisconnected(so);
#if DEVELOPMENT || DEBUG
		kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */

		/* ctl_mtx is taken without the socket lock held */
		socket_unlock(so, 0);
		lck_mtx_lock(&ctl_mtx);
		kcb->kctl = 0;
		kcb->sac.sc_unit = 0;
		/* Wait out transient holders of this kcb */
		while (kcb->usecount != 0) {
			msleep(&kcb->usecount, &ctl_mtx, 0, "kcb->usecount", 0);
		}

		/* Check for NULL here for the case where ctl_disconnect is racing with itself
		 * and the first thread has already cleaned up the structure */
		if (kctl) {
			TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		}
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		lck_mtx_unlock(&ctl_mtx);
		socket_lock(so, 0);
		ctl_kcb_done_clearing(kcb);
		ctl_kcb_decrement_use_count(kcb);
	}
	return 0;
}
710
711 static int
ctl_peeraddr(struct socket * so,struct sockaddr ** nam)712 ctl_peeraddr(struct socket *so, struct sockaddr **nam)
713 {
714 struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;
715 struct kctl *__single kctl;
716 struct sockaddr_ctl sc;
717
718 if (kcb == NULL) { /* sanity check */
719 return ENOTCONN;
720 }
721
722 if ((kctl = kcb->kctl) == NULL) {
723 return EINVAL;
724 }
725
726 bzero(&sc, sizeof(struct sockaddr_ctl));
727 sc.sc_len = sizeof(struct sockaddr_ctl);
728 sc.sc_family = AF_SYSTEM;
729 sc.ss_sysaddr = AF_SYS_CONTROL;
730 sc.sc_id = kctl->id;
731 sc.sc_unit = kcb->sac.sc_unit;
732
733 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
734
735 return 0;
736 }
737
738 static void
ctl_sbrcv_trim(struct socket * so)739 ctl_sbrcv_trim(struct socket *so)
740 {
741 struct sockbuf *__single sb = &so->so_rcv;
742
743 if (sb->sb_hiwat > sb->sb_idealsize) {
744 u_int32_t diff;
745 int32_t trim;
746
747 /*
748 * The difference between the ideal size and the
749 * current size is the upper bound of the trimage
750 */
751 diff = sb->sb_hiwat - sb->sb_idealsize;
752 /*
753 * We cannot trim below the outstanding data
754 */
755 trim = sb->sb_hiwat - sb->sb_cc;
756
757 trim = imin(trim, (int32_t)diff);
758
759 if (trim > 0) {
760 sbreserve(sb, (sb->sb_hiwat - trim));
761
762 if (ctl_debug) {
763 printf("%s - shrunk to %d\n",
764 __func__, sb->sb_hiwat);
765 }
766 }
767 }
768 }
769
/*
 * pru_rcvd: the client consumed receive data; notify the controller
 * via its rcvd callback and trim excess receive-buffer space.
 */
static int
ctl_usr_rcvd(struct socket *so, int flags)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (kcb == NULL) {
		return ENOTCONN;
	}

	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);

	if ((kctl = kcb->kctl) == NULL) {
		error = EINVAL;
		goto out;
	}

	if (kctl->rcvd) {
		/* Controller callback runs without the socket lock held */
		socket_unlock(so, 0);
		(*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
		socket_lock(so, 0);
	}

	ctl_sbrcv_trim(so);

out:
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
801
/*
 * pru_send: hand one packet to the controller's send callback.  The
 * data mbuf is consumed on every path (passed down or freed); control
 * mbufs are not supported and are freed immediately.
 */
static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control,
    struct proc *p)
{
#pragma unused(addr, p)
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (control) {
		m_freem(control);
	}

	if (kcb == NULL) {      /* sanity check */
		m_freem(m);
		return ENOTCONN;
	}

	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);

	/* error is still 0 here; the guards keep the chain below uniform */
	if (error == 0 && (kctl = kcb->kctl) == NULL) {
		error = EINVAL;
	}

	if (error == 0 && kctl->send) {
		so_update_tx_data_stats(so, 1, m->m_pkthdr.len);
		/* Controller callback runs without the socket lock held */
		socket_unlock(so, 0);
		error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
		    m, flags);
		socket_lock(so, 0);
	} else {
		/* No controller or no send callback: drop the packet */
		m_freem(m);
		if (error == 0) {
			error = ENOTSUP;
		}
	}
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
	}
	ctl_kcb_decrement_use_count(kcb);

	return error;
}
847
848 static int
ctl_send_list(struct socket * so,struct mbuf * m,u_int * pktcnt,int flags)849 ctl_send_list(struct socket *so, struct mbuf *m, u_int *pktcnt, int flags)
850 {
851 int error = 0;
852 struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
853 struct kctl *kctl;
854
855 if (kcb == NULL) { /* sanity check */
856 m_freem_list(m);
857 return ENOTCONN;
858 }
859
860 lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
861 ctl_kcb_increment_use_count(kcb, mtx_held);
862
863 if ((kctl = kcb->kctl) == NULL) {
864 error = EINVAL;
865 goto done;
866 }
867
868 *pktcnt = 0;
869 if (kctl->send_list != NULL) {
870 struct mbuf *nxt;
871 int space = 0;
872
873 for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
874 space += nxt->m_pkthdr.len;
875 *pktcnt += 1;
876 }
877 so_update_tx_data_stats(so, *pktcnt, space);
878
879 socket_unlock(so, 0);
880 error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
881 kcb->userdata, m, flags);
882 socket_lock(so, 0);
883 } else {
884 int space = 0;
885
886 while (m != NULL && error == 0) {
887 struct mbuf *nextpkt = m->m_nextpkt;
888
889 m->m_nextpkt = NULL;
890
891 space += m->m_pkthdr.len;
892
893 socket_unlock(so, 0);
894 error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
895 kcb->userdata, m, flags);
896 socket_lock(so, 0);
897 m = nextpkt;
898 if (error == 0) {
899 *pktcnt += 1;
900 }
901 }
902 so_update_tx_data_stats(so, *pktcnt, space);
903
904 if (m != NULL) {
905 m_freem_list(m);
906 }
907 }
908 done:
909 if (error != 0) {
910 OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
911 }
912 ctl_kcb_decrement_use_count(kcb);
913
914 return error;
915 }
916
/*
 * Decide whether the receive buffer can accept datasize more bytes.
 * Non-critical controllers get plain space checks; CTL_FLAG_REG_CRIT
 * controllers reserve 25% of the buffer for CTL_DATA_CRIT messages and
 * may auto-grow the buffer (capped by ctl_autorcvbuf_max) for them.
 * Returns 0 when the data fits, ENOBUFS otherwise.
 */
static errno_t
ctl_rcvbspace(struct socket *so, size_t datasize,
    u_int32_t kctlflags, u_int32_t flags)
{
	struct sockbuf *__single sb = &so->so_rcv;
	u_int32_t space = sbspace(sb);
	errno_t error;

	if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
		/* Non-critical controller: simple space check */
		if ((u_int32_t) space >= datasize) {
			error = 0;
		} else {
			error = ENOBUFS;
		}
	} else if ((flags & CTL_DATA_CRIT) == 0) {
		/*
		 * Reserve 25% for critical messages
		 */
		if (space < (sb->sb_hiwat >> 2) ||
		    space < datasize) {
			error = ENOBUFS;
		} else {
			error = 0;
		}
	} else {
		size_t autorcvbuf_max;

		/*
		 * Allow overcommit of 25%
		 */
		autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
		    ctl_autorcvbuf_max);

		if ((u_int32_t) space >= datasize) {
			error = 0;
		} else if (sb->sb_hiwat < autorcvbuf_max) {
			/*
			 * Grow with a little bit of leeway
			 */
			size_t grow = datasize - space + _MSIZE;
			u_int32_t cc = (u_int32_t)MIN(MIN((sb->sb_hiwat + grow), autorcvbuf_max), UINT32_MAX);

			if (sbreserve(sb, cc) == 1) {
				/* Track the largest buffer we ever grew to */
				if (sb->sb_hiwat > ctl_autorcvbuf_high) {
					ctl_autorcvbuf_high = sb->sb_hiwat;
				}

				/*
				 * A final check
				 */
				if ((u_int32_t) sbspace(sb) >= datasize) {
					error = 0;
				} else {
					error = ENOBUFS;
				}

				if (ctl_debug) {
					printf("%s - grown to %d error %d\n",
					    __func__, sb->sb_hiwat, error);
				}
			} else {
				error = ENOBUFS;
			}
		} else {
			error = ENOBUFS;
		}
	}
	return error;
}
986
/*
 * KPI: enqueue a single mbuf packet on the receive buffer of the
 * socket attached to (kctlref, unit).  CTL_DATA_EOR marks end of
 * record, CTL_DATA_NOWAKEUP suppresses the read wakeup, CTL_DATA_CRIT
 * may grow the buffer (see ctl_rcvbspace).  Returns EINVAL when no
 * matching connected socket exists and ENOBUFS when the buffer is
 * full; on the space-check ENOBUFS path the mbuf is not consumed.
 */
errno_t
ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
    u_int32_t flags)
{
	struct socket *__single so;
	errno_t error = 0;
	int len = m->m_pkthdr.len;
	u_int32_t kctlflags;

	/* kcb_find_socket returns the socket locked, with a reference */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return EINVAL;
	}

	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}
	if ((flags & CTL_DATA_EOR)) {
		m->m_flags |= M_EOR;
	}

	so_recv_data_stat(so, m, 0);
	if (sbappend_nodrop(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0) {
			sorwakeup(so);
		}
	} else {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}
bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
	}

	socket_unlock(so, 1);
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}

	return error;
}
1033
1034 /*
1035 * Compute space occupied by mbuf like sbappendrecord
1036 */
1037 static int
m_space(struct mbuf * m)1038 m_space(struct mbuf *m)
1039 {
1040 int space = 0;
1041 mbuf_ref_t nxt;
1042
1043 for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
1044 space += nxt->m_len;
1045 }
1046
1047 return space;
1048 }
1049
/*
 * KPI: enqueue a chain of packets (linked through m_nextpkt) as
 * individual records on the attached socket's receive buffer.  Only
 * valid for datagram controllers (EOPNOTSUPP for stream registrations)
 * and CTL_DATA_EOR is rejected.  On failure, packets not yet appended
 * are returned via *m_remain when supplied, otherwise freed.
 */
errno_t
ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
    u_int32_t flags, struct mbuf **m_remain)
{
	struct socket *__single so = NULL;
	errno_t error = 0;
	mbuf_ref_t m, nextpkt;
	int needwakeup = 0;
	int len = 0;
	u_int32_t kctlflags;

	/*
	 * Need to point the beginning of the list in case of early exit
	 */
	m = m_list;

	/*
	 * kcb_find_socket takes the socket lock with a reference
	 */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		error = EINVAL;
		goto done;
	}

	if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
		error = EOPNOTSUPP;
		goto done;
	}
	if (flags & CTL_DATA_EOR) {
		error = EINVAL;
		goto done;
	}

	for (m = m_list; m != NULL; m = nextpkt) {
		nextpkt = m->m_nextpkt;

		if (m->m_pkthdr.len == 0 && ctl_debug) {
			struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
			struct kctl *kctl = kcb == NULL ? NULL : kcb->kctl;
			uint32_t id = kctl == NULL ? -1 : kctl->id;

			printf("%s: %u:%u m_pkthdr.len is 0",
			    __func__, id, unit);
		}

		/*
		 * The mbuf is either appended or freed by sbappendrecord()
		 * so it's not reliable from a data standpoint
		 */
		len = m_space(m);
		if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
			error = ENOBUFS;
			OSIncrementAtomic64(
				(SInt64 *)&kctlstat.kcs_enqueue_fullsock);
			break;
		} else {
			/*
			 * Unlink from the list, m is on its own
			 */
			m->m_nextpkt = NULL;
			so_recv_data_stat(so, m, 0);
			if (sbappendrecord_nodrop(&so->so_rcv, m) != 0) {
				/* Appended; defer the wakeup until after the loop */
				needwakeup = 1;
			} else {
				/*
				 * We free or return the remaining
				 * mbufs in the list
				 */
				m = nextpkt;
				error = ENOBUFS;
				OSIncrementAtomic64(
					(SInt64 *)&kctlstat.kcs_enqueue_fullsock);
				break;
			}
		}
	}
	if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
		sorwakeup(so);
	}

done:
	if (so != NULL) {
		if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
			printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
			    __func__, error, len,
			    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
		}

		socket_unlock(so, 1);
	}
	if (m_remain) {
		/* Hand unconsumed packets (from m onward) back to the caller */
		*m_remain = m;

#if (DEBUG || DEVELOPMENT)
		if (m != NULL && socket_debug && so != NULL &&
		    (so->so_options & SO_DEBUG)) {
			mbuf_ref_t n;

			printf("%s m_list %llx\n", __func__,
			    (uint64_t) VM_KERNEL_ADDRPERM(m_list));
			for (n = m; n != NULL; n = n->m_nextpkt) {
				printf(" remain %llx m_next %llx\n",
				    (uint64_t) VM_KERNEL_ADDRPERM(n),
				    (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
			}
		}
#endif /* (DEBUG || DEVELOPMENT) */
	} else {
		if (m != NULL) {
			m_freem_list(m);
		}
	}
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}
	return error;
}
1168
/*
 * ctl_enqueuedata
 *
 * Copy `len' bytes from `data' into freshly allocated mbufs and
 * append them as one record to the receive buffer of the kernel
 * control socket identified by (kctlref, unit).
 *
 * Returns 0 on success; EINVAL for a stale reference / bad unit;
 * ENOBUFS when the receive buffer has no room or the append fails;
 * ENOMEM when mbuf allocation fails.
 */
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *__sized_by(len) data,
    size_t len, u_int32_t flags)
{
	struct socket *__single so;
	mbuf_ref_t m, n;
	errno_t error = 0;
	unsigned int num_needed;
	size_t curlen = 0;
	u_int32_t kctlflags;

	/* Takes the socket lock with a reference; held until we are done */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return EINVAL;
	}

	/* Reject early when the receive buffer cannot take `len' bytes */
	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		kctlstat.kcs_enqdata_mb_alloc_fail++;
		if (ctl_debug) {
			printf("%s: m_allocpacket_internal(%lu) failed\n",
			    __func__, len);
		}
		error = ENOMEM;
		goto bye;
	}

	/* Fill the chain, at most mbuf_maxlen() bytes per mbuf */
	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		if (mlen + curlen > len) {
			mlen = len - curlen;
		}
		n->m_len = (int32_t)mlen;
		bcopy((char *)data + curlen, m_mtod_current(n), mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR)) {
		m->m_flags |= M_EOR;
	}
	so_recv_data_stat(so, m, 0);
	/*
	 * No need to call the "nodrop" variant of sbappend
	 * because the mbuf is local to the scope of the function
	 */
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0) {
			sorwakeup(so);
		}
	} else {
		/* Append failed; mbuf ownership passed to sbappend */
		kctlstat.kcs_enqdata_sbappend_fail++;
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}

bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, (int)len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
	}

	socket_unlock(so, 1);
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}
	return error;
}
1246
1247 errno_t
ctl_getenqueuepacketcount(kern_ctl_ref kctlref,u_int32_t unit,u_int32_t * pcnt)1248 ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1249 {
1250 struct socket *__single so;
1251 u_int32_t cnt;
1252 struct mbuf *__single m1;
1253
1254 if (pcnt == NULL) {
1255 return EINVAL;
1256 }
1257
1258 so = kcb_find_socket(kctlref, unit, NULL);
1259 if (so == NULL) {
1260 return EINVAL;
1261 }
1262
1263 cnt = 0;
1264 m1 = so->so_rcv.sb_mb;
1265 while (m1 != NULL) {
1266 if (m_has_mtype(m1, MTF_DATA | MTF_HEADER | MTF_OOBDATA)) {
1267 cnt += 1;
1268 }
1269 m1 = m1->m_nextpkt;
1270 }
1271 *pcnt = cnt;
1272
1273 socket_unlock(so, 1);
1274
1275 return 0;
1276 }
1277
1278 errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref,u_int32_t unit,size_t * space)1279 ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1280 {
1281 struct socket *__single so;
1282 long avail;
1283
1284 if (space == NULL) {
1285 return EINVAL;
1286 }
1287
1288 so = kcb_find_socket(kctlref, unit, NULL);
1289 if (so == NULL) {
1290 return EINVAL;
1291 }
1292
1293 avail = sbspace(&so->so_rcv);
1294 *space = (avail < 0) ? 0 : avail;
1295 socket_unlock(so, 1);
1296
1297 return 0;
1298 }
1299
1300 errno_t
ctl_getenqueuereadable(kern_ctl_ref kctlref,u_int32_t unit,u_int32_t * difference)1301 ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1302 u_int32_t *difference)
1303 {
1304 struct socket *__single so;
1305
1306 if (difference == NULL) {
1307 return EINVAL;
1308 }
1309
1310 so = kcb_find_socket(kctlref, unit, NULL);
1311 if (so == NULL) {
1312 return EINVAL;
1313 }
1314
1315 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1316 *difference = 0;
1317 } else {
1318 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1319 }
1320 socket_unlock(so, 1);
1321
1322 return 0;
1323 }
1324
/*
 * ctl_ctloutput
 *
 * Socket-option handler for kernel control sockets: dispatches
 * SYSPROTO_CONTROL get/set options to the control's registered
 * setopt/getopt callbacks.
 *
 * The socket lock is dropped around each callback; the use count
 * taken via ctl_kcb_increment_use_count() keeps the kcb alive while
 * unlocked.  For SOPT_GET with no user buffer, getopt is invoked
 * with data == NULL so it can report the required size, which is
 * passed back through sopt->sopt_valsize.
 */
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *__single kctl;
	int error = 0;
	void *data = NULL;
	size_t data_len = 0;
	size_t len;

	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return EINVAL;
	}

	if (kcb == NULL) {      /* sanity check */
		return ENOTCONN;
	}

	if ((kctl = kcb->kctl) == NULL) {
		return EINVAL;
	}

	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		if (kctl->setopt == NULL) {
			error = ENOTSUP;
			goto out;
		}
		if (sopt->sopt_valsize != 0) {
			data_len = sopt->sopt_valsize;
			/* Refuse absurdly large copyin requests */
			if (__improbable(data_len > copysize_limit_panic)) {
				error = EINVAL;
				goto out;
			}

			data = kalloc_data(data_len, Z_WAITOK | Z_ZERO);
			if (data == NULL) {
				data_len = 0;
				error = ENOMEM;
				goto out;
			}
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}
		if (error == 0) {
			/* Callback runs unlocked; kcb pinned by use count */
			socket_unlock(so, 0);
			error = (*kctl->setopt)(kctl->kctlref,
			    kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
			    data, sopt->sopt_valsize);
			socket_lock(so, 0);
		}

		kfree_data(data, data_len);
		break;

	case SOPT_GET:
		if (kctl->getopt == NULL) {
			error = ENOTSUP;
			goto out;
		}

		if (sopt->sopt_valsize && sopt->sopt_val) {
			data_len = sopt->sopt_valsize;
			/* Refuse absurdly large copyin requests */
			if (__improbable(data_len > copysize_limit_panic)) {
				error = EINVAL;
				goto out;
			}

			data = kalloc_data(data_len, Z_WAITOK | Z_ZERO);
			if (data == NULL) {
				data_len = 0;
				error = ENOMEM;
				goto out;
			}
			/*
			 * 4108337 - copy user data in case the
			 * kernel control needs it
			 */
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}

		if (error == 0) {
			len = sopt->sopt_valsize;
			/* Callback runs unlocked; kcb pinned by use count */
			socket_unlock(so, 0);
			error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata, sopt->sopt_name,
			    data, &len);
			/* A callback claiming to overflow the buffer is fatal */
			if (data != NULL && len > sopt->sopt_valsize) {
				panic_plain("ctl_ctloutput: ctl %s returned "
				    "len (%lu) > sopt_valsize (%lu)\n",
				    kcb->kctl->name, len,
				    sopt->sopt_valsize);
			}
			socket_lock(so, 0);
			if (error == 0) {
				if (data != NULL) {
					error = sooptcopyout(sopt, data, len);
				} else {
					/* Size probe: report required length */
					sopt->sopt_valsize = len;
				}
			}
		}

		kfree_data(data, data_len);
		break;
	}

out:
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
1440
1441 static int
ctl_ioctl(struct socket * so,u_long cmd,caddr_t __sized_by (IOCPARM_LEN (cmd))data,struct ifnet * ifp,struct proc * p)1442 ctl_ioctl(struct socket *so, u_long cmd,
1443 caddr_t __sized_by(IOCPARM_LEN(cmd)) data,
1444 struct ifnet *ifp, struct proc *p)
1445 {
1446 #pragma unused(so, ifp, p)
1447 int error = ENOTSUP;
1448
1449 switch (cmd) {
1450 /* get the number of controllers */
1451 case CTLIOCGCOUNT: {
1452 struct kctl *__single kctl;
1453 u_int32_t n = 0;
1454
1455 lck_mtx_lock(&ctl_mtx);
1456 TAILQ_FOREACH(kctl, &ctl_head, next)
1457 n++;
1458 lck_mtx_unlock(&ctl_mtx);
1459
1460 bcopy(&n, data, sizeof(n));
1461 error = 0;
1462 break;
1463 }
1464 case CTLIOCGINFO: {
1465 struct ctl_info ctl_info;
1466 struct kctl *__single kctl = 0;
1467 size_t name_len;
1468
1469 bcopy(data, &ctl_info, sizeof(ctl_info));
1470 name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
1471
1472 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1473 error = EINVAL;
1474 break;
1475 }
1476 lck_mtx_lock(&ctl_mtx);
1477 kctl = ctl_find_by_name(__unsafe_null_terminated_from_indexable(ctl_info.ctl_name));
1478 if (kctl) {
1479 ctl_info.ctl_id = kctl->id;
1480 error = 0;
1481 } else {
1482 error = ENOENT;
1483 }
1484 lck_mtx_unlock(&ctl_mtx);
1485
1486 if (error == 0) {
1487 bcopy(&ctl_info, data, sizeof(ctl_info));
1488 }
1489 break;
1490 }
1491
1492 /* add controls to get list of NKEs */
1493 }
1494
1495 return error;
1496 }
1497
/*
 * kctl_tbl_grow
 *
 * Grow the kctl reference lookup table by KCTL_TBL_INC slots.
 * Called and returns with ctl_mtx held; the mutex is dropped around
 * the allocation.  Only one thread grows at a time: kctl_tbl_growing
 * serializes growers, and waiters sleep on it until the grower is
 * done.  The table is capped below UINT16_MAX because references
 * only carry 16 bits of index (KCTLREF_INDEX_MASK).
 */
static void
kctl_tbl_grow(void)
{
	struct kctl *__single *new_table;
	uintptr_t new_size;

	lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

	if (kctl_tbl_growing) {
		/* Another thread is allocating */
		kctl_tbl_growing_waiting++;

		do {
			(void) msleep((caddr_t) &kctl_tbl_growing, &ctl_mtx,
			    PSOCK | PCATCH, "kctl_tbl_growing", 0);
		} while (kctl_tbl_growing);
		kctl_tbl_growing_waiting--;
	}
	/* Another thread grew the table */
	if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
		return;
	}

	/* Verify we have a sane size */
	if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
		kctlstat.kcs_tbl_size_too_big++;
		if (ctl_debug) {
			printf("%s kctl_tbl_size %lu too big\n",
			    __func__, kctl_tbl_size);
		}
		return;
	}
	kctl_tbl_growing = 1;

	new_size = kctl_tbl_size + KCTL_TBL_INC;

	/* Drop the mutex for the blocking allocation; growers serialized above */
	lck_mtx_unlock(&ctl_mtx);
	new_table = kalloc_type(struct kctl *, new_size, Z_WAITOK | Z_ZERO);
	lck_mtx_lock(&ctl_mtx);

	if (new_table != NULL) {
		if (kctl_table != NULL) {
			bcopy(kctl_table, new_table, kctl_tbl_size * sizeof(struct kctl *));

			kfree_type_counted_by(struct kctl *, kctl_tbl_size, kctl_table);
		}
		kctl_table = new_table;
		kctl_tbl_size = new_size;
	}

	kctl_tbl_growing = 0;

	if (kctl_tbl_growing_waiting) {
		wakeup(&kctl_tbl_growing);
	}
}
1554
/*
 * A kern_ctl_ref encodes the lookup-table slot in its low 16 bits
 * (index plus one) and a generation count in its high 16 bits, so a
 * stale reference from a deregistered control can be detected.
 */
#define KCTLREF_INDEX_MASK 0x0000FFFF
#define KCTLREF_GENCNT_MASK 0xFFFF0000
#define KCTLREF_GENCNT_SHIFT 16

/*
 * kctl_make_ref
 *
 * Place kctl in the first free slot of the lookup table (growing it
 * if full) and mint the opaque reference described above.  Called
 * with ctl_mtx held.  Returns NULL only if kctl_tbl_grow() could not
 * make room; panics if the table claims free space but has none.
 */
static kern_ctl_ref
kctl_make_ref(struct kctl *kctl)
{
	uintptr_t i;

	lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

	if (kctl_tbl_count >= kctl_tbl_size) {
		kctl_tbl_grow();
	}

	kctl->kctlref = NULL;
	for (i = 0; i < kctl_tbl_size; i++) {
		if (kctl_table[i] == NULL) {
			uintptr_t ref;

			/*
			 * Reference is index plus one
			 */
			kctl_ref_gencnt += 1;

			/*
			 * Add generation count as salt to reference to prevent
			 * use after deregister
			 */
			ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
			    KCTLREF_GENCNT_MASK) +
			    ((i + 1) & KCTLREF_INDEX_MASK);

			kctl->kctlref = __unsafe_forge_single(void *, ref);
			kctl_table[i] = kctl;
			kctl_tbl_count++;
			break;
		}
	}

	if (kctl->kctlref == NULL) {
		panic("%s no space in table", __func__);
	}

	if (ctl_debug > 0) {
		printf("%s %p for %p\n",
		    __func__, kctl->kctlref, kctl);
	}

	return kctl->kctlref;
}
1606
1607 static void
kctl_delete_ref(kern_ctl_ref kctlref)1608 kctl_delete_ref(kern_ctl_ref kctlref)
1609 {
1610 /*
1611 * Reference is index plus one
1612 */
1613 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1614
1615 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1616
1617 if (i < kctl_tbl_size) {
1618 struct kctl *__single kctl = kctl_table[i];
1619
1620 if (kctl->kctlref == kctlref) {
1621 kctl_table[i] = NULL;
1622 kctl_tbl_count--;
1623 } else {
1624 kctlstat.kcs_bad_kctlref++;
1625 }
1626 } else {
1627 kctlstat.kcs_bad_kctlref++;
1628 }
1629 }
1630
1631 static struct kctl *
kctl_from_ref(kern_ctl_ref kctlref)1632 kctl_from_ref(kern_ctl_ref kctlref)
1633 {
1634 /*
1635 * Reference is index plus one
1636 */
1637 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1638 struct kctl *__single kctl = NULL;
1639
1640 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1641
1642 if (i >= kctl_tbl_size) {
1643 kctlstat.kcs_bad_kctlref++;
1644 return NULL;
1645 }
1646 kctl = kctl_table[i];
1647 if (kctl->kctlref != kctlref) {
1648 kctlstat.kcs_bad_kctlref++;
1649 return NULL;
1650 }
1651 return kctl;
1652 }
1653
1654 /*
1655 * Register/unregister a NKE
1656 */
1657 errno_t
ctl_register(struct kern_ctl_reg * userkctl,kern_ctl_ref * kctlref)1658 ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
1659 {
1660 struct kctl *__single kctl = NULL;
1661 struct kctl *__single kctl_next = NULL;
1662 u_int32_t id = 1;
1663 size_t name_len;
1664 int is_extended = 0;
1665 int is_setup = 0;
1666
1667 if (userkctl == NULL) { /* sanity check */
1668 return EINVAL;
1669 }
1670 if (userkctl->ctl_connect == NULL) {
1671 return EINVAL;
1672 }
1673 name_len = strnlen(userkctl->ctl_name, sizeof(userkctl->ctl_name));
1674 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1675 return EINVAL;
1676 }
1677
1678 kctl = kalloc_type(struct kctl, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1679
1680 lck_mtx_lock(&ctl_mtx);
1681
1682 if (kctl_make_ref(kctl) == NULL) {
1683 lck_mtx_unlock(&ctl_mtx);
1684 kfree_type(struct kctl, kctl);
1685 return ENOMEM;
1686 }
1687
1688 /*
1689 * Kernel Control IDs
1690 *
1691 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
1692 * static. If they do not exist, add them to the list in order. If the
1693 * flag is not set, we must find a new unique value. We assume the
1694 * list is in order. We find the last item in the list and add one. If
1695 * this leads to wrapping the id around, we start at the front of the
1696 * list and look for a gap.
1697 */
1698
1699 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1700 /* Must dynamically assign an unused ID */
1701
1702 /* Verify the same name isn't already registered */
1703 if (ctl_find_by_name(__unsafe_null_terminated_from_indexable(userkctl->ctl_name)) != NULL) {
1704 kctl_delete_ref(kctl->kctlref);
1705 lck_mtx_unlock(&ctl_mtx);
1706 kfree_type(struct kctl, kctl);
1707 return EEXIST;
1708 }
1709
1710 /* Start with 1 in case the list is empty */
1711 id = 1;
1712 kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
1713
1714 if (kctl_next != NULL) {
1715 /* List was not empty, add one to the last item */
1716 id = kctl_next->id + 1;
1717 kctl_next = NULL;
1718
1719 /*
1720 * If this wrapped the id number, start looking at
1721 * the front of the list for an unused id.
1722 */
1723 if (id == 0) {
1724 /* Find the next unused ID */
1725 id = 1;
1726
1727 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1728 if (kctl_next->id > id) {
1729 /* We found a gap */
1730 break;
1731 }
1732
1733 id = kctl_next->id + 1;
1734 }
1735 }
1736 }
1737
1738 userkctl->ctl_id = id;
1739 kctl->id = id;
1740 kctl->reg_unit = -1;
1741 } else {
1742 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1743 if (kctl_next->id > userkctl->ctl_id) {
1744 break;
1745 }
1746 }
1747
1748 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
1749 kctl_delete_ref(kctl->kctlref);
1750 lck_mtx_unlock(&ctl_mtx);
1751 kfree_type(struct kctl, kctl);
1752 return EEXIST;
1753 }
1754 kctl->id = userkctl->ctl_id;
1755 kctl->reg_unit = userkctl->ctl_unit;
1756 }
1757
1758 is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
1759 is_setup = (userkctl->ctl_flags & CTL_FLAG_REG_SETUP);
1760
1761 strbufcpy(kctl->name, userkctl->ctl_name);
1762 kctl->flags = userkctl->ctl_flags;
1763
1764 /*
1765 * Let the caller know the default send and receive sizes
1766 */
1767 if (userkctl->ctl_sendsize == 0) {
1768 kctl->sendbufsize = CTL_SENDSIZE;
1769 userkctl->ctl_sendsize = kctl->sendbufsize;
1770 } else {
1771 kctl->sendbufsize = userkctl->ctl_sendsize;
1772 }
1773 if (userkctl->ctl_recvsize == 0) {
1774 kctl->recvbufsize = CTL_RECVSIZE;
1775 userkctl->ctl_recvsize = kctl->recvbufsize;
1776 } else {
1777 kctl->recvbufsize = userkctl->ctl_recvsize;
1778 }
1779
1780 if (is_setup) {
1781 kctl->setup = userkctl->ctl_setup;
1782 }
1783 kctl->bind = userkctl->ctl_bind;
1784 kctl->connect = userkctl->ctl_connect;
1785 kctl->disconnect = userkctl->ctl_disconnect;
1786 kctl->send = userkctl->ctl_send;
1787 kctl->setopt = userkctl->ctl_setopt;
1788 kctl->getopt = userkctl->ctl_getopt;
1789 if (is_extended) {
1790 kctl->rcvd = userkctl->ctl_rcvd;
1791 kctl->send_list = userkctl->ctl_send_list;
1792 }
1793
1794 TAILQ_INIT(&kctl->kcb_head);
1795
1796 if (kctl_next) {
1797 TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
1798 } else {
1799 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
1800 }
1801
1802 kctlstat.kcs_reg_count++;
1803 kctlstat.kcs_gencnt++;
1804
1805 lck_mtx_unlock(&ctl_mtx);
1806
1807 *kctlref = kctl->kctlref;
1808
1809 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
1810 return 0;
1811 }
1812
/*
 * ctl_deregister
 *
 * Unregister the kernel control identified by kctlref.  Fails with
 * EINVAL for a stale/invalid reference and EBUSY while any client
 * connections (kcb_head entries) remain.  The reference is retired
 * and the event posted before the struct is freed.
 */
errno_t
ctl_deregister(void *kctlref)
{
	struct kctl *__single kctl;

	lck_mtx_lock(&ctl_mtx);
	if ((kctl = kctl_from_ref(kctlref)) == NULL) {
		kctlstat.kcs_bad_kctlref++;
		lck_mtx_unlock(&ctl_mtx);
		if (ctl_debug != 0) {
			printf("%s invalid kctlref %p\n",
			    __func__, kctlref);
		}
		return EINVAL;
	}

	/* Refuse while clients are still connected */
	if (!TAILQ_EMPTY(&kctl->kcb_head)) {
		lck_mtx_unlock(&ctl_mtx);
		return EBUSY;
	}

	TAILQ_REMOVE(&ctl_head, kctl, next);

	kctlstat.kcs_reg_count--;
	kctlstat.kcs_gencnt++;

	kctl_delete_ref(kctl->kctlref);
	lck_mtx_unlock(&ctl_mtx);

	/* Post the event unlocked, then free (id read before free) */
	ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
	kfree_type(struct kctl, kctl);
	return 0;
}
1846
1847 /*
1848 * Must be called with global ctl_mtx lock taken
1849 */
1850 static struct kctl *
ctl_find_by_name(const char * __null_terminated name)1851 ctl_find_by_name(const char *__null_terminated name)
1852 {
1853 struct kctl *__single kctl;
1854
1855 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1856
1857 TAILQ_FOREACH(kctl, &ctl_head, next)
1858 if (strlcmp(kctl->name, name, sizeof(kctl->name)) == 0) {
1859 return kctl;
1860 }
1861
1862 return NULL;
1863 }
1864
1865 u_int32_t
ctl_id_by_name(const char * name)1866 ctl_id_by_name(const char *name)
1867 {
1868 u_int32_t ctl_id = 0;
1869 struct kctl *__single kctl;
1870
1871 lck_mtx_lock(&ctl_mtx);
1872 kctl = ctl_find_by_name(name);
1873 if (kctl) {
1874 ctl_id = kctl->id;
1875 }
1876 lck_mtx_unlock(&ctl_mtx);
1877
1878 return ctl_id;
1879 }
1880
1881 errno_t
ctl_name_by_id(u_int32_t id,char * __counted_by (maxsize)out_name,size_t maxsize)1882 ctl_name_by_id(u_int32_t id, char *__counted_by(maxsize) out_name, size_t maxsize)
1883 {
1884 int found = 0;
1885 struct kctl *__single kctl;
1886
1887 lck_mtx_lock(&ctl_mtx);
1888 TAILQ_FOREACH(kctl, &ctl_head, next) {
1889 if (kctl->id == id) {
1890 break;
1891 }
1892 }
1893
1894 if (kctl) {
1895 size_t count = maxsize;
1896 if (maxsize > MAX_KCTL_NAME) {
1897 count = MAX_KCTL_NAME;
1898 }
1899 strbufcpy(out_name, count, kctl->name, sizeof(kctl->name));
1900 found = 1;
1901 }
1902 lck_mtx_unlock(&ctl_mtx);
1903
1904 return found ? 0 : ENOENT;
1905 }
1906
1907 /*
1908 * Must be called with global ctl_mtx lock taked
1909 *
1910 */
1911 static struct kctl *
ctl_find_by_id_unit(u_int32_t id,u_int32_t unit)1912 ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
1913 {
1914 struct kctl *__single kctl;
1915
1916 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1917
1918 TAILQ_FOREACH(kctl, &ctl_head, next) {
1919 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1920 return kctl;
1921 } else if (kctl->id == id && kctl->reg_unit == unit) {
1922 return kctl;
1923 }
1924 }
1925 return NULL;
1926 }
1927
1928 /*
1929 * Must be called with kernel controller lock taken
1930 */
1931 static struct ctl_cb *
kcb_find(struct kctl * kctl,u_int32_t unit)1932 kcb_find(struct kctl *kctl, u_int32_t unit)
1933 {
1934 struct ctl_cb *__single kcb;
1935
1936 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1937
1938 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
1939 if (kcb->sac.sc_unit == unit) {
1940 return kcb;
1941 }
1942
1943 return NULL;
1944 }
1945
/*
 * kcb_find_socket
 *
 * Resolve (kctlref, unit) to the attached socket and return it
 * LOCKED with a reference (socket_lock(so, 1)); the caller must
 * socket_unlock(so, 1).  Returns NULL for a stale reference or when
 * the unit has no control block / socket.  When kctlflags is
 * non-NULL it receives the control's registration flags.
 *
 * Lock ordering is socket lock before ctl_mtx, so ctl_mtx must be
 * dropped before taking the socket lock.  kcb->usecount pins the
 * kcb across that window, and the kctlref is re-validated after
 * re-acquiring ctl_mtx in case the control was deregistered.
 */
static struct socket *
kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
{
	struct socket *__single so = NULL;
	struct ctl_cb *__single kcb;
	void *__single lr_saved;
	struct kctl *__single kctl;
	int i;

	lr_saved = __unsafe_forge_single(void *, __builtin_return_address(0));

	lck_mtx_lock(&ctl_mtx);
	/*
	 * First validate the kctlref
	 */
	if ((kctl = kctl_from_ref(kctlref)) == NULL) {
		kctlstat.kcs_bad_kctlref++;
		lck_mtx_unlock(&ctl_mtx);
		if (ctl_debug != 0) {
			printf("%s invalid kctlref %p\n",
			    __func__, kctlref);
		}
		return NULL;
	}

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
		lck_mtx_unlock(&ctl_mtx);
		return NULL;
	}
	/*
	 * This prevents the socket from being closed
	 */
	kcb->usecount++;
	/*
	 * Respect lock ordering: socket before ctl_mtx
	 */
	lck_mtx_unlock(&ctl_mtx);

	socket_lock(so, 1);
	/*
	 * The socket lock history is more useful if we store
	 * the address of the caller.
	 */
	i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
	so->lock_lr[i] = lr_saved;

	lck_mtx_lock(&ctl_mtx);

	/* Re-validate: the control may have gone away while unlocked */
	if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
		lck_mtx_unlock(&ctl_mtx);
		socket_unlock(so, 1);
		so = NULL;
		lck_mtx_lock(&ctl_mtx);
	} else if (kctlflags != NULL) {
		*kctlflags = kctl->flags;
	}

	/* Drop the pin; wake anyone waiting to clear the kcb */
	kcb->usecount--;
	if (kcb->usecount == 0 && kcb->require_clearing_count != 0) {
		wakeup((event_t)&kcb->usecount);
	}

	lck_mtx_unlock(&ctl_mtx);

	return so;
}
2013
2014 static void
ctl_post_msg(u_int32_t event_code,u_int32_t id)2015 ctl_post_msg(u_int32_t event_code, u_int32_t id)
2016 {
2017 struct ctl_event_data ctl_ev_data;
2018 struct kev_msg ev_msg;
2019
2020 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
2021
2022 bzero(&ev_msg, sizeof(struct kev_msg));
2023 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2024
2025 ev_msg.kev_class = KEV_SYSTEM_CLASS;
2026 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
2027 ev_msg.event_code = event_code;
2028
2029 /* common nke subclass data */
2030 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
2031 ctl_ev_data.ctl_id = id;
2032 ev_msg.dv[0].data_ptr = &ctl_ev_data;
2033 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
2034
2035 ev_msg.dv[1].data_length = 0;
2036
2037 kev_post_msg(&ev_msg);
2038 }
2039
/*
 * ctl_lock
 *
 * pr_lock handler: take the per-pcb mutex for a kernel control
 * socket, optionally adding a use-count reference, and record the
 * caller's return address in the socket lock history.  Panics if the
 * socket has no pcb or a negative use count.
 */
static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
	void *__single lr_saved;

	/* Prefer the caller-supplied return address for lock history */
	if (lr == NULL) {
		lr_saved = __unsafe_forge_single(void *, __builtin_return_address(0));
	} else {
		lr_saved = lr;
	}

	if (so->so_pcb != NULL) {
		lck_mtx_lock(&((struct ctl_cb *)so->so_pcb)->mtx);
	} else {
		panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s",
		    so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_usecount < 0) {
		panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s",
		    so, so->so_pcb, lr_saved, so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (refcount) {
		so->so_usecount++;
	}

	/* Record the caller in the circular lock-history buffer */
	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
	return 0;
}
2074
2075 static int
ctl_unlock(struct socket * so,int refcount,void * lr)2076 ctl_unlock(struct socket *so, int refcount, void *lr)
2077 {
2078 void *__single lr_saved;
2079 lck_mtx_t *__single mutex_held;
2080
2081 if (lr == NULL) {
2082 lr_saved = __unsafe_forge_single(void *, __builtin_return_address(0));
2083 } else {
2084 lr_saved = lr;
2085 }
2086
2087 #if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
2088 printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n",
2089 (uint64_t)VM_KERNEL_ADDRPERM(so),
2090 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb,
2091 (uint64_t)VM_KERNEL_ADDRPERM(&((struct ctl_cb *)so->so_pcb)->mtx),
2092 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
2093 #endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
2094 if (refcount) {
2095 so->so_usecount--;
2096 }
2097
2098 if (so->so_usecount < 0) {
2099 panic("ctl_unlock: so=%p usecount=%x lrh= %s",
2100 so, so->so_usecount, solockhistory_nr(so));
2101 /* NOTREACHED */
2102 }
2103 if (so->so_pcb == NULL) {
2104 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s",
2105 so, so->so_usecount, (void *)lr_saved,
2106 solockhistory_nr(so));
2107 /* NOTREACHED */
2108 }
2109 mutex_held = &((struct ctl_cb *)so->so_pcb)->mtx;
2110
2111 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2112 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2113 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2114 lck_mtx_unlock(mutex_held);
2115
2116 if (so->so_usecount == 0) {
2117 ctl_sofreelastref(so);
2118 }
2119
2120 return 0;
2121 }
2122
2123 static lck_mtx_t *
2124 ctl_getlock(struct socket *so, int flags)
2125 {
2126 #pragma unused(flags)
2127 struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;
2128
2129 if (so->so_pcb) {
2130 if (so->so_usecount < 0) {
2131 panic("ctl_getlock: so=%p usecount=%x lrh= %s",
2132 so, so->so_usecount, solockhistory_nr(so));
2133 }
2134 return &kcb->mtx;
2135 } else {
2136 panic("ctl_getlock: so=%p NULL NO so_pcb %s",
2137 so, solockhistory_nr(so));
2138 return so->so_proto->pr_domain->dom_mtx;
2139 }
2140 }
2141
/*
 * kctl_reg_list
 *
 * sysctl handler that dumps one xkctl_reg record per registered
 * kernel control, bracketed by xsystmgen headers whose generation
 * counts let userland detect concurrent changes.  A size-only probe
 * (oldptr == NULL) returns an over-estimate; writes are rejected
 * with EPERM.  Runs entirely under ctl_mtx.
 */
__private_extern__ int
kctl_reg_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	u_int64_t i, n;
	struct xsystmgen xsg;
	void *buf = NULL;
	struct kctl *__single kctl;
	size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));

	buf = kalloc_data(item_size, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	lck_mtx_lock(&ctl_mtx);

	n = kctlstat.kcs_reg_count;

	if (req->oldptr == USER_ADDR_NULL) {
		/* Size probe: report an estimate with slack for growth */
		req->oldidx = (size_t)(n + n / 8) * sizeof(struct xkctl_reg);
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	/* Leading generation header */
	bzero(&xsg, sizeof(xsg));
	xsg.xg_len = sizeof(xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kctlstat.kcs_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
	    i < n && kctl != NULL;
	    i++, kctl = TAILQ_NEXT(kctl, next)) {
		struct xkctl_reg *__single xkr = (struct xkctl_reg *)buf;
		struct ctl_cb *__single kcb;
		u_int32_t pcbcount = 0;

		TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
		pcbcount++;

		bzero(buf, item_size);

		xkr->xkr_len = sizeof(struct xkctl_reg);
		xkr->xkr_kind = XSO_KCREG;
		xkr->xkr_id = kctl->id;
		xkr->xkr_reg_unit = kctl->reg_unit;
		xkr->xkr_flags = kctl->flags;
		xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
		xkr->xkr_recvbufsize = kctl->recvbufsize;
		xkr->xkr_sendbufsize = kctl->sendbufsize;
		xkr->xkr_lastunit = kctl->lastunit;
		xkr->xkr_pcbcount = pcbcount;
		/* Callback pointers exported unslid for userland tools */
		xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
		xkr->xkr_disconnect =
		    (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
		xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
		xkr->xkr_send_list =
		    (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
		xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
		xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
		xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
		strbufcpy(xkr->xkr_name, kctl->name);

		error = SYSCTL_OUT(req, buf, item_size);
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof(xsg));
		xsg.xg_len = sizeof(xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kctlstat.kcs_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
		if (error) {
			goto done;
		}
	}

done:
	lck_mtx_unlock(&ctl_mtx);

	kfree_data(buf, item_size);

	return error;
}
2245
/*
 * kctl_pcblist
 *
 * sysctl handler that dumps one record per connected kernel control
 * pcb.  Each record packs an xkctlpcb followed by xsocket_n, two
 * xsockbuf_n (rcv then snd) and an xsockstat_n, laid out back to
 * back at 64-bit boundaries (ADVANCE64) in a single reused buffer.
 * Bracketed by xsystmgen generation headers like kctl_reg_list; a
 * size-only probe returns an over-estimate; writes get EPERM.  Runs
 * entirely under ctl_mtx.
 */
__private_extern__ int
kctl_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	u_int64_t n, i;
	struct xsystmgen xsg;
	void *buf = NULL;
	struct kctl *__single kctl;
	size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
	    ROUNDUP64(sizeof(struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
	    ROUNDUP64(sizeof(struct xsockstat_n));

	buf = kalloc_data(item_size, Z_WAITOK_ZERO_NOFAIL);

	lck_mtx_lock(&ctl_mtx);

	n = kctlstat.kcs_pcbcount;

	if (req->oldptr == USER_ADDR_NULL) {
		/* Size probe: report an estimate with slack for growth */
		req->oldidx = (size_t)(n + n / 8) * item_size;
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	/* Leading generation header */
	bzero(&xsg, sizeof(xsg));
	xsg.xg_len = sizeof(xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kctlstat.kcs_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	/* Walk every control, then every pcb of that control */
	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
	    i < n && kctl != NULL;
	    kctl = TAILQ_NEXT(kctl, next)) {
		struct ctl_cb *__single kcb;

		for (kcb = TAILQ_FIRST(&kctl->kcb_head);
		    i < n && kcb != NULL;
		    i++, kcb = TAILQ_NEXT(kcb, next)) {
			struct xkctlpcb *xk = (struct xkctlpcb *)buf;
			struct xsocket_n *xso = (struct xsocket_n *)
			    ADVANCE64(xk, sizeof(*xk));
			struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
			    ADVANCE64(xso, sizeof(*xso));
			struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
			    ADVANCE64(xsbrcv, sizeof(*xsbrcv));
			struct xsockstat_n *xsostats = (struct xsockstat_n *)
			    ADVANCE64(xsbsnd, sizeof(*xsbsnd));

			bzero(buf, item_size);

			xk->xkp_len = sizeof(struct xkctlpcb);
			xk->xkp_kind = XSO_KCB;
			xk->xkp_unit = kcb->sac.sc_unit;
			/* Pointers exported hashed, not raw */
			xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRHASH(kcb);
			xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRHASH(kctl);
			xk->xkp_kctlid = kctl->id;
			strbufcpy(xk->xkp_kctlname, kctl->name);

			sotoxsocket_n(kcb->so, xso);
			sbtoxsockbuf_n(kcb->so ?
			    &kcb->so->so_rcv : NULL, xsbrcv);
			sbtoxsockbuf_n(kcb->so ?
			    &kcb->so->so_snd : NULL, xsbsnd);
			sbtoxsockstat_n(kcb->so, xsostats);

			error = SYSCTL_OUT(req, buf, item_size);
		}
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof(xsg));
		xsg.xg_len = sizeof(xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kctlstat.kcs_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
		if (error) {
			goto done;
		}
	}

done:
	lck_mtx_unlock(&ctl_mtx);

	kfree_data(buf, item_size);
	return error;
}
2354
2355 int
2356 kctl_getstat SYSCTL_HANDLER_ARGS
2357 {
2358 #pragma unused(oidp, arg1, arg2)
2359 int error = 0;
2360
2361 lck_mtx_lock(&ctl_mtx);
2362
2363 if (req->newptr != USER_ADDR_NULL) {
2364 error = EPERM;
2365 goto done;
2366 }
2367 if (req->oldptr == USER_ADDR_NULL) {
2368 req->oldidx = sizeof(struct kctlstat);
2369 goto done;
2370 }
2371
2372 error = SYSCTL_OUT(req, &kctlstat,
2373 MIN(sizeof(struct kctlstat), req->oldlen));
2374 done:
2375 lck_mtx_unlock(&ctl_mtx);
2376 return error;
2377 }
2378
2379 void
2380 kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2381 {
2382 struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;
2383 struct kern_ctl_info *__single kcsi =
2384 &si->soi_proto.pri_kern_ctl;
2385 struct kctl *__single kctl = kcb->kctl;
2386
2387 si->soi_kind = SOCKINFO_KERN_CTL;
2388
2389 if (kctl == 0) {
2390 return;
2391 }
2392
2393 kcsi->kcsi_id = kctl->id;
2394 kcsi->kcsi_reg_unit = kctl->reg_unit;
2395 kcsi->kcsi_flags = kctl->flags;
2396 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2397 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2398 kcsi->kcsi_unit = kcb->sac.sc_unit;
2399 strbufcpy(kcsi->kcsi_name, kctl->name);
2400 }
2401