1 /*
2 * Copyright (c) 1999-2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Kernel Control domain - allows control connections to
31 * and to read/write data.
32 *
33 * Vincent Lubet, 040506
34 * Christophe Allie, 010928
35 * Justin C. Walker, 990319
36 */
37
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/syslog.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/protosw.h>
45 #include <sys/domain.h>
46 #include <sys/malloc.h>
47 #include <sys/mbuf.h>
48 #include <sys/sys_domain.h>
49 #include <sys/kern_event.h>
50 #include <sys/kern_control.h>
51 #include <sys/kauth.h>
52 #include <sys/sysctl.h>
53 #include <sys/proc_info.h>
54 #include <net/if_var.h>
55
56 #include <mach/vm_types.h>
57
58 #include <kern/thread.h>
59
60 #include <net/sockaddr_utils.h>
61
62 extern const int copysize_limit_panic;
63
/*
 * Kernel control registration record: one per controller registered
 * through the kernel control KPI.  Instances are linked on the global
 * ctl_head list and their kcb_head list of per-socket control blocks
 * is manipulated under ctl_mtx.
 */
struct kctl {
	TAILQ_ENTRY(kctl) next;         /* controller chain */
	kern_ctl_ref kctlref;           /* opaque reference handed back to the controller */

	/* controller information provided when registering */
	char name[MAX_KCTL_NAME];       /* unique identifier */
	u_int32_t id;                   /* control ID (matched by ctl_find_by_id_unit) */
	u_int32_t reg_unit;             /* unit supplied at registration time */

	/* misc communication information */
	u_int32_t flags;                /* support flags (CTL_FLAG_*) */
	u_int32_t recvbufsize;          /* request more than the default buffer size */
	u_int32_t sendbufsize;          /* request more than the default buffer size */

	/* Dispatch functions (controller callbacks; NULL when unsupported) */
	ctl_setup_func setup;           /* Setup contact */
	ctl_bind_func bind;             /* Prepare contact */
	ctl_connect_func connect;       /* Make contact */
	ctl_disconnect_func disconnect; /* Break contact */
	ctl_send_func send;             /* Send data to nke */
	ctl_send_list_func send_list;   /* Send list of packets */
	ctl_setopt_func setopt;         /* set kctl configuration */
	ctl_getopt_func getopt;         /* get kctl configuration */
	ctl_rcvd_func rcvd;             /* Notify nke when client reads data */

	TAILQ_HEAD(, ctl_cb) kcb_head;  /* per-socket control blocks attached to this kctl */
	u_int32_t lastunit;             /* unit-number bookkeeping — not referenced in this chunk */
};
92
#if DEVELOPMENT || DEBUG
/* Connection state, tracked only for debug assertions/panics. */
enum ctl_status {
	KCTL_DISCONNECTED = 0,
	KCTL_CONNECTING = 1,
	KCTL_CONNECTED = 2
};
#endif /* DEVELOPMENT || DEBUG */

/*
 * Per-socket kernel control block; stored in so->so_pcb of a
 * kctl socket.
 */
struct ctl_cb {
	TAILQ_ENTRY(ctl_cb) next;       /* controller chain */
	lck_mtx_t mtx;                  /* per-socket mutex (returned by ctl_getlock) */
	struct socket *so;              /* controlling socket */
	struct kctl *kctl;              /* back pointer to controller */
	void *userdata;                 /* controller's per-unit cookie */
	struct sockaddr_ctl sac;        /* id/unit this socket is bound or connected to */
	uint32_t usecount;              /* references drained in ctl_disconnect */
	uint32_t kcb_usecount;          /* threads currently inside kctl entry points */
	uint32_t require_clearing_count; /* threads requesting exclusive access */
#if DEVELOPMENT || DEBUG
	enum ctl_status status;
#endif /* DEVELOPMENT || DEBUG */
};

#ifndef ROUNDUP64
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif

#ifndef ADVANCE64
#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif
123
/*
 * Default socket buffer sizes for kernel control sockets.
 */

#define CTL_SENDSIZE    (2 * 1024)      /* default buffer size */
#define CTL_RECVSIZE    (8 * 1024)      /* default buffer size */

/*
 * Global state: the unit-number limit, the lock group/attr and mutex
 * protecting the controller list, and the list head itself.
 */

const u_int32_t ctl_maxunit = 65536;
static LCK_ATTR_DECLARE(ctl_lck_attr, 0, 0);
static LCK_GRP_DECLARE(ctl_lck_grp, "Kernel Control Protocol");
static LCK_MTX_DECLARE_ATTR(ctl_mtx, &ctl_lck_grp, &ctl_lck_attr);

/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head = TAILQ_HEAD_INITIALIZER(ctl_head);
142
/*
 * Forward declarations: pr_usrreqs entry points, lookup helpers,
 * and the socket locking callbacks implemented below.
 */
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd,
    caddr_t __sized_by(IOCPARM_LEN(cmd)) data,
    struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_send_list(struct socket *, struct mbuf *, u_int *, int);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
static int ctl_usr_rcvd(struct socket *so, int flags);

static struct kctl *ctl_find_by_name(const char *__null_terminated);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
    u_int32_t *);
static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_int32_t event_code, u_int32_t id);

static int ctl_lock(struct socket *, int, void *);
static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);
170
/* Socket layer entry points for kernel control sockets. */
static struct pr_usrreqs ctl_usrreqs = {
	.pru_attach =           ctl_attach,
	.pru_bind =             ctl_bind,
	.pru_connect =          ctl_connect,
	.pru_control =          ctl_ioctl,
	.pru_detach =           ctl_detach,
	.pru_disconnect =       ctl_disconnect,
	.pru_peeraddr =         ctl_peeraddr,
	.pru_rcvd =             ctl_usr_rcvd,
	.pru_send =             ctl_send,
	.pru_send_list =        ctl_send_list,
	.pru_sosend =           sosend,
	.pru_sosend_list =      sosend_list,
	.pru_soreceive =        soreceive,
};

/*
 * Protocol switch entries: a datagram flavor and a stream flavor,
 * both on SYSPROTO_CONTROL; the datagram one is additionally
 * PR_ATOMIC (record-oriented).
 */
static struct protosw kctlsw[] = {
	{
		.pr_type =      SOCK_DGRAM,
		.pr_protocol =  SYSPROTO_CONTROL,
		.pr_flags =     PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
		.pr_ctloutput = ctl_ctloutput,
		.pr_usrreqs =   &ctl_usrreqs,
		.pr_lock =      ctl_lock,
		.pr_unlock =    ctl_unlock,
		.pr_getlock =   ctl_getlock,
	},
	{
		.pr_type =      SOCK_STREAM,
		.pr_protocol =  SYSPROTO_CONTROL,
		.pr_flags =     PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
		.pr_ctloutput = ctl_ctloutput,
		.pr_usrreqs =   &ctl_usrreqs,
		.pr_lock =      ctl_lock,
		.pr_unlock =    ctl_unlock,
		.pr_getlock =   ctl_getlock,
	}
};
209
/* sysctl handlers implemented elsewhere in this file. */
__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;


SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");

/* Global statistics exported through net.systm.kctl.stats. */
struct kctlstat kctlstat;
SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_getstat, "S,kctlstat", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_reg_list, "S,xkctl_reg", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_pcblist, "S,xkctlpcb", "");

/* Upper bound for automatic receive-buffer growth (tunable). */
u_int32_t ctl_autorcvbuf_max = 256 * 1024;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");

/* High-water mark actually reached by auto-grown receive buffers. */
u_int32_t ctl_autorcvbuf_high = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");

/* Verbose logging switch for this subsystem. */
u_int32_t ctl_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");

#if DEVELOPMENT || DEBUG
/* When set, double connect attempts panic instead of being tolerated. */
u_int32_t ctl_panic_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
#endif /* DEVELOPMENT || DEBUG */

SYSCTL_UINT(_net_systm_kctl, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED,
    (unsigned int *)&kctlstat.kcs_pcbcount, 0, "");

/* Growth increment for the kctl reference table. */
#define KCTL_TBL_INC 16

/*
 * Table translating opaque kern_ctl_ref handles to struct kctl
 * pointers; grown on demand by kctl_tbl_grow().
 */
static uintptr_t kctl_tbl_size = 0;
static u_int32_t kctl_tbl_growing = 0;
static u_int32_t kctl_tbl_growing_waiting = 0;
static uintptr_t kctl_tbl_count = 0;
static struct kctl **__counted_by_or_null(kctl_tbl_size) kctl_table = NULL;
static uintptr_t kctl_ref_gencnt = 0;

static void kctl_tbl_grow(void);
static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
static void kctl_delete_ref(kern_ctl_ref);
static struct kctl *kctl_from_ref(kern_ctl_ref);
264 static struct kctl *kctl_from_ref(kern_ctl_ref);
265
266 /*
267 * Install the protosw's for the Kernel Control manager.
268 */
269 __private_extern__ void
kern_control_init(struct domain * dp)270 kern_control_init(struct domain *dp)
271 {
272 struct protosw *pr;
273 int i;
274 int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));
275
276 VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
277 VERIFY(dp == systemdomain);
278
279 for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
280 net_add_proto(pr, dp, 1);
281 }
282 }
283
284 static void
kcb_delete(struct ctl_cb * kcb)285 kcb_delete(struct ctl_cb *kcb)
286 {
287 if (kcb != 0) {
288 lck_mtx_destroy(&kcb->mtx, &ctl_lck_grp);
289 kfree_type(struct ctl_cb, kcb);
290 }
291 }
292
293 /*
294 * Kernel Controller user-request functions
295 * attach function must exist and succeed
296 * detach not necessary
297 * we need a pcb for the per socket mutex
298 */
299 static int
ctl_attach(struct socket * so,int proto,struct proc * p)300 ctl_attach(struct socket *so, int proto, struct proc *p)
301 {
302 #pragma unused(proto, p)
303 struct ctl_cb *__single kcb = 0;
304
305 kcb = kalloc_type(struct ctl_cb, Z_WAITOK | Z_ZERO | Z_NOFAIL);
306
307 lck_mtx_init(&kcb->mtx, &ctl_lck_grp, &ctl_lck_attr);
308 kcb->so = so;
309 so->so_pcb = (caddr_t)kcb;
310
311 /*
312 * For datagram, use character count for sbspace as its value
313 * may be use for packetization and we do not want to
314 * drop packets based on the sbspace hint that was just provided
315 */
316 if (SOCK_CHECK_TYPE(so, SOCK_DGRAM)) {
317 so->so_rcv.sb_flags |= SB_KCTL;
318 so->so_snd.sb_flags |= SB_KCTL;
319 }
320 return 0;
321 }
322
323 static int
ctl_sofreelastref(struct socket * so)324 ctl_sofreelastref(struct socket *so)
325 {
326 struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;
327
328 so->so_pcb = 0;
329
330 if (kcb != 0) {
331 struct kctl *__single kctl;
332 if ((kctl = kcb->kctl) != 0) {
333 lck_mtx_lock(&ctl_mtx);
334 TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
335 kctlstat.kcs_pcbcount--;
336 kctlstat.kcs_gencnt++;
337 lck_mtx_unlock(&ctl_mtx);
338 }
339 kcb_delete(kcb);
340 }
341 sofreelastref(so, 1);
342 return 0;
343 }
344
/*
 * Use this function and ctl_kcb_require_clearing to serialize
 * critical calls into the kctl subsystem.
 *
 * Takes a shared "use count" on the control block.  Sleeps (on the
 * caller-held per-socket mutex) while any thread has requested
 * exclusive access via ctl_kcb_require_clearing.  Pair with
 * ctl_kcb_decrement_use_count.
 */
static void
ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
{
	LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
	while (kcb->require_clearing_count > 0) {
		msleep(&kcb->require_clearing_count, mutex_held, PSOCK | PCATCH, "kcb_require_clearing", NULL);
	}
	kcb->kcb_usecount++;
}
358
/*
 * Upgrade the caller's use count to exclusive access: advertises the
 * request (require_clearing_count), drops its own use count, then
 * sleeps until all other in-flight users drain.  Caller must already
 * hold a use count from ctl_kcb_increment_use_count and must hold the
 * per-socket mutex.  Pair with ctl_kcb_done_clearing.
 */
static void
ctl_kcb_require_clearing(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
{
	assert(kcb->kcb_usecount != 0);
	kcb->require_clearing_count++;
	/* drop our own use count so the drain below can reach zero */
	kcb->kcb_usecount--;
	while (kcb->kcb_usecount > 0) { // we need to wait until no one else is running
		msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL);
	}
	/* re-take our use count; we are now the sole runner */
	kcb->kcb_usecount++;
}
370
/*
 * Release exclusive access taken by ctl_kcb_require_clearing and
 * wake threads blocked in ctl_kcb_increment_use_count.
 */
static void
ctl_kcb_done_clearing(struct ctl_cb *kcb)
{
	assert(kcb->require_clearing_count != 0);
	kcb->require_clearing_count--;
	wakeup((caddr_t)&kcb->require_clearing_count);
}
378
/*
 * Drop a use count taken by ctl_kcb_increment_use_count.  If a
 * thread is waiting to go exclusive, wake it so it can re-check
 * the drain condition.
 */
static void
ctl_kcb_decrement_use_count(struct ctl_cb *kcb)
{
	assert(kcb->kcb_usecount != 0);
	kcb->kcb_usecount--;
	if (kcb->require_clearing_count != 0) {
		wakeup((caddr_t)&kcb->kcb_usecount);
	}
}
388
/*
 * pru_detach: tear down the association with the controller.
 * Takes exclusive access to the control block, gives a bound-but-
 * never-connected unit a disconnect callback for cleanup, then marks
 * the pcb for clearing (freed later via ctl_sofreelastref).
 */
static int
ctl_detach(struct socket *so)
{
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == 0) {
		return 0;
	}

	/* serialize with other kctl entry points, then go exclusive */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);
	ctl_kcb_require_clearing(kcb, mtx_held);

	if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
	    kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
		// The unit was bound, but not connected
		// Invoke the disconnected call to cleanup
		if (kcb->kctl->disconnect != NULL) {
			/* controller callbacks run without the socket lock */
			socket_unlock(so, 0);
			(*kcb->kctl->disconnect)(kcb->kctl->kctlref,
			    kcb->sac.sc_unit, kcb->userdata);
			socket_lock(so, 0);
		}
	}

	soisdisconnected(so);
#if DEVELOPMENT || DEBUG
	kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
	so->so_flags |= SOF_PCBCLEARING;
	ctl_kcb_done_clearing(kcb);
	ctl_kcb_decrement_use_count(kcb);
	return 0;
}
423
/*
 * Common setup for ctl_bind/ctl_connect: locate the controller named
 * by 'nam' (a sockaddr_ctl), validate socket type and privilege,
 * pick or validate a unit number, link the control block onto the
 * controller's list, and reserve socket buffer space.
 *
 * Returns 0 on success, or EINVAL/ENOENT/EPROTOTYPE/EPERM/EBUSY or
 * the controller's setup()/soreserve() error.  On failure after the
 * kcb was linked, fully unwinds the linkage.
 *
 * Caller holds the socket lock and exclusive access to kcb.
 */
static int
ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	struct kctl *__single kctl = NULL;
	int error = 0;
	struct sockaddr_ctl sa;
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;
	struct ctl_cb *__single kcb_next = NULL;

	if (kcb == 0) {
		panic("ctl_setup_kctl so_pcb null");
	}

	if (kcb->kctl != NULL) {
		// Already set up, skip
		return 0;
	}

	if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
		return EINVAL;
	}

	/* local copy: 'nam' is caller memory and sa.sc_unit may be rewritten */
	SOCKADDR_COPY(nam, &sa, sizeof(struct sockaddr_ctl));

	lck_mtx_lock(&ctl_mtx);
	kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
	if (kctl == NULL) {
		lck_mtx_unlock(&ctl_mtx);
		return ENOENT;
	}

	/* socket type must match the flavor the controller registered for */
	if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_STREAM)) ||
	    (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
	    (so->so_type != SOCK_DGRAM))) {
		lck_mtx_unlock(&ctl_mtx);
		return EPROTOTYPE;
	}

	if (kctl->flags & CTL_FLAG_PRIVILEGED) {
		if (p == 0) {
			lck_mtx_unlock(&ctl_mtx);
			return EINVAL;
		}
		if (kauth_cred_issuser(kauth_cred_get()) == 0) {
			lck_mtx_unlock(&ctl_mtx);
			return EPERM;
		}
	}

	if (kctl->setup != NULL) {
		/* controller chooses/validates the unit itself */
		error = (*kctl->setup)(&sa.sc_unit, &kcb->userdata);
		if (error != 0) {
			lck_mtx_unlock(&ctl_mtx);
			return error;
		}
	} else if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
		/* explicit unit requested: must not already be taken */
		if (kcb_find(kctl, sa.sc_unit) != NULL) {
			lck_mtx_unlock(&ctl_mtx);
			return EBUSY;
		}
	} else {
		/* Find an unused ID, assumes control IDs are in order */
		u_int32_t unit = 1;

		TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
			if (kcb_next->sac.sc_unit > unit) {
				/* Found a gap, lets fill it in */
				break;
			}
			unit = kcb_next->sac.sc_unit + 1;
			if (unit == ctl_maxunit) {
				break;
			}
		}

		if (unit == ctl_maxunit) {
			lck_mtx_unlock(&ctl_mtx);
			return EBUSY;
		}

		sa.sc_unit = unit;
	}

	/* record the (possibly rewritten) address and link into the kctl */
	bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
	kcb->kctl = kctl;
	if (kcb_next != NULL) {
		/* insert into the gap found above, keeping units ordered */
		TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
	} else {
		TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
	}
	kctlstat.kcs_pcbcount++;
	kctlstat.kcs_gencnt++;
	kctlstat.kcs_connections++;
	lck_mtx_unlock(&ctl_mtx);

	error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
	if (error) {
#if (DEBUG || DEVELOPMENT)
		if (ctl_debug) {
			printf("%s - soreserve(%llu, %u, %u) error %d\n",
			    __func__, so->so_gencnt,
			    kctl->sendbufsize, kctl->recvbufsize, error);
		}
#endif /* (DEBUG || DEVELOPMENT) */
		goto done; /* NOTE(review): falls through to 'done' either way */
	}

done:
	if (error) {
		/* unwind the linkage performed above */
		soisdisconnected(so);
#if DEVELOPMENT || DEBUG
		kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
		lck_mtx_lock(&ctl_mtx);
		TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		kcb->kctl = NULL;
		kcb->sac.sc_unit = 0;
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(&ctl_mtx);
	}
	return error;
}
549
/*
 * pru_bind: associate the socket with a controller unit without
 * connecting.  Only valid for controllers that registered a bind
 * callback (EINVAL otherwise).  The callback runs with the socket
 * lock dropped.
 */
static int
ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == NULL) {
		panic("ctl_bind so_pcb null");
	}

	/* serialize with other kctl entry points, then go exclusive */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);
	ctl_kcb_require_clearing(kcb, mtx_held);

	error = ctl_setup_kctl(so, nam, p);
	if (error) {
		goto out;
	}

	if (kcb->kctl == NULL) {
		panic("ctl_bind kctl null");
	}

	if (kcb->kctl->bind == NULL) {
		error = EINVAL;
		goto out;
	}

	/* controller callbacks run without the socket lock */
	socket_unlock(so, 0);
	error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
	socket_lock(so, 0);

out:
	ctl_kcb_done_clearing(kcb);
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
587
/*
 * pru_connect: set up the controller association and invoke the
 * controller's connect callback (with the socket lock dropped).
 * On callback failure, also invokes disconnect for cleanup and
 * fully unwinds the linkage created by ctl_setup_kctl.
 */
static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
	int error = 0;
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;

	if (kcb == NULL) {
		panic("ctl_connect so_pcb null");
	}

	/* serialize with other kctl entry points, then go exclusive */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);
	ctl_kcb_require_clearing(kcb, mtx_held);

#if DEVELOPMENT || DEBUG
	if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
		panic("kctl already connecting/connected");
	}
	kcb->status = KCTL_CONNECTING;
#endif /* DEVELOPMENT || DEBUG */

	error = ctl_setup_kctl(so, nam, p);
	if (error) {
		goto out;
	}

	if (kcb->kctl == NULL) {
		panic("ctl_connect kctl null");
	}

	soisconnecting(so);
	/* controller callbacks run without the socket lock */
	socket_unlock(so, 0);
	error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
	socket_lock(so, 0);
	if (error) {
		goto end;
	}
	soisconnected(so);
#if DEVELOPMENT || DEBUG
	kcb->status = KCTL_CONNECTED;
#endif /* DEVELOPMENT || DEBUG */

end:
	if (error && kcb->kctl->disconnect) {
		/*
		 * XXX Make sure we Don't check the return value
		 * of disconnect here.
		 * ipsec/utun_ctl_disconnect will return error when
		 * disconnect gets called after connect failure.
		 * However if we decide to check for disconnect return
		 * value here. Please make sure to revisit
		 * ipsec/utun_ctl_disconnect.
		 */
		socket_unlock(so, 0);
		(*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
		socket_lock(so, 0);
	}
	if (error) {
		/* unwind the linkage established by ctl_setup_kctl */
		soisdisconnected(so);
#if DEVELOPMENT || DEBUG
		kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
		lck_mtx_lock(&ctl_mtx);
		TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
		kcb->kctl = NULL;
		kcb->sac.sc_unit = 0;
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		kctlstat.kcs_conn_fail++;
		lck_mtx_unlock(&ctl_mtx);
	}
out:
	ctl_kcb_done_clearing(kcb);
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
664
/*
 * pru_disconnect: notify the controller, mark the socket
 * disconnected, wait for outstanding usecount references to drain,
 * and unlink the control block from its controller under ctl_mtx.
 */
static int
ctl_disconnect(struct socket *so)
{
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;

	/* NOTE(review): re-assignment is redundant with the initializer above */
	if ((kcb = (struct ctl_cb *)so->so_pcb)) {
		lck_mtx_t *__single mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
		ctl_kcb_increment_use_count(kcb, mtx_held);
		ctl_kcb_require_clearing(kcb, mtx_held);
		struct kctl *__single kctl = kcb->kctl;

		if (kctl && kctl->disconnect) {
			/* controller callbacks run without the socket lock */
			socket_unlock(so, 0);
			(*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata);
			socket_lock(so, 0);
		}

		soisdisconnected(so);
#if DEVELOPMENT || DEBUG
		kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */

		/* drop the socket lock; the remaining work is under ctl_mtx */
		socket_unlock(so, 0);
		lck_mtx_lock(&ctl_mtx);
		kcb->kctl = 0;
		kcb->sac.sc_unit = 0;
		/* wait for other holders of the kcb to let go */
		while (kcb->usecount != 0) {
			msleep(&kcb->usecount, &ctl_mtx, 0, "kcb->usecount", 0);
		}

		/* Check for NULL here for the case where ctl_disconnect is racing with itself
		 * and the first thread has already cleaned up the structure */
		if (kctl) {
			TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
		}
		kctlstat.kcs_pcbcount--;
		kctlstat.kcs_gencnt++;
		lck_mtx_unlock(&ctl_mtx);
		socket_lock(so, 0);
		ctl_kcb_done_clearing(kcb);
		ctl_kcb_decrement_use_count(kcb);
	}
	return 0;
}
710
711 static int
ctl_peeraddr(struct socket * so,struct sockaddr ** nam)712 ctl_peeraddr(struct socket *so, struct sockaddr **nam)
713 {
714 struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;
715 struct kctl *__single kctl;
716 struct sockaddr_ctl sc;
717
718 if (kcb == NULL) { /* sanity check */
719 return ENOTCONN;
720 }
721
722 if ((kctl = kcb->kctl) == NULL) {
723 return EINVAL;
724 }
725
726 bzero(&sc, sizeof(struct sockaddr_ctl));
727 sc.sc_len = sizeof(struct sockaddr_ctl);
728 sc.sc_family = AF_SYSTEM;
729 sc.ss_sysaddr = AF_SYS_CONTROL;
730 sc.sc_id = kctl->id;
731 sc.sc_unit = kcb->sac.sc_unit;
732
733 *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
734
735 return 0;
736 }
737
738 static void
ctl_sbrcv_trim(struct socket * so)739 ctl_sbrcv_trim(struct socket *so)
740 {
741 struct sockbuf *__single sb = &so->so_rcv;
742
743 if (sb->sb_hiwat > sb->sb_idealsize) {
744 u_int32_t diff;
745 int32_t trim;
746
747 /*
748 * The difference between the ideal size and the
749 * current size is the upper bound of the trimage
750 */
751 diff = sb->sb_hiwat - sb->sb_idealsize;
752 /*
753 * We cannot trim below the outstanding data
754 */
755 trim = sb->sb_hiwat - sb->sb_cc;
756
757 trim = imin(trim, (int32_t)diff);
758
759 if (trim > 0) {
760 sbreserve(sb, (sb->sb_hiwat - trim));
761
762 if (ctl_debug) {
763 printf("%s - shrunk to %d\n",
764 __func__, sb->sb_hiwat);
765 }
766 }
767 }
768 }
769
/*
 * pru_rcvd: the client consumed data from the receive buffer.
 * Forward the notification to the controller's rcvd callback (if
 * any, with the socket lock dropped) and trim any auto-grown
 * receive buffer back toward its ideal size.
 */
static int
ctl_usr_rcvd(struct socket *so, int flags)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (kcb == NULL) {
		return ENOTCONN;
	}

	/* shared use count only: no exclusive access needed here */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);

	if ((kctl = kcb->kctl) == NULL) {
		error = EINVAL;
		goto out;
	}

	if (kctl->rcvd) {
		socket_unlock(so, 0);
		(*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
		socket_lock(so, 0);
	}

	ctl_sbrcv_trim(so);

out:
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
801
/*
 * pru_send: hand one packet to the controller's send callback.
 * Control mbufs are not supported and are freed immediately.  On
 * any failure path the data mbuf is consumed (either by the
 * callback contract or freed here), so the caller never owns it
 * after this call.
 */
static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control,
    struct proc *p)
{
#pragma unused(addr, p)
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (control) {
		m_freem(control);
	}

	if (kcb == NULL) {      /* sanity check */
		m_freem(m);
		return ENOTCONN;
	}

	/* shared use count only: no exclusive access needed here */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);

	if (error == 0 && (kctl = kcb->kctl) == NULL) {
		error = EINVAL;
	}

	/* short-circuit: kctl->send only evaluated when error == 0 */
	if (error == 0 && kctl->send) {
		so_update_tx_data_stats(so, 1, m->m_pkthdr.len);
		/* controller callbacks run without the socket lock */
		socket_unlock(so, 0);
		error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
		    m, flags);
		socket_lock(so, 0);
	} else {
		m_freem(m);
		if (error == 0) {
			error = ENOTSUP;
		}
	}
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
	}
	ctl_kcb_decrement_use_count(kcb);

	return error;
}
847
/*
 * pru_send_list: hand a chain of packets (linked by m_nextpkt) to
 * the controller.  Uses the controller's send_list callback when
 * available, otherwise falls back to delivering packets one at a
 * time through send.  *pktcnt is set to the number of packets
 * handed over.  The mbuf chain is always consumed.
 */
static int
ctl_send_list(struct socket *so, struct mbuf *m, u_int *pktcnt, int flags)
{
	int error = 0;
	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *kctl;

	if (kcb == NULL) {      /* sanity check */
		m_freem_list(m);
		return ENOTCONN;
	}

	/* shared use count only: no exclusive access needed here */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);

	if ((kctl = kcb->kctl) == NULL) {
		error = EINVAL;
		goto done;
	}

	*pktcnt = 0;
	if (kctl->send_list != NULL) {
		struct mbuf *nxt;
		int space = 0;

		/* count packets/bytes for stats before handing the chain off */
		for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
			space += nxt->m_pkthdr.len;
			*pktcnt += 1;
		}
		so_update_tx_data_stats(so, *pktcnt, space);

		socket_unlock(so, 0);
		error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
		    kcb->userdata, m, flags);
		socket_lock(so, 0);
	} else {
		/*
		 * Fallback: deliver one packet at a time through send.
		 * NOTE(review): kctl->send is not NULL-checked here —
		 * presumably controllers always register send; confirm.
		 */
		int space = 0;

		while (m != NULL && error == 0) {
			struct mbuf *nextpkt = m->m_nextpkt;

			m->m_nextpkt = NULL;

			space += m->m_pkthdr.len;

			socket_unlock(so, 0);
			error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata, m, flags);
			socket_lock(so, 0);
			m = nextpkt;
			if (error == 0) {
				*pktcnt += 1;
			}
		}
		so_update_tx_data_stats(so, *pktcnt, space);

		/* free whatever the failed iteration left behind */
		if (m != NULL) {
			m_freem_list(m);
		}
	}
done:
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
	}
	ctl_kcb_decrement_use_count(kcb);

	return error;
}
916
/*
 * Decide whether 'datasize' bytes fit in the socket's receive
 * buffer, applying the kctl flow-control policy:
 *  - non-CTL_FLAG_REG_CRIT controllers: plain space check;
 *  - critical controllers, normal data: keep 25% headroom reserved
 *    for critical messages;
 *  - critical controllers, CTL_DATA_CRIT data: allow 25% overcommit
 *    and grow the buffer on demand up to ctl_autorcvbuf_max.
 * Returns 0 when the data fits, ENOBUFS otherwise.
 */
static errno_t
ctl_rcvbspace(struct socket *so, size_t datasize,
    u_int32_t kctlflags, u_int32_t flags)
{
	struct sockbuf *__single sb = &so->so_rcv;
	/* NOTE(review): assumes sbspace() is non-negative here — a
	 * negative value would wrap in the unsigned compares; confirm */
	u_int32_t space = sbspace(sb);
	errno_t error;

	if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
		if ((u_int32_t) space >= datasize) {
			error = 0;
		} else {
			error = ENOBUFS;
		}
	} else if ((flags & CTL_DATA_CRIT) == 0) {
		/*
		 * Reserve 25% for critical messages
		 */
		if (space < (sb->sb_hiwat >> 2) ||
		    space < datasize) {
			error = ENOBUFS;
		} else {
			error = 0;
		}
	} else {
		size_t autorcvbuf_max;

		/*
		 * Allow overcommit of 25%
		 */
		autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
		    ctl_autorcvbuf_max);

		if ((u_int32_t) space >= datasize) {
			error = 0;
		} else if (sb->sb_hiwat < autorcvbuf_max) {
			/*
			 * Grow with a little bit of leeway
			 */
			size_t grow = datasize - space + _MSIZE;
			u_int32_t cc = (u_int32_t)MIN(MIN((sb->sb_hiwat + grow), autorcvbuf_max), UINT32_MAX);

			if (sbreserve(sb, cc) == 1) {
				/* track the largest buffer ever auto-grown */
				if (sb->sb_hiwat > ctl_autorcvbuf_high) {
					ctl_autorcvbuf_high = sb->sb_hiwat;
				}

				/*
				 * A final check
				 */
				if ((u_int32_t) sbspace(sb) >= datasize) {
					error = 0;
				} else {
					error = ENOBUFS;
				}

				if (ctl_debug) {
					printf("%s - grown to %d error %d\n",
					    __func__, sb->sb_hiwat, error);
				}
			} else {
				error = ENOBUFS;
			}
		} else {
			error = ENOBUFS;
		}
	}
	return error;
}
986
/*
 * KPI: enqueue one mbuf packet on the receive buffer of the socket
 * attached to (kctlref, unit).  On success the mbuf is owned by the
 * socket buffer; on ENOBUFS from the space check the caller keeps
 * ownership of 'm'.  CTL_DATA_EOR marks the record boundary;
 * CTL_DATA_NOWAKEUP suppresses the reader wakeup.
 */
errno_t
ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
    u_int32_t flags)
{
	struct socket *__single so;
	errno_t error = 0;
	int len = m->m_pkthdr.len;
	u_int32_t kctlflags;

	/* kcb_find_socket returns the socket locked, with a reference */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return EINVAL;
	}

	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}
	if ((flags & CTL_DATA_EOR)) {
		m->m_flags |= M_EOR;
	}

	so_recv_data_stat(so, m, 0);
	if (sbappend_nodrop(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0) {
			sorwakeup(so);
		}
	} else {
		/* sbappend_nodrop failed and consumed the mbuf */
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}
bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
	}

	socket_unlock(so, 1);
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}

	return error;
}
1033
1034 /*
1035 * Compute space occupied by mbuf like sbappendrecord
1036 */
1037 static int
m_space(struct mbuf * m)1038 m_space(struct mbuf *m)
1039 {
1040 int space = 0;
1041 mbuf_ref_t nxt;
1042
1043 for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
1044 space += nxt->m_len;
1045 }
1046
1047 return space;
1048 }
1049
/*
 * KPI: enqueue a list of mbuf packets (linked by m_nextpkt) on the
 * receive buffer of the socket attached to (kctlref, unit).  Only
 * valid for datagram controllers (EOPNOTSUPP for stream), and
 * CTL_DATA_EOR is rejected since each packet is its own record.
 *
 * On error, packets not yet appended are returned through *m_remain
 * when supplied, otherwise freed.
 */
errno_t
ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
    u_int32_t flags, struct mbuf **m_remain)
{
	struct socket *__single so = NULL;
	errno_t error = 0;
	mbuf_ref_t m, nextpkt;
	int needwakeup = 0;
	int len = 0;
	u_int32_t kctlflags;

	/*
	 * Need to point the beginning of the list in case of early exit
	 */
	m = m_list;

	/*
	 * kcb_find_socket takes the socket lock with a reference
	 */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		error = EINVAL;
		goto done;
	}

	if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
		error = EOPNOTSUPP;
		goto done;
	}
	if (flags & CTL_DATA_EOR) {
		error = EINVAL;
		goto done;
	}

	for (m = m_list; m != NULL; m = nextpkt) {
		nextpkt = m->m_nextpkt;

		if (m->m_pkthdr.len == 0 && ctl_debug) {
			struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
			struct kctl *kctl = kcb == NULL ? NULL : kcb->kctl;
			uint32_t id = kctl == NULL ? -1 : kctl->id;

			printf("%s: %u:%u m_pkthdr.len is 0",
			    __func__, id, unit);
		}

		/*
		 * The mbuf is either appended or freed by sbappendrecord()
		 * so it's not reliable from a data standpoint
		 */
		len = m_space(m);
		if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
			/* m still points at the first unappended packet */
			error = ENOBUFS;
			OSIncrementAtomic64(
				(SInt64 *)&kctlstat.kcs_enqueue_fullsock);
			break;
		} else {
			/*
			 * Unlink from the list, m is on its own
			 */
			m->m_nextpkt = NULL;
			so_recv_data_stat(so, m, 0);
			if (sbappendrecord_nodrop(&so->so_rcv, m) != 0) {
				needwakeup = 1;
			} else {
				/*
				 * We free or return the remaining
				 * mbufs in the list
				 */
				m = nextpkt;
				error = ENOBUFS;
				OSIncrementAtomic64(
					(SInt64 *)&kctlstat.kcs_enqueue_fullsock);
				break;
			}
		}
	}
	if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
		sorwakeup(so);
	}

done:
	if (so != NULL) {
		if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
			printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
			    __func__, error, len,
			    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
		}

		socket_unlock(so, 1);
	}
	if (m_remain) {
		/* hand the unappended tail back to the caller */
		*m_remain = m;

#if (DEBUG || DEVELOPMENT)
		if (m != NULL && socket_debug && so != NULL &&
		    (so->so_options & SO_DEBUG)) {
			mbuf_ref_t n;

			printf("%s m_list %llx\n", __func__,
			    (uint64_t) VM_KERNEL_ADDRPERM(m_list));
			for (n = m; n != NULL; n = n->m_nextpkt) {
				printf(" remain %llx m_next %llx\n",
				    (uint64_t) VM_KERNEL_ADDRPERM(n),
				    (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
			}
		}
#endif /* (DEBUG || DEVELOPMENT) */
	} else {
		if (m != NULL) {
			m_freem_list(m);
		}
	}
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}
	return error;
}
1168
/*
 * Copy `len` bytes of caller data into a freshly-allocated mbuf chain
 * and append it to the receive buffer of the client socket attached
 * to (kctlref, unit).
 *
 * flags: CTL_DATA_EOR marks the record boundary (M_EOR),
 * CTL_DATA_NOWAKEUP suppresses the reader wakeup, CTL_DATA_CRIT
 * relaxes the space check in ctl_rcvbspace().
 *
 * Returns 0, EINVAL (no such socket), ENOBUFS (receive buffer full)
 * or ENOMEM (mbuf allocation failure); bumps kcs_enqueue_fail on
 * any error.
 */
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *__sized_by(len) data,
    size_t len, u_int32_t flags)
{
	struct socket *__single so;
	mbuf_ref_t m, n;
	errno_t error = 0;
	unsigned int num_needed;
	size_t curlen = 0;
	u_int32_t kctlflags;

	/* takes the socket lock with a reference; released before return */
	so = kcb_find_socket(kctlref, unit, &kctlflags);
	if (so == NULL) {
		return EINVAL;
	}

	/* check space up front so we do not allocate for nothing */
	if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
		goto bye;
	}

	num_needed = 1;
	m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
	if (m == NULL) {
		kctlstat.kcs_enqdata_mb_alloc_fail++;
		if (ctl_debug) {
			printf("%s: m_allocpacket_internal(%lu) failed\n",
			    __func__, len);
		}
		error = ENOMEM;
		goto bye;
	}

	/* scatter the caller's buffer across the chain */
	for (n = m; n != NULL; n = n->m_next) {
		size_t mlen = mbuf_maxlen(n);

		/* last mbuf may only be partially filled */
		if (mlen + curlen > len) {
			mlen = len - curlen;
		}
		n->m_len = (int32_t)mlen;
		bcopy((char *)data + curlen, m_mtod_current(n), mlen);
		curlen += mlen;
	}
	mbuf_pkthdr_setlen(m, curlen);

	if ((flags & CTL_DATA_EOR)) {
		m->m_flags |= M_EOR;
	}
	so_recv_data_stat(so, m, 0);
	/*
	 * No need to call the "nodrop" variant of sbappend
	 * because the mbuf is local to the scope of the function
	 */
	if (sbappend(&so->so_rcv, m) != 0) {
		if ((flags & CTL_DATA_NOWAKEUP) == 0) {
			sorwakeup(so);
		}
	} else {
		/* sbappend consumed (freed) the chain on failure */
		kctlstat.kcs_enqdata_sbappend_fail++;
		error = ENOBUFS;
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
	}

bye:
	if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
		printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
		    __func__, error, (int)len,
		    so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
	}

	socket_unlock(so, 1);
	if (error != 0) {
		OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
	}
	return error;
}
1246
1247 errno_t
ctl_getenqueuepacketcount(kern_ctl_ref kctlref,u_int32_t unit,u_int32_t * pcnt)1248 ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
1249 {
1250 struct socket *__single so;
1251 u_int32_t cnt;
1252 struct mbuf *__single m1;
1253
1254 if (pcnt == NULL) {
1255 return EINVAL;
1256 }
1257
1258 so = kcb_find_socket(kctlref, unit, NULL);
1259 if (so == NULL) {
1260 return EINVAL;
1261 }
1262
1263 cnt = 0;
1264 m1 = so->so_rcv.sb_mb;
1265 while (m1 != NULL) {
1266 if (m_has_mtype(m1, MTF_DATA | MTF_HEADER | MTF_OOBDATA)) {
1267 cnt += 1;
1268 }
1269 m1 = m1->m_nextpkt;
1270 }
1271 *pcnt = cnt;
1272
1273 socket_unlock(so, 1);
1274
1275 return 0;
1276 }
1277
1278 errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref,u_int32_t unit,size_t * space)1279 ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
1280 {
1281 struct socket *__single so;
1282 long avail;
1283
1284 if (space == NULL) {
1285 return EINVAL;
1286 }
1287
1288 so = kcb_find_socket(kctlref, unit, NULL);
1289 if (so == NULL) {
1290 return EINVAL;
1291 }
1292
1293 avail = sbspace(&so->so_rcv);
1294 *space = (avail < 0) ? 0 : avail;
1295 socket_unlock(so, 1);
1296
1297 return 0;
1298 }
1299
1300 errno_t
ctl_getenqueuereadable(kern_ctl_ref kctlref,u_int32_t unit,u_int32_t * difference)1301 ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
1302 u_int32_t *difference)
1303 {
1304 struct socket *__single so;
1305
1306 if (difference == NULL) {
1307 return EINVAL;
1308 }
1309
1310 so = kcb_find_socket(kctlref, unit, NULL);
1311 if (so == NULL) {
1312 return EINVAL;
1313 }
1314
1315 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
1316 *difference = 0;
1317 } else {
1318 *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
1319 }
1320 socket_unlock(so, 1);
1321
1322 return 0;
1323 }
1324
/*
 * setsockopt/getsockopt handler for kernel control sockets.
 *
 * Copies the user's option value into a kernel buffer, then invokes
 * the control's setopt/getopt callback with the socket lock DROPPED
 * (the kcb use count taken below keeps the pcb alive across the
 * unlocked window).  Only SYSPROTO_CONTROL level options are handled.
 */
static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;
	struct kctl *__single kctl;
	int error = 0;
	void *data = NULL;
	size_t data_len = 0;
	size_t len;

	if (sopt->sopt_level != SYSPROTO_CONTROL) {
		return EINVAL;
	}

	if (kcb == NULL) {    /* sanity check */
		return ENOTCONN;
	}

	if ((kctl = kcb->kctl) == NULL) {
		return EINVAL;
	}

	/* pin the kcb so it survives the unlocked callback below */
	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
	ctl_kcb_increment_use_count(kcb, mtx_held);

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		if (kctl->setopt == NULL) {
			error = ENOTSUP;
			goto out;
		}
		if (sopt->sopt_valsize != 0) {
			data_len = sopt->sopt_valsize;
			/* refuse absurd sizes before allocating */
			if (__improbable(data_len > copysize_limit_panic)) {
				error = EINVAL;
				goto out;
			}

			data = kalloc_data(data_len, Z_WAITOK | Z_ZERO);
			if (data == NULL) {
				data_len = 0;
				error = ENOMEM;
				goto out;
			}
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}
		if (error == 0) {
			/* callback runs without the socket lock */
			socket_unlock(so, 0);
			error = (*kctl->setopt)(kctl->kctlref,
			    kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
			    data, sopt->sopt_valsize);
			socket_lock(so, 0);
		}

		kfree_data(data, data_len);
		break;

	case SOPT_GET:
		if (kctl->getopt == NULL) {
			error = ENOTSUP;
			goto out;
		}

		if (sopt->sopt_valsize && sopt->sopt_val) {
			data_len = sopt->sopt_valsize;
			/* refuse absurd sizes before allocating */
			if (__improbable(data_len > copysize_limit_panic)) {
				error = EINVAL;
				goto out;
			}

			data = kalloc_data(data_len, Z_WAITOK | Z_ZERO);
			if (data == NULL) {
				data_len = 0;
				error = ENOMEM;
				goto out;
			}
			/*
			 * 4108337 - copy user data in case the
			 * kernel control needs it
			 */
			error = sooptcopyin(sopt, data,
			    sopt->sopt_valsize, sopt->sopt_valsize);
		}

		if (error == 0) {
			len = sopt->sopt_valsize;
			socket_unlock(so, 0);
			/* getopt may shrink len; growing it is a bug */
			error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
			    kcb->userdata, sopt->sopt_name,
			    data, &len);
			if (data != NULL && len > sopt->sopt_valsize) {
				panic_plain("ctl_ctloutput: ctl %s returned "
				    "len (%lu) > sopt_valsize (%lu)\n",
				    kcb->kctl->name, len,
				    sopt->sopt_valsize);
			}
			socket_lock(so, 0);
			if (error == 0) {
				if (data != NULL) {
					error = sooptcopyout(sopt, data, len);
				} else {
					/* size probe only: report length */
					sopt->sopt_valsize = len;
				}
			}
		}

		kfree_data(data, data_len);
		break;
	}

out:
	ctl_kcb_decrement_use_count(kcb);
	return error;
}
1440
/*
 * ioctl handler for kernel control sockets.  Supports:
 *   CTLIOCGCOUNT - number of registered kernel controls
 *   CTLIOCGINFO  - resolve a control name to its control id
 * Anything else returns ENOTSUP.  `data` has already been copied in
 * by the ioctl layer (IOCPARM_LEN(cmd) bytes).
 */
static int
ctl_ioctl(struct socket *so, u_long cmd,
    caddr_t __sized_by(IOCPARM_LEN(cmd)) data,
    struct ifnet *ifp, struct proc *p)
{
#pragma unused(so, ifp, p)
	int error = ENOTSUP;

	switch (cmd) {
	/* get the number of controllers */
	case CTLIOCGCOUNT: {
		struct kctl *__single kctl;
		u_int32_t n = 0;

		lck_mtx_lock(&ctl_mtx);
		TAILQ_FOREACH(kctl, &ctl_head, next)
		n++;
		lck_mtx_unlock(&ctl_mtx);

		bcopy(&n, data, sizeof(n));
		error = 0;
		break;
	}
	case CTLIOCGINFO: {
		struct ctl_info ctl_info;
		struct kctl *__single kctl = 0;
		size_t name_len;

		/* work on a local copy of the user's request */
		bcopy(data, &ctl_info, sizeof(ctl_info));
		name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);

		/* reject empty or unterminated names */
		if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
			error = EINVAL;
			break;
		}
		lck_mtx_lock(&ctl_mtx);
		kctl = ctl_find_by_name(__unsafe_null_terminated_from_indexable(ctl_info.ctl_name));
		lck_mtx_unlock(&ctl_mtx);
		if (kctl == 0) {
			error = ENOENT;
			break;
		}
		ctl_info.ctl_id = kctl->id;
		bcopy(&ctl_info, data, sizeof(ctl_info));
		error = 0;
		break;
	}

		/* add controls to get list of NKEs */
	}

	return error;
}
1494
/*
 * Grow the global kctl reference table by KCTL_TBL_INC slots.
 *
 * Called with ctl_mtx held; the lock is dropped around the allocation
 * and re-taken, so only one thread grows at a time (kctl_tbl_growing
 * is the flag, msleep/wakeup the rendezvous).  Returns with ctl_mtx
 * held; may return without growing on allocation failure or when the
 * table would exceed UINT16_MAX entries (the ref encoding only has
 * 16 bits for the index — see KCTLREF_INDEX_MASK).
 */
static void
kctl_tbl_grow(void)
{
	struct kctl *__single *new_table;
	uintptr_t new_size;

	lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

	if (kctl_tbl_growing) {
		/* Another thread is allocating */
		kctl_tbl_growing_waiting++;

		do {
			(void) msleep((caddr_t) &kctl_tbl_growing, &ctl_mtx,
			    PSOCK | PCATCH, "kctl_tbl_growing", 0);
		} while (kctl_tbl_growing);
		kctl_tbl_growing_waiting--;
	}
	/* Another thread grew the table */
	if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
		return;
	}

	/* Verify we have a sane size */
	if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
		kctlstat.kcs_tbl_size_too_big++;
		if (ctl_debug) {
			printf("%s kctl_tbl_size %lu too big\n",
			    __func__, kctl_tbl_size);
		}
		return;
	}
	kctl_tbl_growing = 1;

	new_size = kctl_tbl_size + KCTL_TBL_INC;

	/* drop the mutex for the blocking allocation */
	lck_mtx_unlock(&ctl_mtx);
	new_table = kalloc_type(struct kctl *, new_size, Z_WAITOK | Z_ZERO);
	lck_mtx_lock(&ctl_mtx);

	if (new_table != NULL) {
		if (kctl_table != NULL) {
			/* copy the old entries, then retire the old table */
			bcopy(kctl_table, new_table, kctl_tbl_size * sizeof(struct kctl *));

			kfree_type_counted_by(struct kctl *, kctl_tbl_size, kctl_table);
		}
		kctl_table = new_table;
		kctl_tbl_size = new_size;
	}

	kctl_tbl_growing = 0;

	/* release any threads parked in the msleep above */
	if (kctl_tbl_growing_waiting) {
		wakeup(&kctl_tbl_growing);
	}
}
1551
/*
 * A kern_ctl_ref encodes a table index (low 16 bits, stored as
 * index + 1 so a ref is never 0/NULL) salted with a generation
 * count (high 16 bits) so stale refs are detected after deregister.
 */
#define KCTLREF_INDEX_MASK 0x0000FFFF
#define KCTLREF_GENCNT_MASK     0xFFFF0000
#define KCTLREF_GENCNT_SHIFT    16

/*
 * Allocate a table slot for `kctl` and mint its opaque reference.
 * Called with ctl_mtx held.  Grows the table first when full; panics
 * if no free slot can be found afterwards (kctl_tbl_grow can fail
 * silently, but then the table scan below would still find a slot
 * unless registration outpaces the UINT16_MAX cap).
 */
static kern_ctl_ref
kctl_make_ref(struct kctl *kctl)
{
	uintptr_t i;

	lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

	if (kctl_tbl_count >= kctl_tbl_size) {
		kctl_tbl_grow();
	}

	kctl->kctlref = NULL;
	for (i = 0; i < kctl_tbl_size; i++) {
		if (kctl_table[i] == NULL) {
			uintptr_t ref;

			/*
			 * Reference is index plus one
			 */
			kctl_ref_gencnt += 1;

			/*
			 * Add generation count as salt to reference to prevent
			 * use after deregister
			 */
			ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
			    KCTLREF_GENCNT_MASK) +
			    ((i + 1) & KCTLREF_INDEX_MASK);

			kctl->kctlref = __unsafe_forge_single(void *, ref);
			kctl_table[i] = kctl;
			kctl_tbl_count++;
			break;
		}
	}

	if (kctl->kctlref == NULL) {
		panic("%s no space in table", __func__);
	}

	if (ctl_debug > 0) {
		printf("%s %p for %p\n",
		    __func__, kctl->kctlref, kctl);
	}

	return kctl->kctlref;
}
1603
1604 static void
kctl_delete_ref(kern_ctl_ref kctlref)1605 kctl_delete_ref(kern_ctl_ref kctlref)
1606 {
1607 /*
1608 * Reference is index plus one
1609 */
1610 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1611
1612 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1613
1614 if (i < kctl_tbl_size) {
1615 struct kctl *__single kctl = kctl_table[i];
1616
1617 if (kctl->kctlref == kctlref) {
1618 kctl_table[i] = NULL;
1619 kctl_tbl_count--;
1620 } else {
1621 kctlstat.kcs_bad_kctlref++;
1622 }
1623 } else {
1624 kctlstat.kcs_bad_kctlref++;
1625 }
1626 }
1627
1628 static struct kctl *
kctl_from_ref(kern_ctl_ref kctlref)1629 kctl_from_ref(kern_ctl_ref kctlref)
1630 {
1631 /*
1632 * Reference is index plus one
1633 */
1634 uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
1635 struct kctl *__single kctl = NULL;
1636
1637 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1638
1639 if (i >= kctl_tbl_size) {
1640 kctlstat.kcs_bad_kctlref++;
1641 return NULL;
1642 }
1643 kctl = kctl_table[i];
1644 if (kctl->kctlref != kctlref) {
1645 kctlstat.kcs_bad_kctlref++;
1646 return NULL;
1647 }
1648 return kctl;
1649 }
1650
1651 /*
1652 * Register/unregister a NKE
1653 */
1654 errno_t
ctl_register(struct kern_ctl_reg * userkctl,kern_ctl_ref * kctlref)1655 ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
1656 {
1657 struct kctl *__single kctl = NULL;
1658 struct kctl *__single kctl_next = NULL;
1659 u_int32_t id = 1;
1660 size_t name_len;
1661 int is_extended = 0;
1662 int is_setup = 0;
1663
1664 if (userkctl == NULL) { /* sanity check */
1665 return EINVAL;
1666 }
1667 if (userkctl->ctl_connect == NULL) {
1668 return EINVAL;
1669 }
1670 name_len = strnlen(userkctl->ctl_name, sizeof(userkctl->ctl_name));
1671 if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
1672 return EINVAL;
1673 }
1674
1675 kctl = kalloc_type(struct kctl, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1676
1677 lck_mtx_lock(&ctl_mtx);
1678
1679 if (kctl_make_ref(kctl) == NULL) {
1680 lck_mtx_unlock(&ctl_mtx);
1681 kfree_type(struct kctl, kctl);
1682 return ENOMEM;
1683 }
1684
1685 /*
1686 * Kernel Control IDs
1687 *
1688 * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
1689 * static. If they do not exist, add them to the list in order. If the
1690 * flag is not set, we must find a new unique value. We assume the
1691 * list is in order. We find the last item in the list and add one. If
1692 * this leads to wrapping the id around, we start at the front of the
1693 * list and look for a gap.
1694 */
1695
1696 if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1697 /* Must dynamically assign an unused ID */
1698
1699 /* Verify the same name isn't already registered */
1700 if (ctl_find_by_name(__unsafe_null_terminated_from_indexable(userkctl->ctl_name)) != NULL) {
1701 kctl_delete_ref(kctl->kctlref);
1702 lck_mtx_unlock(&ctl_mtx);
1703 kfree_type(struct kctl, kctl);
1704 return EEXIST;
1705 }
1706
1707 /* Start with 1 in case the list is empty */
1708 id = 1;
1709 kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
1710
1711 if (kctl_next != NULL) {
1712 /* List was not empty, add one to the last item */
1713 id = kctl_next->id + 1;
1714 kctl_next = NULL;
1715
1716 /*
1717 * If this wrapped the id number, start looking at
1718 * the front of the list for an unused id.
1719 */
1720 if (id == 0) {
1721 /* Find the next unused ID */
1722 id = 1;
1723
1724 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1725 if (kctl_next->id > id) {
1726 /* We found a gap */
1727 break;
1728 }
1729
1730 id = kctl_next->id + 1;
1731 }
1732 }
1733 }
1734
1735 userkctl->ctl_id = id;
1736 kctl->id = id;
1737 kctl->reg_unit = -1;
1738 } else {
1739 TAILQ_FOREACH(kctl_next, &ctl_head, next) {
1740 if (kctl_next->id > userkctl->ctl_id) {
1741 break;
1742 }
1743 }
1744
1745 if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
1746 kctl_delete_ref(kctl->kctlref);
1747 lck_mtx_unlock(&ctl_mtx);
1748 kfree_type(struct kctl, kctl);
1749 return EEXIST;
1750 }
1751 kctl->id = userkctl->ctl_id;
1752 kctl->reg_unit = userkctl->ctl_unit;
1753 }
1754
1755 is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
1756 is_setup = (userkctl->ctl_flags & CTL_FLAG_REG_SETUP);
1757
1758 strbufcpy(kctl->name, userkctl->ctl_name);
1759 kctl->flags = userkctl->ctl_flags;
1760
1761 /*
1762 * Let the caller know the default send and receive sizes
1763 */
1764 if (userkctl->ctl_sendsize == 0) {
1765 kctl->sendbufsize = CTL_SENDSIZE;
1766 userkctl->ctl_sendsize = kctl->sendbufsize;
1767 } else {
1768 kctl->sendbufsize = userkctl->ctl_sendsize;
1769 }
1770 if (userkctl->ctl_recvsize == 0) {
1771 kctl->recvbufsize = CTL_RECVSIZE;
1772 userkctl->ctl_recvsize = kctl->recvbufsize;
1773 } else {
1774 kctl->recvbufsize = userkctl->ctl_recvsize;
1775 }
1776
1777 if (is_setup) {
1778 kctl->setup = userkctl->ctl_setup;
1779 }
1780 kctl->bind = userkctl->ctl_bind;
1781 kctl->connect = userkctl->ctl_connect;
1782 kctl->disconnect = userkctl->ctl_disconnect;
1783 kctl->send = userkctl->ctl_send;
1784 kctl->setopt = userkctl->ctl_setopt;
1785 kctl->getopt = userkctl->ctl_getopt;
1786 if (is_extended) {
1787 kctl->rcvd = userkctl->ctl_rcvd;
1788 kctl->send_list = userkctl->ctl_send_list;
1789 }
1790
1791 TAILQ_INIT(&kctl->kcb_head);
1792
1793 if (kctl_next) {
1794 TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
1795 } else {
1796 TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
1797 }
1798
1799 kctlstat.kcs_reg_count++;
1800 kctlstat.kcs_gencnt++;
1801
1802 lck_mtx_unlock(&ctl_mtx);
1803
1804 *kctlref = kctl->kctlref;
1805
1806 ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
1807 return 0;
1808 }
1809
/*
 * Deregister a kernel control previously registered with
 * ctl_register().  Fails with EINVAL for a stale/unknown reference
 * and EBUSY while any client connection (kcb) is still attached.
 * On success, unlinks the control, frees it, and posts
 * KEV_CTL_DEREGISTERED (after dropping ctl_mtx).
 */
errno_t
ctl_deregister(void *kctlref)
{
	struct kctl *__single kctl;

	lck_mtx_lock(&ctl_mtx);
	if ((kctl = kctl_from_ref(kctlref)) == NULL) {
		kctlstat.kcs_bad_kctlref++;
		lck_mtx_unlock(&ctl_mtx);
		if (ctl_debug != 0) {
			printf("%s invalid kctlref %p\n",
			    __func__, kctlref);
		}
		return EINVAL;
	}

	/* refuse while clients are still connected */
	if (!TAILQ_EMPTY(&kctl->kcb_head)) {
		lck_mtx_unlock(&ctl_mtx);
		return EBUSY;
	}

	TAILQ_REMOVE(&ctl_head, kctl, next);

	kctlstat.kcs_reg_count--;
	kctlstat.kcs_gencnt++;

	kctl_delete_ref(kctl->kctlref);
	lck_mtx_unlock(&ctl_mtx);

	/* post the event unlocked, then free the control */
	ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
	kfree_type(struct kctl, kctl);
	return 0;
}
1843
1844 /*
1845 * Must be called with global ctl_mtx lock taken
1846 */
1847 static struct kctl *
ctl_find_by_name(const char * __null_terminated name)1848 ctl_find_by_name(const char *__null_terminated name)
1849 {
1850 struct kctl *__single kctl;
1851
1852 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1853
1854 TAILQ_FOREACH(kctl, &ctl_head, next)
1855 if (strlcmp(kctl->name, name, sizeof(kctl->name)) == 0) {
1856 return kctl;
1857 }
1858
1859 return NULL;
1860 }
1861
1862 u_int32_t
ctl_id_by_name(const char * name)1863 ctl_id_by_name(const char *name)
1864 {
1865 u_int32_t ctl_id = 0;
1866 struct kctl *__single kctl;
1867
1868 lck_mtx_lock(&ctl_mtx);
1869 kctl = ctl_find_by_name(name);
1870 if (kctl) {
1871 ctl_id = kctl->id;
1872 }
1873 lck_mtx_unlock(&ctl_mtx);
1874
1875 return ctl_id;
1876 }
1877
1878 errno_t
ctl_name_by_id(u_int32_t id,char * __counted_by (maxsize)out_name,size_t maxsize)1879 ctl_name_by_id(u_int32_t id, char *__counted_by(maxsize) out_name, size_t maxsize)
1880 {
1881 int found = 0;
1882 struct kctl *__single kctl;
1883
1884 lck_mtx_lock(&ctl_mtx);
1885 TAILQ_FOREACH(kctl, &ctl_head, next) {
1886 if (kctl->id == id) {
1887 break;
1888 }
1889 }
1890
1891 if (kctl) {
1892 size_t count = maxsize;
1893 if (maxsize > MAX_KCTL_NAME) {
1894 count = MAX_KCTL_NAME;
1895 }
1896 strbufcpy(out_name, count, kctl->name, sizeof(kctl->name));
1897 found = 1;
1898 }
1899 lck_mtx_unlock(&ctl_mtx);
1900
1901 return found ? 0 : ENOENT;
1902 }
1903
1904 /*
1905 * Must be called with global ctl_mtx lock taked
1906 *
1907 */
1908 static struct kctl *
ctl_find_by_id_unit(u_int32_t id,u_int32_t unit)1909 ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
1910 {
1911 struct kctl *__single kctl;
1912
1913 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1914
1915 TAILQ_FOREACH(kctl, &ctl_head, next) {
1916 if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
1917 return kctl;
1918 } else if (kctl->id == id && kctl->reg_unit == unit) {
1919 return kctl;
1920 }
1921 }
1922 return NULL;
1923 }
1924
1925 /*
1926 * Must be called with kernel controller lock taken
1927 */
1928 static struct ctl_cb *
kcb_find(struct kctl * kctl,u_int32_t unit)1929 kcb_find(struct kctl *kctl, u_int32_t unit)
1930 {
1931 struct ctl_cb *__single kcb;
1932
1933 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
1934
1935 TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
1936 if (kcb->sac.sc_unit == unit) {
1937 return kcb;
1938 }
1939
1940 return NULL;
1941 }
1942
/*
 * Resolve (kctlref, unit) to the attached client socket and return it
 * LOCKED, with a socket reference held (callers release both through
 * socket_unlock(so, 1)).
 *
 * Lock ordering is socket lock before ctl_mtx, so this routine must
 * drop ctl_mtx before taking the socket lock and then re-validate the
 * kctlref afterwards (the control could have been deregistered in the
 * window).  The kcb usecount taken below keeps the kcb — and hence
 * the socket — from being torn down across that window.
 *
 * On success *kctlflags (if non-NULL) receives the control's flags.
 * Returns NULL when the ref, unit, or socket is invalid or went away.
 */
static struct socket *
kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
{
	struct socket *__single so = NULL;
	struct ctl_cb *__single kcb;
	void *__single lr_saved;
	struct kctl *__single kctl;
	int i;

	lr_saved = __unsafe_forge_single(void *, __builtin_return_address(0));

	lck_mtx_lock(&ctl_mtx);
	/*
	 * First validate the kctlref
	 */
	if ((kctl = kctl_from_ref(kctlref)) == NULL) {
		kctlstat.kcs_bad_kctlref++;
		lck_mtx_unlock(&ctl_mtx);
		if (ctl_debug != 0) {
			printf("%s invalid kctlref %p\n",
			    __func__, kctlref);
		}
		return NULL;
	}

	kcb = kcb_find(kctl, unit);
	if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
		lck_mtx_unlock(&ctl_mtx);
		return NULL;
	}
	/*
	 * This prevents the socket from being closed
	 */
	kcb->usecount++;
	/*
	 * Respect lock ordering: socket before ctl_mtx
	 */
	lck_mtx_unlock(&ctl_mtx);

	socket_lock(so, 1);
	/*
	 * The socket lock history is more useful if we store
	 * the address of the caller.
	 */
	i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
	so->lock_lr[i] = lr_saved;

	lck_mtx_lock(&ctl_mtx);

	/* re-validate: the control may have gone away while unlocked */
	if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
		lck_mtx_unlock(&ctl_mtx);
		socket_unlock(so, 1);
		so = NULL;
		lck_mtx_lock(&ctl_mtx);
	} else if (kctlflags != NULL) {
		*kctlflags = kctl->flags;
	}

	kcb->usecount--;
	/* a disconnect may be waiting for the usecount to drain */
	if (kcb->usecount == 0 && kcb->require_clearing_count != 0) {
		wakeup((event_t)&kcb->usecount);
	}

	lck_mtx_unlock(&ctl_mtx);

	return so;
}
2010
2011 static void
ctl_post_msg(u_int32_t event_code,u_int32_t id)2012 ctl_post_msg(u_int32_t event_code, u_int32_t id)
2013 {
2014 struct ctl_event_data ctl_ev_data;
2015 struct kev_msg ev_msg;
2016
2017 lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
2018
2019 bzero(&ev_msg, sizeof(struct kev_msg));
2020 ev_msg.vendor_code = KEV_VENDOR_APPLE;
2021
2022 ev_msg.kev_class = KEV_SYSTEM_CLASS;
2023 ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
2024 ev_msg.event_code = event_code;
2025
2026 /* common nke subclass data */
2027 bzero(&ctl_ev_data, sizeof(ctl_ev_data));
2028 ctl_ev_data.ctl_id = id;
2029 ev_msg.dv[0].data_ptr = &ctl_ev_data;
2030 ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
2031
2032 ev_msg.dv[1].data_length = 0;
2033
2034 kev_post_msg(&ev_msg);
2035 }
2036
/*
 * pru_lock handler for kernel control sockets: take the per-pcb mutex
 * and optionally bump the socket refcount.  `lr` is the caller's
 * return address for the lock-history ring; when NULL we record our
 * own caller instead.  Panics on a missing pcb or a negative
 * usecount — both indicate pcb lifecycle corruption.
 */
static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
	void *__single lr_saved;

	if (lr == NULL) {
		lr_saved = __unsafe_forge_single(void *, __builtin_return_address(0));
	} else {
		lr_saved = lr;
	}

	if (so->so_pcb != NULL) {
		lck_mtx_lock(&((struct ctl_cb *)so->so_pcb)->mtx);
	} else {
		panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s",
		    so, lr_saved, solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (so->so_usecount < 0) {
		panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s",
		    so, so->so_pcb, lr_saved, so->so_usecount,
		    solockhistory_nr(so));
		/* NOTREACHED */
	}

	if (refcount) {
		so->so_usecount++;
	}

	/* record the caller in the lock-history ring buffer */
	so->lock_lr[so->next_lock_lr] = lr_saved;
	so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
	return 0;
}
2071
2072 static int
ctl_unlock(struct socket * so,int refcount,void * lr)2073 ctl_unlock(struct socket *so, int refcount, void *lr)
2074 {
2075 void *__single lr_saved;
2076 lck_mtx_t *__single mutex_held;
2077
2078 if (lr == NULL) {
2079 lr_saved = __unsafe_forge_single(void *, __builtin_return_address(0));
2080 } else {
2081 lr_saved = lr;
2082 }
2083
2084 #if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
2085 printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n",
2086 (uint64_t)VM_KERNEL_ADDRPERM(so),
2087 (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb,
2088 (uint64_t)VM_KERNEL_ADDRPERM(&((struct ctl_cb *)so->so_pcb)->mtx),
2089 so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
2090 #endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
2091 if (refcount) {
2092 so->so_usecount--;
2093 }
2094
2095 if (so->so_usecount < 0) {
2096 panic("ctl_unlock: so=%p usecount=%x lrh= %s",
2097 so, so->so_usecount, solockhistory_nr(so));
2098 /* NOTREACHED */
2099 }
2100 if (so->so_pcb == NULL) {
2101 panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s",
2102 so, so->so_usecount, (void *)lr_saved,
2103 solockhistory_nr(so));
2104 /* NOTREACHED */
2105 }
2106 mutex_held = &((struct ctl_cb *)so->so_pcb)->mtx;
2107
2108 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
2109 so->unlock_lr[so->next_unlock_lr] = lr_saved;
2110 so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
2111 lck_mtx_unlock(mutex_held);
2112
2113 if (so->so_usecount == 0) {
2114 ctl_sofreelastref(so);
2115 }
2116
2117 return 0;
2118 }
2119
2120 static lck_mtx_t *
2121 ctl_getlock(struct socket *so, int flags)
2122 {
2123 #pragma unused(flags)
2124 struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;
2125
2126 if (so->so_pcb) {
2127 if (so->so_usecount < 0) {
2128 panic("ctl_getlock: so=%p usecount=%x lrh= %s",
2129 so, so->so_usecount, solockhistory_nr(so));
2130 }
2131 return &kcb->mtx;
2132 } else {
2133 panic("ctl_getlock: so=%p NULL NO so_pcb %s",
2134 so, solockhistory_nr(so));
2135 return so->so_proto->pr_domain->dom_mtx;
2136 }
2137 }
2138
/*
 * sysctl handler: dump one struct xkctl_reg per registered kernel
 * control, bracketed by xsystmgen headers whose generation counts let
 * userland detect concurrent changes and retry.  A NULL oldptr is a
 * size probe (padded estimate); writes are rejected with EPERM.
 */
__private_extern__ int
kctl_reg_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	u_int64_t i, n;
	struct xsystmgen xsg;
	void *buf = NULL;
	struct kctl *__single kctl;
	size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));

	buf = kalloc_data(item_size, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	lck_mtx_lock(&ctl_mtx);

	n = kctlstat.kcs_reg_count;

	if (req->oldptr == USER_ADDR_NULL) {
		/* size probe: pad by 12.5% for registrations racing us */
		req->oldidx = (size_t)(n + n / 8) * sizeof(struct xkctl_reg);
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	bzero(&xsg, sizeof(xsg));
	xsg.xg_len = sizeof(xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kctlstat.kcs_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
	    i < n && kctl != NULL;
	    i++, kctl = TAILQ_NEXT(kctl, next)) {
		struct xkctl_reg *__single xkr = (struct xkctl_reg *)buf;
		struct ctl_cb *__single kcb;
		u_int32_t pcbcount = 0;

		/* number of client connections on this control */
		TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
		pcbcount++;

		bzero(buf, item_size);

		xkr->xkr_len = sizeof(struct xkctl_reg);
		xkr->xkr_kind = XSO_KCREG;
		xkr->xkr_id = kctl->id;
		xkr->xkr_reg_unit = kctl->reg_unit;
		xkr->xkr_flags = kctl->flags;
		xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
		xkr->xkr_recvbufsize = kctl->recvbufsize;
		xkr->xkr_sendbufsize = kctl->sendbufsize;
		xkr->xkr_lastunit = kctl->lastunit;
		xkr->xkr_pcbcount = pcbcount;
		/* callback addresses are unslid before export */
		xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
		xkr->xkr_disconnect =
		    (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
		xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
		xkr->xkr_send_list =
		    (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
		xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
		xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
		xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
		strbufcpy(xkr->xkr_name, kctl->name);

		error = SYSCTL_OUT(req, buf, item_size);
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof(xsg));
		xsg.xg_len = sizeof(xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kctlstat.kcs_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
		if (error) {
			goto done;
		}
	}

done:
	lck_mtx_unlock(&ctl_mtx);

	kfree_data(buf, item_size);

	return error;
}
2242
/*
 * sysctl handler: dump one record per connected kernel control pcb.
 * Each record packs an xkctlpcb followed by xsocket_n, two
 * xsockbuf_n (rcv then snd) and an xsockstat_n, all 64-bit aligned
 * in a single staging buffer.  Bracketed by xsystmgen headers like
 * kctl_reg_list so userland can detect races and retry.
 */
__private_extern__ int
kctl_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	u_int64_t n, i;
	struct xsystmgen xsg;
	void *buf = NULL;
	struct kctl *__single kctl;
	size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
	    ROUNDUP64(sizeof(struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
	    ROUNDUP64(sizeof(struct xsockstat_n));

	buf = kalloc_data(item_size, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	lck_mtx_lock(&ctl_mtx);

	n = kctlstat.kcs_pcbcount;

	if (req->oldptr == USER_ADDR_NULL) {
		/* size probe: pad by 12.5% for connects racing us */
		req->oldidx = (size_t)(n + n / 8) * item_size;
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	bzero(&xsg, sizeof(xsg));
	xsg.xg_len = sizeof(xsg);
	xsg.xg_count = n;
	xsg.xg_gen = kctlstat.kcs_gencnt;
	xsg.xg_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	/* i counts pcbs across ALL controls; capped at the snapshot n */
	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
	    i < n && kctl != NULL;
	    kctl = TAILQ_NEXT(kctl, next)) {
		struct ctl_cb *__single kcb;

		for (kcb = TAILQ_FIRST(&kctl->kcb_head);
		    i < n && kcb != NULL;
		    i++, kcb = TAILQ_NEXT(kcb, next)) {
			/* carve the aligned sub-records out of buf */
			struct xkctlpcb *xk = (struct xkctlpcb *)buf;
			struct xsocket_n *xso = (struct xsocket_n *)
			    ADVANCE64(xk, sizeof(*xk));
			struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
			    ADVANCE64(xso, sizeof(*xso));
			struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
			    ADVANCE64(xsbrcv, sizeof(*xsbrcv));
			struct xsockstat_n *xsostats = (struct xsockstat_n *)
			    ADVANCE64(xsbsnd, sizeof(*xsbsnd));

			bzero(buf, item_size);

			xk->xkp_len = sizeof(struct xkctlpcb);
			xk->xkp_kind = XSO_KCB;
			xk->xkp_unit = kcb->sac.sc_unit;
			xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRHASH(kcb);
			xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRHASH(kctl);
			xk->xkp_kctlid = kctl->id;
			strbufcpy(xk->xkp_kctlname, kctl->name);

			/* kcb->so may be NULL on a detaching pcb */
			sotoxsocket_n(kcb->so, xso);
			sbtoxsockbuf_n(kcb->so ?
			    &kcb->so->so_rcv : NULL, xsbrcv);
			sbtoxsockbuf_n(kcb->so ?
			    &kcb->so->so_snd : NULL, xsbsnd);
			sbtoxsockstat_n(kcb->so, xsostats);

			error = SYSCTL_OUT(req, buf, item_size);
		}
	}

	if (error == 0) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xsg, sizeof(xsg));
		xsg.xg_len = sizeof(xsg);
		xsg.xg_count = n;
		xsg.xg_gen = kctlstat.kcs_gencnt;
		xsg.xg_sogen = so_gencnt;
		error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
		if (error) {
			goto done;
		}
	}

done:
	lck_mtx_unlock(&ctl_mtx);

	kfree_data(buf, item_size);
	return error;
}
2351
/*
 * sysctl handler: copy out the global kctlstat counters.
 * A NULL oldptr is a size probe; writes are rejected with EPERM.
 * Copies at most the caller's buffer length, so older userland
 * with a shorter struct still works.
 */
int
kctl_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	lck_mtx_lock(&ctl_mtx);

	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(struct kctlstat);
		goto done;
	}

	error = SYSCTL_OUT(req, &kctlstat,
	    MIN(sizeof(struct kctlstat), req->oldlen));
done:
	lck_mtx_unlock(&ctl_mtx);
	return error;
}
2375
2376 void
2377 kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
2378 {
2379 struct ctl_cb *__single kcb = (struct ctl_cb *)so->so_pcb;
2380 struct kern_ctl_info *__single kcsi =
2381 &si->soi_proto.pri_kern_ctl;
2382 struct kctl *__single kctl = kcb->kctl;
2383
2384 si->soi_kind = SOCKINFO_KERN_CTL;
2385
2386 if (kctl == 0) {
2387 return;
2388 }
2389
2390 kcsi->kcsi_id = kctl->id;
2391 kcsi->kcsi_reg_unit = kctl->reg_unit;
2392 kcsi->kcsi_flags = kctl->flags;
2393 kcsi->kcsi_recvbufsize = kctl->recvbufsize;
2394 kcsi->kcsi_sendbufsize = kctl->sendbufsize;
2395 kcsi->kcsi_unit = kcb->sac.sc_unit;
2396 strbufcpy(kcsi->kcsi_name, kctl->name);
2397 }
2398