1 /*
2 * Copyright (c) 2010-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/kpi_mbuf.h>
32 #include <sys/socket.h>
33 #include <sys/kern_control.h>
34 #include <sys/mcache.h>
35 #include <sys/socketvar.h>
36 #include <sys/sysctl.h>
37 #include <sys/queue.h>
38 #include <sys/priv.h>
39 #include <sys/protosw.h>
40
41 #include <kern/clock.h>
42 #include <kern/debug.h>
43
44 #include <libkern/libkern.h>
45 #include <libkern/OSAtomic.h>
46 #include <libkern/locks.h>
47
48 #include <net/if.h>
49 #include <net/if_var.h>
50 #include <net/if_types.h>
51 #include <net/route.h>
52 #include <net/dlil.h>
53
54 // These includes appear in ntstat.h but we include them here first so they won't trigger
55 // any clang diagnostic errors.
56 #include <netinet/in.h>
57 #include <netinet/in_stat.h>
58 #include <netinet/tcp.h>
59
60 #pragma clang diagnostic push
61 #pragma clang diagnostic error "-Wpadded"
62 #pragma clang diagnostic error "-Wpacked"
63 // This header defines structures shared with user space, so we need to ensure there is
64 // no compiler inserted padding in case the user space process isn't using the same
65 // architecture as the kernel (example: i386 process with x86_64 kernel).
66 #include <net/ntstat.h>
67 #pragma clang diagnostic pop
68
69 #include <netinet/ip_var.h>
70 #include <netinet/in_pcb.h>
71 #include <netinet/in_var.h>
72 #include <netinet/tcp_var.h>
73 #include <netinet/tcp_fsm.h>
74 #include <netinet/tcp_cc.h>
75 #include <netinet/udp.h>
76 #include <netinet/udp_var.h>
77 #include <netinet6/in6_pcb.h>
78 #include <netinet6/in6_var.h>
79
80 __private_extern__ int nstat_collect = 1;
81
82 #if (DEBUG || DEVELOPMENT)
83 SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
84 &nstat_collect, 0, "Collect detailed statistics");
85 #endif /* (DEBUG || DEVELOPMENT) */
86
87 #if !XNU_TARGET_OS_OSX
88 static int nstat_privcheck = 1;
89 #else /* XNU_TARGET_OS_OSX */
90 static int nstat_privcheck = 0;
91 #endif /* XNU_TARGET_OS_OSX */
92 SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
93 &nstat_privcheck, 0, "Entitlement check");
94
95 SYSCTL_NODE(_net, OID_AUTO, stats,
96 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "network statistics");
97
98 static int nstat_debug = 0;
99 SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
100 &nstat_debug, 0, "");
101
102 static int nstat_debug_pid = 0; // Only log socket level debug for specified pid
103 SYSCTL_INT(_net_stats, OID_AUTO, debug_pid, CTLFLAG_RW | CTLFLAG_LOCKED,
104 &nstat_debug_pid, 0, "");
105
106 static int nstat_sendspace = 2048;
107 SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
108 &nstat_sendspace, 0, "");
109
110 static int nstat_recvspace = 8192;
111 SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
112 &nstat_recvspace, 0, "");
113
114 static struct nstat_stats nstat_stats;
115 SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
116 &nstat_stats, nstat_stats, "");
117
118 static u_int32_t nstat_lim_interval = 30 * 60; /* Report interval, seconds */
119 static u_int32_t nstat_lim_min_tx_pkts = 100;
120 static u_int32_t nstat_lim_min_rx_pkts = 100;
121 #if (DEBUG || DEVELOPMENT)
122 SYSCTL_INT(_net_stats, OID_AUTO, lim_report_interval,
123 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_interval, 0,
124 "Low internet stat report interval");
125
126 SYSCTL_INT(_net_stats, OID_AUTO, lim_min_tx_pkts,
127 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_tx_pkts, 0,
128 "Low Internet, min transmit packets threshold");
129
130 SYSCTL_INT(_net_stats, OID_AUTO, lim_min_rx_pkts,
131 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_rx_pkts, 0,
132 "Low Internet, min receive packets threshold");
133 #endif /* DEBUG || DEVELOPMENT */
134
135 static struct net_api_stats net_api_stats_before;
136 static u_int64_t net_api_stats_last_report_time;
137 #define NET_API_STATS_REPORT_INTERVAL (12 * 60 * 60) /* 12 hours, in seconds */
138 static u_int32_t net_api_stats_report_interval = NET_API_STATS_REPORT_INTERVAL;
139
140 #if (DEBUG || DEVELOPMENT)
141 SYSCTL_UINT(_net_stats, OID_AUTO, api_report_interval,
142 CTLFLAG_RW | CTLFLAG_LOCKED, &net_api_stats_report_interval, 0, "");
143 #endif /* DEBUG || DEVELOPMENT */
144
// True when socket debug logging is limited to one pid and this socket's
// effective pid (delegated pid if SOF_DELEGATED, else last_pid) matches it.
#define NSTAT_DEBUG_SOCKET_PID_MATCHED(so) \
	(so && (nstat_debug_pid == (so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid)))

// Non-zero when socket-level debug logging should fire for this socket.
#define NSTAT_DEBUG_SOCKET_ON(so) \
	((nstat_debug && (!nstat_debug_pid || NSTAT_DEBUG_SOCKET_PID_MATCHED(so))) ? nstat_debug : 0)

// Conditionally log a socket-scoped debug message. Wrapped in do { } while (0)
// so the macro expands to a single statement and cannot mis-bind a dangling
// "else" at the expansion site (the previous bare-if form could).
#define NSTAT_DEBUG_SOCKET_LOG(so, fmt, ...) \
	do { \
		if (NSTAT_DEBUG_SOCKET_ON(so)) { \
			printf("NSTAT_DEBUG_SOCKET <pid %d>: " fmt "\n", (so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid), ##__VA_ARGS__); \
		} \
	} while (0)
155
// Per-client flag bits, presumably stored in nstat_control_state.ncs_flags
// (the set/test sites are outside this portion of the file — confirm there).
enum{
	NSTAT_FLAG_CLEANUP = (1 << 0),
	NSTAT_FLAG_REQCOUNTS = (1 << 1),
	NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2),
	NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3),
};
162
163 #if !XNU_TARGET_OS_OSX
164 #define QUERY_CONTINUATION_SRC_COUNT 50
165 #else /* XNU_TARGET_OS_OSX */
166 #define QUERY_CONTINUATION_SRC_COUNT 100
167 #endif /* XNU_TARGET_OS_OSX */
168
169 #ifndef ROUNDUP64
170 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
171 #endif
172
173 #ifndef ADVANCE64
174 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
175 #endif
176
177 typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src;
178 typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src;
179
180 typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow;
181 typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow;
182
183 typedef TAILQ_HEAD(, nstat_generic_shadow) tailq_head_generic_shadow;
184 typedef TAILQ_ENTRY(nstat_generic_shadow) tailq_entry_generic_shadow;
185
186 typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails;
187 typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails;
188
// Cached identity of a process using ntstat. Entries live on
// nstat_procdetails_head, are refcounted via pdet_refcnt, and are reaped
// lazily by nstat_prune_procdetails() once their refcount reaches zero.
struct nstat_procdetails {
	tailq_entry_procdetails pdet_link;      // linkage on nstat_procdetails_head
	int pdet_pid;                           // proc_selfpid() at creation
	u_int64_t pdet_upid;                    // proc_uniqueid(); stable across pid reuse
	char pdet_procname[64];                 // proc_selfname() at creation
	uuid_t pdet_uuid;                       // executable UUID (proc_getexecutableuuid)
	u_int32_t pdet_refcnt;                  // adjusted atomically; 0 => reclaimable
	u_int32_t pdet_magic;                   // NSTAT_PROCDETAILS_MAGIC while live
};
198
// Per-provider filter a client installs to restrict which sources/events it
// is told about. Only npf_flags usage is visible in this file section (see
// nstat_route_reporting_allowed); the other fields' matching semantics are
// defined at use sites elsewhere — confirm there before relying on them.
typedef struct nstat_provider_filter {
	u_int64_t npf_flags;        // e.g. NSTAT_FILTER_IFNET_FLAGS interface-property bits
	u_int64_t npf_events;       // event mask — consumers not visible here
	u_int64_t npf_extensions;   // extension mask — consumers not visible here
	pid_t npf_pid;              // pid to match — consumers not visible here
	uuid_t npf_uuid;            // uuid to match — consumers not visible here
} nstat_provider_filter;
206
207
// Per-client state for one ntstat kernel-control connection. All instances
// are linked through the global nstat_controls list (guarded by nstat_mtx;
// ncs_mtx nests inside it — see the lock-order comment above nstat_controls).
typedef struct nstat_control_state {
	struct nstat_control_state *ncs_next;   // next client on nstat_controls
	u_int32_t ncs_watching;                 // bitmask of watched providers: (1 << provider_id)
	decl_lck_mtx_data(, ncs_mtx);
	kern_ctl_ref ncs_kctl;                  // kernel control ref for this client
	u_int32_t ncs_unit;
	nstat_src_ref_t ncs_next_srcref;
	tailq_head_nstat_src ncs_src_queue;     // sources added for this client
	mbuf_t ncs_accumulated;                 // presumably batched outbound messages — confirm at use sites
	u_int32_t ncs_flags;                    // NSTAT_FLAG_* bits
	nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT];
	/* state maintained for partial query requests */
	u_int64_t ncs_context;
	u_int64_t ncs_seq;
	/* For ease of debugging with lldb macros */
	struct nstat_procdetails *ncs_procdetails;
} nstat_control_state;
225
// A statistics provider: one per source type (routes, TCP, UDP, ...). The
// function pointers form the provider's vtable. Providers register by
// pushing themselves onto the nstat_providers list (e.g.
// nstat_init_route_provider()); unneeded entries may stay NULL (the route
// provider leaves nstat_copy_extension unset).
typedef struct nstat_provider {
	struct nstat_provider *next;            // next registered provider
	nstat_provider_id_t nstat_provider_id;  // NSTAT_PROVIDER_* identifier
	size_t nstat_descriptor_length;         // size of this provider's descriptor struct
	errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
	int (*nstat_gone)(nstat_provider_cookie_t cookie);
	errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
	errno_t (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req);
	void (*nstat_watcher_remove)(nstat_control_state *state);
	errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, size_t len);
	void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
	bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, u_int64_t suppression_flags);
	size_t (*nstat_copy_extension)(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len);
} nstat_provider;
240
// One statistics source as seen by one client: pairs a provider with the
// provider-specific cookie (e.g. a referenced rtentry for the route
// provider) and hangs off the owning control state's source queue.
typedef struct nstat_src {
	tailq_entry_nstat_src ns_control_link;  // All sources for the nstat_control_state, for iterating over.
	nstat_control_state *ns_control;        // The nstat_control_state that this is a source for
	nstat_src_ref_t srcref;                 // client-visible handle for this source
	nstat_provider *provider;               // vtable for this source's type
	nstat_provider_cookie_t cookie;         // provider-owned object (released via nstat_release)
	uint32_t filter;
	bool ns_reported;                       // At least one update/counts/desc message has been sent
	uint64_t seq;
} nstat_src;
251
252 static errno_t nstat_control_send_counts(nstat_control_state *, nstat_src *, unsigned long long, u_int16_t, int *);
253 static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
254 static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int64_t event, u_int16_t hdr_flags, int *gone);
255 static errno_t nstat_control_send_removed(nstat_control_state *state, nstat_src *src, u_int16_t hdr_flags);
256 static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
257 static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
258 static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src, u_int64_t suppression_flags);
259 static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
260 static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
261 static void nstat_ifnet_report_ecn_stats(void);
262 static void nstat_ifnet_report_lim_stats(void);
263 static void nstat_net_api_report_stats(void);
264 static errno_t nstat_set_provider_filter( nstat_control_state *state, nstat_msg_add_all_srcs *req);
265 static errno_t nstat_control_send_event(nstat_control_state *state, nstat_src *src, u_int64_t event);
266
267 static u_int32_t nstat_udp_watchers = 0;
268 static u_int32_t nstat_tcp_watchers = 0;
269
270 static void nstat_control_register(void);
271
272 /*
273 * The lock order is as follows:
274 *
275 * socket_lock (inpcb)
276 * nstat_mtx
277 * state->ncs_mtx
278 */
279 static KALLOC_HEAP_DEFINE(KHEAP_NET_STAT, NET_STAT_CONTROL_NAME,
280 KHEAP_ID_DEFAULT);
281 static nstat_control_state *nstat_controls = NULL;
282 static uint64_t nstat_idle_time = 0;
283 static LCK_GRP_DECLARE(nstat_lck_grp, "network statistics kctl");
284 static LCK_MTX_DECLARE(nstat_mtx, &nstat_lck_grp);
285
286
287 /* some extern definitions */
288 extern void mbuf_report_peak_usage(void);
289 extern void tcp_report_stats(void);
290
/*
 * Bounds-checked sockaddr copy for export to user space. Silently copies
 * nothing when src would overflow maxlen. For scoped IPv6 addresses the
 * scope id is propagated, and when scope ids are embedded in the address
 * (in6_embedded_scope) the embedded scope word is extracted into
 * sin6_scope_id and cleared from the copied address bytes.
 */
static void
nstat_copy_sa_out(
	const struct sockaddr   *src,
	struct sockaddr         *dst,
	int                     maxlen)
{
	if (src->sa_len > maxlen) {
		return;
	}

	bcopy(src, dst, src->sa_len);
	if (src->sa_family == AF_INET6 &&
	    src->sa_len >= sizeof(struct sockaddr_in6)) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst;
		if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
			// Start from the source's explicit scope id.
			sin6->sin6_scope_id = ((const struct sockaddr_in6*)(const void*)(src))->sin6_scope_id;
			if (in6_embedded_scope) {
				in6_verify_ifscope(&sin6->sin6_addr, sin6->sin6_scope_id);
				// Pull the scope out of the embedded address word,
				// then zero it so the exported address is clean.
				sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
				sin6->sin6_addr.s6_addr16[1] = 0;
			}
		}
	}
}
315
316 static void
nstat_ip_to_sockaddr(const struct in_addr * ip,u_int16_t port,struct sockaddr_in * sin,u_int32_t maxlen)317 nstat_ip_to_sockaddr(
318 const struct in_addr *ip,
319 u_int16_t port,
320 struct sockaddr_in *sin,
321 u_int32_t maxlen)
322 {
323 if (maxlen < sizeof(struct sockaddr_in)) {
324 return;
325 }
326
327 sin->sin_family = AF_INET;
328 sin->sin_len = sizeof(*sin);
329 sin->sin_port = port;
330 sin->sin_addr = *ip;
331 }
332
/*
 * Condense an interface's functional type and properties into the 16-bit
 * NSTAT_IFNET_* flag set reported to ntstat clients.
 */
u_int16_t
nstat_ifnet_to_flags(
	struct ifnet *ifp)
{
	u_int16_t flags = 0;
	u_int32_t functional_type = if_functional_type(ifp, FALSE);

	/* Panic if someone adds a functional type without updating ntstat. */
	VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);

	switch (functional_type) {
	case IFRTYPE_FUNCTIONAL_UNKNOWN:
		flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
		break;
	case IFRTYPE_FUNCTIONAL_LOOPBACK:
		flags |= NSTAT_IFNET_IS_LOOPBACK;
		break;
	case IFRTYPE_FUNCTIONAL_WIRED:
	case IFRTYPE_FUNCTIONAL_INTCOPROC:
		// Internal co-processor links are reported as wired.
		flags |= NSTAT_IFNET_IS_WIRED;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
		flags |= NSTAT_IFNET_IS_WIFI;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
		// AWDL is Wi-Fi plus the AWDL modifier bit.
		flags |= NSTAT_IFNET_IS_WIFI;
		flags |= NSTAT_IFNET_IS_AWDL;
		break;
	case IFRTYPE_FUNCTIONAL_CELLULAR:
		flags |= NSTAT_IFNET_IS_CELLULAR;
		break;
	case IFRTYPE_FUNCTIONAL_COMPANIONLINK:
		flags |= NSTAT_IFNET_IS_COMPANIONLINK;
		break;
	}

	if (IFNET_IS_EXPENSIVE(ifp)) {
		flags |= NSTAT_IFNET_IS_EXPENSIVE;
	}
	if (IFNET_IS_CONSTRAINED(ifp)) {
		flags |= NSTAT_IFNET_IS_CONSTRAINED;
	}
	// A low-latency WLAN link is flagged as Wi-Fi plus the LLW modifier.
	if (ifp->if_xflags & IFXF_LOW_LATENCY) {
		flags |= NSTAT_IFNET_IS_WIFI;
		flags |= NSTAT_IFNET_IS_LLW;
	}

	return flags;
}
382
383 static u_int32_t
extend_ifnet_flags(u_int16_t condensed_flags)384 extend_ifnet_flags(
385 u_int16_t condensed_flags)
386 {
387 u_int32_t extended_flags = (u_int32_t)condensed_flags;
388
389 if ((extended_flags & NSTAT_IFNET_IS_WIFI) && ((extended_flags & (NSTAT_IFNET_IS_AWDL | NSTAT_IFNET_IS_LLW)) == 0)) {
390 extended_flags |= NSTAT_IFNET_IS_WIFI_INFRA;
391 }
392 return extended_flags;
393 }
394
395 u_int32_t
nstat_ifnet_to_flags_extended(struct ifnet * ifp)396 nstat_ifnet_to_flags_extended(
397 struct ifnet *ifp)
398 {
399 u_int32_t flags = extend_ifnet_flags(nstat_ifnet_to_flags(ifp));
400
401 return flags;
402 }
403
/*
 * Derive the NSTAT_IFNET_* property flags for a socket's PCB from the
 * interface its traffic last used. Returns 0 when inp is NULL.
 */
static u_int32_t
nstat_inpcb_to_flags(
	const struct inpcb *inp)
{
	u_int32_t flags = 0;

	if (inp != NULL) {
		if (inp->inp_last_outifp != NULL) {
			struct ifnet *ifp = inp->inp_last_outifp;
			flags = nstat_ifnet_to_flags_extended(ifp);

			// For TCP, also record whether the connection was
			// classified local (TF_LOCAL) or not.
			struct tcpcb *tp = intotcpcb(inp);
			if (tp) {
				if (tp->t_flags & TF_LOCAL) {
					flags |= NSTAT_IFNET_IS_LOCAL;
				} else {
					flags |= NSTAT_IFNET_IS_NON_LOCAL;
				}
			}
		} else {
			// No last output interface recorded — nothing to classify.
			flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
		}
		// Independently note traffic that fell back to cellular.
		if (inp->inp_socket != NULL &&
		    (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK)) {
			flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
		}
	}
	return flags;
}
433
434 #pragma mark -- Network Statistic Providers --
435
436 static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
437 struct nstat_provider *nstat_providers = NULL;
438
439 static struct nstat_provider*
nstat_find_provider_by_id(nstat_provider_id_t id)440 nstat_find_provider_by_id(
441 nstat_provider_id_t id)
442 {
443 struct nstat_provider *provider;
444
445 for (provider = nstat_providers; provider != NULL; provider = provider->next) {
446 if (provider->nstat_provider_id == id) {
447 break;
448 }
449 }
450
451 return provider;
452 }
453
454 static errno_t
nstat_lookup_entry(nstat_provider_id_t id,const void * data,u_int32_t length,nstat_provider ** out_provider,nstat_provider_cookie_t * out_cookie)455 nstat_lookup_entry(
456 nstat_provider_id_t id,
457 const void *data,
458 u_int32_t length,
459 nstat_provider **out_provider,
460 nstat_provider_cookie_t *out_cookie)
461 {
462 *out_provider = nstat_find_provider_by_id(id);
463 if (*out_provider == NULL) {
464 return ENOENT;
465 }
466
467 return (*out_provider)->nstat_lookup(data, length, out_cookie);
468 }
469
470 static void nstat_init_route_provider(void);
471 static void nstat_init_tcp_provider(void);
472 static void nstat_init_udp_provider(void);
473 #if SKYWALK
474 static void nstat_init_userland_tcp_provider(void);
475 static void nstat_init_userland_udp_provider(void);
476 static void nstat_init_userland_quic_provider(void);
477 #endif /* SKYWALK */
478 static void nstat_init_userland_conn_provider(void);
479 static void nstat_init_udp_subflow_provider(void);
480 static void nstat_init_ifnet_provider(void);
481
/*
 * One-time ntstat initialization: register every statistics provider on
 * the nstat_providers list, then bring up the kernel control interface.
 * Providers are all registered before nstat_control_register() runs.
 */
__private_extern__ void
nstat_init(void)
{
	nstat_init_route_provider();
	nstat_init_tcp_provider();
	nstat_init_udp_provider();
#if SKYWALK
	nstat_init_userland_tcp_provider();
	nstat_init_userland_udp_provider();
	nstat_init_userland_quic_provider();
#endif /* SKYWALK */
	nstat_init_userland_conn_provider();
	nstat_init_udp_subflow_provider();
	nstat_init_ifnet_provider();
	nstat_control_register();
}
498
499 #pragma mark -- Aligned Buffer Allocation --
500
// Bookkeeping stored immediately before an aligned allocation so that
// nstat_free_aligned() can recover the raw kalloc_data() pointer and size.
struct align_header {
	u_int32_t offset;       // bytes from the raw allocation start to the aligned pointer
	u_int32_t length;       // total raw allocation size, passed back to kfree_data()
};
505
/*
 * Allocate 'length' bytes at the requested alignment (assumed to be a
 * power of two — P2ROUNDUP is used). A struct align_header is stashed
 * immediately before the returned pointer so nstat_free_aligned() can
 * find the raw allocation. Requests over 64KB are refused.
 * Returns NULL on failure.
 */
static void*
nstat_malloc_aligned(
	size_t length,
	u_int8_t alignment,
	zalloc_flags_t flags)
{
	struct align_header *hdr = NULL;
	// Worst case needs alignment-1 slack bytes plus the hidden header.
	size_t size = length + sizeof(*hdr) + alignment - 1;

	// Arbitrary limit to prevent abuse
	if (length > (64 * 1024)) {
		return NULL;
	}
	u_int8_t *buffer = (u_int8_t *)kalloc_data(size, flags);
	if (buffer == NULL) {
		return NULL;
	}

	// Reserve room for the header, then round up to the alignment
	// boundary; the header lands directly below the aligned address.
	u_int8_t *aligned = buffer + sizeof(*hdr);
	aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);

	hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
	hdr->offset = aligned - buffer;
	hdr->length = size;

	return aligned;
}
533
534 static void
nstat_free_aligned(void * buffer)535 nstat_free_aligned(
536 void *buffer)
537 {
538 struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
539 char *offset_buffer = (char *)buffer - hdr->offset;
540 kfree_data(offset_buffer, hdr->length);
541 }
542
543 #pragma mark -- Utilities --
544
545 #define NSTAT_PROCDETAILS_MAGIC 0xfeedc001
546 #define NSTAT_PROCDETAILS_UNMAGIC 0xdeadc001
547
548 static tailq_head_procdetails nstat_procdetails_head = TAILQ_HEAD_INITIALIZER(nstat_procdetails_head);
549
/*
 * Return a referenced nstat_procdetails entry for the current process,
 * creating and enqueueing one when no entry with a matching unique pid
 * exists. Never returns NULL (allocation uses Z_NOFAIL). The lookup and
 * insert are separate critical sections; duplicates are tolerated by
 * design (see the comment below).
 */
static struct nstat_procdetails *
nstat_retain_curprocdetails(void)
{
	struct nstat_procdetails *procdetails = NULL;
	uint64_t upid = proc_uniqueid(current_proc());

	lck_mtx_lock(&nstat_mtx);

	TAILQ_FOREACH(procdetails, &nstat_procdetails_head, pdet_link) {
		assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);

		if (procdetails->pdet_upid == upid) {
			OSIncrementAtomic(&procdetails->pdet_refcnt);
			break;
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	if (!procdetails) {
		// No need for paranoia on locking, it would be OK if there are duplicate structs on the list
		procdetails = kalloc_type(struct nstat_procdetails,
		    Z_WAITOK | Z_NOFAIL);
		procdetails->pdet_pid = proc_selfpid();
		procdetails->pdet_upid = upid;
		proc_selfname(procdetails->pdet_procname, sizeof(procdetails->pdet_procname));
		proc_getexecutableuuid(current_proc(), procdetails->pdet_uuid, sizeof(uuid_t));
		procdetails->pdet_refcnt = 1;
		procdetails->pdet_magic = NSTAT_PROCDETAILS_MAGIC;
		lck_mtx_lock(&nstat_mtx);
		TAILQ_INSERT_HEAD(&nstat_procdetails_head, procdetails, pdet_link);
		lck_mtx_unlock(&nstat_mtx);
	}

	return procdetails;
}
584
/*
 * Drop one reference on a procdetails entry. The entry is not freed here;
 * zero-refcount entries are reclaimed later by nstat_prune_procdetails().
 */
static void
nstat_release_procdetails(struct nstat_procdetails *procdetails)
{
	assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
	// These are harvested later to amortize costs
	OSDecrementAtomic(&procdetails->pdet_refcnt);
}
592
/*
 * Reap procdetails entries whose refcount has dropped to zero. Dead
 * entries are first unlinked onto a local list under nstat_mtx, then
 * poisoned and freed after the lock is released.
 */
static void
nstat_prune_procdetails(void)
{
	struct nstat_procdetails *procdetails;
	struct nstat_procdetails *tmpdetails;
	tailq_head_procdetails dead_list;

	TAILQ_INIT(&dead_list);
	lck_mtx_lock(&nstat_mtx);

	// FOREACH_SAFE allows unlinking the current entry mid-iteration.
	TAILQ_FOREACH_SAFE(procdetails, &nstat_procdetails_head, pdet_link, tmpdetails)
	{
		assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
		if (procdetails->pdet_refcnt == 0) {
			// Pull it off the list
			TAILQ_REMOVE(&nstat_procdetails_head, procdetails, pdet_link);
			TAILQ_INSERT_TAIL(&dead_list, procdetails, pdet_link);
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	while ((procdetails = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, procdetails, pdet_link);
		// Poison the magic so a stale pointer is caught by the asserts.
		procdetails->pdet_magic = NSTAT_PROCDETAILS_UNMAGIC;
		kfree_type(struct nstat_procdetails, procdetails);
	}
}
620
621 #pragma mark -- Route Provider --
622
623 static nstat_provider nstat_route_provider;
624
625 static errno_t
nstat_route_lookup(const void * data,u_int32_t length,nstat_provider_cookie_t * out_cookie)626 nstat_route_lookup(
627 const void *data,
628 u_int32_t length,
629 nstat_provider_cookie_t *out_cookie)
630 {
631 // rt_lookup doesn't take const params but it doesn't modify the parameters for
632 // the lookup. So...we use a union to eliminate the warning.
633 union{
634 struct sockaddr *sa;
635 const struct sockaddr *const_sa;
636 } dst, mask;
637
638 const nstat_route_add_param *param = (const nstat_route_add_param*)data;
639 *out_cookie = NULL;
640
641 if (length < sizeof(*param)) {
642 return EINVAL;
643 }
644
645 if (param->dst.v4.sin_family == 0 ||
646 param->dst.v4.sin_family > AF_MAX ||
647 (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family)) {
648 return EINVAL;
649 }
650
651 if (param->dst.v4.sin_len > sizeof(param->dst) ||
652 (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len))) {
653 return EINVAL;
654 }
655 if ((param->dst.v4.sin_family == AF_INET &&
656 param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
657 (param->dst.v6.sin6_family == AF_INET6 &&
658 param->dst.v6.sin6_len < sizeof(struct sockaddr_in6))) {
659 return EINVAL;
660 }
661
662 dst.const_sa = (const struct sockaddr*)¶m->dst;
663 mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)¶m->mask : NULL;
664
665 struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
666 if (rnh == NULL) {
667 return EAFNOSUPPORT;
668 }
669
670 lck_mtx_lock(rnh_lock);
671 struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
672 lck_mtx_unlock(rnh_lock);
673
674 if (rt) {
675 *out_cookie = (nstat_provider_cookie_t)rt;
676 }
677
678 return rt ? 0 : ENOENT;
679 }
680
681 static int
nstat_route_gone(nstat_provider_cookie_t cookie)682 nstat_route_gone(
683 nstat_provider_cookie_t cookie)
684 {
685 struct rtentry *rt = (struct rtentry*)cookie;
686 return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
687 }
688
/*
 * Provider counts callback: snapshot the route's counters into out_counts.
 * Zeroes out_counts when the route has no stats attached. When out_gone is
 * non-NULL it is set to 1 if the route is no longer up. Always returns 0.
 */
static errno_t
nstat_route_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts     *out_counts,
	int                     *out_gone)
{
	struct rtentry *rt = (struct rtentry*)cookie;
	struct nstat_counts *rt_stats = rt->rt_stats;

	if (out_gone) {
		*out_gone = 0;
	}

	if (out_gone && (rt->rt_flags & RTF_UP) == 0) {
		*out_gone = 1;
	}

	if (rt_stats) {
		// The 64-bit packet/byte counters are read via atomic_get_64
		// for a consistent (untorn) snapshot; the remaining fields are
		// read directly.
		atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
		atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
		atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
		atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
		out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
		out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
		out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
		out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
		out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
		out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
		out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
		out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
		// Cell-specific byte counts are not tracked per-route.
		out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
	} else {
		bzero(out_counts, sizeof(*out_counts));
	}

	return 0;
}
726
/*
 * Provider release callback: drop the route reference taken when the
 * source was added. The 'locked' hint is ignored by this provider.
 */
static void
nstat_route_release(
	nstat_provider_cookie_t cookie,
	__unused int locked)
{
	rtfree((struct rtentry*)cookie);
}
734
735 static u_int32_t nstat_route_watchers = 0;
736
/*
 * rnh_walktree callback used when a client asks to watch all routes:
 * offers each usable route to the client as a new source. Must be called
 * with rnh_lock held. Takes a route reference (RT_ADDREF) that is released
 * again if adding the source fails; a non-zero return propagates back
 * through the tree walker.
 */
static int
nstat_route_walktree_add(
	struct radix_node       *rn,
	void                    *context)
{
	errno_t result = 0;
	struct rtentry *rt = (struct rtentry *)rn;
	nstat_control_state *state = (nstat_control_state*)context;

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* RTF_UP can't change while rnh_lock is held */
	if ((rt->rt_flags & RTF_UP) != 0) {
		/* Clear RTPRF_OURS if the route is still usable */
		RT_LOCK(rt);
		if (rt_validate(rt)) {
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
		} else {
			RT_UNLOCK(rt);
			rt = NULL;
		}

		/* Otherwise if RTF_CONDEMNED, treat it as if it were down */
		if (rt == NULL) {
			return 0;
		}

		// The source now owns the reference taken above; on failure,
		// give it back (rnh_lock is already held, hence _locked).
		result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
		if (result != 0) {
			rtfree_locked(rt);
		}
	}

	return result;
}
773
/*
 * Add-all-sources handler for the route provider: record the client's
 * provider filter, count it as a route watcher, then walk every routing
 * table and add each usable route as a source. Everything runs under
 * rnh_lock so the tables cannot change during the walk.
 */
static errno_t
nstat_route_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	int i;
	errno_t result = 0;

	lck_mtx_lock(rnh_lock);

	result = nstat_set_provider_filter(state, req);
	if (result == 0) {
		OSIncrementAtomic(&nstat_route_watchers);

		// Walk each populated per-family routing table (0 is unused).
		for (i = 1; i < AF_MAX; i++) {
			struct radix_node_head *rnh;
			rnh = rt_tables[i];
			if (!rnh) {
				continue;
			}

			result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
			if (result != 0) {
				// This is probably resource exhaustion.
				// There currently isn't a good way to recover from this.
				// Least bad seems to be to give up on the add-all but leave
				// the watcher in place.
				break;
			}
		}
	}
	lck_mtx_unlock(rnh_lock);

	return result;
}
809
/*
 * Hook called when a route entry comes up: offer the new route as a
 * source to every client currently watching the route provider.
 */
__private_extern__ void
nstat_route_new_entry(
	struct rtentry *rt)
{
	// Unlocked fast-path check; the per-client watching bit is
	// re-evaluated under nstat_mtx below, so a stale read is harmless.
	if (nstat_route_watchers == 0) {
		return;
	}

	lck_mtx_lock(&nstat_mtx);
	if ((rt->rt_flags & RTF_UP) != 0) {
		nstat_control_state *state;
		for (state = nstat_controls; state; state = state->ncs_next) {
			if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0) {
				// this client is watching routes
				// acquire a reference for the route
				RT_ADDREF(rt);

				// add the source, if that fails, release the reference
				if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0) {
					RT_REMREF(rt);
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}
836
/*
 * Watcher-remove callback for the route provider: just drop the global
 * route-watcher count (the per-client source cleanup happens elsewhere).
 */
static void
nstat_route_remove_watcher(
	__unused nstat_control_state    *state)
{
	OSDecrementAtomic(&nstat_route_watchers);
}
843
/*
 * Provider descriptor callback: fill a nstat_route_descriptor from the
 * route. Kernel pointers are exported as permuted ids (VM_KERNEL_ADDRPERM)
 * so real addresses never reach user space. Returns EINVAL when the
 * buffer is too small for a full descriptor.
 */
static errno_t
nstat_route_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void                    *data,
	size_t                  len)
{
	nstat_route_descriptor *desc = (nstat_route_descriptor*)data;
	if (len < sizeof(*desc)) {
		return EINVAL;
	}
	bzero(desc, sizeof(*desc));

	struct rtentry *rt = (struct rtentry*)cookie;
	desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
	desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
	desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);


	// key/dest
	struct sockaddr *sa;
	if ((sa = rt_key(rt))) {
		nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));
	}

	// mask — plain copy; masks need no scope-id fixups
	if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask)) {
		memcpy(&desc->mask, sa, sa->sa_len);
	}

	// gateway
	if ((sa = rt->rt_gateway)) {
		nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));
	}

	if (rt->rt_ifp) {
		desc->ifindex = rt->rt_ifp->if_index;
	}

	desc->flags = rt->rt_flags;

	return 0;
}
886
887 static bool
nstat_route_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)888 nstat_route_reporting_allowed(
889 nstat_provider_cookie_t cookie,
890 nstat_provider_filter *filter,
891 __unused u_int64_t suppression_flags)
892 {
893 bool retval = true;
894
895 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
896 struct rtentry *rt = (struct rtentry*)cookie;
897 struct ifnet *ifp = rt->rt_ifp;
898
899 if (ifp) {
900 uint32_t interface_properties = nstat_ifnet_to_flags_extended(ifp);
901
902 if ((filter->npf_flags & interface_properties) == 0) {
903 retval = false;
904 }
905 }
906 }
907 return retval;
908 }
909
910 static void
nstat_init_route_provider(void)911 nstat_init_route_provider(void)
912 {
913 bzero(&nstat_route_provider, sizeof(nstat_route_provider));
914 nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
915 nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
916 nstat_route_provider.nstat_lookup = nstat_route_lookup;
917 nstat_route_provider.nstat_gone = nstat_route_gone;
918 nstat_route_provider.nstat_counts = nstat_route_counts;
919 nstat_route_provider.nstat_release = nstat_route_release;
920 nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
921 nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
922 nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
923 nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
924 nstat_route_provider.next = nstat_providers;
925 nstat_providers = &nstat_route_provider;
926 }
927
928 #pragma mark -- Route Collection --
929
/*
 * Return the nstat_counts block attached to a route, allocating a zeroed,
 * 64-bit-aligned one on first use. A compare-and-swap settles the race
 * between concurrent attachers: the loser frees its allocation and adopts
 * the winner's. Returns NULL only when allocation fails.
 */
__private_extern__ struct nstat_counts*
nstat_route_attach(
	struct rtentry  *rte)
{
	struct nstat_counts *result = rte->rt_stats;
	if (result) {
		return result;
	}

	// 64-bit alignment so the atomic 64-bit counter ops are safe.
	result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t),
	    Z_WAITOK | Z_ZERO);
	if (!result) {
		return result;
	}

	if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats)) {
		// Another thread attached first; use theirs.
		nstat_free_aligned(result);
		result = rte->rt_stats;
	}

	return result;
}
952
953 __private_extern__ void
nstat_route_detach(struct rtentry * rte)954 nstat_route_detach(
955 struct rtentry *rte)
956 {
957 if (rte->rt_stats) {
958 nstat_free_aligned(rte->rt_stats);
959 rte->rt_stats = NULL;
960 }
961 }
962
963 __private_extern__ void
nstat_route_connect_attempt(struct rtentry * rte)964 nstat_route_connect_attempt(
965 struct rtentry *rte)
966 {
967 while (rte) {
968 struct nstat_counts* stats = nstat_route_attach(rte);
969 if (stats) {
970 OSIncrementAtomic(&stats->nstat_connectattempts);
971 }
972
973 rte = rte->rt_parent;
974 }
975 }
976
977 __private_extern__ void
nstat_route_connect_success(struct rtentry * rte)978 nstat_route_connect_success(
979 struct rtentry *rte)
980 {
981 // This route
982 while (rte) {
983 struct nstat_counts* stats = nstat_route_attach(rte);
984 if (stats) {
985 OSIncrementAtomic(&stats->nstat_connectsuccesses);
986 }
987
988 rte = rte->rt_parent;
989 }
990 }
991
992 __private_extern__ void
nstat_route_tx(struct rtentry * rte,u_int32_t packets,u_int32_t bytes,u_int32_t flags)993 nstat_route_tx(
994 struct rtentry *rte,
995 u_int32_t packets,
996 u_int32_t bytes,
997 u_int32_t flags)
998 {
999 while (rte) {
1000 struct nstat_counts* stats = nstat_route_attach(rte);
1001 if (stats) {
1002 if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0) {
1003 OSAddAtomic(bytes, &stats->nstat_txretransmit);
1004 } else {
1005 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
1006 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
1007 }
1008 }
1009
1010 rte = rte->rt_parent;
1011 }
1012 }
1013
1014 __private_extern__ void
nstat_route_rx(struct rtentry * rte,u_int32_t packets,u_int32_t bytes,u_int32_t flags)1015 nstat_route_rx(
1016 struct rtentry *rte,
1017 u_int32_t packets,
1018 u_int32_t bytes,
1019 u_int32_t flags)
1020 {
1021 while (rte) {
1022 struct nstat_counts* stats = nstat_route_attach(rte);
1023 if (stats) {
1024 if (flags == 0) {
1025 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
1026 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
1027 } else {
1028 if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER) {
1029 OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
1030 }
1031 if (flags & NSTAT_RX_FLAG_DUPLICATE) {
1032 OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
1033 }
1034 }
1035 }
1036
1037 rte = rte->rt_parent;
1038 }
1039 }
1040
/*
 * Atomically fold _new_val into the exponentially weighted moving
 * average stored at _val_addr:  avg' = avg - avg/2^_decay + new/2^_decay.
 * A stored value of zero means "unset" and is replaced by _new_val.
 * NOTE: no trailing semicolon after while(0) — the macro must expand to
 * a single statement so it remains safe in unbraced if/else bodies.
 */
#define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \
	volatile uint32_t _old_val; \
	volatile uint32_t _avg; \
	do { \
		_old_val = *_val_addr; \
		if (_old_val == 0) \
		{ \
			_avg = _new_val; \
		} \
		else \
		{ \
			_avg = _old_val - (_old_val >> _decay) + (_new_val >> _decay); \
		} \
		if (_old_val == _avg) break; \
	} while (!OSCompareAndSwap(_old_val, _avg, _val_addr)); \
} while (0)
1058
/*
 * Atomically store min(current, _new_val) at _val_addr.  A stored value
 * of zero means "unset" and is always overwritten by _new_val.
 * NOTE: no trailing semicolon after while(0) — the macro must expand to
 * a single statement so it remains safe in unbraced if/else bodies.
 */
#define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \
	volatile uint32_t _old_val; \
	do { \
		_old_val = *_val_addr; \
		if (_old_val != 0 && _old_val < _new_val) \
		{ \
			break; \
		} \
	} while (!OSCompareAndSwap(_old_val, _new_val, _val_addr)); \
} while (0)
1070
1071 __private_extern__ void
nstat_route_rtt(struct rtentry * rte,u_int32_t rtt,u_int32_t rtt_var)1072 nstat_route_rtt(
1073 struct rtentry *rte,
1074 u_int32_t rtt,
1075 u_int32_t rtt_var)
1076 {
1077 const uint32_t decay = 3;
1078
1079 while (rte) {
1080 struct nstat_counts* stats = nstat_route_attach(rte);
1081 if (stats) {
1082 NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
1083 NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
1084 NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
1085 }
1086 rte = rte->rt_parent;
1087 }
1088 }
1089
1090 __private_extern__ void
nstat_route_update(struct rtentry * rte,uint32_t connect_attempts,uint32_t connect_successes,uint32_t rx_packets,uint32_t rx_bytes,uint32_t rx_duplicatebytes,uint32_t rx_outoforderbytes,uint32_t tx_packets,uint32_t tx_bytes,uint32_t tx_retransmit,uint32_t rtt,uint32_t rtt_var)1091 nstat_route_update(
1092 struct rtentry *rte,
1093 uint32_t connect_attempts,
1094 uint32_t connect_successes,
1095 uint32_t rx_packets,
1096 uint32_t rx_bytes,
1097 uint32_t rx_duplicatebytes,
1098 uint32_t rx_outoforderbytes,
1099 uint32_t tx_packets,
1100 uint32_t tx_bytes,
1101 uint32_t tx_retransmit,
1102 uint32_t rtt,
1103 uint32_t rtt_var)
1104 {
1105 const uint32_t decay = 3;
1106
1107 while (rte) {
1108 struct nstat_counts* stats = nstat_route_attach(rte);
1109 if (stats) {
1110 OSAddAtomic(connect_attempts, &stats->nstat_connectattempts);
1111 OSAddAtomic(connect_successes, &stats->nstat_connectsuccesses);
1112 OSAddAtomic64((SInt64)tx_packets, (SInt64*)&stats->nstat_txpackets);
1113 OSAddAtomic64((SInt64)tx_bytes, (SInt64*)&stats->nstat_txbytes);
1114 OSAddAtomic(tx_retransmit, &stats->nstat_txretransmit);
1115 OSAddAtomic64((SInt64)rx_packets, (SInt64*)&stats->nstat_rxpackets);
1116 OSAddAtomic64((SInt64)rx_bytes, (SInt64*)&stats->nstat_rxbytes);
1117 OSAddAtomic(rx_outoforderbytes, &stats->nstat_rxoutoforderbytes);
1118 OSAddAtomic(rx_duplicatebytes, &stats->nstat_rxduplicatebytes);
1119
1120 if (rtt != 0) {
1121 NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
1122 NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
1123 NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
1124 }
1125 }
1126 rte = rte->rt_parent;
1127 }
1128 }
1129
1130 #pragma mark -- TCP Kernel Provider --
1131
1132 /*
1133 * Due to the way the kernel deallocates a process (the process structure
1134 * might be gone by the time we get the PCB detach notification),
1135 * we need to cache the process name. Without this, proc_name() would
1136 * return null and the process name would never be sent to userland.
1137 *
 * For UDP sockets, we also store the cached connection tuples along with
1139 * the interface index. This is necessary because when UDP sockets are
1140 * disconnected, the connection tuples are forever lost from the inpcb, thus
1141 * we need to keep track of the last call to connect() in ntstat.
1142 */
struct nstat_tucookie {
	struct inpcb *inp;              /* underlying protocol control block */
	char pname[MAXCOMLEN + 1];      /* cached process name (see comment above) */
	bool cached;                    /* true when the tuple snapshot below is valid */
	union{
		struct sockaddr_in v4;
		struct sockaddr_in6 v6;
	} local;                        /* cached local address (UDP) */
	union{
		struct sockaddr_in v4;
		struct sockaddr_in6 v6;
	} remote;                       /* cached remote address (UDP) */
	unsigned int if_index;          /* cached last output interface index */
	uint32_t ifnet_properties;      /* cached nstat_inpcb_to_flags() snapshot */
};
1158
/*
 * Allocate and initialize a tucookie for an inpcb.
 *
 * When 'ref' is true, a WNT_ACQUIRE reference is taken on the pcb first;
 * if the pcb is already stopping (WNT_STOPUSING) the cookie is freed and
 * NULL is returned.  'locked' is passed through to in_pcb_checkstate()
 * to indicate whether the caller already holds the pcb lock.
 * With Z_NOFAIL the allocation itself cannot fail.
 */
static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
	struct inpcb *inp,
	bool ref,
	bool locked)
{
	struct nstat_tucookie *cookie;

	cookie = kalloc_type(struct nstat_tucookie,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
	// Blocking allocation must not happen while holding nstat_mtx.
	if (!locked) {
		LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
	}
	if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
		kfree_type(struct nstat_tucookie, cookie);
		return NULL;
	}
	cookie->inp = inp;
	// Cache the process name now; the process may be gone by detach time.
	proc_name(inp->inp_socket->last_pid, cookie->pname,
	    sizeof(cookie->pname));
	/*
	 * We only increment the reference count for UDP sockets because we
	 * only cache UDP socket tuples.
	 */
	if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP) {
		OSIncrementAtomic(&inp->inp_nstat_refcnt);
	}

	return cookie;
}
1189
1190 static struct nstat_tucookie *
nstat_tucookie_alloc(struct inpcb * inp)1191 nstat_tucookie_alloc(
1192 struct inpcb *inp)
1193 {
1194 return nstat_tucookie_alloc_internal(inp, false, false);
1195 }
1196
1197 static struct nstat_tucookie *
nstat_tucookie_alloc_ref(struct inpcb * inp)1198 nstat_tucookie_alloc_ref(
1199 struct inpcb *inp)
1200 {
1201 return nstat_tucookie_alloc_internal(inp, true, false);
1202 }
1203
1204 static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(struct inpcb * inp)1205 nstat_tucookie_alloc_ref_locked(
1206 struct inpcb *inp)
1207 {
1208 return nstat_tucookie_alloc_internal(inp, true, true);
1209 }
1210
1211 static void
nstat_tucookie_release_internal(struct nstat_tucookie * cookie,int inplock)1212 nstat_tucookie_release_internal(
1213 struct nstat_tucookie *cookie,
1214 int inplock)
1215 {
1216 if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP) {
1217 OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
1218 }
1219 in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
1220 kfree_type(struct nstat_tucookie, cookie);
1221 }
1222
1223 static void
nstat_tucookie_release(struct nstat_tucookie * cookie)1224 nstat_tucookie_release(
1225 struct nstat_tucookie *cookie)
1226 {
1227 nstat_tucookie_release_internal(cookie, false);
1228 }
1229
1230 static void
nstat_tucookie_release_locked(struct nstat_tucookie * cookie)1231 nstat_tucookie_release_locked(
1232 struct nstat_tucookie *cookie)
1233 {
1234 nstat_tucookie_release_internal(cookie, true);
1235 }
1236
1237
1238 static size_t
nstat_inp_domain_info(struct inpcb * inp,nstat_domain_info * domain_info,size_t len)1239 nstat_inp_domain_info(struct inpcb *inp, nstat_domain_info *domain_info, size_t len)
1240 {
1241 // Note, the caller has guaranteed that the buffer has been zeroed, there is no need to clear it again
1242 struct socket *so = inp->inp_socket;
1243
1244 if (so == NULL) {
1245 return 0;
1246 }
1247
1248 NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: Collecting stats");
1249
1250 if (domain_info == NULL) {
1251 return sizeof(nstat_domain_info);
1252 }
1253
1254 if (len < sizeof(nstat_domain_info)) {
1255 return 0;
1256 }
1257
1258 domain_info->is_tracker = !!(so->so_flags1 & SOF1_KNOWN_TRACKER);
1259 domain_info->is_non_app_initiated = !!(so->so_flags1 & SOF1_TRACKER_NON_APP_INITIATED);
1260 if (domain_info->is_tracker &&
1261 inp->inp_necp_attributes.inp_tracker_domain != NULL) {
1262 strlcpy(domain_info->domain_name, inp->inp_necp_attributes.inp_tracker_domain,
1263 sizeof(domain_info->domain_name));
1264 } else if (inp->inp_necp_attributes.inp_domain != NULL) {
1265 strlcpy(domain_info->domain_name, inp->inp_necp_attributes.inp_domain,
1266 sizeof(domain_info->domain_name));
1267 }
1268 if (inp->inp_necp_attributes.inp_domain_owner != NULL) {
1269 strlcpy(domain_info->domain_owner, inp->inp_necp_attributes.inp_domain_owner,
1270 sizeof(domain_info->domain_owner));
1271 }
1272 if (inp->inp_necp_attributes.inp_domain_context != NULL) {
1273 strlcpy(domain_info->domain_tracker_ctxt, inp->inp_necp_attributes.inp_domain_context,
1274 sizeof(domain_info->domain_tracker_ctxt));
1275 }
1276
1277 if (domain_info) {
1278 NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: <pid %d> Collected stats - domain <%s> owner <%s> ctxt <%s> bundle id <%s> "
1279 "is_tracker %d is_non_app_initiated %d is_silent %d",
1280 so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid,
1281 domain_info->domain_name,
1282 domain_info->domain_owner,
1283 domain_info->domain_tracker_ctxt,
1284 domain_info->domain_attributed_bundle_id,
1285 domain_info->is_tracker,
1286 domain_info->is_non_app_initiated,
1287 domain_info->is_silent);
1288 }
1289
1290 /* XXX tracking context is not provided through kernel for socket flows */
1291 return sizeof(nstat_domain_info);
1292 }
1293
1294
1295 static nstat_provider nstat_tcp_provider;
1296
1297 static errno_t
nstat_tcpudp_lookup(struct inpcbinfo * inpinfo,const void * data,u_int32_t length,nstat_provider_cookie_t * out_cookie)1298 nstat_tcpudp_lookup(
1299 struct inpcbinfo *inpinfo,
1300 const void *data,
1301 u_int32_t length,
1302 nstat_provider_cookie_t *out_cookie)
1303 {
1304 struct inpcb *inp = NULL;
1305
1306 // parameter validation
1307 const nstat_tcp_add_param *param = (const nstat_tcp_add_param*)data;
1308 if (length < sizeof(*param)) {
1309 return EINVAL;
1310 }
1311
1312 // src and dst must match
1313 if (param->remote.v4.sin_family != 0 &&
1314 param->remote.v4.sin_family != param->local.v4.sin_family) {
1315 return EINVAL;
1316 }
1317
1318
1319 switch (param->local.v4.sin_family) {
1320 case AF_INET:
1321 {
1322 if (param->local.v4.sin_len != sizeof(param->local.v4) ||
1323 (param->remote.v4.sin_family != 0 &&
1324 param->remote.v4.sin_len != sizeof(param->remote.v4))) {
1325 return EINVAL;
1326 }
1327
1328 inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
1329 param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
1330 }
1331 break;
1332
1333 case AF_INET6:
1334 {
1335 union{
1336 const struct in6_addr *in6c;
1337 struct in6_addr *in6;
1338 } local, remote;
1339
1340 if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
1341 (param->remote.v6.sin6_family != 0 &&
1342 param->remote.v6.sin6_len != sizeof(param->remote.v6))) {
1343 return EINVAL;
1344 }
1345
1346 local.in6c = ¶m->local.v6.sin6_addr;
1347 remote.in6c = ¶m->remote.v6.sin6_addr;
1348
1349 inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port, param->remote.v6.sin6_scope_id,
1350 local.in6, param->local.v6.sin6_port, param->local.v6.sin6_scope_id, 1, NULL);
1351 }
1352 break;
1353
1354 default:
1355 return EINVAL;
1356 }
1357
1358 if (inp == NULL) {
1359 return ENOENT;
1360 }
1361
1362 // At this point we have a ref to the inpcb
1363 *out_cookie = nstat_tucookie_alloc(inp);
1364 if (*out_cookie == NULL) {
1365 in_pcb_checkstate(inp, WNT_RELEASE, 0);
1366 }
1367
1368 return 0;
1369 }
1370
1371 static errno_t
nstat_tcp_lookup(const void * data,u_int32_t length,nstat_provider_cookie_t * out_cookie)1372 nstat_tcp_lookup(
1373 const void *data,
1374 u_int32_t length,
1375 nstat_provider_cookie_t *out_cookie)
1376 {
1377 return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
1378 }
1379
1380 static int
nstat_tcp_gone(nstat_provider_cookie_t cookie)1381 nstat_tcp_gone(
1382 nstat_provider_cookie_t cookie)
1383 {
1384 struct nstat_tucookie *tucookie =
1385 (struct nstat_tucookie *)cookie;
1386 struct inpcb *inp;
1387 struct tcpcb *tp;
1388
1389 return (!(inp = tucookie->inp) ||
1390 !(tp = intotcpcb(inp)) ||
1391 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1392 }
1393
/*
 * Copy the current counters for a TCP source into out_counts.
 * Sets *out_gone (when provided) if the pcb is dead, but still reports
 * final counts when the pcb/tcpcb memory is present; returns EINVAL
 * only when they are actually gone.
 */
static errno_t
nstat_tcp_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts *out_counts,
	int *out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	bzero(out_counts, sizeof(*out_counts));

	if (out_gone) {
		*out_gone = 0;
	}

	// if the pcb is in the dead state, we should stop using it
	if (nstat_tcp_gone(cookie)) {
		if (out_gone) {
			*out_gone = 1;
		}
		// Bail out only if the pcb or tcpcb pointer is missing.
		if (!(inp = tucookie->inp) || !intotcpcb(inp)) {
			return EINVAL;
		}
	}
	inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
	out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
	out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
	// Attempt/success flags are derived from the TCP state machine.
	out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
	out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
	out_counts->nstat_avg_rtt = tp->t_srtt;
	out_counts->nstat_min_rtt = tp->t_rttbest;
	out_counts->nstat_var_rtt = tp->t_rttvar;
	// Keep the reported minimum consistent with the average.
	if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt) {
		out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
	}
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}
1446
1447 static void
nstat_tcp_release(nstat_provider_cookie_t cookie,int locked)1448 nstat_tcp_release(
1449 nstat_provider_cookie_t cookie,
1450 int locked)
1451 {
1452 struct nstat_tucookie *tucookie =
1453 (struct nstat_tucookie *)cookie;
1454
1455 nstat_tucookie_release_internal(tucookie, locked);
1456 }
1457
/*
 * Register a control state as a watcher of all kernel TCP sockets and
 * add every currently existing TCP pcb as a source for it.
 */
static errno_t
nstat_tcp_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// There is a tricky issue around getting all TCP sockets added once
	// and only once. nstat_tcp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the tcbinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_tcp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(&tcbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);
	if (result == 0) {
		OSIncrementAtomic(&nstat_tcp_watchers);

		// Add all current tcp inpcbs. Ignore those in timewait
		struct inpcb *inp;
		struct nstat_tucookie *cookie;
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
		{
			// A NULL cookie means the pcb is stopping; skip it.
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL) {
				continue;
			}
			if (nstat_control_source_add(0, state, &nstat_tcp_provider,
			    cookie) != 0) {
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(&tcbinfo.ipi_lock);

	return result;
}
1501
/*
 * Unregister a TCP watcher: drop the global TCP watcher count.
 */
static void
nstat_tcp_remove_watcher(
	__unused nstat_control_state *state)
{
	OSDecrementAtomic(&nstat_tcp_watchers);
}
1508
/*
 * Notification that a new TCP pcb was created.  Records its start
 * timestamp and, when TCP watchers exist, adds the pcb as a source to
 * every control state watching the kernel TCP provider.
 */
__private_extern__ void
nstat_tcp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	inp->inp_start_timestamp = mach_continuous_time();

	// Fast path: nobody is watching TCP sockets.
	if (nstat_tcp_watchers == 0) {
		return;
	}

	// Lock order: socket lock, then nstat_mtx.
	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0) {
			// this client is watching tcp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL) {
				continue;
			}
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_tcp_provider, cookie) != 0) {
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
1543
/*
 * Notification that an inpcb is being detached.  Sends a goodbye for any
 * matching TCP/UDP source, moves those sources onto a local dead list,
 * and cleans them up after all locks have been dropped.
 */
__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	tailq_head_nstat_src dead_list;
	struct nstat_tucookie *tucookie;
	errno_t result;

	// Nothing to do when nobody is watching TCP or UDP sockets.
	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
		return;
	}

	TAILQ_INIT(&dead_list);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		// Find this pcb's source in the control state, if any.
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
			if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
				tucookie = (struct nstat_tucookie *)src->cookie;
				if (tucookie->inp == inp) {
					break;
				}
			}
		}

		if (src) {
			result = nstat_control_send_goodbye(state, src);

			// Defer the actual cleanup until the locks are released.
			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);

	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
1587
/*
 * Notification of an inpcb event.  Forwards the event to every control
 * state whose TCP or UDP provider filter subscribes to it and that has
 * a source tracking this pcb.
 */
__private_extern__ void
nstat_pcb_event(struct inpcb *inp, u_int64_t event)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;
	errno_t result;
	nstat_provider_id_t provider_id;

	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
		return;
	}

	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		// Skip clients subscribed to neither provider's events.
		if (((state->ncs_provider_filters[NSTAT_PROVIDER_TCP_KERNEL].npf_events & event) == 0) &&
		    ((state->ncs_provider_filters[NSTAT_PROVIDER_UDP_KERNEL].npf_events & event) == 0)) {
			continue;
		}
		lck_mtx_lock(&state->ncs_mtx);
		// Find this pcb's source, if this client has one.
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			provider_id = src->provider->nstat_provider_id;
			if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
				tucookie = (struct nstat_tucookie *)src->cookie;
				if (tucookie->inp == inp) {
					break;
				}
			}
		}

		// provider_id is only meaningful when src != NULL (loop broke).
		if (src && ((state->ncs_provider_filters[provider_id].npf_events & event) != 0)) {
			result = nstat_control_send_event(state, src, event);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
1626
1627
/*
 * Snapshot a UDP pcb's connection tuples, last output interface index
 * and interface properties into every tucookie referencing it, so the
 * information remains available after the pcb disconnects (see the
 * comment on struct nstat_tucookie).
 */
__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	// Only relevant when UDP watchers hold cookie references on this pcb.
	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0) {
		return;
	}
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp) {
				// Copy the tuples in the appropriate family's form.
				if (inp->inp_vflag & INP_IPV6) {
					in6_ip6_to_sockaddr(&inp->in6p_laddr,
					    inp->inp_lport,
					    inp->inp_lifscope,
					    &tucookie->local.v6,
					    sizeof(tucookie->local));
					in6_ip6_to_sockaddr(&inp->in6p_faddr,
					    inp->inp_fport,
					    inp->inp_fifscope,
					    &tucookie->remote.v6,
					    sizeof(tucookie->remote));
				} else if (inp->inp_vflag & INP_IPV4) {
					nstat_ip_to_sockaddr(&inp->inp_laddr,
					    inp->inp_lport,
					    &tucookie->local.v4,
					    sizeof(tucookie->local));
					nstat_ip_to_sockaddr(&inp->inp_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v4,
					    sizeof(tucookie->remote));
				}
				if (inp->inp_last_outifp) {
					tucookie->if_index =
					    inp->inp_last_outifp->if_index;
				}

				tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
				tucookie->cached = true;
				break;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
1682
1683 __private_extern__ void
nstat_pcb_invalidate_cache(struct inpcb * inp)1684 nstat_pcb_invalidate_cache(struct inpcb *inp)
1685 {
1686 nstat_control_state *state;
1687 nstat_src *src;
1688 struct nstat_tucookie *tucookie;
1689
1690 if (inp == NULL || nstat_udp_watchers == 0 ||
1691 inp->inp_nstat_refcnt == 0) {
1692 return;
1693 }
1694 VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
1695 lck_mtx_lock(&nstat_mtx);
1696 for (state = nstat_controls; state; state = state->ncs_next) {
1697 lck_mtx_lock(&state->ncs_mtx);
1698 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1699 {
1700 tucookie = (struct nstat_tucookie *)src->cookie;
1701 if (tucookie->inp == inp) {
1702 tucookie->cached = false;
1703 break;
1704 }
1705 }
1706 lck_mtx_unlock(&state->ncs_mtx);
1707 }
1708 lck_mtx_unlock(&nstat_mtx);
1709 }
1710
/*
 * Fill in a full TCP source descriptor: addresses, TCP state, window
 * sizes, congestion-control algorithm, owning/delegated process
 * identity, socket buffer usage, and activity timestamps.
 * Returns EINVAL when the buffer is too small or the pcb is gone.
 */
static errno_t
nstat_tcp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	size_t len)
{
	if (len < sizeof(nstat_tcp_descriptor)) {
		return EINVAL;
	}

	if (nstat_tcp_gone(cookie)) {
		return EINVAL;
	}

	nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data;
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);
	bzero(desc, sizeof(*desc));

	if (inp->inp_vflag & INP_IPV6) {
		in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope,
		    &desc->local.v6, sizeof(desc->local));
		in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope,
		    &desc->remote.v6, sizeof(desc->remote));
	} else if (inp->inp_vflag & INP_IPV4) {
		nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
		    &desc->local.v4, sizeof(desc->local));
		nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
		    &desc->remote.v4, sizeof(desc->remote));
	}

	desc->state = intotcpcb(inp)->t_state;
	desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
	    inp->inp_last_outifp->if_index;

	// danger - not locked, values could be bogus
	desc->txunacked = tp->snd_max - tp->snd_una;
	desc->txwindow = tp->snd_wnd;
	desc->txcwindow = tp->snd_cwnd;

	if (CC_ALGO(tp)->name != NULL) {
		strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
		    sizeof(desc->cc_algo));
	}

	struct socket *so = inp->inp_socket;
	if (so) {
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		desc->traffic_class = so->so_traffic_class;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND)) {
			desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
		}
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG)) {
			desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
		}
		// Fall back to the name cached in the tucookie when the
		// process has already exited; otherwise refresh the cache.
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			// Not delegated: effective identity == last identity.
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		uuid_copy(desc->fuuid, inp->necp_client_uuid);
		desc->sndbufsize = so->so_snd.sb_hiwat;
		desc->sndbufused = so->so_snd.sb_cc;
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
	}

	tcp_get_connectivity_status(tp, &desc->connstatus);
	desc->ifnet_properties = (uint16_t)nstat_inpcb_to_flags(inp);
	inp_get_activity_bitmap(inp, &desc->activity_bitmap);
	desc->start_timestamp = inp->inp_start_timestamp;
	desc->timestamp = mach_continuous_time();
	return 0;
}
1805
1806 static bool
nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,bool is_UDP)1807 nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
1808 {
1809 bool retval = true;
1810
1811 if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS | NSTAT_FILTER_SPECIFIC_USER)) != 0) {
1812 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1813 struct inpcb *inp = tucookie->inp;
1814
1815 /* Only apply interface filter if at least one is allowed. */
1816 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
1817 uint32_t interface_properties = nstat_inpcb_to_flags(inp);
1818
1819 if ((filter->npf_flags & interface_properties) == 0) {
1820 // For UDP, we could have an undefined interface and yet transfers may have occurred.
1821 // We allow reporting if there have been transfers of the requested kind.
1822 // This is imperfect as we cannot account for the expensive attribute over wifi.
1823 // We also assume that cellular is expensive and we have no way to select for AWDL
1824 if (is_UDP) {
1825 do{
1826 if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR | NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
1827 (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes)) {
1828 break;
1829 }
1830 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
1831 (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes)) {
1832 break;
1833 }
1834 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
1835 (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes)) {
1836 break;
1837 }
1838 return false;
1839 } while (0);
1840 } else {
1841 return false;
1842 }
1843 }
1844 }
1845
1846 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval)) {
1847 struct socket *so = inp->inp_socket;
1848 retval = false;
1849
1850 if (so) {
1851 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
1852 (filter->npf_pid == so->last_pid)) {
1853 retval = true;
1854 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
1855 (filter->npf_pid == (so->so_flags & SOF_DELEGATED)? so->e_upid : so->last_pid)) {
1856 retval = true;
1857 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
1858 (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0)) {
1859 retval = true;
1860 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
1861 (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
1862 sizeof(so->last_uuid)) == 0)) {
1863 retval = true;
1864 }
1865 }
1866 }
1867 }
1868 return retval;
1869 }
1870
1871 static bool
nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)1872 nstat_tcp_reporting_allowed(
1873 nstat_provider_cookie_t cookie,
1874 nstat_provider_filter *filter,
1875 __unused u_int64_t suppression_flags)
1876 {
1877 return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
1878 }
1879
1880 static size_t
nstat_tcp_extensions(nstat_provider_cookie_t cookie,u_int32_t extension_id,void * buf,size_t len)1881 nstat_tcp_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
1882 {
1883 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1884 struct inpcb *inp = tucookie->inp;
1885
1886 if (nstat_tcp_gone(cookie)) {
1887 return 0;
1888 }
1889
1890 switch (extension_id) {
1891 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
1892 return nstat_inp_domain_info(inp, (nstat_domain_info *)buf, len);
1893
1894 case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV:
1895 default:
1896 break;
1897 }
1898 return 0;
1899 }
1900
1901 static void
nstat_init_tcp_provider(void)1902 nstat_init_tcp_provider(void)
1903 {
1904 bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
1905 nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
1906 nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
1907 nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
1908 nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
1909 nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
1910 nstat_tcp_provider.nstat_release = nstat_tcp_release;
1911 nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
1912 nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
1913 nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
1914 nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
1915 nstat_tcp_provider.nstat_copy_extension = nstat_tcp_extensions;
1916 nstat_tcp_provider.next = nstat_providers;
1917 nstat_providers = &nstat_tcp_provider;
1918 }
1919
1920 #pragma mark -- UDP Provider --
1921
1922 static nstat_provider nstat_udp_provider;
1923
1924 static errno_t
nstat_udp_lookup(const void * data,u_int32_t length,nstat_provider_cookie_t * out_cookie)1925 nstat_udp_lookup(
1926 const void *data,
1927 u_int32_t length,
1928 nstat_provider_cookie_t *out_cookie)
1929 {
1930 return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
1931 }
1932
1933 static int
nstat_udp_gone(nstat_provider_cookie_t cookie)1934 nstat_udp_gone(
1935 nstat_provider_cookie_t cookie)
1936 {
1937 struct nstat_tucookie *tucookie =
1938 (struct nstat_tucookie *)cookie;
1939 struct inpcb *inp;
1940
1941 return (!(inp = tucookie->inp) ||
1942 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1943 }
1944
1945 static errno_t
nstat_udp_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)1946 nstat_udp_counts(
1947 nstat_provider_cookie_t cookie,
1948 struct nstat_counts *out_counts,
1949 int *out_gone)
1950 {
1951 struct nstat_tucookie *tucookie =
1952 (struct nstat_tucookie *)cookie;
1953
1954 if (out_gone) {
1955 *out_gone = 0;
1956 }
1957
1958 // if the pcb is in the dead state, we should stop using it
1959 if (nstat_udp_gone(cookie)) {
1960 if (out_gone) {
1961 *out_gone = 1;
1962 }
1963 if (!tucookie->inp) {
1964 return EINVAL;
1965 }
1966 }
1967 struct inpcb *inp = tucookie->inp;
1968
1969 atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
1970 atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
1971 atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
1972 atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
1973 atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
1974 atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
1975 atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
1976 atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
1977 atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
1978 atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);
1979
1980 return 0;
1981 }
1982
1983 static void
nstat_udp_release(nstat_provider_cookie_t cookie,int locked)1984 nstat_udp_release(
1985 nstat_provider_cookie_t cookie,
1986 int locked)
1987 {
1988 struct nstat_tucookie *tucookie =
1989 (struct nstat_tucookie *)cookie;
1990
1991 nstat_tucookie_release_internal(tucookie, locked);
1992 }
1993
static errno_t
nstat_udp_add_watcher(
	nstat_control_state     *state,
	nstat_msg_add_all_srcs  *req)
{
	// Register "state" as a watcher of all kernel UDP sockets and add a
	// source for every UDP inpcb that already exists.
	//
	// There is a tricky issue around getting all UDP sockets added once
	// and only once. nstat_udp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the udpinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_udp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(&udbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);

	if (result == 0) {
		struct inpcb *inp;
		struct nstat_tucookie *cookie;

		// Mark the watcher before scanning so nstat_udp_new_pcb() starts
		// reporting sockets created from here on (see note above).
		OSIncrementAtomic(&nstat_udp_watchers);

		// Add all current UDP inpcbs.
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
		{
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL) {
				// Could not take a reference on this pcb; skip it.
				continue;
			}
			if (nstat_control_source_add(0, state, &nstat_udp_provider,
			    cookie) != 0) {
				// Source add failed: drop the reference taken above.
				// NOTE(review): result stays 0 here, so a partial
				// enumeration is still reported as success —
				// presumably intentional best-effort; confirm.
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(&udbinfo.ipi_lock);

	return result;
}
2039
2040 static void
nstat_udp_remove_watcher(__unused nstat_control_state * state)2041 nstat_udp_remove_watcher(
2042 __unused nstat_control_state *state)
2043 {
2044 OSDecrementAtomic(&nstat_udp_watchers);
2045 }
2046
__private_extern__ void
nstat_udp_new_pcb(
	struct inpcb    *inp)
{
	// Hook invoked when a new UDP pcb is created: stamp its start time and,
	// if anyone is watching UDP, attach it as a source to each watcher.
	struct nstat_tucookie *cookie;

	// The creation timestamp is recorded whether or not anyone is watching.
	inp->inp_start_timestamp = mach_continuous_time();

	// Fast path: no UDP watchers registered, nothing more to do.
	if (nstat_udp_watchers == 0) {
		return;
	}

	// Lock order: socket lock first, then the global nstat mutex.
	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0) {
			// this client is watching udp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL) {
				continue;
			}
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_udp_provider, cookie) != 0) {
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
2081
static errno_t
nstat_udp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void                    *data,
	size_t                  len)
{
	// Fill an nstat_udp_descriptor for this source into "data".
	// Returns EINVAL if the buffer is too small or the pcb is gone.
	if (len < sizeof(nstat_udp_descriptor)) {
		return EINVAL;
	}

	if (nstat_udp_gone(cookie)) {
		return EINVAL;
	}

	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data;
	struct inpcb *inp = tucookie->inp;

	bzero(desc, sizeof(*desc));

	if (tucookie->cached == false) {
		// Live pcb: read addresses and flags directly from the inpcb.
		if (inp->inp_vflag & INP_IPV6) {
			in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope,
			    &desc->local.v6, sizeof(desc->local.v6));
			in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope,
			    &desc->remote.v6, sizeof(desc->remote.v6));
		} else if (inp->inp_vflag & INP_IPV4) {
			nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
			    &desc->local.v4, sizeof(desc->local.v4));
			nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
			    &desc->remote.v4, sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = (uint16_t)nstat_inpcb_to_flags(inp);
	} else {
		// Use the values previously cached on the cookie.
		if (inp->inp_vflag & INP_IPV6) {
			memcpy(&desc->local.v6, &tucookie->local.v6,
			    sizeof(desc->local.v6));
			memcpy(&desc->remote.v6, &tucookie->remote.v6,
			    sizeof(desc->remote.v6));
		} else if (inp->inp_vflag & INP_IPV4) {
			memcpy(&desc->local.v4, &tucookie->local.v4,
			    sizeof(desc->local.v4));
			memcpy(&desc->remote.v4, &tucookie->remote.v4,
			    sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = tucookie->ifnet_properties;
	}

	// Prefer the pcb's last output interface; fall back to the cached index.
	if (inp->inp_last_outifp) {
		desc->ifindex = inp->inp_last_outifp->if_index;
	} else {
		desc->ifindex = tucookie->if_index;
	}

	struct socket *so = inp->inp_socket;
	if (so) {
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			// proc_name produced nothing; use the name cached on the cookie.
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			// Got a fresh name: terminate it and refresh the cookie's cache.
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			// Delegated socket: report the effective process separately.
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			// Not delegated: effective identity mirrors the last user.
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		uuid_copy(desc->fuuid, inp->necp_client_uuid);
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
		desc->traffic_class = so->so_traffic_class;
		inp_get_activity_bitmap(inp, &desc->activity_bitmap);
		desc->start_timestamp = inp->inp_start_timestamp;
		desc->timestamp = mach_continuous_time();
	}

	return 0;
}
2174
2175 static bool
nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)2176 nstat_udp_reporting_allowed(
2177 nstat_provider_cookie_t cookie,
2178 nstat_provider_filter *filter,
2179 __unused u_int64_t suppression_flags)
2180 {
2181 return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
2182 }
2183
2184
2185 static size_t
nstat_udp_extensions(nstat_provider_cookie_t cookie,u_int32_t extension_id,void * buf,size_t len)2186 nstat_udp_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
2187 {
2188 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
2189 struct inpcb *inp = tucookie->inp;
2190 if (nstat_udp_gone(cookie)) {
2191 return 0;
2192 }
2193
2194 switch (extension_id) {
2195 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
2196 return nstat_inp_domain_info(inp, (nstat_domain_info *)buf, len);
2197
2198 default:
2199 break;
2200 }
2201 return 0;
2202 }
2203
2204
2205 static void
nstat_init_udp_provider(void)2206 nstat_init_udp_provider(void)
2207 {
2208 bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
2209 nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
2210 nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
2211 nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
2212 nstat_udp_provider.nstat_gone = nstat_udp_gone;
2213 nstat_udp_provider.nstat_counts = nstat_udp_counts;
2214 nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
2215 nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
2216 nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
2217 nstat_udp_provider.nstat_release = nstat_udp_release;
2218 nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
2219 nstat_udp_provider.nstat_copy_extension = nstat_udp_extensions;
2220 nstat_udp_provider.next = nstat_providers;
2221 nstat_providers = &nstat_udp_provider;
2222 }
2223
2224 #if SKYWALK
2225
2226 #pragma mark -- TCP/UDP/QUIC Userland
2227
// Almost all of this infrastructure is common to both TCP and UDP
2229
2230 static u_int32_t nstat_userland_quic_watchers = 0;
2231 static u_int32_t nstat_userland_udp_watchers = 0;
2232 static u_int32_t nstat_userland_tcp_watchers = 0;
2233
2234 static u_int32_t nstat_userland_quic_shadows = 0;
2235 static u_int32_t nstat_userland_udp_shadows = 0;
2236 static u_int32_t nstat_userland_tcp_shadows = 0;
2237
2238 static nstat_provider nstat_userland_quic_provider;
2239 static nstat_provider nstat_userland_udp_provider;
2240 static nstat_provider nstat_userland_tcp_provider;
2241
// Kernel-side shadow of a userland (Skywalk) TCP/UDP/QUIC flow.  Kept on
// nstat_userprot_shad_head from ntstat_userland_stats_open() until
// ntstat_userland_stats_close().
struct nstat_tu_shadow {
	tailq_entry_tu_shadow           shad_link;             // linkage on nstat_userprot_shad_head
	userland_stats_request_vals_fn  *shad_getvals_fn;      // callback: fetch ifflags/counts/descriptor
	userland_stats_request_extension_fn *shad_get_extension_fn; // callback: fetch extended updates
	userland_stats_provider_context *shad_provider_context; // opaque context handed back to callbacks
	u_int64_t                       shad_properties;       // property bits supplied at open
	u_int64_t                       shad_start_timestamp;  // mach_continuous_time() at open
	nstat_provider_id_t             shad_provider;         // TCP/UDP/QUIC userland provider id
	struct nstat_procdetails        *shad_procdetails;     // retained details of the opening process
	bool shad_live; // false if defunct
	uint32_t shad_magic;                                   // TU_SHADOW_MAGIC while valid
};
2254
2255 // Magic number checking should remain in place until the userland provider has been fully proven
2256 #define TU_SHADOW_MAGIC 0xfeedf00d
2257 #define TU_SHADOW_UNMAGIC 0xdeaddeed
2258
2259 static tailq_head_tu_shadow nstat_userprot_shad_head = TAILQ_HEAD_INITIALIZER(nstat_userprot_shad_head);
2260
2261 static errno_t
nstat_userland_tu_lookup(__unused const void * data,__unused u_int32_t length,__unused nstat_provider_cookie_t * out_cookie)2262 nstat_userland_tu_lookup(
2263 __unused const void *data,
2264 __unused u_int32_t length,
2265 __unused nstat_provider_cookie_t *out_cookie)
2266 {
2267 // Looking up a specific connection is not supported
2268 return ENOTSUP;
2269 }
2270
2271 static int
nstat_userland_tu_gone(__unused nstat_provider_cookie_t cookie)2272 nstat_userland_tu_gone(
2273 __unused nstat_provider_cookie_t cookie)
2274 {
2275 // Returns non-zero if the source has gone.
2276 // We don't keep a source hanging around, so the answer is always 0
2277 return 0;
2278 }
2279
2280 static errno_t
nstat_userland_tu_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)2281 nstat_userland_tu_counts(
2282 nstat_provider_cookie_t cookie,
2283 struct nstat_counts *out_counts,
2284 int *out_gone)
2285 {
2286 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2287 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2288 assert(shad->shad_live);
2289
2290 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, out_counts, NULL);
2291
2292 if (out_gone) {
2293 *out_gone = 0;
2294 }
2295
2296 return (result)? 0 : EIO;
2297 }
2298
2299
static errno_t
nstat_userland_tu_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void                    *data,
	__unused size_t         len)
{
	// Copy a provider-specific descriptor for a userland flow: the userland
	// callback fills the protocol fields, then the kernel overlays the
	// process-identity fields it tracks itself.
	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
	assert(shad->shad_magic == TU_SHADOW_MAGIC);
	assert(shad->shad_live);
	struct nstat_procdetails *procdetails = shad->shad_procdetails;
	assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);

	// Ask the userland provider for the descriptor contents.
	bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, data);

	// NOTE(review): the identity fields below are written even when the
	// callback failed; EIO is returned in that case, so presumably callers
	// discard the buffer — confirm.
	switch (shad->shad_provider) {
	case NSTAT_PROVIDER_TCP_USERLAND:
	{
		nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)data;
		desc->pid = procdetails->pdet_pid;
		desc->upid = procdetails->pdet_upid;
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
		desc->start_timestamp = shad->shad_start_timestamp;
		desc->timestamp = mach_continuous_time();
	}
	break;
	case NSTAT_PROVIDER_UDP_USERLAND:
	{
		nstat_udp_descriptor *desc = (nstat_udp_descriptor *)data;
		desc->pid = procdetails->pdet_pid;
		desc->upid = procdetails->pdet_upid;
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
		desc->start_timestamp = shad->shad_start_timestamp;
		desc->timestamp = mach_continuous_time();
	}
	break;
	case NSTAT_PROVIDER_QUIC_USERLAND:
	{
		nstat_quic_descriptor *desc = (nstat_quic_descriptor *)data;
		desc->pid = procdetails->pdet_pid;
		desc->upid = procdetails->pdet_upid;
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
		desc->start_timestamp = shad->shad_start_timestamp;
		desc->timestamp = mach_continuous_time();
	}
	break;
	default:
		break;
	}
	return (result)? 0 : EIO;
}
2353
static void
nstat_userland_tu_release(
	__unused nstat_provider_cookie_t cookie,
	__unused int locked)
{
	// Called when a nstat_src is detached.
	// We don't reference count or ask for delayed release so nothing to do here.
	// Note that any associated nstat_tu_shadow may already have been released.
}
2363
2364 static bool
check_reporting_for_user(nstat_provider_filter * filter,pid_t pid,pid_t epid,uuid_t * uuid,uuid_t * euuid)2365 check_reporting_for_user(nstat_provider_filter *filter, pid_t pid, pid_t epid, uuid_t *uuid, uuid_t *euuid)
2366 {
2367 bool retval = true;
2368
2369 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2370 retval = false;
2371
2372 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
2373 (filter->npf_pid == pid)) {
2374 retval = true;
2375 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
2376 (filter->npf_pid == epid)) {
2377 retval = true;
2378 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
2379 (memcmp(filter->npf_uuid, uuid, sizeof(*uuid)) == 0)) {
2380 retval = true;
2381 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
2382 (memcmp(filter->npf_uuid, euuid, sizeof(*euuid)) == 0)) {
2383 retval = true;
2384 }
2385 }
2386 return retval;
2387 }
2388
2389 static bool
nstat_userland_tcp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)2390 nstat_userland_tcp_reporting_allowed(
2391 nstat_provider_cookie_t cookie,
2392 nstat_provider_filter *filter,
2393 __unused u_int64_t suppression_flags)
2394 {
2395 bool retval = true;
2396 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2397
2398 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2399
2400 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
2401 u_int16_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
2402
2403 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) {
2404 u_int32_t extended_ifflags = extend_ifnet_flags(ifflags);
2405 if ((filter->npf_flags & extended_ifflags) == 0) {
2406 return false;
2407 }
2408 }
2409 }
2410
2411 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2412 nstat_tcp_descriptor tcp_desc; // Stack allocation - OK or pushing the limits too far?
2413 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &tcp_desc)) {
2414 retval = check_reporting_for_user(filter, (pid_t)tcp_desc.pid, (pid_t)tcp_desc.epid,
2415 &tcp_desc.uuid, &tcp_desc.euuid);
2416 } else {
2417 retval = false; // No further information, so might as well give up now.
2418 }
2419 }
2420 return retval;
2421 }
2422
2423 static size_t
nstat_userland_extensions(nstat_provider_cookie_t cookie,u_int32_t extension_id,void * buf,size_t len)2424 nstat_userland_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
2425 {
2426 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2427 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2428 assert(shad->shad_live);
2429 assert(shad->shad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
2430
2431 return shad->shad_get_extension_fn(shad->shad_provider_context, extension_id, buf, len);
2432 }
2433
2434
2435 static bool
nstat_userland_udp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)2436 nstat_userland_udp_reporting_allowed(
2437 nstat_provider_cookie_t cookie,
2438 nstat_provider_filter *filter,
2439 __unused u_int64_t suppression_flags)
2440 {
2441 bool retval = true;
2442 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2443
2444 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2445
2446 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
2447 u_int16_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
2448
2449 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) {
2450 u_int32_t extended_ifflags = extend_ifnet_flags(ifflags);
2451 if ((filter->npf_flags & extended_ifflags) == 0) {
2452 return false;
2453 }
2454 }
2455 }
2456 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2457 nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far?
2458 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &udp_desc)) {
2459 retval = check_reporting_for_user(filter, (pid_t)udp_desc.pid, (pid_t)udp_desc.epid,
2460 &udp_desc.uuid, &udp_desc.euuid);
2461 } else {
2462 retval = false; // No further information, so might as well give up now.
2463 }
2464 }
2465 return retval;
2466 }
2467
2468 static bool
nstat_userland_quic_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)2469 nstat_userland_quic_reporting_allowed(
2470 nstat_provider_cookie_t cookie,
2471 nstat_provider_filter *filter,
2472 __unused u_int64_t suppression_flags)
2473 {
2474 bool retval = true;
2475 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2476
2477 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2478
2479 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
2480 u_int16_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
2481
2482 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) {
2483 u_int32_t extended_ifflags = extend_ifnet_flags(ifflags);
2484 if ((filter->npf_flags & extended_ifflags) == 0) {
2485 return false;
2486 }
2487 }
2488 }
2489 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2490 nstat_quic_descriptor quic_desc; // Stack allocation - OK or pushing the limits too far?
2491 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &quic_desc)) {
2492 retval = check_reporting_for_user(filter, (pid_t)quic_desc.pid, (pid_t)quic_desc.epid,
2493 &quic_desc.uuid, &quic_desc.euuid);
2494 } else {
2495 retval = false; // No further information, so might as well give up now.
2496 }
2497 }
2498 return retval;
2499 }
2500
static errno_t
nstat_userland_protocol_add_watcher(
	nstat_control_state     *state,
	nstat_msg_add_all_srcs  *req,
	nstat_provider_type_t   nstat_provider_type,
	nstat_provider          *nstat_provider,
	u_int32_t               *proto_watcher_cnt)
{
	// Common add-watcher path for the userland TCP/UDP/QUIC providers:
	// install the client's filter, bump the per-protocol watcher count,
	// then add a source for every live shadow of the requested type.
	// nstat_mtx is held across the scan so new shadows cannot race in.
	errno_t result;

	lck_mtx_lock(&nstat_mtx);
	result = nstat_set_provider_filter(state, req);

	if (result == 0) {
		struct nstat_tu_shadow *shad;

		OSIncrementAtomic(proto_watcher_cnt);

		TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
			assert(shad->shad_magic == TU_SHADOW_MAGIC);

			if ((shad->shad_provider == nstat_provider_type) && (shad->shad_live)) {
				result = nstat_control_source_add(0, state, nstat_provider, shad);
				if (result != 0) {
					// NOTE(review): on failure the error is returned
					// but the watcher count stays incremented —
					// presumably the caller's later remove_watcher
					// rebalances it; confirm.
					printf("%s - nstat_control_source_add returned %d for "
					    "provider type: %d\n", __func__, result, nstat_provider_type);
					break;
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return result;
}
2536
2537 static errno_t
nstat_userland_tcp_add_watcher(nstat_control_state * state,nstat_msg_add_all_srcs * req)2538 nstat_userland_tcp_add_watcher(
2539 nstat_control_state *state,
2540 nstat_msg_add_all_srcs *req)
2541 {
2542 return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_TCP_USERLAND,
2543 &nstat_userland_tcp_provider, &nstat_userland_tcp_watchers);
2544 }
2545
2546 static errno_t
nstat_userland_udp_add_watcher(nstat_control_state * state,nstat_msg_add_all_srcs * req)2547 nstat_userland_udp_add_watcher(
2548 nstat_control_state *state,
2549 nstat_msg_add_all_srcs *req)
2550 {
2551 return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_UDP_USERLAND,
2552 &nstat_userland_udp_provider, &nstat_userland_udp_watchers);
2553 }
2554
2555 static errno_t
nstat_userland_quic_add_watcher(nstat_control_state * state,nstat_msg_add_all_srcs * req)2556 nstat_userland_quic_add_watcher(
2557 nstat_control_state *state,
2558 nstat_msg_add_all_srcs *req)
2559 {
2560 return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_QUIC_USERLAND,
2561 &nstat_userland_quic_provider, &nstat_userland_quic_watchers);
2562 }
2563
2564 static void
nstat_userland_tcp_remove_watcher(__unused nstat_control_state * state)2565 nstat_userland_tcp_remove_watcher(
2566 __unused nstat_control_state *state)
2567 {
2568 OSDecrementAtomic(&nstat_userland_tcp_watchers);
2569 }
2570
2571 static void
nstat_userland_udp_remove_watcher(__unused nstat_control_state * state)2572 nstat_userland_udp_remove_watcher(
2573 __unused nstat_control_state *state)
2574 {
2575 OSDecrementAtomic(&nstat_userland_udp_watchers);
2576 }
2577
2578 static void
nstat_userland_quic_remove_watcher(__unused nstat_control_state * state)2579 nstat_userland_quic_remove_watcher(
2580 __unused nstat_control_state *state)
2581 {
2582 OSDecrementAtomic(&nstat_userland_quic_watchers);
2583 }
2584
2585
2586 static void
nstat_init_userland_tcp_provider(void)2587 nstat_init_userland_tcp_provider(void)
2588 {
2589 bzero(&nstat_userland_tcp_provider, sizeof(nstat_userland_tcp_provider));
2590 nstat_userland_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
2591 nstat_userland_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_USERLAND;
2592 nstat_userland_tcp_provider.nstat_lookup = nstat_userland_tu_lookup;
2593 nstat_userland_tcp_provider.nstat_gone = nstat_userland_tu_gone;
2594 nstat_userland_tcp_provider.nstat_counts = nstat_userland_tu_counts;
2595 nstat_userland_tcp_provider.nstat_release = nstat_userland_tu_release;
2596 nstat_userland_tcp_provider.nstat_watcher_add = nstat_userland_tcp_add_watcher;
2597 nstat_userland_tcp_provider.nstat_watcher_remove = nstat_userland_tcp_remove_watcher;
2598 nstat_userland_tcp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2599 nstat_userland_tcp_provider.nstat_reporting_allowed = nstat_userland_tcp_reporting_allowed;
2600 nstat_userland_tcp_provider.nstat_copy_extension = nstat_userland_extensions;
2601 nstat_userland_tcp_provider.next = nstat_providers;
2602 nstat_providers = &nstat_userland_tcp_provider;
2603 }
2604
2605
2606 static void
nstat_init_userland_udp_provider(void)2607 nstat_init_userland_udp_provider(void)
2608 {
2609 bzero(&nstat_userland_udp_provider, sizeof(nstat_userland_udp_provider));
2610 nstat_userland_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
2611 nstat_userland_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_USERLAND;
2612 nstat_userland_udp_provider.nstat_lookup = nstat_userland_tu_lookup;
2613 nstat_userland_udp_provider.nstat_gone = nstat_userland_tu_gone;
2614 nstat_userland_udp_provider.nstat_counts = nstat_userland_tu_counts;
2615 nstat_userland_udp_provider.nstat_release = nstat_userland_tu_release;
2616 nstat_userland_udp_provider.nstat_watcher_add = nstat_userland_udp_add_watcher;
2617 nstat_userland_udp_provider.nstat_watcher_remove = nstat_userland_udp_remove_watcher;
2618 nstat_userland_udp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2619 nstat_userland_udp_provider.nstat_reporting_allowed = nstat_userland_udp_reporting_allowed;
2620 nstat_userland_udp_provider.nstat_copy_extension = nstat_userland_extensions;
2621 nstat_userland_udp_provider.next = nstat_providers;
2622 nstat_providers = &nstat_userland_udp_provider;
2623 }
2624
2625 static void
nstat_init_userland_quic_provider(void)2626 nstat_init_userland_quic_provider(void)
2627 {
2628 bzero(&nstat_userland_quic_provider, sizeof(nstat_userland_quic_provider));
2629 nstat_userland_quic_provider.nstat_descriptor_length = sizeof(nstat_quic_descriptor);
2630 nstat_userland_quic_provider.nstat_provider_id = NSTAT_PROVIDER_QUIC_USERLAND;
2631 nstat_userland_quic_provider.nstat_lookup = nstat_userland_tu_lookup;
2632 nstat_userland_quic_provider.nstat_gone = nstat_userland_tu_gone;
2633 nstat_userland_quic_provider.nstat_counts = nstat_userland_tu_counts;
2634 nstat_userland_quic_provider.nstat_release = nstat_userland_tu_release;
2635 nstat_userland_quic_provider.nstat_watcher_add = nstat_userland_quic_add_watcher;
2636 nstat_userland_quic_provider.nstat_watcher_remove = nstat_userland_quic_remove_watcher;
2637 nstat_userland_quic_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2638 nstat_userland_quic_provider.nstat_reporting_allowed = nstat_userland_quic_reporting_allowed;
2639 nstat_userland_quic_provider.nstat_copy_extension = nstat_userland_extensions;
2640 nstat_userland_quic_provider.next = nstat_providers;
2641 nstat_providers = &nstat_userland_quic_provider;
2642 }
2643
2644
2645 // Things get started with a call to netstats to say that there’s a new connection:
__private_extern__ nstat_userland_context
ntstat_userland_stats_open(userland_stats_provider_context *ctx,
    int provider_id,
    u_int64_t properties,
    userland_stats_request_vals_fn req_fn,
    userland_stats_request_extension_fn req_extension_fn)
{
	// Register a new userland TCP/UDP/QUIC flow: allocate a shadow, record
	// the opening process, and attach the flow to every client already
	// watching the given provider.  Returns the shadow as an opaque handle
	// (to be passed to ntstat_userland_stats_close()), or NULL on failure.
	struct nstat_tu_shadow *shad;
	struct nstat_procdetails *procdetails;
	nstat_provider *provider;

	// Only the three userland flow providers may register here.
	if ((provider_id != NSTAT_PROVIDER_TCP_USERLAND) &&
	    (provider_id != NSTAT_PROVIDER_UDP_USERLAND) &&
	    (provider_id != NSTAT_PROVIDER_QUIC_USERLAND)) {
		printf("%s - incorrect provider is supplied, %d\n", __func__, provider_id);
		return NULL;
	}

	shad = kalloc_type(struct nstat_tu_shadow, Z_WAITOK | Z_NOFAIL);

	// Capture (and retain) the details of the opening process.
	procdetails = nstat_retain_curprocdetails();

	if (procdetails == NULL) {
		kfree_type(struct nstat_tu_shadow, shad);
		return NULL;
	}

	shad->shad_getvals_fn = req_fn;
	shad->shad_get_extension_fn = req_extension_fn;
	shad->shad_provider_context = ctx;
	shad->shad_provider = provider_id;
	shad->shad_properties = properties;
	shad->shad_procdetails = procdetails;
	shad->shad_start_timestamp = mach_continuous_time();
	shad->shad_live = true;
	shad->shad_magic = TU_SHADOW_MAGIC;

	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;

	// Even if there are no watchers, we save the shadow structure
	TAILQ_INSERT_HEAD(&nstat_userprot_shad_head, shad, shad_link);

	if (provider_id == NSTAT_PROVIDER_TCP_USERLAND) {
		nstat_userland_tcp_shadows++;
		provider = &nstat_userland_tcp_provider;
	} else if (provider_id == NSTAT_PROVIDER_UDP_USERLAND) {
		nstat_userland_udp_shadows++;
		provider = &nstat_userland_udp_provider;
	} else {
		nstat_userland_quic_shadows++;
		provider = &nstat_userland_quic_provider;
	}

	// Attach the new flow to every client already watching this provider.
	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << provider_id)) != 0) {
			// this client is watching tcp/udp/quic userland
			// Link to it.
			int result = nstat_control_source_add(0, state, provider, shad);
			if (result != 0) {
				// There should be some kind of statistics for failures like this.
				// <rdar://problem/31377195> The kernel ntstat component should keep some
				// internal counters reflecting operational state for eventual AWD reporting
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return (nstat_userland_context)shad;
}
2716
2717
__private_extern__ void
ntstat_userland_stats_close(nstat_userland_context nstat_ctx)
{
	// Tear down a flow registered via ntstat_userland_stats_open():
	// detach its source from every watching client (with a goodbye
	// message), unlink and free the shadow.  NULL is tolerated.
	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
	tailq_head_nstat_src dead_list;
	nstat_src *src;

	if (shad == NULL) {
		return;
	}

	assert(shad->shad_magic == TU_SHADOW_MAGIC);
	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);
	if (nstat_userland_udp_watchers != 0 ||
	    nstat_userland_tcp_watchers != 0 ||
	    nstat_userland_quic_watchers != 0) {
		nstat_control_state *state;
		errno_t result;

		for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
			// Find this client's source (if any) referring to the shadow.
			TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
			{
				if (shad == (struct nstat_tu_shadow *)src->cookie) {
					nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
					if (provider_id == NSTAT_PROVIDER_TCP_USERLAND ||
					    provider_id == NSTAT_PROVIDER_UDP_USERLAND ||
					    provider_id == NSTAT_PROVIDER_QUIC_USERLAND) {
						break;
					}
				}
			}

			if (src) {
				result = nstat_control_send_goodbye(state, src);

				// Move the source to the local dead list; it is
				// destroyed below, after both locks are dropped.
				TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
				TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
			}
			lck_mtx_unlock(&state->ncs_mtx);
		}
	}
	TAILQ_REMOVE(&nstat_userprot_shad_head, shad, shad_link);

	// Only shadows still marked live are reflected in the per-protocol counts.
	if (shad->shad_live) {
		if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) {
			nstat_userland_tcp_shadows--;
		} else if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND) {
			nstat_userland_udp_shadows--;
		} else {
			nstat_userland_quic_shadows--;
		}
	}

	lck_mtx_unlock(&nstat_mtx);

	// With no locks held, tear down the detached sources.
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
	nstat_release_procdetails(shad->shad_procdetails);
	shad->shad_magic = TU_SHADOW_UNMAGIC;

	kfree_type(struct nstat_tu_shadow, shad);
}
2785
2786
// Event notification from the userland protocol stack for a TCP/UDP/QUIC
// source.  Currently only NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT is acted
// upon: watching clients get a goodbye and their sources are retired early,
// unless the client opted out with NSTAT_FILTER_TCP_NO_EARLY_CLOSE.
__private_extern__ void
ntstat_userland_stats_event(
	nstat_userland_context nstat_ctx,
	uint64_t event)
{
	// This will need refinement for when we do genuine stats filtering
	// See <rdar://problem/23022832> NetworkStatistics should provide opt-in notifications
	// For now it deals only with events that potentially cause any traditional netstat sources to be closed

	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
	tailq_head_nstat_src dead_list;
	nstat_src *src;

	if (shad == NULL) {
		return;
	}

	assert(shad->shad_magic == TU_SHADOW_MAGIC);

	if (event & NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT) {
		TAILQ_INIT(&dead_list);

		lck_mtx_lock(&nstat_mtx);
		// Only scan client source lists if anybody is actually watching.
		if (nstat_userland_udp_watchers != 0 ||
		    nstat_userland_tcp_watchers != 0 ||
		    nstat_userland_quic_watchers != 0) {
			nstat_control_state *state;
			errno_t result;

			for (state = nstat_controls; state; state = state->ncs_next) {
				lck_mtx_lock(&state->ncs_mtx);
				TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
				{
					if (shad == (struct nstat_tu_shadow *)src->cookie) {
						break;
					}
				}

				if (src) {
					// Honor the client's opt-out of early close notifications.
					if (!(src->filter & NSTAT_FILTER_TCP_NO_EARLY_CLOSE)) {
						result = nstat_control_send_goodbye(state, src);

						// Defer the actual cleanup until all locks are dropped.
						TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
						TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
					}
				}
				lck_mtx_unlock(&state->ncs_mtx);
			}
		}
		lck_mtx_unlock(&nstat_mtx);

		// Retire the dead sources outside of nstat_mtx.
		while ((src = TAILQ_FIRST(&dead_list))) {
			TAILQ_REMOVE(&dead_list, src, ns_control_link);
			nstat_control_cleanup_source(NULL, src, TRUE);
		}
	}
}
2844
// Mark all userland TCP/UDP/QUIC shadows belonging to the given pid as
// defunct, send a goodbye to every client source referencing them, and
// retire those sources.  The shad_live flag guards the per-protocol shadow
// counters so they are decremented only once per shadow even when this is
// called repeatedly for the same process.
__private_extern__ void
nstats_userland_stats_defunct_for_process(int pid)
{
	// Note that this can be called multiple times for the same process
	tailq_head_nstat_src dead_list;
	nstat_src *src, *tmpsrc;
	struct nstat_tu_shadow *shad;

	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);

	// Only scan client source lists if anybody is actually watching.
	if (nstat_userland_udp_watchers != 0 ||
	    nstat_userland_tcp_watchers != 0 ||
	    nstat_userland_quic_watchers != 0) {
		nstat_control_state *state;
		errno_t result;

		for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
			// _SAFE variant: entries are removed while iterating.
			TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
			{
				nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
				if (provider_id == NSTAT_PROVIDER_TCP_USERLAND ||
				    provider_id == NSTAT_PROVIDER_UDP_USERLAND ||
				    provider_id == NSTAT_PROVIDER_QUIC_USERLAND) {
					shad = (struct nstat_tu_shadow *)src->cookie;
					if (shad->shad_procdetails->pdet_pid == pid) {
						result = nstat_control_send_goodbye(state, src);

						// Defer cleanup until all locks are dropped.
						TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
						TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
					}
				}
			}
			lck_mtx_unlock(&state->ncs_mtx);
		}
	}

	// Flip the process's live shadows to defunct, adjusting the
	// per-protocol counts exactly once per shadow.
	TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if (shad->shad_live) {
			if (shad->shad_procdetails->pdet_pid == pid) {
				shad->shad_live = false;
				if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) {
					nstat_userland_tcp_shadows--;
				} else if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND) {
					nstat_userland_udp_shadows--;
				} else {
					nstat_userland_quic_shadows--;
				}
			}
		}
	}

	lck_mtx_unlock(&nstat_mtx);

	// Retire the dead sources outside of nstat_mtx.
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
2908
2909
2910 #pragma mark -- Generic Providers --
2911
2912 static nstat_provider nstat_userland_conn_provider;
2913 static nstat_provider nstat_udp_subflow_provider;
2914
2915 static u_int32_t nstat_generic_provider_watchers[NSTAT_PROVIDER_COUNT];
2916
// Kernel-side shadow kept for every source registered by a generic
// (conn-userland / udp-subflow) provider.  Reference counted: the provider
// holds one reference from open to close, and every client nstat_src that
// links to the shadow holds another (see nstat_release_gshad).
struct nstat_generic_shadow {
	tailq_entry_generic_shadow gshad_link;                        // linkage on nstat_gshad_head
	nstat_provider_context gshad_provider_context;                // opaque handle passed back on every provider callback
	nstat_provider_request_vals_fn *gshad_getvals_fn;             // fetches flags/counts/descriptor on demand
	nstat_provider_request_extensions_fn *gshad_getextensions_fn; // optional extension fetch; NULL if unsupported
	u_int64_t gshad_properties;                                   // property flags used for clients' initial filtering
	u_int64_t gshad_start_timestamp;                              // mach_continuous_time() captured at open
	struct nstat_procdetails *gshad_procdetails;                  // retained identity of the owning process
	nstat_provider_id_t gshad_provider;                           // NSTAT_PROVIDER_CONN_USERLAND or NSTAT_PROVIDER_UDP_SUBFLOW
	int32_t gshad_refcnt;                                         // manipulated with OSIncrement/OSDecrementAtomic
	uint32_t gshad_magic;                                         // NSTAT_GENERIC_SHADOW_MAGIC while valid
};
2929
2930 // Magic number checking should remain in place until the userland provider has been fully proven
2931 #define NSTAT_GENERIC_SHADOW_MAGIC 0xfadef00d
2932 #define NSTAT_GENERIC_SHADOW_UNMAGIC 0xfadedead
2933
2934 static tailq_head_generic_shadow nstat_gshad_head = TAILQ_HEAD_INITIALIZER(nstat_gshad_head);
2935
2936 static void
nstat_release_gshad(struct nstat_generic_shadow * gshad)2937 nstat_release_gshad(
2938 struct nstat_generic_shadow *gshad)
2939 {
2940 assert(gshad->gshad_magic = NSTAT_GENERIC_SHADOW_MAGIC);
2941
2942 if (OSDecrementAtomic(&gshad->gshad_refcnt) == 1) {
2943 nstat_release_procdetails(gshad->gshad_procdetails);
2944 gshad->gshad_magic = NSTAT_GENERIC_SHADOW_UNMAGIC;
2945 kfree_type(struct nstat_generic_shadow, gshad);
2946 }
2947 }
2948
2949 static errno_t
nstat_generic_provider_lookup(__unused const void * data,__unused u_int32_t length,__unused nstat_provider_cookie_t * out_cookie)2950 nstat_generic_provider_lookup(
2951 __unused const void *data,
2952 __unused u_int32_t length,
2953 __unused nstat_provider_cookie_t *out_cookie)
2954 {
2955 // Looking up a specific connection is not supported
2956 return ENOTSUP;
2957 }
2958
2959 static int
nstat_generic_provider_gone(__unused nstat_provider_cookie_t cookie)2960 nstat_generic_provider_gone(
2961 __unused nstat_provider_cookie_t cookie)
2962 {
2963 // Returns non-zero if the source has gone.
2964 // We don't keep a source hanging around, so the answer is always 0
2965 return 0;
2966 }
2967
2968 static errno_t
nstat_generic_provider_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)2969 nstat_generic_provider_counts(
2970 nstat_provider_cookie_t cookie,
2971 struct nstat_counts *out_counts,
2972 int *out_gone)
2973 {
2974 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
2975 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
2976
2977 memset(out_counts, 0, sizeof(*out_counts));
2978
2979 bool result = (*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, out_counts, NULL);
2980
2981 if (out_gone) {
2982 *out_gone = 0;
2983 }
2984 return (result)? 0 : EIO;
2985 }
2986
2987
2988 static errno_t
nstat_generic_provider_copy_descriptor(nstat_provider_cookie_t cookie,void * data,__unused size_t len)2989 nstat_generic_provider_copy_descriptor(
2990 nstat_provider_cookie_t cookie,
2991 void *data,
2992 __unused size_t len)
2993 {
2994 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
2995 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
2996 struct nstat_procdetails *procdetails = gshad->gshad_procdetails;
2997 assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
2998
2999 bool result = (*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, NULL, data);
3000
3001 switch (gshad->gshad_provider) {
3002 case NSTAT_PROVIDER_CONN_USERLAND:
3003 {
3004 nstat_connection_descriptor *desc = (nstat_connection_descriptor *)data;
3005 desc->pid = procdetails->pdet_pid;
3006 desc->upid = procdetails->pdet_upid;
3007 uuid_copy(desc->uuid, procdetails->pdet_uuid);
3008 strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
3009 desc->start_timestamp = gshad->gshad_start_timestamp;
3010 desc->timestamp = mach_continuous_time();
3011 break;
3012 }
3013 case NSTAT_PROVIDER_UDP_SUBFLOW:
3014 {
3015 nstat_udp_descriptor *desc = (nstat_udp_descriptor *)data;
3016 desc->pid = procdetails->pdet_pid;
3017 desc->upid = procdetails->pdet_upid;
3018 uuid_copy(desc->uuid, procdetails->pdet_uuid);
3019 strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
3020 desc->start_timestamp = gshad->gshad_start_timestamp;
3021 desc->timestamp = mach_continuous_time();
3022 break;
3023 }
3024 default:
3025 break;
3026 }
3027 return (result)? 0 : EIO;
3028 }
3029
3030 static void
nstat_generic_provider_release(__unused nstat_provider_cookie_t cookie,__unused int locked)3031 nstat_generic_provider_release(
3032 __unused nstat_provider_cookie_t cookie,
3033 __unused int locked)
3034 {
3035 // Called when a nstat_src is detached.
3036 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3037
3038 nstat_release_gshad(gshad);
3039 }
3040
// Decide whether a generic source may be reported to the client whose
// provider filter is supplied.  Returns false to suppress the report.
static bool
nstat_generic_provider_reporting_allowed(
	nstat_provider_cookie_t cookie,
	nstat_provider_filter *filter,
	u_int64_t suppression_flags)
{
	struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;

	assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

	// Clients can ask for "boring" (unchanged) sources to be suppressed.
	if ((filter->npf_flags & NSTAT_FILTER_SUPPRESS_BORING_FLAGS) != 0) {
		if ((filter->npf_flags & suppression_flags) != 0) {
			return false;
		}
	}

	// Filter based on interface and connection flags
	// If a provider doesn't support flags, a client shouldn't attempt to use filtering
	if ((filter->npf_flags & NSTAT_FILTER_IFNET_AND_CONN_FLAGS) != 0) {
		u_int32_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;

		// Ask the provider only for the interface flags.
		if ((*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, &ifflags, NULL, NULL)) {
			if ((filter->npf_flags & ifflags) == 0) {
				return false;
			}
		}
	}

	if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
		struct nstat_procdetails *procdetails = gshad->gshad_procdetails;
		assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);

		// Check details that we have readily to hand before asking the provider for descriptor items
		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
		    (filter->npf_pid == procdetails->pdet_pid)) {
			return true;
		}
		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
		    (memcmp(filter->npf_uuid, &procdetails->pdet_uuid, sizeof(filter->npf_uuid)) == 0)) {
			return true;
		}
		if ((filter->npf_flags & (NSTAT_FILTER_SPECIFIC_USER_BY_EPID | NSTAT_FILTER_SPECIFIC_USER_BY_EUUID)) != 0) {
			nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far?
			switch (gshad->gshad_provider) {
			case NSTAT_PROVIDER_CONN_USERLAND:
				// Filtering by effective uuid or effective pid is currently not supported
				// Note: this clears the flags in the CLIENT's filter so the
				// fallback is only attempted (and logged) once per client.
				filter->npf_flags &= ~((uint64_t)(NSTAT_FILTER_SPECIFIC_USER_BY_EPID | NSTAT_FILTER_SPECIFIC_USER_BY_EUUID));
				printf("%s - attempt to filter conn provider by effective pid/uuid, not supported\n", __func__);
				return true;

			case NSTAT_PROVIDER_UDP_SUBFLOW:
				// Fetch a full descriptor to obtain the effective pid/uuid.
				if ((*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, NULL, &udp_desc)) {
					if (check_reporting_for_user(filter, procdetails->pdet_pid, (pid_t)udp_desc.epid,
					    &procdetails->pdet_uuid, &udp_desc.euuid)) {
						return true;
					}
				}
				break;
			default:
				break;
			}
		}
		// A user-specific filter was requested and nothing matched.
		return false;
	}
	return true;
}
3107
3108 static size_t
nstat_generic_extensions(nstat_provider_cookie_t cookie,u_int32_t extension_id,void * buf,size_t len)3109 nstat_generic_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
3110 {
3111 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3112 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
3113 assert(gshad->gshad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
3114
3115 if (gshad->gshad_getextensions_fn == NULL) {
3116 return 0;
3117 }
3118 return gshad->gshad_getextensions_fn(gshad->gshad_provider_context, extension_id, buf, len);
3119 }
3120
// Handle a client request to watch every source of a generic provider
// (conn-userland or udp-subflow).  Stores the client's filter, bumps the
// per-provider watcher count, then adds a source for each existing shadow
// that passes the client's optional initial-properties filter.
static errno_t
nstat_generic_provider_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	errno_t result;
	nstat_provider_id_t provider_id = req->provider;
	nstat_provider *provider;

	switch (provider_id) {
	case NSTAT_PROVIDER_CONN_USERLAND:
		provider = &nstat_userland_conn_provider;
		break;
	case NSTAT_PROVIDER_UDP_SUBFLOW:
		provider = &nstat_udp_subflow_provider;
		break;
	default:
		return ENOTSUP;
	}

	lck_mtx_lock(&nstat_mtx);
	result = nstat_set_provider_filter(state, req);

	if (result == 0) {
		struct nstat_generic_shadow *gshad;
		nstat_provider_filter *filter = &state->ncs_provider_filters[provider_id];

		OSIncrementAtomic(&nstat_generic_provider_watchers[provider_id]);

		// Link the new watcher to every matching pre-existing shadow.
		TAILQ_FOREACH(gshad, &nstat_gshad_head, gshad_link) {
			assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

			if (gshad->gshad_provider == provider_id) {
				if (filter->npf_flags & NSTAT_FILTER_INITIAL_PROPERTIES) {
					u_int64_t npf_flags = filter->npf_flags & NSTAT_FILTER_IFNET_AND_CONN_FLAGS;
					if ((npf_flags != 0) && ((npf_flags & gshad->gshad_properties) == 0)) {
						// Skip this one
						// Note - no filtering by pid or UUID supported at this point, for simplicity
						continue;
					}
				}
				result = nstat_control_source_add(0, state, provider, gshad);
				if (result != 0) {
					printf("%s - nstat_control_source_add returned %d for "
					    "provider type: %d\n", __func__, result, provider_id);
					break;
				} else {
					// The newly added source holds a reference on the shadow.
					OSIncrementAtomic(&gshad->gshad_refcnt);
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return result;
}
3177
3178 static void
nstat_userland_conn_remove_watcher(__unused nstat_control_state * state)3179 nstat_userland_conn_remove_watcher(
3180 __unused nstat_control_state *state)
3181 {
3182 OSDecrementAtomic(&nstat_generic_provider_watchers[NSTAT_PROVIDER_CONN_USERLAND]);
3183 }
3184
3185 static void
nstat_udp_subflow_remove_watcher(__unused nstat_control_state * state)3186 nstat_udp_subflow_remove_watcher(
3187 __unused nstat_control_state *state)
3188 {
3189 OSDecrementAtomic(&nstat_generic_provider_watchers[NSTAT_PROVIDER_UDP_SUBFLOW]);
3190 }
3191
3192 static void
nstat_init_userland_conn_provider(void)3193 nstat_init_userland_conn_provider(void)
3194 {
3195 bzero(&nstat_userland_conn_provider, sizeof(nstat_userland_conn_provider));
3196 nstat_userland_conn_provider.nstat_descriptor_length = sizeof(nstat_connection_descriptor);
3197 nstat_userland_conn_provider.nstat_provider_id = NSTAT_PROVIDER_CONN_USERLAND;
3198 nstat_userland_conn_provider.nstat_lookup = nstat_generic_provider_lookup;
3199 nstat_userland_conn_provider.nstat_gone = nstat_generic_provider_gone;
3200 nstat_userland_conn_provider.nstat_counts = nstat_generic_provider_counts;
3201 nstat_userland_conn_provider.nstat_release = nstat_generic_provider_release;
3202 nstat_userland_conn_provider.nstat_watcher_add = nstat_generic_provider_add_watcher;
3203 nstat_userland_conn_provider.nstat_watcher_remove = nstat_userland_conn_remove_watcher;
3204 nstat_userland_conn_provider.nstat_copy_descriptor = nstat_generic_provider_copy_descriptor;
3205 nstat_userland_conn_provider.nstat_reporting_allowed = nstat_generic_provider_reporting_allowed;
3206 nstat_userland_conn_provider.nstat_copy_extension = nstat_generic_extensions;
3207 nstat_userland_conn_provider.next = nstat_providers;
3208 nstat_providers = &nstat_userland_conn_provider;
3209 }
3210
3211 static void
nstat_init_udp_subflow_provider(void)3212 nstat_init_udp_subflow_provider(void)
3213 {
3214 bzero(&nstat_udp_subflow_provider, sizeof(nstat_udp_subflow_provider));
3215 nstat_udp_subflow_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
3216 nstat_udp_subflow_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_SUBFLOW;
3217 nstat_udp_subflow_provider.nstat_lookup = nstat_generic_provider_lookup;
3218 nstat_udp_subflow_provider.nstat_gone = nstat_generic_provider_gone;
3219 nstat_udp_subflow_provider.nstat_counts = nstat_generic_provider_counts;
3220 nstat_udp_subflow_provider.nstat_release = nstat_generic_provider_release;
3221 nstat_udp_subflow_provider.nstat_watcher_add = nstat_generic_provider_add_watcher;
3222 nstat_udp_subflow_provider.nstat_watcher_remove = nstat_udp_subflow_remove_watcher;
3223 nstat_udp_subflow_provider.nstat_copy_descriptor = nstat_generic_provider_copy_descriptor;
3224 nstat_udp_subflow_provider.nstat_reporting_allowed = nstat_generic_provider_reporting_allowed;
3225 nstat_udp_subflow_provider.nstat_copy_extension = nstat_generic_extensions;
3226 nstat_udp_subflow_provider.next = nstat_providers;
3227 nstat_providers = &nstat_udp_subflow_provider;
3228 }
3229
// Things get started with a call from the provider to netstats to say that there’s a new source
// ctx               - opaque provider handle passed back on every callback
// provider_id       - which generic provider this source belongs to
// properties        - property flags used for clients' initial filtering
// req_fn            - callback to fetch flags/counts/descriptor on demand
// req_extensions_fn - optional extension callback (may be NULL)
// Returns the opaque shadow handle, or NULL if the current process's
// details could not be retained.
__private_extern__ nstat_context
nstat_provider_stats_open(nstat_provider_context ctx,
    int provider_id,
    u_int64_t properties,
    nstat_provider_request_vals_fn req_fn,
    nstat_provider_request_extensions_fn req_extensions_fn)
{
	struct nstat_generic_shadow *gshad;
	struct nstat_procdetails *procdetails;
	nstat_provider *provider = nstat_find_provider_by_id(provider_id);

	gshad = kalloc_type(struct nstat_generic_shadow, Z_WAITOK | Z_NOFAIL);

	procdetails = nstat_retain_curprocdetails();

	if (procdetails == NULL) {
		kfree_type(struct nstat_generic_shadow, gshad);
		return NULL;
	}

	gshad->gshad_getvals_fn = req_fn;
	gshad->gshad_getextensions_fn = req_extensions_fn;
	gshad->gshad_provider_context = ctx;
	gshad->gshad_properties = properties;
	gshad->gshad_procdetails = procdetails;
	gshad->gshad_provider = provider_id;
	gshad->gshad_start_timestamp = mach_continuous_time();
	gshad->gshad_refcnt = 1; // the provider's own reference
	gshad->gshad_magic = NSTAT_GENERIC_SHADOW_MAGIC;

	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;

	// Even if there are no watchers, we save the shadow structure
	TAILQ_INSERT_HEAD(&nstat_gshad_head, gshad, gshad_link);

	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << provider_id)) != 0) {
			// Does this client want an initial filtering to be made?
			u_int64_t npf_flags = state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
			if (npf_flags & NSTAT_FILTER_INITIAL_PROPERTIES) {
				npf_flags &= NSTAT_FILTER_IFNET_AND_CONN_FLAGS;
				if ((npf_flags != 0) && ((npf_flags & properties) == 0)) {
					// Skip this one
					// Note - no filtering by pid or UUID supported at this point, for simplicity
					continue;
				}
			}
			// this client is watching, so link to it.
			int result = nstat_control_source_add(0, state, provider, gshad);
			if (result != 0) {
				// There should be some kind of statistics for failures like this.
				// <rdar://problem/31377195> The kernel ntstat component should keep some
				// internal counters reflecting operational state for eventual AWD reporting
			} else {
				// Each successfully added source holds a reference on the shadow.
				OSIncrementAtomic(&gshad->gshad_refcnt);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return (nstat_context) gshad;
}
3294
3295
// When the source is closed, netstats will make one last call on the request functions to retrieve final values
// Unlinks the shadow from the global list, sends a goodbye to every client
// source referencing it, and drops the provider's reference.
__private_extern__ void
nstat_provider_stats_close(nstat_context nstat_ctx)
{
	tailq_head_nstat_src dead_list;
	nstat_src *src;
	struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)nstat_ctx;

	if (gshad == NULL) {
		printf("%s - called with null reference", __func__);
		return;
	}

	assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

	// Belt-and-braces diagnostic for builds where assert() is compiled out.
	if (gshad->gshad_magic != NSTAT_GENERIC_SHADOW_MAGIC) {
		printf("%s - called with incorrect shadow magic 0x%x", __func__, gshad->gshad_magic);
	}

	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);

	TAILQ_REMOVE(&nstat_gshad_head, gshad, gshad_link);

	// The shadow holds one reference on its own behalf; the remainder were
	// taken by client sources, bounding how many sources can reference it.
	int32_t num_srcs = gshad->gshad_refcnt - 1;
	if ((nstat_generic_provider_watchers[gshad->gshad_provider] != 0) && (num_srcs > 0)) {
		nstat_control_state *state;
		errno_t result;

		for (state = nstat_controls; state; state = state->ncs_next) {
			// Only scan further if this client is watching
			if ((state->ncs_watching & (1 << gshad->gshad_provider)) != 0) {
				lck_mtx_lock(&state->ncs_mtx);
				TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
				{
					if ((gshad == (struct nstat_generic_shadow *)src->cookie) &&
					    (gshad->gshad_provider == src->provider->nstat_provider_id)) {
						break;
					}
				}
				if (src) {
					result = nstat_control_send_goodbye(state, src);
					// There is currently no recovery possible from failure to send,
					// so no need to check the return code.
					// rdar://28312774 (Scalability and resilience issues in ntstat.c)

					// Defer cleanup until all locks are dropped.
					TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
					TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
					--num_srcs;
				}
				lck_mtx_unlock(&state->ncs_mtx);

				// Performance optimization, don't scan full lists if no chance of presence
				if (num_srcs == 0) {
					break;
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	// Retire the dead sources; each release drops the reference that the
	// source held on the shadow.
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
	// Drop the provider's own reference; frees the shadow if it is the last.
	nstat_release_gshad(gshad);
}
3364
// Events that cause a significant change may be reported via a flags word.
// For each watching client that subscribed to this event, or that asked for
// "boring" unchanged reports to be suppressed, the source's ns_reported
// flag is cleared; subscribed clients additionally receive an explicit
// event message.
void
nstat_provider_stats_event(__unused nstat_context nstat_ctx, __unused uint64_t event)
{
	nstat_src *src;
	struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)nstat_ctx;

	if (gshad == NULL) {
		printf("%s - called with null reference", __func__);
		return;
	}

	assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

	// Belt-and-braces diagnostic for builds where assert() is compiled out.
	if (gshad->gshad_magic != NSTAT_GENERIC_SHADOW_MAGIC) {
		printf("%s - called with incorrect shadow magic 0x%x", __func__, gshad->gshad_magic);
	}

	lck_mtx_lock(&nstat_mtx);

	if (nstat_generic_provider_watchers[gshad->gshad_provider] != 0) {
		nstat_control_state *state;
		errno_t result;
		nstat_provider_id_t provider_id = gshad->gshad_provider;

		for (state = nstat_controls; state; state = state->ncs_next) {
			// Only scan further if this client is watching and has interest in the event
			// or the client has requested "boring" unchanged status to be ignored
			if (((state->ncs_watching & (1 << provider_id)) != 0) &&
			    (((state->ncs_provider_filters[provider_id].npf_events & event) != 0) ||
			    ((state->ncs_provider_filters[provider_id].npf_flags & NSTAT_FILTER_SUPPRESS_BORING_FLAGS) != 0))) {
				lck_mtx_lock(&state->ncs_mtx);
				TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
				{
					if (gshad == (struct nstat_generic_shadow *)src->cookie) {
						break;
					}
				}

				if (src) {
					// Force the source to be reported on the next poll.
					src->ns_reported = false;
					if ((state->ncs_provider_filters[provider_id].npf_events & event) != 0) {
						result = nstat_control_send_event(state, src, event);
						// There is currently no recovery possible from failure to send,
						// so no need to check the return code.
						// rdar://28312774 (Scalability and resilience issues in ntstat.c)
					}
				}
				lck_mtx_unlock(&state->ncs_mtx);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}
3419
3420 #endif /* SKYWALK */
3421
3422
3423 #pragma mark -- ifnet Provider --
3424
3425 static nstat_provider nstat_ifnet_provider;
3426
/*
 * We store a pointer to the ifnet and the original threshold
 * requested by the client.  The ifnet pointer is accompanied by a
 * reference taken via ifnet_reference() in nstat_ifnet_lookup(); the
 * threshold is used by nstat_ifnet_release() to recompute the
 * interface's if_data_threshold when this source goes away.
 */
struct nstat_ifnet_cookie {
	struct ifnet    *ifp;       // referenced interface backing this source
	uint64_t        threshold;  // client-requested data threshold (bytes)
};
3435
// Create an ifnet source from a client-supplied nstat_ifnet_add_param.
// Validates the request (minimum 1MB threshold), performs the privilege
// check, finds the interface by index, takes an ifnet reference for the
// cookie, and lowers the interface's if_data_threshold if this client's
// request is smaller than the current setting.
static errno_t
nstat_ifnet_lookup(
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
	struct ifnet *ifp;
	boolean_t changed = FALSE;
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_ifnet_cookie *cookie;

	// Reject undersized requests and thresholds below 1MB.
	if (length < sizeof(*param) || param->threshold < 1024 * 1024) {
		return EINVAL;
	}
	if (nstat_privcheck != 0) {
		errno_t result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0) {
			return result;
		}
	}
	cookie = kalloc_type(struct nstat_ifnet_cookie,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
	{
		// Take an io refcnt so the ifnet cannot detach while we examine it.
		if (!ifnet_is_attached(ifp, 1)) {
			continue;
		}
		ifnet_lock_exclusive(ifp);
		if (ifp->if_index == param->ifindex) {
			cookie->ifp = ifp;
			cookie->threshold = param->threshold;
			*out_cookie = cookie;
			// Lower the interface threshold if this client wants
			// notification sooner than currently configured.
			if (!ifp->if_data_threshold ||
			    ifp->if_data_threshold > param->threshold) {
				changed = TRUE;
				ifp->if_data_threshold = param->threshold;
			}
			ifnet_lock_done(ifp);
			// Swap the transient io refcnt for a long-term ifnet
			// reference held by the cookie.
			ifnet_reference(ifp);
			ifnet_decr_iorefcnt(ifp);
			break;
		}
		ifnet_lock_done(ifp);
		ifnet_decr_iorefcnt(ifp);
	}
	ifnet_head_done();

	/*
	 * When we change the threshold to something smaller, we notify
	 * all of our clients with a description message.
	 * We won't send a message to the client we are currently serving
	 * because it has no `ifnet source' yet.
	 */
	if (changed) {
		lck_mtx_lock(&nstat_mtx);
		for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
			TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
			{
				if (src->provider != &nstat_ifnet_provider) {
					continue;
				}
				nstat_control_send_description(state, src, 0, 0);
			}
			lck_mtx_unlock(&state->ncs_mtx);
		}
		lck_mtx_unlock(&nstat_mtx);
	}
	// No interface matched the requested index: free the unused cookie.
	if (cookie->ifp == NULL) {
		kfree_type(struct nstat_ifnet_cookie, cookie);
	}

	return ifp ? 0 : EINVAL;
}
3515
3516 static int
nstat_ifnet_gone(nstat_provider_cookie_t cookie)3517 nstat_ifnet_gone(
3518 nstat_provider_cookie_t cookie)
3519 {
3520 struct ifnet *ifp;
3521 struct nstat_ifnet_cookie *ifcookie =
3522 (struct nstat_ifnet_cookie *)cookie;
3523
3524 ifnet_head_lock_shared();
3525 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
3526 {
3527 if (ifp == ifcookie->ifp) {
3528 break;
3529 }
3530 }
3531 ifnet_head_done();
3532
3533 return ifp ? 0 : 1;
3534 }
3535
3536 static errno_t
nstat_ifnet_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)3537 nstat_ifnet_counts(
3538 nstat_provider_cookie_t cookie,
3539 struct nstat_counts *out_counts,
3540 int *out_gone)
3541 {
3542 struct nstat_ifnet_cookie *ifcookie =
3543 (struct nstat_ifnet_cookie *)cookie;
3544 struct ifnet *ifp = ifcookie->ifp;
3545
3546 if (out_gone) {
3547 *out_gone = 0;
3548 }
3549
3550 // if the ifnet is gone, we should stop using it
3551 if (nstat_ifnet_gone(cookie)) {
3552 if (out_gone) {
3553 *out_gone = 1;
3554 }
3555 return EINVAL;
3556 }
3557
3558 bzero(out_counts, sizeof(*out_counts));
3559 out_counts->nstat_rxpackets = ifp->if_ipackets;
3560 out_counts->nstat_rxbytes = ifp->if_ibytes;
3561 out_counts->nstat_txpackets = ifp->if_opackets;
3562 out_counts->nstat_txbytes = ifp->if_obytes;
3563 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
3564 return 0;
3565 }
3566
// Release an ifnet source.  Recomputes if_data_threshold from the
// thresholds still requested by the remaining ifnet sources (disabling it
// when none remain), then drops the ifnet reference taken in
// nstat_ifnet_lookup and frees the cookie.
static void
nstat_ifnet_release(
	nstat_provider_cookie_t cookie,
	__unused int locked)
{
	struct nstat_ifnet_cookie *ifcookie;
	struct ifnet *ifp;
	nstat_control_state *state;
	nstat_src *src;
	uint64_t minthreshold = UINT64_MAX;

	/*
	 * Find all the clients that requested a threshold
	 * for this ifnet and re-calculate if_data_threshold.
	 *
	 * NOTE(review): the scan below takes the minimum over ALL remaining
	 * ifnet sources, not only those attached to this particular ifp —
	 * confirm that cross-interface minimum is the intended behavior.
	 */
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			/* Skip the provider we are about to detach. */
			if (src->provider != &nstat_ifnet_provider ||
			    src->cookie == cookie) {
				continue;
			}
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			if (ifcookie->threshold < minthreshold) {
				minthreshold = ifcookie->threshold;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
	/*
	 * Reset if_data_threshold or disable it.
	 */
	ifcookie = (struct nstat_ifnet_cookie *)cookie;
	ifp = ifcookie->ifp;
	if (ifnet_is_attached(ifp, 1)) {
		ifnet_lock_exclusive(ifp);
		if (minthreshold == UINT64_MAX) {
			// No remaining client threshold: disable notifications.
			ifp->if_data_threshold = 0;
		} else {
			ifp->if_data_threshold = minthreshold;
		}
		ifnet_lock_done(ifp);
		ifnet_decr_iorefcnt(ifp);
	}
	// Drop the reference taken by nstat_ifnet_lookup.
	ifnet_release(ifp);
	kfree_type(struct nstat_ifnet_cookie, ifcookie);
}
3618
/*
 * Fill desc->link_status from the interface's most recent driver-published
 * link-status report (ifp->if_link_status), translating the IF_* valid bits
 * and fields into the equivalent NSTAT_IFNET_DESC_* representation.
 *
 * The status type defaults to NONE and remains NONE when the interface has
 * never published a report or when the report version is not the (only)
 * version this translator understands.  The report is read under the
 * shared if_link_status_lock since drivers update it asynchronously.
 */
static void
nstat_ifnet_copy_link_status(
    struct ifnet *ifp,
    struct nstat_ifnet_descriptor *desc)
{
    struct if_link_status *ifsr = ifp->if_link_status;
    nstat_ifnet_desc_link_status *link_status = &desc->link_status;

    link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
    if (ifsr == NULL) {
        return;
    }

    lck_rw_lock_shared(&ifp->if_link_status_lock);

    if (ifp->if_type == IFT_CELLULAR) {
        nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
        struct if_cellular_status_v1 *if_cell_sr =
            &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

        /* Only v1 cellular reports are understood. */
        if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1) {
            goto done;
        }

        link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;

        /* Copy each field only when the driver flagged it valid. */
        if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
            cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
            cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
            cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
            cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
            cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
            cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
            /* Enumerated level: translate known values, otherwise withdraw the valid bit. */
            if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE) {
                cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
            } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW) {
                cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
            } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM) {
                cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
            } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH) {
                cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
            } else {
                cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
            }
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
            cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
            cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
            cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
            cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
            cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
            cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
            cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
            cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
        }
        if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
            cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
            cell_status->mss_recommended = if_cell_sr->mss_recommended;
        }
    } else if (IFNET_IS_WIFI(ifp)) {
        nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
        struct if_wifi_status_v1 *if_wifi_sr =
            &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;

        /* Only v1 wifi reports are understood. */
        if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1) {
            goto done;
        }

        link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;

        /* Same valid-bit gated translation as the cellular branch. */
        if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
            wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
            wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
            wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
            wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
            wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
            wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
            /* Unknown retransmission level: withdraw the valid bit. */
            if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE) {
                wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
            } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW) {
                wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
            } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM) {
                wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
            } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH) {
                wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
            } else {
                wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
            }
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
            wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
            wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
            wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
            wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
            wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
            wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
            wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
            wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
            /* Enumerated band: only the two known bands translate. */
            if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ) {
                wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
            } else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ) {
                wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
            } else {
                wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
            }
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
            wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
            wifi_status->scan_count = if_wifi_sr->scan_count;
        }
        if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
            wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
            wifi_status->scan_duration = if_wifi_sr->scan_duration;
        }
    }

done:
    lck_rw_done(&ifp->if_link_status_lock);
}
3827
/* net_uptime() of the last ECN stats report; rate-limits the reporter below. */
static u_int64_t nstat_ifnet_last_report_time = 0;
/* Reporting interval in seconds; defined in the TCP subsystem (extern) — presumably a sysctl, confirm. */
extern int tcp_report_stats_interval;
3830
3831 static void
nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat * ifst)3832 nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
3833 {
3834 /* Retransmit percentage */
3835 if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
3836 /* shift by 10 for precision */
3837 ifst->rxmit_percent =
3838 ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
3839 } else {
3840 ifst->rxmit_percent = 0;
3841 }
3842
3843 /* Out-of-order percentage */
3844 if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
3845 /* shift by 10 for precision */
3846 ifst->oo_percent =
3847 ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
3848 } else {
3849 ifst->oo_percent = 0;
3850 }
3851
3852 /* Reorder percentage */
3853 if (ifst->total_reorderpkts > 0 &&
3854 (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
3855 /* shift by 10 for precision */
3856 ifst->reorder_percent =
3857 ((ifst->total_reorderpkts << 10) * 100) /
3858 (ifst->total_txpkts + ifst->total_rxpkts);
3859 } else {
3860 ifst->reorder_percent = 0;
3861 }
3862 }
3863
3864 static void
nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat * if_st)3865 nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
3866 {
3867 u_int64_t ecn_on_conn, ecn_off_conn;
3868
3869 if (if_st == NULL) {
3870 return;
3871 }
3872 ecn_on_conn = if_st->ecn_client_success +
3873 if_st->ecn_server_success;
3874 ecn_off_conn = if_st->ecn_off_conn +
3875 (if_st->ecn_client_setup - if_st->ecn_client_success) +
3876 (if_st->ecn_server_setup - if_st->ecn_server_success);
3877
3878 /*
3879 * report sack episodes, rst_drop and rxmit_drop
3880 * as a ratio per connection, shift by 10 for precision
3881 */
3882 if (ecn_on_conn > 0) {
3883 if_st->ecn_on.sack_episodes =
3884 (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
3885 if_st->ecn_on.rst_drop =
3886 (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
3887 if_st->ecn_on.rxmit_drop =
3888 (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
3889 } else {
3890 /* set to zero, just in case */
3891 if_st->ecn_on.sack_episodes = 0;
3892 if_st->ecn_on.rst_drop = 0;
3893 if_st->ecn_on.rxmit_drop = 0;
3894 }
3895
3896 if (ecn_off_conn > 0) {
3897 if_st->ecn_off.sack_episodes =
3898 (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
3899 if_st->ecn_off.rst_drop =
3900 (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
3901 if_st->ecn_off.rxmit_drop =
3902 (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
3903 } else {
3904 if_st->ecn_off.sack_episodes = 0;
3905 if_st->ecn_off.rst_drop = 0;
3906 if_st->ecn_off.rxmit_drop = 0;
3907 }
3908 if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
3909 }
3910
/*
 * Walk all fully-attached Ethernet/cellular interfaces and emit one
 * NSTAT_SYSINFO_IFNET_ECN_STATS report per address family (IPv4, then
 * IPv6) per interface.  Rate-limited to once per
 * tcp_report_stats_interval seconds; each family's counters are zeroed
 * after being reported so the next report covers a fresh window.
 */
static void
nstat_ifnet_report_ecn_stats(void)
{
    u_int64_t uptime, last_report_time;
    struct nstat_sysinfo_data data;
    struct nstat_sysinfo_ifnet_ecn_stats *st;
    struct ifnet *ifp;

    uptime = net_uptime();

    /* Rate-limit: bail out until the reporting interval has elapsed. */
    if ((int)(uptime - nstat_ifnet_last_report_time) <
        tcp_report_stats_interval) {
        return;
    }

    last_report_time = nstat_ifnet_last_report_time;
    nstat_ifnet_last_report_time = uptime;
    data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
    st = &data.u.ifnet_ecn_stats;

    ifnet_head_lock_shared();
    TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
        /* Both per-family stat blocks must exist to report anything. */
        if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL) {
            continue;
        }

        if (!IF_FULLY_ATTACHED(ifp)) {
            continue;
        }

        /* Limit reporting to Wifi, Ethernet and cellular. */
        if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
            continue;
        }

        bzero(st, sizeof(*st));
        if (IFNET_IS_CELLULAR(ifp)) {
            st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
        } else if (IFNET_IS_WIFI(ifp)) {
            st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
        } else {
            st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
        }
        data.unsent_data_cnt = ifp->if_unsent_data_cnt;
        /* skip if there was no update since last report */
        if (ifp->if_ipv4_stat->timestamp <= 0 ||
            ifp->if_ipv4_stat->timestamp < last_report_time) {
            goto v6;
        }
        st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
        /* compute percentages using packet counts */
        nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
        nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
        nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
        bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
            sizeof(st->ecn_stat));
        nstat_sysinfo_send_data(&data);
        /* Reset the window now that IPv4 has been reported. */
        bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));

v6:     /* IPv6 path; also the landing point when IPv4 had no updates. */
        /* skip if there was no update since last report */
        if (ifp->if_ipv6_stat->timestamp <= 0 ||
            ifp->if_ipv6_stat->timestamp < last_report_time) {
            continue;
        }
        st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;

        /* compute percentages using packet counts */
        nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
        nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
        nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
        bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
            sizeof(st->ecn_stat));
        nstat_sysinfo_send_data(&data);

        /* Zero the stats in ifp */
        bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
    }
    ifnet_head_done();
}
3991
/* Some thresholds to determine Low Internet mode */
3993 #define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD 1000000 /* 1 Mbps */
3994 #define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD 500000 /* 500 Kbps */
3995 #define NSTAT_LIM_UL_MIN_RTT_THRESHOLD 1000 /* 1 second */
3996 #define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */
3997 #define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD (2 << 10) /* 2 percent packet loss rate */
3998
3999 static boolean_t
nstat_lim_activity_check(struct if_lim_perf_stat * st)4000 nstat_lim_activity_check(struct if_lim_perf_stat *st)
4001 {
4002 /* check that the current activity is enough to report stats */
4003 if (st->lim_total_txpkts < nstat_lim_min_tx_pkts ||
4004 st->lim_total_rxpkts < nstat_lim_min_rx_pkts ||
4005 st->lim_conn_attempts == 0) {
4006 return FALSE;
4007 }
4008
4009 /*
4010 * Compute percentages if there was enough activity. Use
4011 * shift-left by 10 to preserve precision.
4012 */
4013 st->lim_packet_loss_percent = ((st->lim_total_retxpkts << 10) /
4014 st->lim_total_txpkts) * 100;
4015
4016 st->lim_packet_ooo_percent = ((st->lim_total_oopkts << 10) /
4017 st->lim_total_rxpkts) * 100;
4018
4019 st->lim_conn_timeout_percent = ((st->lim_conn_timeouts << 10) /
4020 st->lim_conn_attempts) * 100;
4021
4022 /*
4023 * Is Low Internet detected? First order metrics are bandwidth
4024 * and RTT. If these metrics are below the minimum thresholds
4025 * defined then the network attachment can be classified as
4026 * having Low Internet capacity.
4027 *
4028 * High connection timeout rate also indicates Low Internet
4029 * capacity.
4030 */
4031 if (st->lim_dl_max_bandwidth > 0 &&
4032 st->lim_dl_max_bandwidth <= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD) {
4033 st->lim_dl_detected = 1;
4034 }
4035
4036 if ((st->lim_ul_max_bandwidth > 0 &&
4037 st->lim_ul_max_bandwidth <= NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD) ||
4038 st->lim_rtt_min >= NSTAT_LIM_UL_MIN_RTT_THRESHOLD) {
4039 st->lim_ul_detected = 1;
4040 }
4041
4042 if (st->lim_conn_attempts > 20 &&
4043 st->lim_conn_timeout_percent >=
4044 NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD) {
4045 st->lim_ul_detected = 1;
4046 }
4047 /*
4048 * Second order metrics: If there was high packet loss even after
4049 * using delay based algorithms then we classify it as Low Internet
4050 * again
4051 */
4052 if (st->lim_bk_txpkts >= nstat_lim_min_tx_pkts &&
4053 st->lim_packet_loss_percent >=
4054 NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD) {
4055 st->lim_ul_detected = 1;
4056 }
4057 return TRUE;
4058 }
4059
/* net_uptime() of the last LIM report; rate-limits the reporter below. */
static u_int64_t nstat_lim_last_report_time = 0;

/*
 * Emit one NSTAT_SYSINFO_LIM_STATS report per qualifying interface
 * (fully attached; Ethernet or cellular; enough recent activity per
 * nstat_lim_activity_check).  Rate-limited to once per
 * nstat_lim_interval seconds.  Each interface's LIM counters are zeroed
 * after being copied so the next report covers a fresh window.
 */
static void
nstat_ifnet_report_lim_stats(void)
{
    u_int64_t uptime;
    struct nstat_sysinfo_data data;
    struct nstat_sysinfo_lim_stats *st;
    struct ifnet *ifp;
    int err;

    uptime = net_uptime();

    /* Rate-limit: bail out until the reporting interval has elapsed. */
    if ((u_int32_t)(uptime - nstat_lim_last_report_time) <
        nstat_lim_interval) {
        return;
    }

    nstat_lim_last_report_time = uptime;
    data.flags = NSTAT_SYSINFO_LIM_STATS;
    st = &data.u.lim_stats;
    data.unsent_data_cnt = 0;

    ifnet_head_lock_shared();
    TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
        if (!IF_FULLY_ATTACHED(ifp)) {
            continue;
        }

        /* Limit reporting to Wifi, Ethernet and cellular */
        if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
            continue;
        }

        if (!nstat_lim_activity_check(&ifp->if_lim_stat)) {
            continue;
        }

        bzero(st, sizeof(*st));
        st->ifnet_siglen = sizeof(st->ifnet_signature);
        /* Try the IPv4 network signature first, fall back to IPv6. */
        err = ifnet_get_netsignature(ifp, AF_INET,
            (u_int8_t *)&st->ifnet_siglen, NULL,
            st->ifnet_signature);
        if (err != 0) {
            err = ifnet_get_netsignature(ifp, AF_INET6,
                (u_int8_t *)&st->ifnet_siglen, NULL,
                st->ifnet_signature);
            if (err != 0) {
                /* No signature for either family: skip this ifnet. */
                continue;
            }
        }
        ifnet_lock_shared(ifp);
        if (IFNET_IS_CELLULAR(ifp)) {
            st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
        } else if (IFNET_IS_WIFI(ifp)) {
            st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
        } else {
            st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET;
        }
        bcopy(&ifp->if_lim_stat, &st->lim_stat,
            sizeof(st->lim_stat));

        /* Zero the stats in ifp */
        bzero(&ifp->if_lim_stat, sizeof(ifp->if_lim_stat));
        ifnet_lock_done(ifp);
        nstat_sysinfo_send_data(&data);
    }
    ifnet_head_done();
}
4128
/*
 * Copy-descriptor callback for the ifnet provider: fill the caller's
 * nstat_ifnet_descriptor from live ifnet state (name, index, threshold,
 * type, optional description, and translated link status).
 *
 * Returns EINVAL when the buffer is too small or the interface is gone;
 * 0 on success.
 */
static errno_t
nstat_ifnet_copy_descriptor(
    nstat_provider_cookie_t cookie,
    void *data,
    size_t len)
{
    nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
    struct nstat_ifnet_cookie *ifcookie =
        (struct nstat_ifnet_cookie *)cookie;
    struct ifnet *ifp = ifcookie->ifp;

    if (len < sizeof(nstat_ifnet_descriptor)) {
        return EINVAL;
    }

    if (nstat_ifnet_gone(cookie)) {
        return EINVAL;
    }

    bzero(desc, sizeof(*desc));
    ifnet_lock_shared(ifp);
    strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
    desc->ifindex = ifp->if_index;
    desc->threshold = ifp->if_data_threshold;
    desc->type = ifp->if_type;
    /*
     * Copy the interface description only when it fits.  The copy spans
     * the full fixed-size destination, not just ifd_len — assumes
     * ifd_desc is backed by at least sizeof(desc->description) bytes;
     * NOTE(review): confirm against the ifnet description allocation.
     */
    if (ifp->if_desc.ifd_len < sizeof(desc->description)) {
        memcpy(desc->description, ifp->if_desc.ifd_desc,
            sizeof(desc->description));
    }
    nstat_ifnet_copy_link_status(ifp, desc);
    ifnet_lock_done(ifp);
    return 0;
}
4162
4163 static void
nstat_init_ifnet_provider(void)4164 nstat_init_ifnet_provider(void)
4165 {
4166 bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
4167 nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
4168 nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
4169 nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
4170 nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
4171 nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
4172 nstat_ifnet_provider.nstat_watcher_add = NULL;
4173 nstat_ifnet_provider.nstat_watcher_remove = NULL;
4174 nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
4175 nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
4176 nstat_ifnet_provider.next = nstat_providers;
4177 nstat_providers = &nstat_ifnet_provider;
4178 }
4179
4180 __private_extern__ void
nstat_ifnet_threshold_reached(unsigned int ifindex)4181 nstat_ifnet_threshold_reached(unsigned int ifindex)
4182 {
4183 nstat_control_state *state;
4184 nstat_src *src;
4185 struct ifnet *ifp;
4186 struct nstat_ifnet_cookie *ifcookie;
4187
4188 lck_mtx_lock(&nstat_mtx);
4189 for (state = nstat_controls; state; state = state->ncs_next) {
4190 lck_mtx_lock(&state->ncs_mtx);
4191 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4192 {
4193 if (src->provider != &nstat_ifnet_provider) {
4194 continue;
4195 }
4196 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
4197 ifp = ifcookie->ifp;
4198 if (ifp->if_index != ifindex) {
4199 continue;
4200 }
4201 nstat_control_send_counts(state, src, 0, 0, NULL);
4202 }
4203 lck_mtx_unlock(&state->ncs_mtx);
4204 }
4205 lck_mtx_unlock(&nstat_mtx);
4206 }
4207
4208 #pragma mark -- Sysinfo --
4209 static void
nstat_set_keyval_scalar(nstat_sysinfo_keyval * kv,int key,u_int32_t val)4210 nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
4211 {
4212 kv->nstat_sysinfo_key = key;
4213 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
4214 kv->u.nstat_sysinfo_scalar = val;
4215 kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
4216 }
4217
4218 static void
nstat_set_keyval_u64_scalar(nstat_sysinfo_keyval * kv,int key,u_int64_t val)4219 nstat_set_keyval_u64_scalar(nstat_sysinfo_keyval *kv, int key, u_int64_t val)
4220 {
4221 kv->nstat_sysinfo_key = key;
4222 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
4223 kv->u.nstat_sysinfo_scalar = val;
4224 kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
4225 }
4226
4227 static void
nstat_set_keyval_string(nstat_sysinfo_keyval * kv,int key,u_int8_t * buf,u_int32_t len)4228 nstat_set_keyval_string(nstat_sysinfo_keyval *kv, int key, u_int8_t *buf,
4229 u_int32_t len)
4230 {
4231 kv->nstat_sysinfo_key = key;
4232 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_STRING;
4233 kv->nstat_sysinfo_valsize = min(len,
4234 NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE);
4235 bcopy(buf, kv->u.nstat_sysinfo_string, kv->nstat_sysinfo_valsize);
4236 }
4237
4238 static void
nstat_sysinfo_send_data_internal(nstat_control_state * control,nstat_sysinfo_data * data)4239 nstat_sysinfo_send_data_internal(
4240 nstat_control_state *control,
4241 nstat_sysinfo_data *data)
4242 {
4243 nstat_msg_sysinfo_counts *syscnt = NULL;
4244 size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
4245 nstat_sysinfo_keyval *kv;
4246 errno_t result = 0;
4247 size_t i = 0;
4248
4249 allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
4250 countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
4251 finalsize = allocsize;
4252
4253 /* get number of key-vals for each kind of stat */
4254 switch (data->flags) {
4255 case NSTAT_SYSINFO_MBUF_STATS:
4256 nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
4257 sizeof(u_int32_t);
4258 break;
4259 case NSTAT_SYSINFO_TCP_STATS:
4260 nkeyvals = NSTAT_SYSINFO_TCP_STATS_COUNT;
4261 break;
4262 case NSTAT_SYSINFO_IFNET_ECN_STATS:
4263 nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
4264 sizeof(u_int64_t));
4265
4266 /* Two more keys for ifnet type and proto */
4267 nkeyvals += 2;
4268
4269 /* One key for unsent data. */
4270 nkeyvals++;
4271 break;
4272 case NSTAT_SYSINFO_LIM_STATS:
4273 nkeyvals = NSTAT_LIM_STAT_KEYVAL_COUNT;
4274 break;
4275 case NSTAT_SYSINFO_NET_API_STATS:
4276 nkeyvals = NSTAT_NET_API_STAT_KEYVAL_COUNT;
4277 break;
4278 default:
4279 return;
4280 }
4281 countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
4282 allocsize += countsize;
4283
4284 syscnt = (nstat_msg_sysinfo_counts *) kalloc_data(allocsize,
4285 Z_WAITOK | Z_ZERO);
4286 if (syscnt == NULL) {
4287 return;
4288 }
4289
4290 kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
4291 switch (data->flags) {
4292 case NSTAT_SYSINFO_MBUF_STATS:
4293 {
4294 nstat_set_keyval_scalar(&kv[i++],
4295 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
4296 data->u.mb_stats.total_256b);
4297 nstat_set_keyval_scalar(&kv[i++],
4298 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
4299 data->u.mb_stats.total_2kb);
4300 nstat_set_keyval_scalar(&kv[i++],
4301 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
4302 data->u.mb_stats.total_4kb);
4303 nstat_set_keyval_scalar(&kv[i++],
4304 NSTAT_SYSINFO_MBUF_16KB_TOTAL,
4305 data->u.mb_stats.total_16kb);
4306 nstat_set_keyval_scalar(&kv[i++],
4307 NSTAT_SYSINFO_KEY_SOCK_MBCNT,
4308 data->u.mb_stats.sbmb_total);
4309 nstat_set_keyval_scalar(&kv[i++],
4310 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
4311 data->u.mb_stats.sb_atmbuflimit);
4312 nstat_set_keyval_scalar(&kv[i++],
4313 NSTAT_SYSINFO_MBUF_DRAIN_CNT,
4314 data->u.mb_stats.draincnt);
4315 nstat_set_keyval_scalar(&kv[i++],
4316 NSTAT_SYSINFO_MBUF_MEM_RELEASED,
4317 data->u.mb_stats.memreleased);
4318 nstat_set_keyval_scalar(&kv[i++],
4319 NSTAT_SYSINFO_KEY_SOCK_MBFLOOR,
4320 data->u.mb_stats.sbmb_floor);
4321 VERIFY(i == nkeyvals);
4322 break;
4323 }
4324 case NSTAT_SYSINFO_TCP_STATS:
4325 {
4326 nstat_set_keyval_scalar(&kv[i++],
4327 NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
4328 data->u.tcp_stats.ipv4_avgrtt);
4329 nstat_set_keyval_scalar(&kv[i++],
4330 NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
4331 data->u.tcp_stats.ipv6_avgrtt);
4332 nstat_set_keyval_scalar(&kv[i++],
4333 NSTAT_SYSINFO_KEY_SEND_PLR,
4334 data->u.tcp_stats.send_plr);
4335 nstat_set_keyval_scalar(&kv[i++],
4336 NSTAT_SYSINFO_KEY_RECV_PLR,
4337 data->u.tcp_stats.recv_plr);
4338 nstat_set_keyval_scalar(&kv[i++],
4339 NSTAT_SYSINFO_KEY_SEND_TLRTO,
4340 data->u.tcp_stats.send_tlrto_rate);
4341 nstat_set_keyval_scalar(&kv[i++],
4342 NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
4343 data->u.tcp_stats.send_reorder_rate);
4344 nstat_set_keyval_scalar(&kv[i++],
4345 NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
4346 data->u.tcp_stats.connection_attempts);
4347 nstat_set_keyval_scalar(&kv[i++],
4348 NSTAT_SYSINFO_CONNECTION_ACCEPTS,
4349 data->u.tcp_stats.connection_accepts);
4350 nstat_set_keyval_scalar(&kv[i++],
4351 NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
4352 data->u.tcp_stats.ecn_client_enabled);
4353 nstat_set_keyval_scalar(&kv[i++],
4354 NSTAT_SYSINFO_ECN_SERVER_ENABLED,
4355 data->u.tcp_stats.ecn_server_enabled);
4356 nstat_set_keyval_scalar(&kv[i++],
4357 NSTAT_SYSINFO_ECN_CLIENT_SETUP,
4358 data->u.tcp_stats.ecn_client_setup);
4359 nstat_set_keyval_scalar(&kv[i++],
4360 NSTAT_SYSINFO_ECN_SERVER_SETUP,
4361 data->u.tcp_stats.ecn_server_setup);
4362 nstat_set_keyval_scalar(&kv[i++],
4363 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
4364 data->u.tcp_stats.ecn_client_success);
4365 nstat_set_keyval_scalar(&kv[i++],
4366 NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
4367 data->u.tcp_stats.ecn_server_success);
4368 nstat_set_keyval_scalar(&kv[i++],
4369 NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
4370 data->u.tcp_stats.ecn_not_supported);
4371 nstat_set_keyval_scalar(&kv[i++],
4372 NSTAT_SYSINFO_ECN_LOST_SYN,
4373 data->u.tcp_stats.ecn_lost_syn);
4374 nstat_set_keyval_scalar(&kv[i++],
4375 NSTAT_SYSINFO_ECN_LOST_SYNACK,
4376 data->u.tcp_stats.ecn_lost_synack);
4377 nstat_set_keyval_scalar(&kv[i++],
4378 NSTAT_SYSINFO_ECN_RECV_CE,
4379 data->u.tcp_stats.ecn_recv_ce);
4380 nstat_set_keyval_scalar(&kv[i++],
4381 NSTAT_SYSINFO_ECN_RECV_ECE,
4382 data->u.tcp_stats.ecn_recv_ece);
4383 nstat_set_keyval_scalar(&kv[i++],
4384 NSTAT_SYSINFO_ECN_SENT_ECE,
4385 data->u.tcp_stats.ecn_sent_ece);
4386 nstat_set_keyval_scalar(&kv[i++],
4387 NSTAT_SYSINFO_ECN_CONN_RECV_CE,
4388 data->u.tcp_stats.ecn_conn_recv_ce);
4389 nstat_set_keyval_scalar(&kv[i++],
4390 NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
4391 data->u.tcp_stats.ecn_conn_recv_ece);
4392 nstat_set_keyval_scalar(&kv[i++],
4393 NSTAT_SYSINFO_ECN_CONN_PLNOCE,
4394 data->u.tcp_stats.ecn_conn_plnoce);
4395 nstat_set_keyval_scalar(&kv[i++],
4396 NSTAT_SYSINFO_ECN_CONN_PL_CE,
4397 data->u.tcp_stats.ecn_conn_pl_ce);
4398 nstat_set_keyval_scalar(&kv[i++],
4399 NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
4400 data->u.tcp_stats.ecn_conn_nopl_ce);
4401 nstat_set_keyval_scalar(&kv[i++],
4402 NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
4403 data->u.tcp_stats.ecn_fallback_synloss);
4404 nstat_set_keyval_scalar(&kv[i++],
4405 NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
4406 data->u.tcp_stats.ecn_fallback_reorder);
4407 nstat_set_keyval_scalar(&kv[i++],
4408 NSTAT_SYSINFO_ECN_FALLBACK_CE,
4409 data->u.tcp_stats.ecn_fallback_ce);
4410 nstat_set_keyval_scalar(&kv[i++],
4411 NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
4412 data->u.tcp_stats.tfo_syn_data_rcv);
4413 nstat_set_keyval_scalar(&kv[i++],
4414 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
4415 data->u.tcp_stats.tfo_cookie_req_rcv);
4416 nstat_set_keyval_scalar(&kv[i++],
4417 NSTAT_SYSINFO_TFO_COOKIE_SENT,
4418 data->u.tcp_stats.tfo_cookie_sent);
4419 nstat_set_keyval_scalar(&kv[i++],
4420 NSTAT_SYSINFO_TFO_COOKIE_INVALID,
4421 data->u.tcp_stats.tfo_cookie_invalid);
4422 nstat_set_keyval_scalar(&kv[i++],
4423 NSTAT_SYSINFO_TFO_COOKIE_REQ,
4424 data->u.tcp_stats.tfo_cookie_req);
4425 nstat_set_keyval_scalar(&kv[i++],
4426 NSTAT_SYSINFO_TFO_COOKIE_RCV,
4427 data->u.tcp_stats.tfo_cookie_rcv);
4428 nstat_set_keyval_scalar(&kv[i++],
4429 NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
4430 data->u.tcp_stats.tfo_syn_data_sent);
4431 nstat_set_keyval_scalar(&kv[i++],
4432 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
4433 data->u.tcp_stats.tfo_syn_data_acked);
4434 nstat_set_keyval_scalar(&kv[i++],
4435 NSTAT_SYSINFO_TFO_SYN_LOSS,
4436 data->u.tcp_stats.tfo_syn_loss);
4437 nstat_set_keyval_scalar(&kv[i++],
4438 NSTAT_SYSINFO_TFO_BLACKHOLE,
4439 data->u.tcp_stats.tfo_blackhole);
4440 nstat_set_keyval_scalar(&kv[i++],
4441 NSTAT_SYSINFO_TFO_COOKIE_WRONG,
4442 data->u.tcp_stats.tfo_cookie_wrong);
4443 nstat_set_keyval_scalar(&kv[i++],
4444 NSTAT_SYSINFO_TFO_NO_COOKIE_RCV,
4445 data->u.tcp_stats.tfo_no_cookie_rcv);
4446 nstat_set_keyval_scalar(&kv[i++],
4447 NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE,
4448 data->u.tcp_stats.tfo_heuristics_disable);
4449 nstat_set_keyval_scalar(&kv[i++],
4450 NSTAT_SYSINFO_TFO_SEND_BLACKHOLE,
4451 data->u.tcp_stats.tfo_sndblackhole);
4452 nstat_set_keyval_scalar(&kv[i++],
4453 NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT,
4454 data->u.tcp_stats.mptcp_handover_attempt);
4455 nstat_set_keyval_scalar(&kv[i++],
4456 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT,
4457 data->u.tcp_stats.mptcp_interactive_attempt);
4458 nstat_set_keyval_scalar(&kv[i++],
4459 NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT,
4460 data->u.tcp_stats.mptcp_aggregate_attempt);
4461 nstat_set_keyval_scalar(&kv[i++],
4462 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT,
4463 data->u.tcp_stats.mptcp_fp_handover_attempt);
4464 nstat_set_keyval_scalar(&kv[i++],
4465 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT,
4466 data->u.tcp_stats.mptcp_fp_interactive_attempt);
4467 nstat_set_keyval_scalar(&kv[i++],
4468 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT,
4469 data->u.tcp_stats.mptcp_fp_aggregate_attempt);
4470 nstat_set_keyval_scalar(&kv[i++],
4471 NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK,
4472 data->u.tcp_stats.mptcp_heuristic_fallback);
4473 nstat_set_keyval_scalar(&kv[i++],
4474 NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK,
4475 data->u.tcp_stats.mptcp_fp_heuristic_fallback);
4476 nstat_set_keyval_scalar(&kv[i++],
4477 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI,
4478 data->u.tcp_stats.mptcp_handover_success_wifi);
4479 nstat_set_keyval_scalar(&kv[i++],
4480 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL,
4481 data->u.tcp_stats.mptcp_handover_success_cell);
4482 nstat_set_keyval_scalar(&kv[i++],
4483 NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS,
4484 data->u.tcp_stats.mptcp_interactive_success);
4485 nstat_set_keyval_scalar(&kv[i++],
4486 NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS,
4487 data->u.tcp_stats.mptcp_aggregate_success);
4488 nstat_set_keyval_scalar(&kv[i++],
4489 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI,
4490 data->u.tcp_stats.mptcp_fp_handover_success_wifi);
4491 nstat_set_keyval_scalar(&kv[i++],
4492 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL,
4493 data->u.tcp_stats.mptcp_fp_handover_success_cell);
4494 nstat_set_keyval_scalar(&kv[i++],
4495 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS,
4496 data->u.tcp_stats.mptcp_fp_interactive_success);
4497 nstat_set_keyval_scalar(&kv[i++],
4498 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS,
4499 data->u.tcp_stats.mptcp_fp_aggregate_success);
4500 nstat_set_keyval_scalar(&kv[i++],
4501 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI,
4502 data->u.tcp_stats.mptcp_handover_cell_from_wifi);
4503 nstat_set_keyval_scalar(&kv[i++],
4504 NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL,
4505 data->u.tcp_stats.mptcp_handover_wifi_from_cell);
4506 nstat_set_keyval_scalar(&kv[i++],
4507 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI,
4508 data->u.tcp_stats.mptcp_interactive_cell_from_wifi);
4509 nstat_set_keyval_u64_scalar(&kv[i++],
4510 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES,
4511 data->u.tcp_stats.mptcp_handover_cell_bytes);
4512 nstat_set_keyval_u64_scalar(&kv[i++],
4513 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES,
4514 data->u.tcp_stats.mptcp_interactive_cell_bytes);
4515 nstat_set_keyval_u64_scalar(&kv[i++],
4516 NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES,
4517 data->u.tcp_stats.mptcp_aggregate_cell_bytes);
4518 nstat_set_keyval_u64_scalar(&kv[i++],
4519 NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES,
4520 data->u.tcp_stats.mptcp_handover_all_bytes);
4521 nstat_set_keyval_u64_scalar(&kv[i++],
4522 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES,
4523 data->u.tcp_stats.mptcp_interactive_all_bytes);
4524 nstat_set_keyval_u64_scalar(&kv[i++],
4525 NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES,
4526 data->u.tcp_stats.mptcp_aggregate_all_bytes);
4527 nstat_set_keyval_scalar(&kv[i++],
4528 NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI,
4529 data->u.tcp_stats.mptcp_back_to_wifi);
4530 nstat_set_keyval_scalar(&kv[i++],
4531 NSTAT_SYSINFO_MPTCP_WIFI_PROXY,
4532 data->u.tcp_stats.mptcp_wifi_proxy);
4533 nstat_set_keyval_scalar(&kv[i++],
4534 NSTAT_SYSINFO_MPTCP_CELL_PROXY,
4535 data->u.tcp_stats.mptcp_cell_proxy);
4536 nstat_set_keyval_scalar(&kv[i++],
4537 NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL,
4538 data->u.tcp_stats.mptcp_triggered_cell);
4539 VERIFY(i == nkeyvals);
4540 break;
4541 }
4542 case NSTAT_SYSINFO_IFNET_ECN_STATS:
4543 {
4544 nstat_set_keyval_scalar(&kv[i++],
4545 NSTAT_SYSINFO_ECN_IFNET_TYPE,
4546 data->u.ifnet_ecn_stats.ifnet_type);
4547 nstat_set_keyval_scalar(&kv[i++],
4548 NSTAT_SYSINFO_ECN_IFNET_PROTO,
4549 data->u.ifnet_ecn_stats.ifnet_proto);
4550 nstat_set_keyval_u64_scalar(&kv[i++],
4551 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
4552 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
4553 nstat_set_keyval_u64_scalar(&kv[i++],
4554 NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
4555 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
4556 nstat_set_keyval_u64_scalar(&kv[i++],
4557 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
4558 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
4559 nstat_set_keyval_u64_scalar(&kv[i++],
4560 NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
4561 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
4562 nstat_set_keyval_u64_scalar(&kv[i++],
4563 NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
4564 data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
4565 nstat_set_keyval_u64_scalar(&kv[i++],
4566 NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
4567 data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
4568 nstat_set_keyval_u64_scalar(&kv[i++],
4569 NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
4570 data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
4571 nstat_set_keyval_u64_scalar(&kv[i++],
4572 NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
4573 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
4574 nstat_set_keyval_u64_scalar(&kv[i++],
4575 NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
4576 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
4577 nstat_set_keyval_u64_scalar(&kv[i++],
4578 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
4579 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
4580 nstat_set_keyval_u64_scalar(&kv[i++],
4581 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
4582 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
4583 nstat_set_keyval_u64_scalar(&kv[i++],
4584 NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
4585 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
4586 nstat_set_keyval_u64_scalar(&kv[i++],
4587 NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
4588 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
4589 nstat_set_keyval_u64_scalar(&kv[i++],
4590 NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
4591 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
4592 nstat_set_keyval_u64_scalar(&kv[i++],
4593 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
4594 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
4595 nstat_set_keyval_u64_scalar(&kv[i++],
4596 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
4597 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
4598 nstat_set_keyval_u64_scalar(&kv[i++],
4599 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
4600 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
4601 nstat_set_keyval_u64_scalar(&kv[i++],
4602 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
4603 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
4604 nstat_set_keyval_u64_scalar(&kv[i++],
4605 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
4606 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
4607 nstat_set_keyval_u64_scalar(&kv[i++],
4608 NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
4609 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
4610 nstat_set_keyval_u64_scalar(&kv[i++],
4611 NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
4612 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
4613 nstat_set_keyval_u64_scalar(&kv[i++],
4614 NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
4615 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
4616 nstat_set_keyval_u64_scalar(&kv[i++],
4617 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
4618 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
4619 nstat_set_keyval_u64_scalar(&kv[i++],
4620 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
4621 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
4622 nstat_set_keyval_u64_scalar(&kv[i++],
4623 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
4624 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
4625 nstat_set_keyval_u64_scalar(&kv[i++],
4626 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
4627 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
4628 nstat_set_keyval_u64_scalar(&kv[i++],
4629 NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
4630 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
4631 nstat_set_keyval_u64_scalar(&kv[i++],
4632 NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
4633 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
4634 nstat_set_keyval_u64_scalar(&kv[i++],
4635 NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
4636 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
4637 nstat_set_keyval_u64_scalar(&kv[i++],
4638 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
4639 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
4640 nstat_set_keyval_u64_scalar(&kv[i++],
4641 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
4642 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
4643 nstat_set_keyval_u64_scalar(&kv[i++],
4644 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
4645 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
4646 nstat_set_keyval_u64_scalar(&kv[i++],
4647 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
4648 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
4649 nstat_set_keyval_u64_scalar(&kv[i++],
4650 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
4651 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
4652 nstat_set_keyval_u64_scalar(&kv[i++],
4653 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
4654 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
4655 nstat_set_keyval_u64_scalar(&kv[i++],
4656 NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
4657 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
4658 nstat_set_keyval_u64_scalar(&kv[i++],
4659 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
4660 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
4661 nstat_set_keyval_u64_scalar(&kv[i++],
4662 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
4663 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
4664 nstat_set_keyval_u64_scalar(&kv[i++],
4665 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
4666 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
4667 nstat_set_keyval_u64_scalar(&kv[i++],
4668 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
4669 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
4670 nstat_set_keyval_u64_scalar(&kv[i++],
4671 NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
4672 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
4673 nstat_set_keyval_u64_scalar(&kv[i++],
4674 NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
4675 data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
4676 nstat_set_keyval_scalar(&kv[i++],
4677 NSTAT_SYSINFO_IFNET_UNSENT_DATA,
4678 data->unsent_data_cnt);
4679 nstat_set_keyval_u64_scalar(&kv[i++],
4680 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST,
4681 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
4682 nstat_set_keyval_u64_scalar(&kv[i++],
4683 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT,
4684 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
4685 nstat_set_keyval_u64_scalar(&kv[i++],
4686 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST,
4687 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst);
4688 break;
4689 }
4690 case NSTAT_SYSINFO_LIM_STATS:
4691 {
4692 nstat_set_keyval_string(&kv[i++],
4693 NSTAT_SYSINFO_LIM_IFNET_SIGNATURE,
4694 data->u.lim_stats.ifnet_signature,
4695 data->u.lim_stats.ifnet_siglen);
4696 nstat_set_keyval_u64_scalar(&kv[i++],
4697 NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH,
4698 data->u.lim_stats.lim_stat.lim_dl_max_bandwidth);
4699 nstat_set_keyval_u64_scalar(&kv[i++],
4700 NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH,
4701 data->u.lim_stats.lim_stat.lim_ul_max_bandwidth);
4702 nstat_set_keyval_u64_scalar(&kv[i++],
4703 NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT,
4704 data->u.lim_stats.lim_stat.lim_packet_loss_percent);
4705 nstat_set_keyval_u64_scalar(&kv[i++],
4706 NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT,
4707 data->u.lim_stats.lim_stat.lim_packet_ooo_percent);
4708 nstat_set_keyval_u64_scalar(&kv[i++],
4709 NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE,
4710 data->u.lim_stats.lim_stat.lim_rtt_variance);
4711 nstat_set_keyval_u64_scalar(&kv[i++],
4712 NSTAT_SYSINFO_LIM_IFNET_RTT_MIN,
4713 data->u.lim_stats.lim_stat.lim_rtt_min);
4714 nstat_set_keyval_u64_scalar(&kv[i++],
4715 NSTAT_SYSINFO_LIM_IFNET_RTT_AVG,
4716 data->u.lim_stats.lim_stat.lim_rtt_average);
4717 nstat_set_keyval_u64_scalar(&kv[i++],
4718 NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT,
4719 data->u.lim_stats.lim_stat.lim_conn_timeout_percent);
4720 nstat_set_keyval_scalar(&kv[i++],
4721 NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED,
4722 data->u.lim_stats.lim_stat.lim_dl_detected);
4723 nstat_set_keyval_scalar(&kv[i++],
4724 NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED,
4725 data->u.lim_stats.lim_stat.lim_ul_detected);
4726 nstat_set_keyval_scalar(&kv[i++],
4727 NSTAT_SYSINFO_LIM_IFNET_TYPE,
4728 data->u.lim_stats.ifnet_type);
4729 break;
4730 }
4731 case NSTAT_SYSINFO_NET_API_STATS:
4732 {
4733 nstat_set_keyval_u64_scalar(&kv[i++],
4734 NSTAT_SYSINFO_API_IF_FLTR_ATTACH,
4735 data->u.net_api_stats.net_api_stats.nas_iflt_attach_total);
4736 nstat_set_keyval_u64_scalar(&kv[i++],
4737 NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS,
4738 data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total);
4739 nstat_set_keyval_u64_scalar(&kv[i++],
4740 NSTAT_SYSINFO_API_IP_FLTR_ADD,
4741 data->u.net_api_stats.net_api_stats.nas_ipf_add_total);
4742 nstat_set_keyval_u64_scalar(&kv[i++],
4743 NSTAT_SYSINFO_API_IP_FLTR_ADD_OS,
4744 data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total);
4745 nstat_set_keyval_u64_scalar(&kv[i++],
4746 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH,
4747 data->u.net_api_stats.net_api_stats.nas_sfltr_register_total);
4748 nstat_set_keyval_u64_scalar(&kv[i++],
4749 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS,
4750 data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total);
4751
4752
4753 nstat_set_keyval_u64_scalar(&kv[i++],
4754 NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL,
4755 data->u.net_api_stats.net_api_stats.nas_socket_alloc_total);
4756 nstat_set_keyval_u64_scalar(&kv[i++],
4757 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL,
4758 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total);
4759 nstat_set_keyval_u64_scalar(&kv[i++],
4760 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS,
4761 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total);
4762 nstat_set_keyval_u64_scalar(&kv[i++],
4763 NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID,
4764 data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total);
4765
4766 nstat_set_keyval_u64_scalar(&kv[i++],
4767 NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL,
4768 data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total);
4769 nstat_set_keyval_u64_scalar(&kv[i++],
4770 NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE,
4771 data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total);
4772 nstat_set_keyval_u64_scalar(&kv[i++],
4773 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET,
4774 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total);
4775 nstat_set_keyval_u64_scalar(&kv[i++],
4776 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6,
4777 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total);
4778 nstat_set_keyval_u64_scalar(&kv[i++],
4779 NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM,
4780 data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total);
4781 nstat_set_keyval_u64_scalar(&kv[i++],
4782 NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH,
4783 data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total);
4784 nstat_set_keyval_u64_scalar(&kv[i++],
4785 NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY,
4786 data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total);
4787 nstat_set_keyval_u64_scalar(&kv[i++],
4788 NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV,
4789 data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total);
4790 nstat_set_keyval_u64_scalar(&kv[i++],
4791 NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER,
4792 data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total);
4793
4794 nstat_set_keyval_u64_scalar(&kv[i++],
4795 NSTAT_SYSINFO_API_SOCK_INET_STREAM,
4796 data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total);
4797 nstat_set_keyval_u64_scalar(&kv[i++],
4798 NSTAT_SYSINFO_API_SOCK_INET_DGRAM,
4799 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total);
4800 nstat_set_keyval_u64_scalar(&kv[i++],
4801 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED,
4802 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected);
4803 nstat_set_keyval_u64_scalar(&kv[i++],
4804 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS,
4805 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns);
4806 nstat_set_keyval_u64_scalar(&kv[i++],
4807 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA,
4808 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data);
4809
4810 nstat_set_keyval_u64_scalar(&kv[i++],
4811 NSTAT_SYSINFO_API_SOCK_INET6_STREAM,
4812 data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total);
4813 nstat_set_keyval_u64_scalar(&kv[i++],
4814 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM,
4815 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total);
4816 nstat_set_keyval_u64_scalar(&kv[i++],
4817 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED,
4818 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected);
4819 nstat_set_keyval_u64_scalar(&kv[i++],
4820 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS,
4821 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns);
4822 nstat_set_keyval_u64_scalar(&kv[i++],
4823 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA,
4824 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data);
4825
4826 nstat_set_keyval_u64_scalar(&kv[i++],
4827 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN,
4828 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total);
4829 nstat_set_keyval_u64_scalar(&kv[i++],
4830 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS,
4831 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total);
4832
4833 nstat_set_keyval_u64_scalar(&kv[i++],
4834 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM,
4835 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total);
4836 nstat_set_keyval_u64_scalar(&kv[i++],
4837 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM,
4838 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total);
4839
4840 nstat_set_keyval_u64_scalar(&kv[i++],
4841 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM,
4842 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total);
4843 nstat_set_keyval_u64_scalar(&kv[i++],
4844 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM,
4845 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total);
4846
4847 nstat_set_keyval_u64_scalar(&kv[i++],
4848 NSTAT_SYSINFO_API_IFNET_ALLOC,
4849 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total);
4850 nstat_set_keyval_u64_scalar(&kv[i++],
4851 NSTAT_SYSINFO_API_IFNET_ALLOC_OS,
4852 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total);
4853
4854 nstat_set_keyval_u64_scalar(&kv[i++],
4855 NSTAT_SYSINFO_API_PF_ADDRULE,
4856 data->u.net_api_stats.net_api_stats.nas_pf_addrule_total);
4857 nstat_set_keyval_u64_scalar(&kv[i++],
4858 NSTAT_SYSINFO_API_PF_ADDRULE_OS,
4859 data->u.net_api_stats.net_api_stats.nas_pf_addrule_os);
4860
4861 nstat_set_keyval_u64_scalar(&kv[i++],
4862 NSTAT_SYSINFO_API_VMNET_START,
4863 data->u.net_api_stats.net_api_stats.nas_vmnet_total);
4864
4865 #if SKYWALK
4866 nstat_set_keyval_scalar(&kv[i++],
4867 NSTAT_SYSINFO_API_IF_NETAGENT_ENABLED,
4868 if_is_fsw_transport_netagent_enabled());
4869 #endif /* SKYWALK */
4870
4871 nstat_set_keyval_scalar(&kv[i++],
4872 NSTAT_SYSINFO_API_REPORT_INTERVAL,
4873 data->u.net_api_stats.report_interval);
4874
4875 break;
4876 }
4877 }
4878 if (syscnt != NULL) {
4879 VERIFY(i > 0 && i <= nkeyvals);
4880 countsize = offsetof(nstat_sysinfo_counts,
4881 nstat_sysinfo_keyvals) +
4882 sizeof(nstat_sysinfo_keyval) * i;
4883 finalsize += countsize;
4884 syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
4885 assert(finalsize <= MAX_NSTAT_MSG_HDR_LENGTH);
4886 syscnt->hdr.length = (u_int16_t)finalsize;
4887 syscnt->counts.nstat_sysinfo_len = (u_int32_t)countsize;
4888
4889 result = ctl_enqueuedata(control->ncs_kctl,
4890 control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
4891 if (result != 0) {
4892 nstat_stats.nstat_sysinfofailures += 1;
4893 }
4894 kfree_data(syscnt, allocsize);
4895 }
4896 return;
4897 }
4898
4899 __private_extern__ void
nstat_sysinfo_send_data(nstat_sysinfo_data * data)4900 nstat_sysinfo_send_data(
4901 nstat_sysinfo_data *data)
4902 {
4903 nstat_control_state *control;
4904
4905 lck_mtx_lock(&nstat_mtx);
4906 for (control = nstat_controls; control; control = control->ncs_next) {
4907 lck_mtx_lock(&control->ncs_mtx);
4908 if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0) {
4909 nstat_sysinfo_send_data_internal(control, data);
4910 }
4911 lck_mtx_unlock(&control->ncs_mtx);
4912 }
4913 lck_mtx_unlock(&nstat_mtx);
4914 }
4915
/*
 * Kick off all periodic system-level statistics reports.  Invoked from the
 * idle timer path (nstat_idle_check).  nstat_net_api_report_stats rate-limits
 * itself internally; the other reporters are presumed to do likewise —
 * NOTE(review): confirm against their definitions elsewhere in this file.
 */
static void
nstat_sysinfo_generate_report(void)
{
	mbuf_report_peak_usage();
	tcp_report_stats();
	nstat_ifnet_report_ecn_stats();
	nstat_ifnet_report_lim_stats();
	nstat_net_api_report_stats();
}
4925
#pragma mark -- net_api --

/*
 * Snapshot of net_api_stats taken when the last report was sent; used by
 * nstat_net_api_report_stats to compute deltas for the per-flow counters.
 */
static struct net_api_stats net_api_stats_before;
/* net_uptime() value at the time of the last report. */
static u_int64_t net_api_stats_last_report_time;
4930
/*
 * Build an NSTAT_SYSINFO_NET_API_STATS report from the global net_api_stats
 * counters and push it to subscribers via nstat_sysinfo_send_data().
 * Rate-limited: returns without sending if less than
 * net_api_stats_report_interval seconds have passed since the last report.
 */
static void
nstat_net_api_report_stats(void)
{
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_net_api_stats *st = &data.u.net_api_stats;
	u_int64_t uptime;

	uptime = net_uptime();

	/* Too soon since the last report; skip this cycle. */
	if ((u_int32_t)(uptime - net_api_stats_last_report_time) <
	    net_api_stats_report_interval) {
		return;
	}

	/* Record how long this report actually covers, then reset the clock. */
	st->report_interval = (u_int32_t)(uptime - net_api_stats_last_report_time);
	net_api_stats_last_report_time = uptime;

	data.flags = NSTAT_SYSINFO_NET_API_STATS;
	data.unsent_data_cnt = 0;

	/*
	 * Some of the fields in the report are the current value and
	 * other fields are the delta from the last report:
	 * - Report difference for the per flow counters as they increase
	 *   with time
	 * - Report current value for other counters as they tend not to change
	 *   much with time
	 */
#define STATCOPY(f) \
	(st->net_api_stats.f = net_api_stats.f)
#define STATDIFF(f) \
	(st->net_api_stats.f = net_api_stats.f - net_api_stats_before.f)

	STATCOPY(nas_iflt_attach_count);
	STATCOPY(nas_iflt_attach_total);
	STATCOPY(nas_iflt_attach_os_total);

	STATCOPY(nas_ipf_add_count);
	STATCOPY(nas_ipf_add_total);
	STATCOPY(nas_ipf_add_os_total);

	STATCOPY(nas_sfltr_register_count);
	STATCOPY(nas_sfltr_register_total);
	STATCOPY(nas_sfltr_register_os_total);

	STATDIFF(nas_socket_alloc_total);
	STATDIFF(nas_socket_in_kernel_total);
	STATDIFF(nas_socket_in_kernel_os_total);
	STATDIFF(nas_socket_necp_clientuuid_total);

	STATDIFF(nas_socket_domain_local_total);
	STATDIFF(nas_socket_domain_route_total);
	STATDIFF(nas_socket_domain_inet_total);
	STATDIFF(nas_socket_domain_inet6_total);
	STATDIFF(nas_socket_domain_system_total);
	STATDIFF(nas_socket_domain_multipath_total);
	STATDIFF(nas_socket_domain_key_total);
	STATDIFF(nas_socket_domain_ndrv_total);
	STATDIFF(nas_socket_domain_other_total);

	STATDIFF(nas_socket_inet_stream_total);
	STATDIFF(nas_socket_inet_dgram_total);
	STATDIFF(nas_socket_inet_dgram_connected);
	STATDIFF(nas_socket_inet_dgram_dns);
	STATDIFF(nas_socket_inet_dgram_no_data);

	STATDIFF(nas_socket_inet6_stream_total);
	STATDIFF(nas_socket_inet6_dgram_total);
	STATDIFF(nas_socket_inet6_dgram_connected);
	STATDIFF(nas_socket_inet6_dgram_dns);
	STATDIFF(nas_socket_inet6_dgram_no_data);

	STATDIFF(nas_socket_mcast_join_total);
	STATDIFF(nas_socket_mcast_join_os_total);

	STATDIFF(nas_sock_inet6_stream_exthdr_in);
	STATDIFF(nas_sock_inet6_stream_exthdr_out);
	STATDIFF(nas_sock_inet6_dgram_exthdr_in);
	STATDIFF(nas_sock_inet6_dgram_exthdr_out);

	STATDIFF(nas_nx_flow_inet_stream_total);
	STATDIFF(nas_nx_flow_inet_dgram_total);

	STATDIFF(nas_nx_flow_inet6_stream_total);
	STATDIFF(nas_nx_flow_inet6_dgram_total);

	STATCOPY(nas_ifnet_alloc_count);
	STATCOPY(nas_ifnet_alloc_total);
	STATCOPY(nas_ifnet_alloc_os_count);
	STATCOPY(nas_ifnet_alloc_os_total);

	STATCOPY(nas_pf_addrule_total);
	STATCOPY(nas_pf_addrule_os);

	STATCOPY(nas_vmnet_total);

#undef STATCOPY
#undef STATDIFF

	nstat_sysinfo_send_data(&data);

	/*
	 * Save a copy of the current fields so we can diff them the next time
	 */
	memcpy(&net_api_stats_before, &net_api_stats,
	    sizeof(struct net_api_stats));
	_CASSERT(sizeof(net_api_stats_before) == sizeof(net_api_stats));
}
5039
5040
5041 #pragma mark -- Kernel Control Socket --
5042
5043 static kern_ctl_ref nstat_ctlref = NULL;
5044
5045 static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
5046 static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
5047 static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
5048
5049 static errno_t
nstat_enqueue_success(uint64_t context,nstat_control_state * state,u_int16_t flags)5050 nstat_enqueue_success(
5051 uint64_t context,
5052 nstat_control_state *state,
5053 u_int16_t flags)
5054 {
5055 nstat_msg_hdr success;
5056 errno_t result;
5057
5058 bzero(&success, sizeof(success));
5059 success.context = context;
5060 success.type = NSTAT_MSG_TYPE_SUCCESS;
5061 success.length = sizeof(success);
5062 success.flags = flags;
5063 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
5064 sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
5065 if (result != 0) {
5066 if (nstat_debug != 0) {
5067 printf("%s: could not enqueue success message %d\n",
5068 __func__, result);
5069 }
5070 nstat_stats.nstat_successmsgfailures += 1;
5071 }
5072 return result;
5073 }
5074
5075 static errno_t
nstat_control_send_event(nstat_control_state * state,nstat_src * src,u_int64_t event)5076 nstat_control_send_event(
5077 nstat_control_state *state,
5078 nstat_src *src,
5079 u_int64_t event)
5080 {
5081 errno_t result = 0;
5082 int failed = 0;
5083
5084 if (nstat_control_reporting_allowed(state, src, 0)) {
5085 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
5086 result = nstat_control_send_update(state, src, 0, event, 0, NULL);
5087 if (result != 0) {
5088 failed = 1;
5089 if (nstat_debug != 0) {
5090 printf("%s - nstat_control_send_event() %d\n", __func__, result);
5091 }
5092 }
5093 } else {
5094 if (nstat_debug != 0) {
5095 printf("%s - nstat_control_send_event() used when updates not supported\n", __func__);
5096 }
5097 }
5098 }
5099 return result;
5100 }
5101
5102 static errno_t
nstat_control_send_goodbye(nstat_control_state * state,nstat_src * src)5103 nstat_control_send_goodbye(
5104 nstat_control_state *state,
5105 nstat_src *src)
5106 {
5107 errno_t result = 0;
5108 int failed = 0;
5109 u_int16_t hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_FILTER;
5110
5111 if (nstat_control_reporting_allowed(state, src, (src->ns_reported)? NSTAT_FILTER_SUPPRESS_BORING_CLOSE: 0)) {
5112 hdr_flags = 0;
5113 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
5114 result = nstat_control_send_update(state, src, 0, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
5115 if (result != 0) {
5116 failed = 1;
5117 hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_DROP;
5118 if (nstat_debug != 0) {
5119 printf("%s - nstat_control_send_update() %d\n", __func__, result);
5120 }
5121 }
5122 } else {
5123 // send one last counts notification
5124 result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
5125 if (result != 0) {
5126 failed = 1;
5127 hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_DROP;
5128 if (nstat_debug != 0) {
5129 printf("%s - nstat_control_send_counts() %d\n", __func__, result);
5130 }
5131 }
5132
5133 // send a last description
5134 result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
5135 if (result != 0) {
5136 failed = 1;
5137 hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_DROP;
5138 if (nstat_debug != 0) {
5139 printf("%s - nstat_control_send_description() %d\n", __func__, result);
5140 }
5141 }
5142 }
5143 }
5144
5145 // send the source removed notification
5146 result = nstat_control_send_removed(state, src, hdr_flags);
5147 if (result != 0 && nstat_debug) {
5148 failed = 1;
5149 if (nstat_debug != 0) {
5150 printf("%s - nstat_control_send_removed() %d\n", __func__, result);
5151 }
5152 }
5153
5154 if (failed != 0) {
5155 nstat_stats.nstat_control_send_goodbye_failures++;
5156 }
5157
5158
5159 return result;
5160 }
5161
5162 static errno_t
nstat_flush_accumulated_msgs(nstat_control_state * state)5163 nstat_flush_accumulated_msgs(
5164 nstat_control_state *state)
5165 {
5166 errno_t result = 0;
5167 if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0) {
5168 mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
5169 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
5170 if (result != 0) {
5171 nstat_stats.nstat_flush_accumulated_msgs_failures++;
5172 if (nstat_debug != 0) {
5173 printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
5174 }
5175 mbuf_freem(state->ncs_accumulated);
5176 }
5177 state->ncs_accumulated = NULL;
5178 }
5179 return result;
5180 }
5181
/*
 * Append a message to the control state's accumulation mbuf so several
 * messages can be delivered to the client in one enqueue.  If the current
 * mbuf lacks room it is flushed first; if no mbuf exists (or the copy-back
 * fails) the message is sent directly with ctl_enqueuedata() as a fallback.
 * hdr->length is overwritten with `length` before the message is stored.
 * Returns 0 on success; failures are tallied in
 * nstat_stats.nstat_accumulate_msg_failures.
 */
static errno_t
nstat_accumulate_msg(
	nstat_control_state *state,
	nstat_msg_hdr *hdr,
	size_t length)
{
	assert(length <= MAX_NSTAT_MSG_HDR_LENGTH);

	/* Not enough room left for this message: ship what we have so far. */
	if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length) {
		// Will send the current mbuf
		nstat_flush_accumulated_msgs(state);
	}

	errno_t result = 0;

	/* Start a fresh single-cluster packet if nothing is being accumulated. */
	if (state->ncs_accumulated == NULL) {
		unsigned int one = 1;
		if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0) {
			if (nstat_debug != 0) {
				printf("%s - mbuf_allocpacket failed\n", __func__);
			}
			result = ENOMEM;
		} else {
			mbuf_setlen(state->ncs_accumulated, 0);
		}
	}

	if (result == 0) {
		/* Stamp the final wire length before copying into the mbuf. */
		hdr->length = (u_int16_t)length;
		result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
		    length, hdr, MBUF_DONTWAIT);
	}

	/* Accumulation failed: flush and fall back to a direct enqueue. */
	if (result != 0) {
		nstat_flush_accumulated_msgs(state);
		if (nstat_debug != 0) {
			printf("%s - resorting to ctl_enqueuedata\n", __func__);
		}
		result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
	}

	if (result != 0) {
		nstat_stats.nstat_accumulate_msg_failures++;
	}

	return result;
}
5229
/*
 * Periodic (60 s) thread-call callback: sweep every control's source list for
 * sources whose provider reports them gone, send each a goodbye, and collect
 * them on a local dead list so cleanup happens after all locks are dropped.
 * Controls with NSTAT_FLAG_REQCOUNTS set are skipped this pass (the flag is
 * cleared so they are swept next time).  Also re-arms itself while any
 * control remains open, and triggers the system-level sysinfo reports.
 * Always returns NULL (thread_call_func_t shape).
 */
static void*
nstat_idle_check(
	__unused thread_call_param_t p0,
	__unused thread_call_param_t p1)
{
	nstat_control_state *control;
	nstat_src *src, *tmpsrc;
	tailq_head_nstat_src dead_list;
	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);

	/* Clear the pending-deadline marker; re-set below if we re-arm. */
	nstat_idle_time = 0;

	for (control = nstat_controls; control; control = control->ncs_next) {
		lck_mtx_lock(&control->ncs_mtx);
		if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS)) {
			TAILQ_FOREACH_SAFE(src, &control->ncs_src_queue, ns_control_link, tmpsrc)
			{
				if (src->provider->nstat_gone(src->cookie)) {
					errno_t result;

					// Pull it off the list
					TAILQ_REMOVE(&control->ncs_src_queue, src, ns_control_link);

					/*
					 * result intentionally unused: goodbye
					 * failures are already counted inside
					 * nstat_control_send_goodbye().
					 */
					result = nstat_control_send_goodbye(control, src);

					// Put this on the list to release later
					TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
				}
			}
		}
		control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
		lck_mtx_unlock(&control->ncs_mtx);
	}

	/* Re-arm the idle timer only while clients are still connected. */
	if (nstat_controls) {
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	/* Generate any system level reports, if needed */
	nstat_sysinfo_generate_report();

	// Release the sources now that we aren't holding lots of locks
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, FALSE);
	}

	nstat_prune_procdetails();

	return NULL;
}
5286
5287 static void
nstat_control_register(void)5288 nstat_control_register(void)
5289 {
5290 // Register the control
5291 struct kern_ctl_reg nstat_control;
5292 bzero(&nstat_control, sizeof(nstat_control));
5293 strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
5294 nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
5295 nstat_control.ctl_sendsize = nstat_sendspace;
5296 nstat_control.ctl_recvsize = nstat_recvspace;
5297 nstat_control.ctl_connect = nstat_control_connect;
5298 nstat_control.ctl_disconnect = nstat_control_disconnect;
5299 nstat_control.ctl_send = nstat_control_send;
5300
5301 ctl_register(&nstat_control, &nstat_ctlref);
5302 }
5303
5304 static void
nstat_control_cleanup_source(nstat_control_state * state,struct nstat_src * src,boolean_t locked)5305 nstat_control_cleanup_source(
5306 nstat_control_state *state,
5307 struct nstat_src *src,
5308 boolean_t locked)
5309 {
5310 errno_t result;
5311
5312 if (state) {
5313 result = nstat_control_send_removed(state, src, 0);
5314 if (result != 0) {
5315 nstat_stats.nstat_control_cleanup_source_failures++;
5316 if (nstat_debug != 0) {
5317 printf("%s - nstat_control_send_removed() %d\n",
5318 __func__, result);
5319 }
5320 }
5321 }
5322 // Cleanup the source if we found it.
5323 src->provider->nstat_release(src->cookie, locked);
5324 kfree_type(struct nstat_src, src);
5325 }
5326
5327
5328 static bool
nstat_control_reporting_allowed(nstat_control_state * state,nstat_src * src,u_int64_t suppression_flags)5329 nstat_control_reporting_allowed(
5330 nstat_control_state *state,
5331 nstat_src *src,
5332 u_int64_t suppression_flags)
5333 {
5334 if (src->provider->nstat_reporting_allowed == NULL) {
5335 return TRUE;
5336 }
5337
5338 return src->provider->nstat_reporting_allowed(src->cookie,
5339 &state->ncs_provider_filters[src->provider->nstat_provider_id], suppression_flags);
5340 }
5341
5342
static errno_t
nstat_control_connect(
	kern_ctl_ref kctl,
	struct sockaddr_ctl *sac,
	void **uinfo)
{
	// Kernel-control connect callback: allocate per-client state, link it
	// into the global client list, and arm the idle-check timer if this
	// is the first client.  *uinfo carries the state back to later
	// send/disconnect callbacks.  Returns 0 or ENOMEM.
	nstat_control_state *state = kalloc_type(nstat_control_state,
	    Z_WAITOK | Z_ZERO);
	if (state == NULL) {
		return ENOMEM;
	}

	lck_mtx_init(&state->ncs_mtx, &nstat_lck_grp, NULL);
	state->ncs_kctl = kctl;
	state->ncs_unit = sac->sc_unit;
	// Start with REQCOUNTS set so the idle check skips this client's
	// sources on its first pass.
	state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
	state->ncs_procdetails = nstat_retain_curprocdetails();
	*uinfo = state;

	lck_mtx_lock(&nstat_mtx);
	// Push onto the head of the global singly-linked client list.
	state->ncs_next = nstat_controls;
	nstat_controls = state;

	// nstat_idle_time == 0 means the periodic idle check is not armed.
	if (nstat_idle_time == 0) {
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	return 0;
}
5375
static errno_t
nstat_control_disconnect(
	__unused kern_ctl_ref kctl,
	__unused u_int32_t unit,
	void *uinfo)
{
	// Kernel-control disconnect callback: unlink the client state from
	// the global list, detach it from all watched providers, and release
	// every source.  Source cleanup runs after both locks are dropped.
	u_int32_t watching;
	nstat_control_state *state = (nstat_control_state*)uinfo;
	tailq_head_nstat_src cleanup_list;
	nstat_src *src;

	TAILQ_INIT(&cleanup_list);

	// pull it out of the global list of states
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state **statepp;
	for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next) {
		if (*statepp == state) {
			*statepp = state->ncs_next;
			break;
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	lck_mtx_lock(&state->ncs_mtx);
	// Stop watching for sources
	nstat_provider *provider;
	watching = state->ncs_watching;
	state->ncs_watching = 0;
	// Walk the provider list, clearing each bit we were watching; the
	// loop ends early once every watched bit has been handled.
	for (provider = nstat_providers; provider && watching; provider = provider->next) {
		if ((watching & (1 << provider->nstat_provider_id)) != 0) {
			watching &= ~(1 << provider->nstat_provider_id);
			provider->nstat_watcher_remove(state);
		}
	}

	// set cleanup flags
	state->ncs_flags |= NSTAT_FLAG_CLEANUP;

	// Drop any partially accumulated (unsent) message buffer.
	if (state->ncs_accumulated) {
		mbuf_freem(state->ncs_accumulated);
		state->ncs_accumulated = NULL;
	}

	// Copy out the list of sources
	TAILQ_CONCAT(&cleanup_list, &state->ncs_src_queue, ns_control_link);
	lck_mtx_unlock(&state->ncs_mtx);

	// Release sources without holding the client mutex; the client is no
	// longer reachable from the global list at this point.
	while ((src = TAILQ_FIRST(&cleanup_list))) {
		TAILQ_REMOVE(&cleanup_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, FALSE);
	}

	lck_mtx_destroy(&state->ncs_mtx, &nstat_lck_grp);
	nstat_release_procdetails(state->ncs_procdetails);
	kfree_type(struct nstat_control_state, state);

	return 0;
}
5435
5436 static nstat_src_ref_t
nstat_control_next_src_ref(nstat_control_state * state)5437 nstat_control_next_src_ref(
5438 nstat_control_state *state)
5439 {
5440 return ++state->ncs_next_srcref;
5441 }
5442
5443 static errno_t
nstat_control_send_counts(nstat_control_state * state,nstat_src * src,unsigned long long context,u_int16_t hdr_flags,int * gone)5444 nstat_control_send_counts(
5445 nstat_control_state *state,
5446 nstat_src *src,
5447 unsigned long long context,
5448 u_int16_t hdr_flags,
5449 int *gone)
5450 {
5451 nstat_msg_src_counts counts;
5452 errno_t result = 0;
5453
5454 /* Some providers may not have any counts to send */
5455 if (src->provider->nstat_counts == NULL) {
5456 return 0;
5457 }
5458
5459 bzero(&counts, sizeof(counts));
5460 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
5461 counts.hdr.length = sizeof(counts);
5462 counts.hdr.flags = hdr_flags;
5463 counts.hdr.context = context;
5464 counts.srcref = src->srcref;
5465 counts.event_flags = 0;
5466
5467 if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0) {
5468 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
5469 counts.counts.nstat_rxbytes == 0 &&
5470 counts.counts.nstat_txbytes == 0) {
5471 result = EAGAIN;
5472 } else {
5473 result = ctl_enqueuedata(state->ncs_kctl,
5474 state->ncs_unit, &counts, sizeof(counts),
5475 CTL_DATA_EOR);
5476 if (result != 0) {
5477 nstat_stats.nstat_sendcountfailures += 1;
5478 }
5479 }
5480 }
5481 return result;
5482 }
5483
5484 static errno_t
nstat_control_append_counts(nstat_control_state * state,nstat_src * src,int * gone)5485 nstat_control_append_counts(
5486 nstat_control_state *state,
5487 nstat_src *src,
5488 int *gone)
5489 {
5490 /* Some providers may not have any counts to send */
5491 if (!src->provider->nstat_counts) {
5492 return 0;
5493 }
5494
5495 nstat_msg_src_counts counts;
5496 bzero(&counts, sizeof(counts));
5497 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
5498 counts.hdr.length = sizeof(counts);
5499 counts.srcref = src->srcref;
5500 counts.event_flags = 0;
5501
5502 errno_t result = 0;
5503 result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
5504 if (result != 0) {
5505 return result;
5506 }
5507
5508 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
5509 counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0) {
5510 return EAGAIN;
5511 }
5512
5513 return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
5514 }
5515
static int
nstat_control_send_description(
	nstat_control_state *state,
	nstat_src *src,
	u_int64_t context,
	u_int16_t hdr_flags)
{
	// Build and enqueue a SRC_DESC message for one source, with the
	// provider-specific descriptor inlined after the fixed header.
	// Provider doesn't support getting the descriptor? Done.
	if (src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) {
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t msg;
	unsigned int one = 1;  // single contiguous segment
	size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	assert(size <= MAX_NSTAT_MSG_HDR_LENGTH);

	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
		return ENOMEM;
	}

	nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
	bzero(desc, size);
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	// Query the provider for the provider specific bits
	errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);

	// We own the mbuf until a successful enqueue hands it off.
	if (result != 0) {
		mbuf_freem(msg);
		return result;
	}

	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = (u_int16_t)size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
	if (result != 0) {
		nstat_stats.nstat_descriptionfailures += 1;
		mbuf_freem(msg);
	}

	return result;
}
5568
static errno_t
nstat_control_append_description(
	nstat_control_state *state,
	nstat_src *src)
{
	// Build a SRC_DESC message in a stack buffer and append it to the
	// client's accumulation mbuf.  Descriptors larger than 512 bytes are
	// refused here to bound stack usage; callers can fall back to the
	// non-appending path.
	size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) {
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = (u_int16_t)size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t result = 0;
	// Fill in the description
	// Query the provider for the provider specific bits
	result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
	    src->provider->nstat_descriptor_length);
	if (result != 0) {
		return result;
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
5602
5603 static uint64_t
nstat_extension_flags_for_source(nstat_control_state * state,nstat_src * src)5604 nstat_extension_flags_for_source(
5605 nstat_control_state *state,
5606 nstat_src *src)
5607 {
5608 VERIFY(state != NULL & src != NULL);
5609 nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
5610
5611 return state->ncs_provider_filters[provider_id].npf_extensions;
5612 }
5613
static int
nstat_control_send_update(
	nstat_control_state *state,
	nstat_src *src,
	u_int64_t context,
	u_int64_t event,
	u_int16_t hdr_flags,
	int *gone)
{
	// Send a combined descriptor+counts "update" message for one source,
	// optionally followed by provider-specific extension items when the
	// client's filter requests them.  *gone is set by the provider's
	// counts callback when the underlying object has disappeared.
	// Provider doesn't support getting the descriptor or counts? Done.
	if ((src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) &&
	    src->provider->nstat_counts == NULL) {
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t msg;
	unsigned int one = 1;
	size_t size = offsetof(nstat_msg_src_update, data) +
	    src->provider->nstat_descriptor_length;
	size_t total_extension_size = 0;
	u_int32_t num_extensions = 0;
	u_int64_t extension_mask = nstat_extension_flags_for_source(state, src);

	// First pass: size each requested extension (NULL buffer probe) so
	// the whole message can be allocated contiguously.  Extensions that
	// report zero size are dropped from the mask.
	if ((extension_mask != 0) && (src->provider->nstat_copy_extension != NULL)) {
		uint32_t extension_id = 0;
		for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) {
			if ((extension_mask & (1ull << extension_id)) != 0) {
				size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, NULL, 0);
				if (extension_size == 0) {
					extension_mask &= ~(1ull << extension_id);
				} else {
					num_extensions++;
					// Each payload is padded to 8-byte alignment.
					total_extension_size += ROUNDUP64(extension_size);
				}
			}
		}
		size += total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions);
	}
	assert(size <= MAX_NSTAT_MSG_HDR_LENGTH);

	/*
	 * XXX Would be interesting to see how extended updates affect mbuf
	 * allocations, given the max segments defined as 1, one may get
	 * allocations with higher fragmentation.
	 */
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
		return ENOMEM;
	}

	nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
	bzero(desc, size);
	desc->hdr.context = context;
	desc->hdr.type = (num_extensions == 0) ? NSTAT_MSG_TYPE_SRC_UPDATE :
	    NSTAT_MSG_TYPE_SRC_EXTENDED_UPDATE;
	desc->hdr.length = (u_int16_t)size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = event;
	desc->provider = src->provider->nstat_provider_id;

	/*
	 * XXX The following two lines are only valid when max-segments is passed
	 * as one.
	 * Other computations with offset also depend on that being true.
	 * Be aware of that before making any modifications that changes that
	 * behavior.
	 */
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	errno_t result = 0;
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
		    src->provider->nstat_descriptor_length);
		if (result != 0) {
			mbuf_freem(msg);
			return result;
		}
	}

	// Second pass: lay down {item header, padded payload} pairs
	// immediately after the descriptor.
	if (num_extensions > 0) {
		nstat_msg_src_extended_item_hdr *p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)mbuf_data(msg) +
		    sizeof(nstat_msg_src_update_hdr) + src->provider->nstat_descriptor_length);
		uint32_t extension_id = 0;

		bzero(p_extension_hdr, total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions));

		for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) {
			if ((extension_mask & (1ull << extension_id)) != 0) {
				void *buf = (void *)(p_extension_hdr + 1);
				size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, buf, total_extension_size);
				if ((extension_size == 0) || (extension_size > total_extension_size)) {
					// Something has gone wrong. Instead of attempting to wind back the excess buffer space, mark it as unused
					p_extension_hdr->type = NSTAT_EXTENDED_UPDATE_TYPE_UNKNOWN;
					p_extension_hdr->length = total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * (num_extensions - 1));
					break;
				} else {
					// The extension may be of any size alignment, reported as such in the extension header,
					// but we pad to ensure that whatever comes next is suitably aligned
					p_extension_hdr->type = extension_id;
					p_extension_hdr->length = extension_size;
					extension_size = ROUNDUP64(extension_size);
					total_extension_size -= extension_size;
					p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)buf + extension_size);
					num_extensions--;
				}
			}
		}
	}

	if (src->provider->nstat_counts) {
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result == 0) {
			if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
			    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
				result = EAGAIN;
			} else {
				// Success hands mbuf ownership to the control socket.
				result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
			}
		}
	}

	if (result != 0) {
		nstat_stats.nstat_srcupatefailures += 1;
		mbuf_freem(msg);
	} else {
		src->ns_reported = true;
	}

	return result;
}
5748
static errno_t
nstat_control_append_update(
	nstat_control_state *state,
	nstat_src *src,
	int *gone)
{
	// Stack-buffer variant of nstat_control_send_update: builds the same
	// (possibly extended) update message and appends it to the client's
	// accumulation mbuf instead of enqueueing it on its own.
	if ((src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) &&
	    src->provider->nstat_counts == NULL) {
		return EOPNOTSUPP;
	}

	size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
	size_t total_extension_size = 0;
	u_int32_t num_extensions = 0;
	u_int64_t extension_mask = nstat_extension_flags_for_source(state, src);

	// First pass: size each requested extension (NULL buffer probe);
	// zero-sized extensions are dropped from the mask.
	if ((extension_mask != 0) && (src->provider->nstat_copy_extension != NULL)) {
		uint32_t extension_id = 0;
		for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) {
			if ((extension_mask & (1ull << extension_id)) != 0) {
				size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, NULL, 0);
				if (extension_size == 0) {
					extension_mask &= ~(1ull << extension_id);
				} else {
					num_extensions++;
					total_extension_size += ROUNDUP64(extension_size);
				}
			}
		}
		size += total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions);
	}

	/*
	 * This kind of limits extensions.
	 * The optimization is around being able to deliver multiple
	 * of updates bundled together.
	 * Increasing the size runs the risk of too much stack usage.
	 * One could potentially changed the allocation below to be on heap.
	 * For now limiting it to half of NSTAT_MAX_MSG_SIZE.
	 */
	if (size > (NSTAT_MAX_MSG_SIZE >> 1)) {
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer;
	desc->hdr.type = (num_extensions == 0) ? NSTAT_MSG_TYPE_SRC_UPDATE :
	    NSTAT_MSG_TYPE_SRC_EXTENDED_UPDATE;
	desc->hdr.length = (u_int16_t)size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t result = 0;
	// Fill in the description
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
		    src->provider->nstat_descriptor_length);
		if (result != 0) {
			nstat_stats.nstat_copy_descriptor_failures++;
			if (nstat_debug != 0) {
				printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
			}
			return result;
		}
	}

	// Second pass: lay down {item header, padded payload} pairs
	// immediately after the descriptor.
	if (num_extensions > 0) {
		nstat_msg_src_extended_item_hdr *p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)buffer +
		    sizeof(nstat_msg_src_update_hdr) + src->provider->nstat_descriptor_length);
		uint32_t extension_id = 0;
		bzero(p_extension_hdr, total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions));

		for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) {
			if ((extension_mask & (1ull << extension_id)) != 0) {
				void *buf = (void *)(p_extension_hdr + 1);
				size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, buf, total_extension_size);
				if ((extension_size == 0) || (extension_size > total_extension_size)) {
					// Something has gone wrong. Instead of attempting to wind back the excess buffer space, mark it as unused
					p_extension_hdr->type = NSTAT_EXTENDED_UPDATE_TYPE_UNKNOWN;
					p_extension_hdr->length = total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * (num_extensions - 1));
					break;
				} else {
					// NOTE(review): unlike nstat_control_send_update, the
					// header's length here is the ROUNDUP64'd size, not the
					// raw extension size — confirm this asymmetry is intended.
					extension_size = ROUNDUP64(extension_size);
					p_extension_hdr->type = extension_id;
					p_extension_hdr->length = extension_size;
					total_extension_size -= extension_size;
					p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)buf + extension_size);
					num_extensions--;
				}
			}
		}
	}

	if (src->provider->nstat_counts) {
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result != 0) {
			nstat_stats.nstat_provider_counts_failures++;
			if (nstat_debug != 0) {
				printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
			}
			return result;
		}

		// Skip all-zero reports when the client asked to suppress them.
		if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
		    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
			return EAGAIN;
		}
	}

	result = nstat_accumulate_msg(state, &desc->hdr, size);
	if (result == 0) {
		src->ns_reported = true;
	}
	return result;
}
5870
5871 static errno_t
nstat_control_send_removed(nstat_control_state * state,nstat_src * src,u_int16_t hdr_flags)5872 nstat_control_send_removed(
5873 nstat_control_state *state,
5874 nstat_src *src,
5875 u_int16_t hdr_flags)
5876 {
5877 nstat_msg_src_removed removed;
5878 errno_t result;
5879
5880 bzero(&removed, sizeof(removed));
5881 removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
5882 removed.hdr.length = sizeof(removed);
5883 removed.hdr.context = 0;
5884 removed.hdr.flags = hdr_flags;
5885 removed.srcref = src->srcref;
5886 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
5887 sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
5888 if (result != 0) {
5889 nstat_stats.nstat_msgremovedfailures += 1;
5890 }
5891
5892 return result;
5893 }
5894
static errno_t
nstat_control_handle_add_request(
	nstat_control_state *state,
	mbuf_t m)
{
	// Handle an ADD_SRC request: look up the provider entry described by
	// the request's parameter blob and register it as a new source for
	// this client.
	errno_t result;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param)) {
		return EINVAL;
	}

	// Calculate the length of the parameter field
	ssize_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
	// Cap the parameter blob at 2KB to bound the kernel allocation below.
	if (paramlength < 0 || paramlength > 2 * 1024) {
		return EINVAL;
	}

	nstat_provider *provider = NULL;
	nstat_provider_cookie_t cookie = NULL;
	nstat_msg_add_src_req *req = mbuf_data(m);
	if (mbuf_pkthdr_len(m) > mbuf_len(m)) {
		// parameter is too large, we need to make a contiguous copy
		void *data = (void *) kalloc_data(paramlength, Z_WAITOK);

		if (!data) {
			return ENOMEM;
		}
		result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
		if (result == 0) {
			result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
		}
		kfree_data(data, paramlength);
	} else {
		// Parameter is already contiguous in the first mbuf.
		result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
	}

	if (result != 0) {
		return result;
	}

	result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
	if (result != 0) {
		// Source add failed: drop the provider reference taken by lookup.
		provider->nstat_release(cookie, 0);
	}

	return result;
}
5943
5944 static errno_t
nstat_set_provider_filter(nstat_control_state * state,nstat_msg_add_all_srcs * req)5945 nstat_set_provider_filter(
5946 nstat_control_state *state,
5947 nstat_msg_add_all_srcs *req)
5948 {
5949 nstat_provider_id_t provider_id = req->provider;
5950
5951 u_int32_t prev_ncs_watching = atomic_or_32_ov(&state->ncs_watching, (1 << provider_id));
5952
5953 if ((prev_ncs_watching & (1 << provider_id)) != 0) {
5954 return EALREADY;
5955 }
5956
5957 state->ncs_watching |= (1 << provider_id);
5958 state->ncs_provider_filters[provider_id].npf_events = req->events;
5959 state->ncs_provider_filters[provider_id].npf_flags = req->filter;
5960
5961 // The extensions should be populated by a more direct mechanism
5962 // Using the top 32 bits of the filter flags reduces the namespace of both,
5963 // but is a convenient workaround that avoids ntstat.h changes that would require rebuild of all clients
5964 state->ncs_provider_filters[provider_id].npf_extensions = (req->filter >> NSTAT_FILTER_ALLOWED_EXTENSIONS_SHIFT) & NSTAT_EXTENDED_UPDATE_FLAG_MASK;
5965 state->ncs_provider_filters[provider_id].npf_pid = req->target_pid;
5966 uuid_copy(state->ncs_provider_filters[provider_id].npf_uuid, req->target_uuid);
5967 return 0;
5968 }
5969
static errno_t
nstat_control_handle_add_all(
	nstat_control_state *state,
	mbuf_t m)
{
	// Handle an ADD_ALL_SRCS request: make this client a watcher of every
	// current and future source of one provider, subject to the filter
	// carried in the request.  Requires the network-statistics privilege
	// when nstat_privcheck is set.
	errno_t result = 0;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs)) {
		return EINVAL;
	}

	nstat_msg_add_all_srcs *req = mbuf_data(m);
	if (req->provider > NSTAT_PROVIDER_LAST) {
		return ENOENT;
	}

	nstat_provider *provider = nstat_find_provider_by_id(req->provider);

	if (!provider) {
		return ENOENT;
	}
	// Not every provider supports watcher semantics.
	if (provider->nstat_watcher_add == NULL) {
		return ENOTSUP;
	}

	if (nstat_privcheck != 0) {
		result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0) {
			return result;
		}
	}

	lck_mtx_lock(&state->ncs_mtx);
	if (req->filter & NSTAT_FILTER_SUPPRESS_SRC_ADDED) {
		// Suppression of source messages implicitly requires the use of update messages
		state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
	}
	lck_mtx_unlock(&state->ncs_mtx);

	// rdar://problem/30301300 Different providers require different synchronization
	// to ensure that a new entry does not get double counted due to being added prior
	// to all current provider entries being added. Hence pass the provider the details
	// in the original request for this to be applied atomically

	result = provider->nstat_watcher_add(state, req);

	if (result == 0) {
		// Acknowledge the request back to the client.
		nstat_enqueue_success(req->hdr.context, state, 0);
	}

	return result;
}
6024
static errno_t
nstat_control_source_add(
	u_int64_t context,
	nstat_control_state *state,
	nstat_provider *provider,
	nstat_provider_cookie_t cookie)
{
	// Create an nstat_src binding (provider, cookie) to this client,
	// assign it a source reference, and — unless the client's filter
	// suppresses them — send a SRC_ADDED message.  On success the source
	// owns the provider reference held by `cookie`; on failure the caller
	// must release it.
	// Fill out source added message if appropriate
	mbuf_t msg = NULL;
	nstat_src_ref_t *srcrefp = NULL;

	u_int64_t provider_filter_flags =
	    state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
	boolean_t tell_user =
	    ((provider_filter_flags & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
	// Per-source filter bits are derived from the per-provider filter.
	u_int32_t src_filter =
	    (provider_filter_flags & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
	    ? NSTAT_FILTER_NOZEROBYTES : 0;

	if (provider_filter_flags & NSTAT_FILTER_TCP_NO_EARLY_CLOSE) {
		src_filter |= NSTAT_FILTER_TCP_NO_EARLY_CLOSE;
	}

	if (tell_user) {
		unsigned int one = 1;

		// Build the SRC_ADDED message up front; the srcref field is
		// patched in later once the reference has been assigned.
		if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
		    &one, &msg) != 0) {
			return ENOMEM;
		}

		mbuf_setlen(msg, sizeof(nstat_msg_src_added));
		mbuf_pkthdr_setlen(msg, mbuf_len(msg));
		nstat_msg_src_added *add = mbuf_data(msg);
		bzero(add, sizeof(*add));
		add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
		assert(mbuf_len(msg) <= MAX_NSTAT_MSG_HDR_LENGTH);
		add->hdr.length = (u_int16_t)mbuf_len(msg);
		add->hdr.context = context;
		add->provider = provider->nstat_provider_id;
		srcrefp = &add->srcref;
	}

	// Allocate storage for the source
	nstat_src *src = kalloc_type(struct nstat_src, Z_WAITOK);
	if (src == NULL) {
		if (msg) {
			mbuf_freem(msg);
		}
		return ENOMEM;
	}

	// Fill in the source, including picking an unused source ref
	lck_mtx_lock(&state->ncs_mtx);

	src->srcref = nstat_control_next_src_ref(state);
	if (srcrefp) {
		*srcrefp = src->srcref;
	}

	// Refuse new sources while the client is being torn down, or if the
	// reference counter produced the reserved invalid value.
	if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID) {
		lck_mtx_unlock(&state->ncs_mtx);
		kfree_type(struct nstat_src, src);
		if (msg) {
			mbuf_freem(msg);
		}
		return EINVAL;
	}
	src->provider = provider;
	src->cookie = cookie;
	src->filter = src_filter;
	src->seq = 0;

	if (msg) {
		// send the source added message if appropriate
		errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
		    CTL_DATA_EOR);
		if (result != 0) {
			nstat_stats.nstat_srcaddedfailures += 1;
			lck_mtx_unlock(&state->ncs_mtx);
			kfree_type(struct nstat_src, src);
			mbuf_freem(msg);
			return result;
		}
	}
	// Put the source in the list
	TAILQ_INSERT_HEAD(&state->ncs_src_queue, src, ns_control_link);
	src->ns_control = state;

	lck_mtx_unlock(&state->ncs_mtx);

	return 0;
}
6118
6119 static errno_t
nstat_control_handle_remove_request(nstat_control_state * state,mbuf_t m)6120 nstat_control_handle_remove_request(
6121 nstat_control_state *state,
6122 mbuf_t m)
6123 {
6124 nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
6125 nstat_src *src;
6126
6127 if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0) {
6128 return EINVAL;
6129 }
6130
6131 lck_mtx_lock(&state->ncs_mtx);
6132
6133 // Remove this source as we look for it
6134 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
6135 {
6136 if (src->srcref == srcref) {
6137 break;
6138 }
6139 }
6140 if (src) {
6141 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
6142 }
6143
6144 lck_mtx_unlock(&state->ncs_mtx);
6145
6146 if (src) {
6147 nstat_control_cleanup_source(state, src, FALSE);
6148 }
6149
6150 return src ? 0 : ENOENT;
6151 }
6152
/*
 * Handle NSTAT_MSG_TYPE_QUERY_SRC: enqueue counts messages for a single
 * source (req.srcref) or for all sources (NSTAT_SRC_REF_ALL).  Query-all
 * sweeps are bounded per request; the continuation flag returned to the
 * client lets it resume where a partial sweep stopped.  Returns 0 when the
 * query (or its completion notification) was delivered, else an errno that
 * nstat_control_send reports back to the client.
 */
static errno_t
nstat_control_handle_query_request(
	nstat_control_state *state,
	mbuf_t m)
{
	// TBD: handle this from another thread so we can enqueue a lot of data
	// As written, if a client requests query all, this function will be
	// called from their send of the request message. We will attempt to write
	// responses and succeed until the buffer fills up. Since the clients thread
	// is blocked on send, it won't be reading unless the client has two threads
	// using this socket, one for read and one for write. Two threads probably
	// won't work with this code anyhow since we don't have proper locking in
	// place yet.
	tailq_head_nstat_src dead_list;     // sources seen "gone"; released after unlock
	errno_t result = ENOENT;
	nstat_msg_query_src_req req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}

	TAILQ_INIT(&dead_list);
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	lck_mtx_lock(&state->ncs_mtx);

	if (all_srcs) {
		state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
	}
	nstat_src *src, *tmpsrc;
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);


	TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
	{
		int gone = 0;

		// XXX ignore IFACE types?
		if (all_srcs || src->srcref == req.srcref) {
			// In a partial query-all, skip sources already reported in
			// this sweep (their seq matches the current sweep number).
			if (nstat_control_reporting_allowed(state, src, 0)
			    && (!partial || !all_srcs || src->seq != state->ncs_seq)) {
				if (all_srcs &&
				    (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0) {
					result = nstat_control_append_counts(state, src, &gone);
				} else {
					result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
				}

				if (ENOMEM == result || ENOBUFS == result) {
					/*
					 * If the counts message failed to
					 * enqueue then we should clear our flag so
					 * that a client doesn't miss anything on
					 * idle cleanup. We skip the "gone"
					 * processing in the hope that we may
					 * catch it another time.
					 */
					state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
					break;
				}
				if (partial) {
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
		}

		if (gone) {
			// send one last descriptor message so client may see last state
			// If we can't send the notification now, it
			// will be sent in the idle cleanup.
			result = nstat_control_send_description(state, src, 0, 0);
			if (result != 0) {
				nstat_stats.nstat_control_send_description_failures++;
				if (nstat_debug != 0) {
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
				}
				state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
				break;
			}

			// pull src out of the list
			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}

		if (all_srcs) {
			// Cap the work done per request; the continuation flag
			// lets the client pick up where this sweep stopped.
			if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
				break;
			}
		} else if (req.srcref == src->srcref) {
			// Single-source query: done once the target was handled.
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL) {
		// src == NULL here means the walk reached the end of the list.
		flags = nstat_control_end_query(state, src, partial);
	}

	lck_mtx_unlock(&state->ncs_mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result) {
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	// Release dead sources only after the state lock has been dropped.
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
6285
/*
 * Handle NSTAT_MSG_TYPE_GET_SRC_DESC: enqueue descriptor messages for a
 * single source (req.srcref) or for all sources (NSTAT_SRC_REF_ALL), with
 * the same partial/continuation bookkeeping as the query-src path.
 * Returns 0 on success or an errno for nstat_control_send to report.
 */
static errno_t
nstat_control_handle_get_src_description(
	nstat_control_state *state,
	mbuf_t m)
{
	nstat_msg_get_src_description req;
	errno_t result = ENOENT;
	nstat_src *src;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}

	lck_mtx_lock(&state->ncs_mtx);
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
	{
		if (all_srcs || src->srcref == req.srcref) {
			// In a partial query-all, skip sources already reported in
			// this sweep (their seq matches the current sweep number).
			if (nstat_control_reporting_allowed(state, src, 0)
			    && (!all_srcs || !partial || src->seq != state->ncs_seq)) {
				if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs) {
					result = nstat_control_append_description(state, src);
				} else {
					result = nstat_control_send_description(state, src, req.hdr.context, 0);
				}

				if (ENOMEM == result || ENOBUFS == result) {
					/*
					 * If the description message failed to
					 * enqueue then we give up for now.
					 */
					break;
				}
				if (partial) {
					/*
					 * Note, we skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
					// Cap the work per request; continuation resumes later.
					if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
						break;
					}
				}
			}

			// Single-source query: done once the target was handled.
			if (!all_srcs) {
				break;
			}
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL) {
		// src == NULL here means the walk reached the end of the list.
		flags = nstat_control_end_query(state, src, partial);
	}

	lck_mtx_unlock(&state->ncs_mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result) {
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	return result;
}
6366
6367 static errno_t
nstat_control_handle_set_filter(nstat_control_state * state,mbuf_t m)6368 nstat_control_handle_set_filter(
6369 nstat_control_state *state,
6370 mbuf_t m)
6371 {
6372 nstat_msg_set_filter req;
6373 nstat_src *src;
6374
6375 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
6376 return EINVAL;
6377 }
6378 if (req.srcref == NSTAT_SRC_REF_ALL ||
6379 req.srcref == NSTAT_SRC_REF_INVALID) {
6380 return EINVAL;
6381 }
6382
6383 lck_mtx_lock(&state->ncs_mtx);
6384 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
6385 {
6386 if (req.srcref == src->srcref) {
6387 src->filter = req.filter;
6388 break;
6389 }
6390 }
6391 lck_mtx_unlock(&state->ncs_mtx);
6392 if (src == NULL) {
6393 return ENOENT;
6394 }
6395
6396 return 0;
6397 }
6398
6399 static void
nstat_send_error(nstat_control_state * state,u_int64_t context,u_int32_t error)6400 nstat_send_error(
6401 nstat_control_state *state,
6402 u_int64_t context,
6403 u_int32_t error)
6404 {
6405 errno_t result;
6406 struct nstat_msg_error err;
6407
6408 bzero(&err, sizeof(err));
6409 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
6410 err.hdr.length = sizeof(err);
6411 err.hdr.context = context;
6412 err.error = error;
6413
6414 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
6415 sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
6416 if (result != 0) {
6417 nstat_stats.nstat_msgerrorfailures++;
6418 }
6419 }
6420
6421 static boolean_t
nstat_control_begin_query(nstat_control_state * state,const nstat_msg_hdr * hdrp)6422 nstat_control_begin_query(
6423 nstat_control_state *state,
6424 const nstat_msg_hdr *hdrp)
6425 {
6426 boolean_t partial = FALSE;
6427
6428 if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) {
6429 /* A partial query all has been requested. */
6430 partial = TRUE;
6431
6432 if (state->ncs_context != hdrp->context) {
6433 if (state->ncs_context != 0) {
6434 nstat_send_error(state, state->ncs_context, EAGAIN);
6435 }
6436
6437 /* Initialize state for a partial query all. */
6438 state->ncs_context = hdrp->context;
6439 state->ncs_seq++;
6440 }
6441 }
6442
6443 return partial;
6444 }
6445
6446 static u_int16_t
nstat_control_end_query(nstat_control_state * state,nstat_src * last_src,boolean_t partial)6447 nstat_control_end_query(
6448 nstat_control_state *state,
6449 nstat_src *last_src,
6450 boolean_t partial)
6451 {
6452 u_int16_t flags = 0;
6453
6454 if (last_src == NULL || !partial) {
6455 /*
6456 * We iterated through the entire srcs list or exited early
6457 * from the loop when a partial update was not requested (an
6458 * error occurred), so clear context to indicate internally
6459 * that the query is finished.
6460 */
6461 state->ncs_context = 0;
6462 } else {
6463 /*
6464 * Indicate to userlevel to make another partial request as
6465 * there are still sources left to be reported.
6466 */
6467 flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
6468 }
6469
6470 return flags;
6471 }
6472
/*
 * Handle NSTAT_MSG_TYPE_GET_UPDATE: enqueue combined descriptor+counts
 * "update" messages for one source or all sources, using the same
 * partial/continuation scheme as the query paths.  Also flags this client
 * as supporting update messages.  Returns 0 on success or an errno for
 * nstat_control_send to report.
 */
static errno_t
nstat_control_handle_get_update(
	nstat_control_state *state,
	mbuf_t m)
{
	nstat_msg_query_src_req req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}

	lck_mtx_lock(&state->ncs_mtx);

	state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;

	errno_t result = ENOENT;
	nstat_src *src, *tmpsrc;
	tailq_head_nstat_src dead_list;     // sources seen "gone"; released after unlock
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
	TAILQ_INIT(&dead_list);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc) {
		int gone = 0;
		if (all_srcs) {
			// Check to see if we should handle this source or if we're still skipping to find where to continue
			if ((FALSE == partial || src->seq != state->ncs_seq)) {
				// Suppress "boring" (unchanged) sources once they have been reported.
				u_int64_t suppression_flags = (src->ns_reported)? NSTAT_FILTER_SUPPRESS_BORING_POLL: 0;
				if (nstat_control_reporting_allowed(state, src, suppression_flags)) {
					result = nstat_control_append_update(state, src, &gone);
					if (ENOMEM == result || ENOBUFS == result) {
						/*
						 * If the update message failed to
						 * enqueue then give up.
						 */
						break;
					}
					if (partial) {
						/*
						 * We skip over hard errors and
						 * filtered sources.
						 */
						src->seq = state->ncs_seq;
						src_count++;
					}
				}
			}
		} else if (src->srcref == req.srcref) {
			// Single-source request: send directly, no aggregation.
			if (nstat_control_reporting_allowed(state, src, 0)) {
				result = nstat_control_send_update(state, src, req.hdr.context, 0, 0, &gone);
			}
		}

		if (gone) {
			// pull src out of the list
			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}

		if (!all_srcs && req.srcref == src->srcref) {
			break;
		}
		// Cap the work per request; the continuation flag lets the
		// client resume where this sweep stopped.
		if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);


	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL) {
		// src == NULL here means the walk reached the end of the list.
		flags = nstat_control_end_query(state, src, partial);
	}

	lck_mtx_unlock(&state->ncs_mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result) {
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	// Release dead sources only after the state lock has been dropped.
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
6574
6575 static errno_t
nstat_control_handle_subscribe_sysinfo(nstat_control_state * state)6576 nstat_control_handle_subscribe_sysinfo(
6577 nstat_control_state *state)
6578 {
6579 errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
6580
6581 if (result != 0) {
6582 return result;
6583 }
6584
6585 lck_mtx_lock(&state->ncs_mtx);
6586 state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
6587 lck_mtx_unlock(&state->ncs_mtx);
6588
6589 return 0;
6590 }
6591
/*
 * Kernel-control send handler: validate the incoming request's header,
 * dispatch by message type to the matching handler, and on failure enqueue
 * an NSTAT_MSG_TYPE_ERROR reply (with the original request appended when
 * possible) back to the client.  The mbuf m is consumed on all paths.
 */
static errno_t
nstat_control_send(
	kern_ctl_ref kctl,
	u_int32_t unit,
	void *uinfo,
	mbuf_t m,
	__unused int flags)
{
	nstat_control_state *state = (nstat_control_state*)uinfo;
	struct nstat_msg_hdr *hdr;
	struct nstat_msg_hdr storage;
	errno_t result = 0;

	// Reject messages too short to carry a header.
	if (mbuf_pkthdr_len(m) < sizeof(*hdr)) {
		// Is this the right thing to do?
		mbuf_freem(m);
		return EINVAL;
	}

	// Use the header in place when it is contiguous in the first mbuf;
	// otherwise copy it out to local storage.
	if (mbuf_len(m) >= sizeof(*hdr)) {
		hdr = mbuf_data(m);
	} else {
		mbuf_copydata(m, 0, sizeof(storage), &storage);
		hdr = &storage;
	}

	// Legacy clients may not set the length
	// Those clients are likely not setting the flags either
	// Fix everything up so old clients continue to work
	if (hdr->length != mbuf_pkthdr_len(m)) {
		hdr->flags = 0;
		assert(mbuf_pkthdr_len(m) <= MAX_NSTAT_MSG_HDR_LENGTH);
		hdr->length = (u_int16_t)mbuf_pkthdr_len(m);
		if (hdr == &storage) {
			// Header was copied out above; write the fix-up back into m.
			mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
		}
	}

	switch (hdr->type) {
	case NSTAT_MSG_TYPE_ADD_SRC:
		result = nstat_control_handle_add_request(state, m);
		break;

	case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
		result = nstat_control_handle_add_all(state, m);
		break;

	case NSTAT_MSG_TYPE_REM_SRC:
		result = nstat_control_handle_remove_request(state, m);
		break;

	case NSTAT_MSG_TYPE_QUERY_SRC:
		result = nstat_control_handle_query_request(state, m);
		break;

	case NSTAT_MSG_TYPE_GET_SRC_DESC:
		result = nstat_control_handle_get_src_description(state, m);
		break;

	case NSTAT_MSG_TYPE_SET_FILTER:
		result = nstat_control_handle_set_filter(state, m);
		break;

	case NSTAT_MSG_TYPE_GET_UPDATE:
		result = nstat_control_handle_get_update(state, m);
		break;

	case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
		result = nstat_control_handle_subscribe_sysinfo(state);
		break;

	default:
		result = EINVAL;
		break;
	}

	if (result != 0) {
		struct nstat_msg_error err;

		bzero(&err, sizeof(err));
		err.hdr.type = NSTAT_MSG_TYPE_ERROR;
		// Error reply length covers the error header plus the echoed request.
		err.hdr.length = (u_int16_t)(sizeof(err) + mbuf_pkthdr_len(m));
		err.hdr.context = hdr->context;
		err.error = result;

		// Preferred form: prepend the error header so the client sees
		// the failed request appended after it.
		if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
		    mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0) {
			result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0) {
				mbuf_freem(m);
			}
			// m is now owned by the queue or freed; don't touch it again.
			m = NULL;
		}

		if (result != 0) {
			// Unable to prepend the error to the request - just send the error
			err.hdr.length = sizeof(err);
			result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
			    CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0) {
				nstat_stats.nstat_msgerrorfailures += 1;
			}
		}
		nstat_stats.nstat_handle_msg_failures += 1;
	}

	// Free the request mbuf unless ownership already passed to the queue.
	if (m) {
		mbuf_freem(m);
	}

	return result;
}
6704
6705
6706 /* Performs interface matching based on NSTAT_IFNET_IS… filter flags provided by an external caller */
6707 static bool
nstat_interface_matches_filter_flag(uint32_t filter_flags,struct ifnet * ifp)6708 nstat_interface_matches_filter_flag(uint32_t filter_flags, struct ifnet *ifp)
6709 {
6710 bool result = false;
6711
6712 if (ifp) {
6713 uint32_t flag_mask = (NSTAT_FILTER_IFNET_FLAGS & ~(NSTAT_IFNET_IS_NON_LOCAL | NSTAT_IFNET_IS_LOCAL));
6714 filter_flags &= flag_mask;
6715
6716 uint32_t flags = nstat_ifnet_to_flags_extended(ifp);
6717 if (filter_flags & flags) {
6718 result = true;
6719 }
6720 }
6721 return result;
6722 }
6723
6724
6725 static int
tcp_progress_indicators_for_interface(unsigned int ifindex,uint64_t recentflow_maxduration,uint32_t filter_flags,struct xtcpprogress_indicators * indicators)6726 tcp_progress_indicators_for_interface(unsigned int ifindex, uint64_t recentflow_maxduration, uint32_t filter_flags, struct xtcpprogress_indicators *indicators)
6727 {
6728 int error = 0;
6729 struct inpcb *inp;
6730 uint64_t min_recent_start_time;
6731 #if SKYWALK
6732 struct nstat_tu_shadow *shad;
6733 #endif /* SKYWALK */
6734
6735 min_recent_start_time = mach_continuous_time() - recentflow_maxduration;
6736 bzero(indicators, sizeof(*indicators));
6737
6738 #if NSTAT_DEBUG
6739 /* interface index -1 may be passed in to only match against the filters specified in the flags */
6740 if (ifindex < UINT_MAX) {
6741 printf("%s - for interface index %u with flags %x\n", __func__, ifindex, filter_flags);
6742 } else {
6743 printf("%s - for matching interface with flags %x\n", __func__, filter_flags);
6744 }
6745 #endif
6746
6747 lck_rw_lock_shared(&tcbinfo.ipi_lock);
6748 /*
6749 * For progress indicators we don't need to special case TCP to collect time wait connections
6750 */
6751 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
6752 {
6753 struct tcpcb *tp = intotcpcb(inp);
6754 /* radar://57100452
6755 * The conditional logic implemented below performs an *inclusive* match based on the desired interface index in addition to any filter values.
6756 * While the general expectation is that only one criteria normally is used for queries, the capability exists satisfy any eccentric future needs.
6757 */
6758 if (tp &&
6759 inp->inp_state != INPCB_STATE_DEAD &&
6760 inp->inp_last_outifp &&
6761 /* matches the given interface index, or against any provided filter flags */
6762 (((inp->inp_last_outifp->if_index == ifindex) ||
6763 nstat_interface_matches_filter_flag(filter_flags, inp->inp_last_outifp)) &&
6764 /* perform flow state matching based any provided filter flags */
6765 (((filter_flags & (NSTAT_IFNET_IS_NON_LOCAL | NSTAT_IFNET_IS_LOCAL)) == 0) ||
6766 ((filter_flags & NSTAT_IFNET_IS_NON_LOCAL) && !(tp->t_flags & TF_LOCAL)) ||
6767 ((filter_flags & NSTAT_IFNET_IS_LOCAL) && (tp->t_flags & TF_LOCAL))))) {
6768 struct tcp_conn_status connstatus;
6769 #if NSTAT_DEBUG
6770 printf("%s - *matched non-Skywalk* [filter match: %d]\n", __func__, nstat_interface_matches_filter_flag(filter_flags, inp->inp_last_outifp));
6771 #endif
6772 indicators->xp_numflows++;
6773 tcp_get_connectivity_status(tp, &connstatus);
6774 if (connstatus.write_probe_failed) {
6775 indicators->xp_write_probe_fails++;
6776 }
6777 if (connstatus.read_probe_failed) {
6778 indicators->xp_read_probe_fails++;
6779 }
6780 if (connstatus.conn_probe_failed) {
6781 indicators->xp_conn_probe_fails++;
6782 }
6783 if (inp->inp_start_timestamp > min_recent_start_time) {
6784 uint64_t flow_count;
6785
6786 indicators->xp_recentflows++;
6787 atomic_get_64(flow_count, &inp->inp_stat->rxbytes);
6788 indicators->xp_recentflows_rxbytes += flow_count;
6789 atomic_get_64(flow_count, &inp->inp_stat->txbytes);
6790 indicators->xp_recentflows_txbytes += flow_count;
6791
6792 indicators->xp_recentflows_rxooo += tp->t_stat.rxoutoforderbytes;
6793 indicators->xp_recentflows_rxdup += tp->t_stat.rxduplicatebytes;
6794 indicators->xp_recentflows_retx += tp->t_stat.txretransmitbytes;
6795 if (tp->snd_max - tp->snd_una) {
6796 indicators->xp_recentflows_unacked++;
6797 }
6798 }
6799 }
6800 }
6801 lck_rw_done(&tcbinfo.ipi_lock);
6802
6803 #if SKYWALK
6804 lck_mtx_lock(&nstat_mtx);
6805
6806 TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
6807 assert(shad->shad_magic == TU_SHADOW_MAGIC);
6808
6809 if ((shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) && (shad->shad_live)) {
6810 u_int16_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6811 u_int32_t extended_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6812 if (filter_flags != 0) {
6813 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL);
6814 error = (result)? 0 : EIO;
6815 if (error) {
6816 printf("%s - nstat get ifflags %d\n", __func__, error);
6817 continue;
6818 }
6819 extended_ifflags = extend_ifnet_flags(ifflags);
6820
6821 if ((extended_ifflags & filter_flags) == 0) {
6822 continue;
6823 }
6824 // Skywalk locality flags are not yet in place, see <rdar://problem/35607563>
6825 // Instead of checking flags with a simple logical and, check the inverse.
6826 // This allows for default action of fallthrough if the flags are not set.
6827 if ((filter_flags & NSTAT_IFNET_IS_NON_LOCAL) && (ifflags & NSTAT_IFNET_IS_LOCAL)) {
6828 continue;
6829 }
6830 if ((filter_flags & NSTAT_IFNET_IS_LOCAL) && (ifflags & NSTAT_IFNET_IS_NON_LOCAL)) {
6831 continue;
6832 }
6833 }
6834
6835 nstat_progress_digest digest;
6836 bzero(&digest, sizeof(digest));
6837 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &digest, NULL, NULL);
6838
6839 error = (result)? 0 : EIO;
6840 if (error) {
6841 printf("%s - nstat get progressdigest returned %d\n", __func__, error);
6842 continue;
6843 }
6844 if ((digest.ifindex == (u_int32_t)ifindex) ||
6845 (filter_flags & extended_ifflags)) {
6846 #if NSTAT_DEBUG
6847 printf("%s - *matched Skywalk* [filter match: %x %x]\n", __func__, filter_flags, extended_flags);
6848 #endif
6849 indicators->xp_numflows++;
6850 if (digest.connstatus.write_probe_failed) {
6851 indicators->xp_write_probe_fails++;
6852 }
6853 if (digest.connstatus.read_probe_failed) {
6854 indicators->xp_read_probe_fails++;
6855 }
6856 if (digest.connstatus.conn_probe_failed) {
6857 indicators->xp_conn_probe_fails++;
6858 }
6859 if (shad->shad_start_timestamp > min_recent_start_time) {
6860 indicators->xp_recentflows++;
6861 indicators->xp_recentflows_rxbytes += digest.rxbytes;
6862 indicators->xp_recentflows_txbytes += digest.txbytes;
6863 indicators->xp_recentflows_rxooo += digest.rxoutoforderbytes;
6864 indicators->xp_recentflows_rxdup += digest.rxduplicatebytes;
6865 indicators->xp_recentflows_retx += digest.txretransmit;
6866 if (digest.txunacked) {
6867 indicators->xp_recentflows_unacked++;
6868 }
6869 }
6870 }
6871 }
6872 }
6873
6874 lck_mtx_unlock(&nstat_mtx);
6875
6876 #endif /* SKYWALK */
6877 return error;
6878 }
6879
6880
6881 static int
tcp_progress_probe_enable_for_interface(unsigned int ifindex,uint32_t filter_flags,uint32_t enable_flags)6882 tcp_progress_probe_enable_for_interface(unsigned int ifindex, uint32_t filter_flags, uint32_t enable_flags)
6883 {
6884 int error = 0;
6885 struct ifnet *ifp;
6886
6887 #if NSTAT_DEBUG
6888 printf("%s - for interface index %u with flags %d\n", __func__, ifindex, filter_flags);
6889 #endif
6890
6891 ifnet_head_lock_shared();
6892 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
6893 {
6894 if ((ifp->if_index == ifindex) ||
6895 nstat_interface_matches_filter_flag(filter_flags, ifp)) {
6896 #if NSTAT_DEBUG
6897 printf("%s - *matched* interface index %d, enable: %d\n", __func__, ifp->if_index, enable_flags);
6898 #endif
6899 error = if_probe_connectivity(ifp, enable_flags);
6900 if (error) {
6901 printf("%s (%d) - nstat set tcp probe %d for interface index %d\n", __func__, error, enable_flags, ifp->if_index);
6902 }
6903 }
6904 }
6905 ifnet_head_done();
6906
6907 return error;
6908 }
6909
6910
6911 __private_extern__ int
ntstat_tcp_progress_indicators(struct sysctl_req * req)6912 ntstat_tcp_progress_indicators(struct sysctl_req *req)
6913 {
6914 struct xtcpprogress_indicators indicators = {};
6915 int error = 0;
6916 struct tcpprogressreq requested;
6917
6918 if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
6919 return EACCES;
6920 }
6921 if (req->newptr == USER_ADDR_NULL) {
6922 return EINVAL;
6923 }
6924 if (req->newlen < sizeof(req)) {
6925 return EINVAL;
6926 }
6927 error = SYSCTL_IN(req, &requested, sizeof(requested));
6928 if (error != 0) {
6929 return error;
6930 }
6931 error = tcp_progress_indicators_for_interface((unsigned int)requested.ifindex, requested.recentflow_maxduration, (uint32_t)requested.filter_flags, &indicators);
6932 if (error != 0) {
6933 return error;
6934 }
6935 error = SYSCTL_OUT(req, &indicators, sizeof(indicators));
6936
6937 return error;
6938 }
6939
6940
6941 __private_extern__ int
ntstat_tcp_progress_enable(struct sysctl_req * req)6942 ntstat_tcp_progress_enable(struct sysctl_req *req)
6943 {
6944 int error = 0;
6945 struct tcpprobereq requested;
6946
6947 if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
6948 return EACCES;
6949 }
6950 if (req->newptr == USER_ADDR_NULL) {
6951 return EINVAL;
6952 }
6953 if (req->newlen < sizeof(req)) {
6954 return EINVAL;
6955 }
6956 error = SYSCTL_IN(req, &requested, sizeof(requested));
6957 if (error != 0) {
6958 return error;
6959 }
6960 error = tcp_progress_probe_enable_for_interface((unsigned int)requested.ifindex, (uint32_t)requested.filter_flags, (uint32_t)requested.enable);
6961
6962 return error;
6963 }
6964
6965
6966 #if SKYWALK
6967
6968 #pragma mark -- netstat support for user level providers --
6969
// Per-flow snapshot used by the userland-provider sysctl paths: the flow's
// counters plus whichever descriptor (TCP or UDP) matches its provider.
typedef struct nstat_flow_data {
	nstat_counts counts;                    // packet/byte counters for the flow
	union {
		nstat_udp_descriptor udp_descriptor;
		nstat_tcp_descriptor tcp_descriptor;
	} flow_descriptor;                      // interpreted per provider type
} nstat_flow_data;
6977
6978 static int
nstat_gather_flow_data(nstat_provider_id_t provider,nstat_flow_data * flow_data,int n)6979 nstat_gather_flow_data(nstat_provider_id_t provider, nstat_flow_data *flow_data, int n)
6980 {
6981 struct nstat_tu_shadow *shad;
6982 int prepared = 0;
6983 errno_t err;
6984
6985 TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
6986 assert(shad->shad_magic == TU_SHADOW_MAGIC);
6987
6988 if ((shad->shad_provider == provider) && (shad->shad_live)) {
6989 if (prepared >= n) {
6990 break;
6991 }
6992 err = nstat_userland_tu_copy_descriptor((nstat_provider_cookie_t) shad,
6993 &flow_data->flow_descriptor, sizeof(flow_data->flow_descriptor));
6994
6995 if (err != 0) {
6996 printf("%s - nstat_userland_tu_copy_descriptor returned %d\n", __func__, err);
6997 }
6998 err = nstat_userland_tu_counts((nstat_provider_cookie_t) shad,
6999 &flow_data->counts, NULL);
7000 if (err != 0) {
7001 printf("%s - nstat_userland_tu_counts returned %d\n", __func__, err);
7002 }
7003 flow_data++;
7004 prepared++;
7005 }
7006 }
7007 return prepared;
7008 }
7009
7010 static void
nstat_userland_to_xinpcb_n(nstat_provider_id_t provider,nstat_flow_data * flow_data,struct xinpcb_n * xinp)7011 nstat_userland_to_xinpcb_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xinpcb_n *xinp)
7012 {
7013 xinp->xi_len = sizeof(struct xinpcb_n);
7014 xinp->xi_kind = XSO_INPCB;
7015
7016 if (provider == NSTAT_PROVIDER_TCP_USERLAND) {
7017 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7018 struct sockaddr_in *sa = &desc->local.v4;
7019 if (sa->sin_family == AF_INET) {
7020 xinp->inp_vflag = INP_IPV4;
7021 xinp->inp_laddr = desc->local.v4.sin_addr;
7022 xinp->inp_lport = desc->local.v4.sin_port;
7023 xinp->inp_faddr = desc->remote.v4.sin_addr;
7024 xinp->inp_fport = desc->remote.v4.sin_port;
7025 } else if (sa->sin_family == AF_INET6) {
7026 xinp->inp_vflag = INP_IPV6;
7027 xinp->in6p_laddr = desc->local.v6.sin6_addr;
7028 xinp->in6p_lport = desc->local.v6.sin6_port;
7029 xinp->in6p_faddr = desc->remote.v6.sin6_addr;
7030 xinp->in6p_fport = desc->remote.v6.sin6_port;
7031 }
7032 } else if (provider == NSTAT_PROVIDER_UDP_USERLAND) {
7033 nstat_udp_descriptor *desc = &flow_data->flow_descriptor.udp_descriptor;
7034 struct sockaddr_in *sa = &desc->local.v4;
7035 if (sa->sin_family == AF_INET) {
7036 xinp->inp_vflag = INP_IPV4;
7037 xinp->inp_laddr = desc->local.v4.sin_addr;
7038 xinp->inp_lport = desc->local.v4.sin_port;
7039 xinp->inp_faddr = desc->remote.v4.sin_addr;
7040 xinp->inp_fport = desc->remote.v4.sin_port;
7041 } else if (sa->sin_family == AF_INET6) {
7042 xinp->inp_vflag = INP_IPV6;
7043 xinp->in6p_laddr = desc->local.v6.sin6_addr;
7044 xinp->in6p_lport = desc->local.v6.sin6_port;
7045 xinp->in6p_faddr = desc->remote.v6.sin6_addr;
7046 xinp->in6p_fport = desc->remote.v6.sin6_port;
7047 }
7048 }
7049 }
7050
7051 static void
nstat_userland_to_xsocket_n(nstat_provider_id_t provider,nstat_flow_data * flow_data,struct xsocket_n * xso)7052 nstat_userland_to_xsocket_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xsocket_n *xso)
7053 {
7054 xso->xso_len = sizeof(struct xsocket_n);
7055 xso->xso_kind = XSO_SOCKET;
7056
7057 if (provider == NSTAT_PROVIDER_TCP_USERLAND) {
7058 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7059 xso->xso_protocol = IPPROTO_TCP;
7060 xso->so_e_pid = desc->epid;
7061 xso->so_last_pid = desc->pid;
7062 } else {
7063 nstat_udp_descriptor *desc = &flow_data->flow_descriptor.udp_descriptor;
7064 xso->xso_protocol = IPPROTO_UDP;
7065 xso->so_e_pid = desc->epid;
7066 xso->so_last_pid = desc->pid;
7067 }
7068 }
7069
7070 static void
nstat_userland_to_rcv_xsockbuf_n(nstat_provider_id_t provider,nstat_flow_data * flow_data,struct xsockbuf_n * xsbrcv)7071 nstat_userland_to_rcv_xsockbuf_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xsockbuf_n *xsbrcv)
7072 {
7073 xsbrcv->xsb_len = sizeof(struct xsockbuf_n);
7074 xsbrcv->xsb_kind = XSO_RCVBUF;
7075
7076 if (provider == NSTAT_PROVIDER_TCP_USERLAND) {
7077 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7078 xsbrcv->sb_hiwat = desc->rcvbufsize;
7079 xsbrcv->sb_cc = desc->rcvbufused;
7080 } else {
7081 nstat_udp_descriptor *desc = &flow_data->flow_descriptor.udp_descriptor;
7082 xsbrcv->sb_hiwat = desc->rcvbufsize;
7083 xsbrcv->sb_cc = desc->rcvbufused;
7084 }
7085 }
7086
7087 static void
nstat_userland_to_snd_xsockbuf_n(nstat_provider_id_t provider,nstat_flow_data * flow_data,struct xsockbuf_n * xsbsnd)7088 nstat_userland_to_snd_xsockbuf_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xsockbuf_n *xsbsnd)
7089 {
7090 xsbsnd->xsb_len = sizeof(struct xsockbuf_n);
7091 xsbsnd->xsb_kind = XSO_SNDBUF;
7092
7093 if (provider == NSTAT_PROVIDER_TCP_USERLAND) {
7094 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7095 xsbsnd->sb_hiwat = desc->sndbufsize;
7096 xsbsnd->sb_cc = desc->sndbufused;
7097 } else {
7098 }
7099 }
7100
7101 static void
nstat_userland_to_xsockstat_n(nstat_flow_data * flow_data,struct xsockstat_n * xst)7102 nstat_userland_to_xsockstat_n(nstat_flow_data *flow_data, struct xsockstat_n *xst)
7103 {
7104 xst->xst_len = sizeof(struct xsockstat_n);
7105 xst->xst_kind = XSO_STATS;
7106
7107 // The kernel version supports an array of counts, here we only support one and map to first entry
7108 xst->xst_tc_stats[0].rxpackets = flow_data->counts.nstat_rxpackets;
7109 xst->xst_tc_stats[0].rxbytes = flow_data->counts.nstat_rxbytes;
7110 xst->xst_tc_stats[0].txpackets = flow_data->counts.nstat_txpackets;
7111 xst->xst_tc_stats[0].txbytes = flow_data->counts.nstat_txbytes;
7112 }
7113
7114 static void
nstat_userland_to_xtcpcb_n(nstat_flow_data * flow_data,struct xtcpcb_n * xt)7115 nstat_userland_to_xtcpcb_n(nstat_flow_data *flow_data, struct xtcpcb_n *xt)
7116 {
7117 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7118 xt->xt_len = sizeof(struct xtcpcb_n);
7119 xt->xt_kind = XSO_TCPCB;
7120 xt->t_state = desc->state;
7121 xt->snd_wnd = desc->txwindow;
7122 xt->snd_cwnd = desc->txcwindow;
7123 }
7124
7125
7126 __private_extern__ int
ntstat_userland_count(short proto)7127 ntstat_userland_count(short proto)
7128 {
7129 int n = 0;
7130 if (proto == IPPROTO_TCP) {
7131 n = nstat_userland_tcp_shadows;
7132 } else if (proto == IPPROTO_UDP) {
7133 n = nstat_userland_udp_shadows;
7134 }
7135 return n;
7136 }
7137
7138 __private_extern__ int
nstat_userland_get_snapshot(short proto,void ** snapshotp,int * countp)7139 nstat_userland_get_snapshot(short proto, void **snapshotp, int *countp)
7140 {
7141 int error = 0;
7142 int n = 0;
7143 nstat_provider_id_t provider;
7144 nstat_flow_data *flow_data = NULL;
7145
7146 lck_mtx_lock(&nstat_mtx);
7147 if (proto == IPPROTO_TCP) {
7148 n = nstat_userland_tcp_shadows;
7149 provider = NSTAT_PROVIDER_TCP_USERLAND;
7150 } else if (proto == IPPROTO_UDP) {
7151 n = nstat_userland_udp_shadows;
7152 provider = NSTAT_PROVIDER_UDP_USERLAND;
7153 }
7154 if (n == 0) {
7155 goto done;
7156 }
7157
7158 flow_data = (nstat_flow_data *) kalloc_data(n * sizeof(*flow_data),
7159 Z_WAITOK | Z_ZERO);
7160 if (flow_data) {
7161 n = nstat_gather_flow_data(provider, flow_data, n);
7162 } else {
7163 error = ENOMEM;
7164 }
7165 done:
7166 lck_mtx_unlock(&nstat_mtx);
7167 *snapshotp = flow_data;
7168 *countp = n;
7169 return error;
7170 }
7171
// nstat_userland_list_snapshot() does most of the work for a sysctl that uses a return format
// as per get_pcblist_n() even though the vast majority of fields are unused.
// Additional items are required in the sysctl output before and after the data added
// by this function.
//
// For each flow in the snapshot, one fixed-size record is assembled in a
// scratch buffer and copied out via SYSCTL_OUT.  A record is a packed,
// 64-bit-aligned sequence of: xinpcb_n, xsocket_n, rcv xsockbuf_n,
// snd xsockbuf_n, xsockstat_n, and (TCP only) xtcpcb_n.
// Returns 0 on success, EINVAL for an unsupported protocol, ENOMEM if the
// scratch buffer cannot be allocated, or the SYSCTL_OUT error.
__private_extern__ int
nstat_userland_list_snapshot(short proto, struct sysctl_req *req, void *userlandsnapshot, int n)
{
	int error = 0;
	int i;
	nstat_provider_id_t provider;
	void *buf = NULL;
	nstat_flow_data *flow_data, *flow_data_array = NULL;
	// Size of one output record, each component rounded up to 8-byte alignment
	size_t item_size = ROUNDUP64(sizeof(struct xinpcb_n)) +
	    ROUNDUP64(sizeof(struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
	    ROUNDUP64(sizeof(struct xsockstat_n));

	// Nothing to emit for an empty or absent snapshot (not an error)
	if ((n == 0) || (userlandsnapshot == NULL)) {
		goto done;
	}

	if (proto == IPPROTO_TCP) {
		// TCP records carry an extra trailing xtcpcb_n
		item_size += ROUNDUP64(sizeof(struct xtcpcb_n));
		provider = NSTAT_PROVIDER_TCP_USERLAND;
	} else if (proto == IPPROTO_UDP) {
		provider = NSTAT_PROVIDER_UDP_USERLAND;
	} else {
		error = EINVAL;
		goto done;
	}

	buf = (void *) kalloc_data(item_size, Z_WAITOK);
	if (buf) {
		// Carve the scratch buffer into the per-record components, in
		// output order.  NOTE: for UDP, xt ends up pointing one past the
		// end of the allocation; that is legal pointer arithmetic and xt
		// is only dereferenced on the TCP path below.
		struct xinpcb_n *xi = (struct xinpcb_n *)buf;
		struct xsocket_n *xso = (struct xsocket_n *) ADVANCE64(xi, sizeof(*xi));
		struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *) ADVANCE64(xso, sizeof(*xso));
		struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *) ADVANCE64(xsbrcv, sizeof(*xsbrcv));
		struct xsockstat_n *xsostats = (struct xsockstat_n *) ADVANCE64(xsbsnd, sizeof(*xsbsnd));
		struct xtcpcb_n *xt = (struct xtcpcb_n *) ADVANCE64(xsostats, sizeof(*xsostats));

		flow_data_array = (nstat_flow_data *)userlandsnapshot;

		for (i = 0; i < n; i++) {
			flow_data = &flow_data_array[i];
			// Clear the whole record; the converters below only fill
			// the fields they report
			bzero(buf, item_size);

			nstat_userland_to_xinpcb_n(provider, flow_data, xi);
			nstat_userland_to_xsocket_n(provider, flow_data, xso);
			nstat_userland_to_rcv_xsockbuf_n(provider, flow_data, xsbrcv);
			nstat_userland_to_snd_xsockbuf_n(provider, flow_data, xsbsnd);
			nstat_userland_to_xsockstat_n(flow_data, xsostats);
			if (proto == IPPROTO_TCP) {
				nstat_userland_to_xtcpcb_n(flow_data, xt);
			}
			error = SYSCTL_OUT(req, buf, item_size);
			if (error) {
				break;
			}
		}
		kfree_data(buf, item_size);
	} else {
		error = ENOMEM;
	}
done:
	return error;
}
7238
7239 __private_extern__ void
nstat_userland_release_snapshot(void * snapshot,int nuserland)7240 nstat_userland_release_snapshot(void *snapshot, int nuserland)
7241 {
7242 if (snapshot != NULL) {
7243 kfree_data(snapshot, nuserland * sizeof(nstat_flow_data));
7244 }
7245 }
7246
7247 #if NTSTAT_SUPPORTS_STANDALONE_SYSCTL
7248
7249 __private_extern__ int
ntstat_userland_list_n(short proto,struct sysctl_req * req)7250 ntstat_userland_list_n(short proto, struct sysctl_req *req)
7251 {
7252 int error = 0;
7253 int n;
7254 struct xinpgen xig;
7255 void *snapshot = NULL;
7256 size_t item_size = ROUNDUP64(sizeof(struct xinpcb_n)) +
7257 ROUNDUP64(sizeof(struct xsocket_n)) +
7258 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
7259 ROUNDUP64(sizeof(struct xsockstat_n));
7260
7261 if (proto == IPPROTO_TCP) {
7262 item_size += ROUNDUP64(sizeof(struct xtcpcb_n));
7263 }
7264
7265 if (req->oldptr == USER_ADDR_NULL) {
7266 n = ntstat_userland_count(proto);
7267 req->oldidx = 2 * (sizeof(xig)) + (n + 1 + n / 8) * item_size;
7268 goto done;
7269 }
7270
7271 if (req->newptr != USER_ADDR_NULL) {
7272 error = EPERM;
7273 goto done;
7274 }
7275
7276 error = nstat_userland_get_snapshot(proto, &snapshot, &n);
7277
7278 if (error) {
7279 goto done;
7280 }
7281
7282 bzero(&xig, sizeof(xig));
7283 xig.xig_len = sizeof(xig);
7284 xig.xig_gen = 0;
7285 xig.xig_sogen = 0;
7286 xig.xig_count = n;
7287 error = SYSCTL_OUT(req, &xig, sizeof(xig));
7288 if (error) {
7289 goto done;
7290 }
7291 /*
7292 * We are done if there are no flows
7293 */
7294 if (n == 0) {
7295 goto done;
7296 }
7297
7298 error = nstat_userland_list_snapshot(proto, req, snapshot, n);
7299
7300 if (!error) {
7301 /*
7302 * Give the user an updated idea of our state,
7303 * which is unchanged
7304 */
7305 error = SYSCTL_OUT(req, &xig, sizeof(xig));
7306 }
7307 done:
7308 nstat_userland_release_snapshot(snapshot, n);
7309 return error;
7310 }
7311
7312 #endif /* NTSTAT_SUPPORTS_STANDALONE_SYSCTL */
7313 #endif /* SKYWALK */
7314