1 /*
2 * Copyright (c) 2010-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/kpi_mbuf.h>
32 #include <sys/socket.h>
33 #include <sys/kern_control.h>
34 #include <sys/mcache.h>
35 #include <sys/socketvar.h>
36 #include <sys/sysctl.h>
37 #include <sys/queue.h>
38 #include <sys/priv.h>
39 #include <sys/protosw.h>
40
41 #include <kern/clock.h>
42 #include <kern/debug.h>
43
44 #include <libkern/libkern.h>
45 #include <libkern/OSAtomic.h>
46 #include <libkern/locks.h>
47
48 #include <net/if.h>
49 #include <net/if_var.h>
50 #include <net/if_types.h>
51 #include <net/route.h>
52 #include <net/dlil.h>
53
54 // These includes appear in ntstat.h but we include them here first so they won't trigger
55 // any clang diagnostic errors.
56 #include <netinet/in.h>
57 #include <netinet/in_stat.h>
58 #include <netinet/tcp.h>
59
60 #pragma clang diagnostic push
61 #pragma clang diagnostic error "-Wpadded"
62 #pragma clang diagnostic error "-Wpacked"
63 // This header defines structures shared with user space, so we need to ensure there is
64 // no compiler inserted padding in case the user space process isn't using the same
65 // architecture as the kernel (example: i386 process with x86_64 kernel).
66 #include <net/ntstat.h>
67 #pragma clang diagnostic pop
68
69 #include <netinet/ip_var.h>
70 #include <netinet/in_pcb.h>
71 #include <netinet/in_var.h>
72 #include <netinet/tcp_var.h>
73 #include <netinet/tcp_fsm.h>
74 #include <netinet/tcp_cc.h>
75 #include <netinet/udp.h>
76 #include <netinet/udp_var.h>
77 #include <netinet6/in6_pcb.h>
78 #include <netinet6/in6_var.h>
79
80 __private_extern__ int nstat_collect = 1;
81
82 #if (DEBUG || DEVELOPMENT)
83 SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
84 &nstat_collect, 0, "Collect detailed statistics");
85 #endif /* (DEBUG || DEVELOPMENT) */
86
87 #if !XNU_TARGET_OS_OSX
88 static int nstat_privcheck = 1;
89 #else /* XNU_TARGET_OS_OSX */
90 static int nstat_privcheck = 0;
91 #endif /* XNU_TARGET_OS_OSX */
92 SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
93 &nstat_privcheck, 0, "Entitlement check");
94
95 SYSCTL_NODE(_net, OID_AUTO, stats,
96 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "network statistics");
97
98 static int nstat_debug = 0;
99 SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
100 &nstat_debug, 0, "");
101
102 static int nstat_debug_pid = 0; // Only log socket level debug for specified pid
103 SYSCTL_INT(_net_stats, OID_AUTO, debug_pid, CTLFLAG_RW | CTLFLAG_LOCKED,
104 &nstat_debug_pid, 0, "");
105
106 static int nstat_sendspace = 2048;
107 SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
108 &nstat_sendspace, 0, "");
109
110 static int nstat_recvspace = 8192;
111 SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
112 &nstat_recvspace, 0, "");
113
114 static struct nstat_stats nstat_stats;
115 SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
116 &nstat_stats, nstat_stats, "");
117
118 static u_int32_t nstat_lim_interval = 30 * 60; /* Report interval, seconds */
119 static u_int32_t nstat_lim_min_tx_pkts = 100;
120 static u_int32_t nstat_lim_min_rx_pkts = 100;
121 #if (DEBUG || DEVELOPMENT)
122 SYSCTL_INT(_net_stats, OID_AUTO, lim_report_interval,
123 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_interval, 0,
124 "Low internet stat report interval");
125
126 SYSCTL_INT(_net_stats, OID_AUTO, lim_min_tx_pkts,
127 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_tx_pkts, 0,
128 "Low Internet, min transmit packets threshold");
129
130 SYSCTL_INT(_net_stats, OID_AUTO, lim_min_rx_pkts,
131 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_rx_pkts, 0,
132 "Low Internet, min receive packets threshold");
133 #endif /* DEBUG || DEVELOPMENT */
134
135 static struct net_api_stats net_api_stats_before;
136 static u_int64_t net_api_stats_last_report_time;
137 #define NET_API_STATS_REPORT_INTERVAL (12 * 60 * 60) /* 12 hours, in seconds */
138 static u_int32_t net_api_stats_report_interval = NET_API_STATS_REPORT_INTERVAL;
139
140 #if (DEBUG || DEVELOPMENT)
141 SYSCTL_UINT(_net_stats, OID_AUTO, api_report_interval,
142 CTLFLAG_RW | CTLFLAG_LOCKED, &net_api_stats_report_interval, 0, "");
143 #endif /* DEBUG || DEVELOPMENT */
144
/*
 * True when socket-level debug logging applies to this socket: the
 * socket's effective pid (delegated pid when SOF_DELEGATED, else the
 * last pid to use it) matches the nstat_debug_pid sysctl.
 */
#define NSTAT_DEBUG_SOCKET_PID_MATCHED(so) \
	(so && (nstat_debug_pid == (so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid)))

/* Non-zero when debug logging is enabled and no pid filter excludes this socket. */
#define NSTAT_DEBUG_SOCKET_ON(so) \
	((nstat_debug && (!nstat_debug_pid || NSTAT_DEBUG_SOCKET_PID_MATCHED(so))) ? nstat_debug : 0)

/*
 * Wrapped in do/while(0) so the macro expands to a single statement and
 * is safe inside unbraced if/else bodies (CERT PRE10-C); previously a
 * bare if-block, which risks dangling-else misbinding at call sites.
 */
#define NSTAT_DEBUG_SOCKET_LOG(so, fmt, ...) \
	do { \
	        if (NSTAT_DEBUG_SOCKET_ON(so)) { \
	                printf("NSTAT_DEBUG_SOCKET <pid %d>: " fmt "\n", (so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid), ##__VA_ARGS__); \
	        } \
	} while (0)
155
// Bit flags kept in nstat_control_state.ncs_flags.  Exact semantics are
// established at the (not fully visible here) set/test sites; the notes
// below are inferred from the names -- confirm at use sites.
enum{
	NSTAT_FLAG_CLEANUP = (1 << 0),             // NOTE(review): presumed teardown-in-progress marker
	NSTAT_FLAG_REQCOUNTS = (1 << 1),           // NOTE(review): presumed client requested counts
	NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2),    // NOTE(review): presumed client accepts update messages
	NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3),  // NOTE(review): presumed sysinfo subscription active
};
162
163 #if !XNU_TARGET_OS_OSX
164 #define QUERY_CONTINUATION_SRC_COUNT 50
165 #else /* XNU_TARGET_OS_OSX */
166 #define QUERY_CONTINUATION_SRC_COUNT 100
167 #endif /* XNU_TARGET_OS_OSX */
168
169 #ifndef ROUNDUP64
170 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
171 #endif
172
173 #ifndef ADVANCE64
174 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
175 #endif
176
177 typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src;
178 typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src;
179
180 typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow;
181 typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow;
182
183 typedef TAILQ_HEAD(, nstat_generic_shadow) tailq_head_generic_shadow;
184 typedef TAILQ_ENTRY(nstat_generic_shadow) tailq_entry_generic_shadow;
185
186 typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails;
187 typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails;
188
/*
 * Cached identity snapshot of a process using the statistics subsystem.
 * Entries live on nstat_procdetails_head, are refcounted via
 * nstat_retain_curprocdetails()/nstat_release_procdetails(), and are
 * reaped lazily by nstat_prune_procdetails() when the refcount is zero.
 */
struct nstat_procdetails {
	tailq_entry_procdetails pdet_link;       // linkage on nstat_procdetails_head
	int pdet_pid;                            // BSD pid at capture time (proc_selfpid)
	u_int64_t pdet_upid;                     // unique pid, stable across pid reuse (proc_uniqueid)
	char pdet_procname[64];                  // process name snapshot (proc_selfname)
	uuid_t pdet_uuid;                        // executable UUID (proc_getexecutableuuid)
	u_int32_t pdet_refcnt;                   // outstanding references; reaped at zero
	u_int32_t pdet_magic;                    // NSTAT_PROCDETAILS_MAGIC while valid, UNMAGIC once freed
};
198
/*
 * Per-provider filter installed by a client (see
 * nstat_set_provider_filter / ncs_provider_filters).  Used by provider
 * nstat_reporting_allowed callbacks to suppress uninteresting sources.
 */
typedef struct nstat_provider_filter {
	u_int64_t npf_flags;                     // NSTAT_FILTER_* property flags (e.g. interface-class bits)
	u_int64_t npf_events;                    // event mask of interest -- NOTE(review): confirm at use sites
	u_int64_t npf_extensions;                // requested extension ids -- NOTE(review): confirm at use sites
	pid_t npf_pid;                           // pid filter -- presumably 0 means no pid filter; confirm
	uuid_t npf_uuid;                         // uuid filter -- presumably zero-uuid means none; confirm
} nstat_provider_filter;
206
207
/*
 * Per-client state for one kernel-control connection to the network
 * statistics subsystem.  Instances are chained on the global
 * nstat_controls list; see the lock-ordering comment near nstat_mtx
 * (socket_lock -> nstat_mtx -> ncs_mtx).
 */
typedef struct nstat_control_state {
	struct nstat_control_state *ncs_next;    // next on the global nstat_controls list
	u_int32_t ncs_watching;                  // bitmask of watched providers: (1 << provider id)
	decl_lck_mtx_data(, ncs_mtx);            // protects this client's mutable state
	kern_ctl_ref ncs_kctl;                   // kernel control the client is attached to
	u_int32_t ncs_unit;                      // kernel control unit for this client
	nstat_src_ref_t ncs_next_srcref;         // next source handle to hand out
	tailq_head_nstat_src ncs_src_queue;      // sources this client is observing
	mbuf_t ncs_accumulated;                  // presumably batched outbound messages -- confirm at send sites
	u_int32_t ncs_flags;                     // NSTAT_FLAG_* values
	nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT]; // per-provider client filters
	/* state maintained for partial query requests */
	u_int64_t ncs_context;
	u_int64_t ncs_seq;
	/* For ease of debugging with lldb macros */
	struct nstat_procdetails *ncs_procdetails;
} nstat_control_state;
225
/*
 * Callback table for one statistics provider (route, tcp, udp, ifnet,
 * ...).  Providers register by prepending themselves to the global
 * nstat_providers list (see nstat_init_route_provider).
 */
typedef struct nstat_provider {
	struct nstat_provider *next;             // next on the global nstat_providers list
	nstat_provider_id_t nstat_provider_id;   // NSTAT_PROVIDER_* identity
	size_t nstat_descriptor_length;          // size of this provider's descriptor structure
	// Resolve client-supplied parameters to a provider cookie.
	errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
	// Non-zero once the underlying object is no longer alive.
	int (*nstat_gone)(nstat_provider_cookie_t cookie);
	// Snapshot counters; may also report "gone" via out_gone.
	errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
	// Subscribe a client to all current and future sources of this provider.
	errno_t (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req);
	void (*nstat_watcher_remove)(nstat_control_state *state);
	// Fill in the provider-specific descriptor for a source.
	errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, size_t len);
	// Drop the reference held by the cookie; 'locked' conveys caller lock state.
	void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
	// Apply the client's filter; false suppresses reporting of this source.
	bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, u_int64_t suppression_flags);
	size_t (*nstat_copy_extension)(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len);
} nstat_provider;
240
/*
 * Binds one observed object (provider + provider cookie) to one client
 * (nstat_control_state).  Queued on the client's ncs_src_queue.
 */
typedef struct nstat_src {
	tailq_entry_nstat_src ns_control_link;   // All sources for the nstat_control_state, for iterating over.
	nstat_control_state *ns_control;         // The nstat_control_state that this is a source for
	nstat_src_ref_t srcref;                  // client-visible handle for this source
	nstat_provider *provider;                // provider that owns 'cookie'
	nstat_provider_cookie_t cookie;          // provider-private object reference
	uint32_t filter;                         // per-source filter -- NOTE(review): confirm semantics at use sites
	bool ns_reported;                        // At least one update/counts/desc message has been sent
	uint64_t seq;                            // presumably partial-query continuation sequence -- confirm
} nstat_src;
251
252 static errno_t nstat_control_send_counts(nstat_control_state *, nstat_src *, unsigned long long, u_int16_t, int *);
253 static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
254 static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int64_t event, u_int16_t hdr_flags, int *gone);
255 static errno_t nstat_control_send_removed(nstat_control_state *state, nstat_src *src, u_int16_t hdr_flags);
256 static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
257 static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
258 static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src, u_int64_t suppression_flags);
259 static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
260 static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
261 static void nstat_ifnet_report_ecn_stats(void);
262 static void nstat_ifnet_report_lim_stats(void);
263 static void nstat_net_api_report_stats(void);
264 static errno_t nstat_set_provider_filter( nstat_control_state *state, nstat_msg_add_all_srcs *req);
265 static errno_t nstat_control_send_event(nstat_control_state *state, nstat_src *src, u_int64_t event);
266
267 static u_int32_t nstat_udp_watchers = 0;
268 static u_int32_t nstat_tcp_watchers = 0;
269
270 static void nstat_control_register(void);
271
272 /*
273 * The lock order is as follows:
274 *
275 * socket_lock (inpcb)
276 * nstat_mtx
277 * state->ncs_mtx
278 */
279 static KALLOC_HEAP_DEFINE(KHEAP_NET_STAT, NET_STAT_CONTROL_NAME,
280 KHEAP_ID_DEFAULT);
281 static nstat_control_state *nstat_controls = NULL;
282 static uint64_t nstat_idle_time = 0;
283 static LCK_GRP_DECLARE(nstat_lck_grp, "network statistics kctl");
284 static LCK_MTX_DECLARE(nstat_mtx, &nstat_lck_grp);
285
286
287 /* some extern definitions */
288 extern void mbuf_report_peak_usage(void);
289 extern void tcp_report_stats(void);
290
291 static void
nstat_copy_sa_out(const struct sockaddr * src,struct sockaddr * dst,int maxlen)292 nstat_copy_sa_out(
293 const struct sockaddr *src,
294 struct sockaddr *dst,
295 int maxlen)
296 {
297 if (src->sa_len > maxlen) {
298 return;
299 }
300
301 bcopy(src, dst, src->sa_len);
302 if (src->sa_family == AF_INET6 &&
303 src->sa_len >= sizeof(struct sockaddr_in6)) {
304 struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst;
305 if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
306 sin6->sin6_scope_id = ((const struct sockaddr_in6*)(const void*)(src))->sin6_scope_id;
307 if (in6_embedded_scope) {
308 in6_verify_ifscope(&sin6->sin6_addr, sin6->sin6_scope_id);
309 sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
310 sin6->sin6_addr.s6_addr16[1] = 0;
311 }
312 }
313 }
314 }
315
316 static void
nstat_ip_to_sockaddr(const struct in_addr * ip,u_int16_t port,struct sockaddr_in * sin,u_int32_t maxlen)317 nstat_ip_to_sockaddr(
318 const struct in_addr *ip,
319 u_int16_t port,
320 struct sockaddr_in *sin,
321 u_int32_t maxlen)
322 {
323 if (maxlen < sizeof(struct sockaddr_in)) {
324 return;
325 }
326
327 sin->sin_family = AF_INET;
328 sin->sin_len = sizeof(*sin);
329 sin->sin_port = port;
330 sin->sin_addr = *ip;
331 }
332
333 u_int16_t
nstat_ifnet_to_flags(struct ifnet * ifp)334 nstat_ifnet_to_flags(
335 struct ifnet *ifp)
336 {
337 u_int16_t flags = 0;
338 u_int32_t functional_type = if_functional_type(ifp, FALSE);
339
340 /* Panic if someone adds a functional type without updating ntstat. */
341 VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);
342
343 switch (functional_type) {
344 case IFRTYPE_FUNCTIONAL_UNKNOWN:
345 flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
346 break;
347 case IFRTYPE_FUNCTIONAL_LOOPBACK:
348 flags |= NSTAT_IFNET_IS_LOOPBACK;
349 break;
350 case IFRTYPE_FUNCTIONAL_WIRED:
351 case IFRTYPE_FUNCTIONAL_INTCOPROC:
352 flags |= NSTAT_IFNET_IS_WIRED;
353 break;
354 case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
355 flags |= NSTAT_IFNET_IS_WIFI;
356 break;
357 case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
358 flags |= NSTAT_IFNET_IS_WIFI;
359 flags |= NSTAT_IFNET_IS_AWDL;
360 break;
361 case IFRTYPE_FUNCTIONAL_CELLULAR:
362 flags |= NSTAT_IFNET_IS_CELLULAR;
363 break;
364 case IFRTYPE_FUNCTIONAL_COMPANIONLINK:
365 flags |= NSTAT_IFNET_IS_COMPANIONLINK;
366 break;
367 }
368
369 if (IFNET_IS_EXPENSIVE(ifp)) {
370 flags |= NSTAT_IFNET_IS_EXPENSIVE;
371 }
372 if (IFNET_IS_CONSTRAINED(ifp)) {
373 flags |= NSTAT_IFNET_IS_CONSTRAINED;
374 }
375 if (ifp->if_xflags & IFXF_LOW_LATENCY) {
376 flags |= NSTAT_IFNET_IS_WIFI;
377 flags |= NSTAT_IFNET_IS_LLW;
378 }
379
380 return flags;
381 }
382
383 static u_int32_t
extend_ifnet_flags(u_int16_t condensed_flags)384 extend_ifnet_flags(
385 u_int16_t condensed_flags)
386 {
387 u_int32_t extended_flags = (u_int32_t)condensed_flags;
388
389 if ((extended_flags & NSTAT_IFNET_IS_WIFI) && ((extended_flags & (NSTAT_IFNET_IS_AWDL | NSTAT_IFNET_IS_LLW)) == 0)) {
390 extended_flags |= NSTAT_IFNET_IS_WIFI_INFRA;
391 }
392 return extended_flags;
393 }
394
395 u_int32_t
nstat_ifnet_to_flags_extended(struct ifnet * ifp)396 nstat_ifnet_to_flags_extended(
397 struct ifnet *ifp)
398 {
399 u_int32_t flags = extend_ifnet_flags(nstat_ifnet_to_flags(ifp));
400
401 return flags;
402 }
403
404 static u_int32_t
nstat_inpcb_to_flags(const struct inpcb * inp)405 nstat_inpcb_to_flags(
406 const struct inpcb *inp)
407 {
408 u_int32_t flags = 0;
409
410 if (inp != NULL) {
411 if (inp->inp_last_outifp != NULL) {
412 struct ifnet *ifp = inp->inp_last_outifp;
413 flags = nstat_ifnet_to_flags_extended(ifp);
414
415 struct tcpcb *tp = intotcpcb(inp);
416 if (tp) {
417 if (tp->t_flags & TF_LOCAL) {
418 flags |= NSTAT_IFNET_IS_LOCAL;
419 } else {
420 flags |= NSTAT_IFNET_IS_NON_LOCAL;
421 }
422 }
423 } else {
424 flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
425 }
426 if (inp->inp_socket != NULL &&
427 (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK)) {
428 flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
429 }
430 }
431 return flags;
432 }
433
434 #pragma mark -- Network Statistic Providers --
435
436 static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
437 struct nstat_provider *nstat_providers = NULL;
438
439 static struct nstat_provider*
nstat_find_provider_by_id(nstat_provider_id_t id)440 nstat_find_provider_by_id(
441 nstat_provider_id_t id)
442 {
443 struct nstat_provider *provider;
444
445 for (provider = nstat_providers; provider != NULL; provider = provider->next) {
446 if (provider->nstat_provider_id == id) {
447 break;
448 }
449 }
450
451 return provider;
452 }
453
454 static errno_t
nstat_lookup_entry(nstat_provider_id_t id,const void * data,u_int32_t length,nstat_provider ** out_provider,nstat_provider_cookie_t * out_cookie)455 nstat_lookup_entry(
456 nstat_provider_id_t id,
457 const void *data,
458 u_int32_t length,
459 nstat_provider **out_provider,
460 nstat_provider_cookie_t *out_cookie)
461 {
462 *out_provider = nstat_find_provider_by_id(id);
463 if (*out_provider == NULL) {
464 return ENOENT;
465 }
466
467 return (*out_provider)->nstat_lookup(data, length, out_cookie);
468 }
469
470 static void nstat_init_route_provider(void);
471 static void nstat_init_tcp_provider(void);
472 static void nstat_init_udp_provider(void);
473 #if SKYWALK
474 static void nstat_init_userland_tcp_provider(void);
475 static void nstat_init_userland_udp_provider(void);
476 static void nstat_init_userland_quic_provider(void);
477 #endif /* SKYWALK */
478 static void nstat_init_userland_conn_provider(void);
479 static void nstat_init_udp_subflow_provider(void);
480 static void nstat_init_ifnet_provider(void);
481
/*
 * One-time subsystem initialization: register every statistics provider,
 * then bring up the kernel control that user space clients connect to.
 * Each provider init prepends to the nstat_providers list (see
 * nstat_init_route_provider), so later registrations are found first.
 */
__private_extern__ void
nstat_init(void)
{
	nstat_init_route_provider();
	nstat_init_tcp_provider();
	nstat_init_udp_provider();
#if SKYWALK
	nstat_init_userland_tcp_provider();
	nstat_init_userland_udp_provider();
	nstat_init_userland_quic_provider();
#endif /* SKYWALK */
	nstat_init_userland_conn_provider();
	nstat_init_udp_subflow_provider();
	nstat_init_ifnet_provider();
	// Register the kernel control last, once all providers exist.
	nstat_control_register();
}
498
499 #pragma mark -- Aligned Buffer Allocation --
500
/*
 * Bookkeeping stored immediately before an aligned allocation so that
 * nstat_free_aligned() can recover the raw buffer start and total size.
 */
struct align_header {
	u_int32_t offset;  // bytes from the raw allocation start to the aligned pointer
	u_int32_t length;  // total raw allocation size, passed to kfree_data()
};
505
/*
 * Allocate 'length' bytes aligned to 'alignment' (assumed to be a power
 * of two, as P2ROUNDUP requires -- TODO confirm callers guarantee this).
 * An align_header is stashed in the slack immediately below the returned
 * pointer so nstat_free_aligned() can undo the allocation.  Returns NULL
 * on allocation failure or when length exceeds the 64KB sanity limit.
 */
static void*
nstat_malloc_aligned(
	size_t          length,
	u_int8_t        alignment,
	zalloc_flags_t  flags)
{
	struct align_header *hdr = NULL;
	// Worst-case slack: header plus (alignment - 1) bytes of padding.
	size_t size = length + sizeof(*hdr) + alignment - 1;

	// Arbitrary limit to prevent abuse
	if (length > (64 * 1024)) {
		return NULL;
	}
	u_int8_t *buffer = (u_int8_t *)kalloc_data(size, flags);
	if (buffer == NULL) {
		return NULL;
	}

	// Reserve room for the header, then round up to the alignment.
	u_int8_t *aligned = buffer + sizeof(*hdr);
	aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);

	// The header lives directly below the aligned block.
	hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
	hdr->offset = aligned - buffer;
	hdr->length = size;

	return aligned;
}
533
534 static void
nstat_free_aligned(void * buffer)535 nstat_free_aligned(
536 void *buffer)
537 {
538 struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
539 char *offset_buffer = (char *)buffer - hdr->offset;
540 kfree_data(offset_buffer, hdr->length);
541 }
542
543 #pragma mark -- Utilities --
544
545 #define NSTAT_PROCDETAILS_MAGIC 0xfeedc001
546 #define NSTAT_PROCDETAILS_UNMAGIC 0xdeadc001
547
548 static tailq_head_procdetails nstat_procdetails_head = TAILQ_HEAD_INITIALIZER(nstat_procdetails_head);
549
/*
 * Return a refcounted nstat_procdetails entry for the current process,
 * creating and enqueuing a new one if none exists.  The lock is dropped
 * between lookup and insert, so two threads can race and insert
 * duplicates; that is tolerated by design (see comment below).
 */
static struct nstat_procdetails *
nstat_retain_curprocdetails(void)
{
	struct nstat_procdetails *procdetails = NULL;
	uint64_t upid = proc_uniqueid(current_proc());

	lck_mtx_lock(&nstat_mtx);

	// Look for an existing entry keyed by unique pid.
	TAILQ_FOREACH(procdetails, &nstat_procdetails_head, pdet_link) {
		assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);

		if (procdetails->pdet_upid == upid) {
			OSIncrementAtomic(&procdetails->pdet_refcnt);
			break;
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	if (!procdetails) {
		// No need for paranoia on locking, it would be OK if there are duplicate structs on the list
		procdetails = kalloc_type(struct nstat_procdetails,
		    Z_WAITOK | Z_NOFAIL);
		// Snapshot the process identity outside the lock.
		procdetails->pdet_pid = proc_selfpid();
		procdetails->pdet_upid = upid;
		proc_selfname(procdetails->pdet_procname, sizeof(procdetails->pdet_procname));
		proc_getexecutableuuid(current_proc(), procdetails->pdet_uuid, sizeof(uuid_t));
		procdetails->pdet_refcnt = 1;
		procdetails->pdet_magic = NSTAT_PROCDETAILS_MAGIC;
		lck_mtx_lock(&nstat_mtx);
		TAILQ_INSERT_HEAD(&nstat_procdetails_head, procdetails, pdet_link);
		lck_mtx_unlock(&nstat_mtx);
	}

	return procdetails;
}
584
/*
 * Drop a reference on a procdetails entry.  Nothing is freed here;
 * zero-refcount entries are reaped later by nstat_prune_procdetails().
 */
static void
nstat_release_procdetails(struct nstat_procdetails *procdetails)
{
	assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
	// These are harvested later to amortize costs
	OSDecrementAtomic(&procdetails->pdet_refcnt);
}
592
/*
 * Reap procdetails entries whose refcount has dropped to zero.  Dead
 * entries are first moved to a private list under nstat_mtx, then
 * unmagicked and freed with the lock dropped.
 */
static void
nstat_prune_procdetails(void)
{
	struct nstat_procdetails *procdetails;
	struct nstat_procdetails *tmpdetails;
	tailq_head_procdetails dead_list;

	TAILQ_INIT(&dead_list);
	lck_mtx_lock(&nstat_mtx);

	// SAFE variant: entries are removed while iterating.
	TAILQ_FOREACH_SAFE(procdetails, &nstat_procdetails_head, pdet_link, tmpdetails)
	{
		assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
		if (procdetails->pdet_refcnt == 0) {
			// Pull it off the list
			TAILQ_REMOVE(&nstat_procdetails_head, procdetails, pdet_link);
			TAILQ_INSERT_TAIL(&dead_list, procdetails, pdet_link);
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	// Free the dead entries outside the lock; clear the magic first so
	// stale references trip the asserts above.
	while ((procdetails = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, procdetails, pdet_link);
		procdetails->pdet_magic = NSTAT_PROCDETAILS_UNMAGIC;
		kfree_type(struct nstat_procdetails, procdetails);
	}
}
620
621 #pragma mark -- Route Provider --
622
623 static nstat_provider nstat_route_provider;
624
625 static errno_t
nstat_route_lookup(const void * data,u_int32_t length,nstat_provider_cookie_t * out_cookie)626 nstat_route_lookup(
627 const void *data,
628 u_int32_t length,
629 nstat_provider_cookie_t *out_cookie)
630 {
631 // rt_lookup doesn't take const params but it doesn't modify the parameters for
632 // the lookup. So...we use a union to eliminate the warning.
633 union{
634 struct sockaddr *sa;
635 const struct sockaddr *const_sa;
636 } dst, mask;
637
638 const nstat_route_add_param *param = (const nstat_route_add_param*)data;
639 *out_cookie = NULL;
640
641 if (length < sizeof(*param)) {
642 return EINVAL;
643 }
644
645 if (param->dst.v4.sin_family == 0 ||
646 param->dst.v4.sin_family > AF_MAX ||
647 (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family)) {
648 return EINVAL;
649 }
650
651 if (param->dst.v4.sin_len > sizeof(param->dst) ||
652 (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len))) {
653 return EINVAL;
654 }
655 if ((param->dst.v4.sin_family == AF_INET &&
656 param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
657 (param->dst.v6.sin6_family == AF_INET6 &&
658 param->dst.v6.sin6_len < sizeof(struct sockaddr_in6))) {
659 return EINVAL;
660 }
661
662 dst.const_sa = (const struct sockaddr*)¶m->dst;
663 mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)¶m->mask : NULL;
664
665 struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
666 if (rnh == NULL) {
667 return EAFNOSUPPORT;
668 }
669
670 lck_mtx_lock(rnh_lock);
671 struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
672 lck_mtx_unlock(rnh_lock);
673
674 if (rt) {
675 *out_cookie = (nstat_provider_cookie_t)rt;
676 }
677
678 return rt ? 0 : ENOENT;
679 }
680
681 static int
nstat_route_gone(nstat_provider_cookie_t cookie)682 nstat_route_gone(
683 nstat_provider_cookie_t cookie)
684 {
685 struct rtentry *rt = (struct rtentry*)cookie;
686 return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
687 }
688
689 static errno_t
nstat_route_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)690 nstat_route_counts(
691 nstat_provider_cookie_t cookie,
692 struct nstat_counts *out_counts,
693 int *out_gone)
694 {
695 struct rtentry *rt = (struct rtentry*)cookie;
696 struct nstat_counts *rt_stats = rt->rt_stats;
697
698 if (out_gone) {
699 *out_gone = 0;
700 }
701
702 if (out_gone && (rt->rt_flags & RTF_UP) == 0) {
703 *out_gone = 1;
704 }
705
706 if (rt_stats) {
707 atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
708 atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
709 atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
710 atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
711 out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
712 out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
713 out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
714 out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
715 out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
716 out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
717 out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
718 out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
719 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
720 } else {
721 bzero(out_counts, sizeof(*out_counts));
722 }
723
724 return 0;
725 }
726
/*
 * Drop the route reference held by a route source's cookie (taken at
 * lookup/watch time).  'locked' is unused for routes.
 */
static void
nstat_route_release(
	nstat_provider_cookie_t cookie,
	__unused int locked)
{
	rtfree((struct rtentry*)cookie);
}
734
735 static u_int32_t nstat_route_watchers = 0;
736
/*
 * rnh_walktree callback used when a client subscribes to all routes:
 * adds each usable route as a source on the watching control state.
 * Takes a route reference that is dropped again if the source cannot
 * be added.  Called, and must be called, with rnh_lock held.
 */
static int
nstat_route_walktree_add(
	struct radix_node       *rn,
	void                    *context)
{
	errno_t result = 0;
	struct rtentry *rt = (struct rtentry *)rn;
	nstat_control_state *state  = (nstat_control_state*)context;

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* RTF_UP can't change while rnh_lock is held */
	if ((rt->rt_flags & RTF_UP) != 0) {
		/* Clear RTPRF_OURS if the route is still usable */
		RT_LOCK(rt);
		if (rt_validate(rt)) {
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
		} else {
			RT_UNLOCK(rt);
			rt = NULL;
		}

		/* Otherwise if RTF_CONDEMNED, treat it as if it were down */
		if (rt == NULL) {
			return 0;
		}

		// Hand the reference to the new source; undo on failure.
		result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
		if (result != 0) {
			rtfree_locked(rt);
		}
	}

	return result;
}
773
/*
 * Subscribe a control state to all current and future routes.  The
 * provider filter is installed first, then every per-family routing
 * table is walked under rnh_lock so that no concurrent route addition
 * can race the walk.  On a partial walk failure the watcher remains
 * registered (see comment below).
 */
static errno_t
nstat_route_add_watcher(
	nstat_control_state     *state,
	nstat_msg_add_all_srcs  *req)
{
	int i;
	errno_t result = 0;

	lck_mtx_lock(rnh_lock);

	result = nstat_set_provider_filter(state, req);
	if (result == 0) {
		// Count this watcher before walking so new routes are not missed.
		OSIncrementAtomic(&nstat_route_watchers);

		// Address family 0 (AF_UNSPEC) has no table; start at 1.
		for (i = 1; i < AF_MAX; i++) {
			struct radix_node_head *rnh;
			rnh = rt_tables[i];
			if (!rnh) {
				continue;
			}

			result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
			if (result != 0) {
				// This is probably resource exhaustion.
				// There currently isn't a good way to recover from this.
				// Least bad seems to be to give up on the add-all but leave
				// the watcher in place.
				break;
			}
		}
	}
	lck_mtx_unlock(rnh_lock);

	return result;
}
809
/*
 * Notify every route watcher of a newly created route.  The unlocked
 * nstat_route_watchers test is a cheap early exit; the authoritative
 * walk of client states happens under nstat_mtx.
 */
__private_extern__ void
nstat_route_new_entry(
	struct rtentry  *rt)
{
	if (nstat_route_watchers == 0) {
		return;
	}

	lck_mtx_lock(&nstat_mtx);
	if ((rt->rt_flags & RTF_UP) != 0) {
		nstat_control_state     *state;
		for (state = nstat_controls; state; state = state->ncs_next) {
			if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0) {
				// this client is watching routes
				// acquire a reference for the route
				RT_ADDREF(rt);

				// add the source, if that fails, release the reference
				if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0) {
					RT_REMREF(rt);
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}
836
/*
 * Undo nstat_route_add_watcher's watcher count; per-source cleanup is
 * handled elsewhere.
 */
static void
nstat_route_remove_watcher(
	__unused nstat_control_state    *state)
{
	OSDecrementAtomic(&nstat_route_watchers);
}
843
/*
 * Fill in a nstat_route_descriptor for a route source.  Kernel pointers
 * are exposed only as permuted ids (VM_KERNEL_ADDRPERM).  Returns
 * EINVAL if the caller's buffer is too small.
 */
static errno_t
nstat_route_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void                    *data,
	size_t                  len)
{
	nstat_route_descriptor  *desc = (nstat_route_descriptor*)data;
	if (len < sizeof(*desc)) {
		return EINVAL;
	}
	bzero(desc, sizeof(*desc));

	struct rtentry  *rt = (struct rtentry*)cookie;
	desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
	desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
	desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);


	// key/dest
	struct sockaddr *sa;
	if ((sa = rt_key(rt))) {
		nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));
	}

	// mask -- NOTE(review): copied raw rather than via nstat_copy_sa_out,
	// presumably because radix masks need no scope fixup -- confirm before
	// changing.
	if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask)) {
		memcpy(&desc->mask, sa, sa->sa_len);
	}

	// gateway
	if ((sa = rt->rt_gateway)) {
		nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));
	}

	if (rt->rt_ifp) {
		desc->ifindex = rt->rt_ifp->if_index;
	}

	desc->flags = rt->rt_flags;

	return 0;
}
886
887 static bool
nstat_route_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)888 nstat_route_reporting_allowed(
889 nstat_provider_cookie_t cookie,
890 nstat_provider_filter *filter,
891 __unused u_int64_t suppression_flags)
892 {
893 bool retval = true;
894
895 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
896 struct rtentry *rt = (struct rtentry*)cookie;
897 struct ifnet *ifp = rt->rt_ifp;
898
899 if (ifp) {
900 uint32_t interface_properties = nstat_ifnet_to_flags_extended(ifp);
901
902 if ((filter->npf_flags & interface_properties) == 0) {
903 retval = false;
904 }
905 }
906 }
907 return retval;
908 }
909
910 static void
nstat_init_route_provider(void)911 nstat_init_route_provider(void)
912 {
913 bzero(&nstat_route_provider, sizeof(nstat_route_provider));
914 nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
915 nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
916 nstat_route_provider.nstat_lookup = nstat_route_lookup;
917 nstat_route_provider.nstat_gone = nstat_route_gone;
918 nstat_route_provider.nstat_counts = nstat_route_counts;
919 nstat_route_provider.nstat_release = nstat_route_release;
920 nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
921 nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
922 nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
923 nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
924 nstat_route_provider.next = nstat_providers;
925 nstat_providers = &nstat_route_provider;
926 }
927
928 #pragma mark -- Route Collection --
929
/*
 * Lazily attach a zeroed, 64-bit-aligned nstat_counts block to a route.
 * Multiple threads may race here: OSCompareAndSwapPtr lets exactly one
 * allocation win; losers free their copy and adopt the winner's.
 * Returns NULL only if allocation fails.
 */
__private_extern__ struct nstat_counts*
nstat_route_attach(
	struct rtentry  *rte)
{
	struct nstat_counts *result = rte->rt_stats;
	if (result) {
		return result;
	}

	// 64-bit alignment is needed for the atomic 64-bit counter updates.
	result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t),
	    Z_WAITOK | Z_ZERO);
	if (!result) {
		return result;
	}

	if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats)) {
		// Another thread attached first; use its block instead.
		nstat_free_aligned(result);
		result = rte->rt_stats;
	}

	return result;
}
952
953 __private_extern__ void
nstat_route_detach(struct rtentry * rte)954 nstat_route_detach(
955 struct rtentry *rte)
956 {
957 if (rte->rt_stats) {
958 nstat_free_aligned(rte->rt_stats);
959 rte->rt_stats = NULL;
960 }
961 }
962
963 __private_extern__ void
nstat_route_connect_attempt(struct rtentry * rte)964 nstat_route_connect_attempt(
965 struct rtentry *rte)
966 {
967 while (rte) {
968 struct nstat_counts* stats = nstat_route_attach(rte);
969 if (stats) {
970 OSIncrementAtomic(&stats->nstat_connectattempts);
971 }
972
973 rte = rte->rt_parent;
974 }
975 }
976
977 __private_extern__ void
nstat_route_connect_success(struct rtentry * rte)978 nstat_route_connect_success(
979 struct rtentry *rte)
980 {
981 // This route
982 while (rte) {
983 struct nstat_counts* stats = nstat_route_attach(rte);
984 if (stats) {
985 OSIncrementAtomic(&stats->nstat_connectsuccesses);
986 }
987
988 rte = rte->rt_parent;
989 }
990 }
991
992 __private_extern__ void
nstat_route_tx(struct rtentry * rte,u_int32_t packets,u_int32_t bytes,u_int32_t flags)993 nstat_route_tx(
994 struct rtentry *rte,
995 u_int32_t packets,
996 u_int32_t bytes,
997 u_int32_t flags)
998 {
999 while (rte) {
1000 struct nstat_counts* stats = nstat_route_attach(rte);
1001 if (stats) {
1002 if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0) {
1003 OSAddAtomic(bytes, &stats->nstat_txretransmit);
1004 } else {
1005 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
1006 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
1007 }
1008 }
1009
1010 rte = rte->rt_parent;
1011 }
1012 }
1013
1014 __private_extern__ void
nstat_route_rx(struct rtentry * rte,u_int32_t packets,u_int32_t bytes,u_int32_t flags)1015 nstat_route_rx(
1016 struct rtentry *rte,
1017 u_int32_t packets,
1018 u_int32_t bytes,
1019 u_int32_t flags)
1020 {
1021 while (rte) {
1022 struct nstat_counts* stats = nstat_route_attach(rte);
1023 if (stats) {
1024 if (flags == 0) {
1025 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
1026 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
1027 } else {
1028 if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER) {
1029 OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
1030 }
1031 if (flags & NSTAT_RX_FLAG_DUPLICATE) {
1032 OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
1033 }
1034 }
1035 }
1036
1037 rte = rte->rt_parent;
1038 }
1039 }
1040
/*
 * Atomically fold _new_val into the exponentially-weighted moving average
 * stored at _val_addr: avg' = avg - avg/2^_decay + new/2^_decay.
 * A stored value of zero is treated as "unset" and seeded with _new_val.
 * Retries on CAS contention; exits early when the average is unchanged.
 *
 * Fix: dropped the trailing semicolon after "while (0)" so the macro follows
 * the do/while(0) idiom and expands safely inside if/else without braces.
 */
#define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \
	volatile uint32_t _old_val; \
	volatile uint32_t _avg; \
	do { \
	        _old_val = *_val_addr; \
	        if (_old_val == 0) \
	        { \
	                _avg = _new_val; \
	        } \
	        else \
	        { \
	                _avg = _old_val - (_old_val >> _decay) + (_new_val >> _decay); \
	        } \
	        if (_old_val == _avg) break; \
	} while (!OSCompareAndSwap(_old_val, _avg, _val_addr)); \
} while (0)
1058
/*
 * Atomically store min(current, _new_val) at _val_addr, treating a stored
 * zero as "unset" (always replaced). Retries on CAS contention; exits early
 * when the current value is already the smaller, non-zero one.
 *
 * Fix: dropped the trailing semicolon after "while (0)" so the macro follows
 * the do/while(0) idiom and expands safely inside if/else without braces.
 */
#define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \
	volatile uint32_t _old_val; \
	do { \
	        _old_val = *_val_addr; \
	        if (_old_val != 0 && _old_val < _new_val) \
	        { \
	                break; \
	        } \
	} while (!OSCompareAndSwap(_old_val, _new_val, _val_addr)); \
} while (0)
1070
1071 __private_extern__ void
nstat_route_rtt(struct rtentry * rte,u_int32_t rtt,u_int32_t rtt_var)1072 nstat_route_rtt(
1073 struct rtentry *rte,
1074 u_int32_t rtt,
1075 u_int32_t rtt_var)
1076 {
1077 const uint32_t decay = 3;
1078
1079 while (rte) {
1080 struct nstat_counts* stats = nstat_route_attach(rte);
1081 if (stats) {
1082 NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
1083 NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
1084 NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
1085 }
1086 rte = rte->rt_parent;
1087 }
1088 }
1089
1090 __private_extern__ void
nstat_route_update(struct rtentry * rte,uint32_t connect_attempts,uint32_t connect_successes,uint32_t rx_packets,uint32_t rx_bytes,uint32_t rx_duplicatebytes,uint32_t rx_outoforderbytes,uint32_t tx_packets,uint32_t tx_bytes,uint32_t tx_retransmit,uint32_t rtt,uint32_t rtt_var)1091 nstat_route_update(
1092 struct rtentry *rte,
1093 uint32_t connect_attempts,
1094 uint32_t connect_successes,
1095 uint32_t rx_packets,
1096 uint32_t rx_bytes,
1097 uint32_t rx_duplicatebytes,
1098 uint32_t rx_outoforderbytes,
1099 uint32_t tx_packets,
1100 uint32_t tx_bytes,
1101 uint32_t tx_retransmit,
1102 uint32_t rtt,
1103 uint32_t rtt_var)
1104 {
1105 const uint32_t decay = 3;
1106
1107 while (rte) {
1108 struct nstat_counts* stats = nstat_route_attach(rte);
1109 if (stats) {
1110 OSAddAtomic(connect_attempts, &stats->nstat_connectattempts);
1111 OSAddAtomic(connect_successes, &stats->nstat_connectsuccesses);
1112 OSAddAtomic64((SInt64)tx_packets, (SInt64*)&stats->nstat_txpackets);
1113 OSAddAtomic64((SInt64)tx_bytes, (SInt64*)&stats->nstat_txbytes);
1114 OSAddAtomic(tx_retransmit, &stats->nstat_txretransmit);
1115 OSAddAtomic64((SInt64)rx_packets, (SInt64*)&stats->nstat_rxpackets);
1116 OSAddAtomic64((SInt64)rx_bytes, (SInt64*)&stats->nstat_rxbytes);
1117 OSAddAtomic(rx_outoforderbytes, &stats->nstat_rxoutoforderbytes);
1118 OSAddAtomic(rx_duplicatebytes, &stats->nstat_rxduplicatebytes);
1119
1120 if (rtt != 0) {
1121 NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
1122 NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
1123 NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
1124 }
1125 }
1126 rte = rte->rt_parent;
1127 }
1128 }
1129
1130 #pragma mark -- TCP Kernel Provider --
1131
1132 /*
1133 * Due to the way the kernel deallocates a process (the process structure
1134 * might be gone by the time we get the PCB detach notification),
1135 * we need to cache the process name. Without this, proc_name() would
1136 * return null and the process name would never be sent to userland.
1137 *
 * For UDP sockets, we also cache the connection tuples along with
1139 * the interface index. This is necessary because when UDP sockets are
1140 * disconnected, the connection tuples are forever lost from the inpcb, thus
1141 * we need to keep track of the last call to connect() in ntstat.
1142 */
struct nstat_tucookie {
	struct inpcb *inp;              // underlying PCB; inp_nstat_refcnt held for UDP sockets
	char pname[MAXCOMLEN + 1];      // process name cached at alloc (proc may be gone at detach)
	bool cached;                    // true once local/remote/if_index below hold valid snapshots
	union{
		struct sockaddr_in v4;
		struct sockaddr_in6 v6;
	} local;                        // cached local endpoint (filled for UDP in nstat_pcb_cache)
	union{
		struct sockaddr_in v4;
		struct sockaddr_in6 v6;
	} remote;                       // cached remote endpoint (filled for UDP in nstat_pcb_cache)
	unsigned int if_index;          // cached index of the last output interface
	uint32_t ifnet_properties;      // cached nstat_inpcb_to_flags() snapshot
};
1158
static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
	struct inpcb *inp,
	bool ref,
	bool locked)
{
	struct nstat_tucookie *cookie;

	// Z_NOFAIL: the allocation cannot fail, so no NULL check is needed.
	cookie = kalloc_type(struct nstat_tucookie,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
	if (!locked) {
		LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
	}
	// Optionally take a use-count reference on the PCB; bail out if the
	// PCB is already being torn down.
	if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
		kfree_type(struct nstat_tucookie, cookie);
		return NULL;
	}
	cookie->inp = inp;
	// Cache the owning process name now; the proc structure may be gone
	// by the time the PCB detach notification arrives.
	proc_name(inp->inp_socket->last_pid, cookie->pname,
	    sizeof(cookie->pname));
	/*
	 * We only increment the reference count for UDP sockets because we
	 * only cache UDP socket tuples.
	 */
	if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP) {
		OSIncrementAtomic(&inp->inp_nstat_refcnt);
	}

	return cookie;
}
1189
1190 static struct nstat_tucookie *
nstat_tucookie_alloc(struct inpcb * inp)1191 nstat_tucookie_alloc(
1192 struct inpcb *inp)
1193 {
1194 return nstat_tucookie_alloc_internal(inp, false, false);
1195 }
1196
1197 static struct nstat_tucookie *
nstat_tucookie_alloc_ref(struct inpcb * inp)1198 nstat_tucookie_alloc_ref(
1199 struct inpcb *inp)
1200 {
1201 return nstat_tucookie_alloc_internal(inp, true, false);
1202 }
1203
1204 static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(struct inpcb * inp)1205 nstat_tucookie_alloc_ref_locked(
1206 struct inpcb *inp)
1207 {
1208 return nstat_tucookie_alloc_internal(inp, true, true);
1209 }
1210
1211 static void
nstat_tucookie_release_internal(struct nstat_tucookie * cookie,int inplock)1212 nstat_tucookie_release_internal(
1213 struct nstat_tucookie *cookie,
1214 int inplock)
1215 {
1216 if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP) {
1217 OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
1218 }
1219 in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
1220 kfree_type(struct nstat_tucookie, cookie);
1221 }
1222
1223 static void
nstat_tucookie_release(struct nstat_tucookie * cookie)1224 nstat_tucookie_release(
1225 struct nstat_tucookie *cookie)
1226 {
1227 nstat_tucookie_release_internal(cookie, false);
1228 }
1229
1230 static void
nstat_tucookie_release_locked(struct nstat_tucookie * cookie)1231 nstat_tucookie_release_locked(
1232 struct nstat_tucookie *cookie)
1233 {
1234 nstat_tucookie_release_internal(cookie, true);
1235 }
1236
1237
1238 static size_t
nstat_inp_domain_info(struct inpcb * inp,nstat_domain_info * domain_info,size_t len)1239 nstat_inp_domain_info(struct inpcb *inp, nstat_domain_info *domain_info, size_t len)
1240 {
1241 // Note, the caller has guaranteed that the buffer has been zeroed, there is no need to clear it again
1242 struct socket *so = inp->inp_socket;
1243
1244 if (so == NULL) {
1245 return 0;
1246 }
1247
1248 NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: Collecting stats");
1249
1250 if (domain_info == NULL) {
1251 return sizeof(nstat_domain_info);
1252 }
1253
1254 if (len < sizeof(nstat_domain_info)) {
1255 return 0;
1256 }
1257
1258 necp_copy_inp_domain_info(inp, so, domain_info);
1259
1260 NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: <pid %d> Collected stats - domain <%s> owner <%s> ctxt <%s> bundle id <%s> "
1261 "is_tracker %d is_non_app_initiated %d is_silent %d",
1262 so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid,
1263 domain_info->domain_name,
1264 domain_info->domain_owner,
1265 domain_info->domain_tracker_ctxt,
1266 domain_info->domain_attributed_bundle_id,
1267 domain_info->is_tracker,
1268 domain_info->is_non_app_initiated,
1269 domain_info->is_silent);
1270
1271 return sizeof(nstat_domain_info);
1272 }
1273
1274
1275 static nstat_provider nstat_tcp_provider;
1276
1277 static errno_t
nstat_tcpudp_lookup(struct inpcbinfo * inpinfo,const void * data,u_int32_t length,nstat_provider_cookie_t * out_cookie)1278 nstat_tcpudp_lookup(
1279 struct inpcbinfo *inpinfo,
1280 const void *data,
1281 u_int32_t length,
1282 nstat_provider_cookie_t *out_cookie)
1283 {
1284 struct inpcb *inp = NULL;
1285
1286 // parameter validation
1287 const nstat_tcp_add_param *param = (const nstat_tcp_add_param*)data;
1288 if (length < sizeof(*param)) {
1289 return EINVAL;
1290 }
1291
1292 // src and dst must match
1293 if (param->remote.v4.sin_family != 0 &&
1294 param->remote.v4.sin_family != param->local.v4.sin_family) {
1295 return EINVAL;
1296 }
1297
1298
1299 switch (param->local.v4.sin_family) {
1300 case AF_INET:
1301 {
1302 if (param->local.v4.sin_len != sizeof(param->local.v4) ||
1303 (param->remote.v4.sin_family != 0 &&
1304 param->remote.v4.sin_len != sizeof(param->remote.v4))) {
1305 return EINVAL;
1306 }
1307
1308 inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
1309 param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
1310 }
1311 break;
1312
1313 case AF_INET6:
1314 {
1315 union{
1316 const struct in6_addr *in6c;
1317 struct in6_addr *in6;
1318 } local, remote;
1319
1320 if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
1321 (param->remote.v6.sin6_family != 0 &&
1322 param->remote.v6.sin6_len != sizeof(param->remote.v6))) {
1323 return EINVAL;
1324 }
1325
1326 local.in6c = ¶m->local.v6.sin6_addr;
1327 remote.in6c = ¶m->remote.v6.sin6_addr;
1328
1329 inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port, param->remote.v6.sin6_scope_id,
1330 local.in6, param->local.v6.sin6_port, param->local.v6.sin6_scope_id, 1, NULL);
1331 }
1332 break;
1333
1334 default:
1335 return EINVAL;
1336 }
1337
1338 if (inp == NULL) {
1339 return ENOENT;
1340 }
1341
1342 // At this point we have a ref to the inpcb
1343 *out_cookie = nstat_tucookie_alloc(inp);
1344 if (*out_cookie == NULL) {
1345 in_pcb_checkstate(inp, WNT_RELEASE, 0);
1346 }
1347
1348 return 0;
1349 }
1350
1351 static errno_t
nstat_tcp_lookup(const void * data,u_int32_t length,nstat_provider_cookie_t * out_cookie)1352 nstat_tcp_lookup(
1353 const void *data,
1354 u_int32_t length,
1355 nstat_provider_cookie_t *out_cookie)
1356 {
1357 return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
1358 }
1359
1360 static int
nstat_tcp_gone(nstat_provider_cookie_t cookie)1361 nstat_tcp_gone(
1362 nstat_provider_cookie_t cookie)
1363 {
1364 struct nstat_tucookie *tucookie =
1365 (struct nstat_tucookie *)cookie;
1366 struct inpcb *inp;
1367 struct tcpcb *tp;
1368
1369 return (!(inp = tucookie->inp) ||
1370 !(tp = intotcpcb(inp)) ||
1371 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1372 }
1373
// Copy the current TCP counters for this source into out_counts.
// Sets *out_gone when the PCB is dying; still reports the final counts
// as long as the inpcb and tcpcb remain reachable.
static errno_t
nstat_tcp_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts *out_counts,
	int *out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	bzero(out_counts, sizeof(*out_counts));

	if (out_gone) {
		*out_gone = 0;
	}

	// if the pcb is in the dead state, we should stop using it
	if (nstat_tcp_gone(cookie)) {
		if (out_gone) {
			*out_gone = 1;
		}
		// Only error out if the inpcb or tcpcb is truly unreachable;
		// otherwise fall through and report the final snapshot.
		if (!(inp = tucookie->inp) || !intotcpcb(inp)) {
			return EINVAL;
		}
	}
	inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	// Unlocked reads of tcpcb fields below; values may be slightly stale.
	out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
	out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
	out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
	out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
	out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
	out_counts->nstat_avg_rtt = tp->t_srtt;
	out_counts->nstat_min_rtt = tp->t_rttbest;
	out_counts->nstat_var_rtt = tp->t_rttvar;
	// Keep the reported minimum consistent with the reported average.
	if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt) {
		out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
	}
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}
1426
1427 static void
nstat_tcp_release(nstat_provider_cookie_t cookie,int locked)1428 nstat_tcp_release(
1429 nstat_provider_cookie_t cookie,
1430 int locked)
1431 {
1432 struct nstat_tucookie *tucookie =
1433 (struct nstat_tucookie *)cookie;
1434
1435 nstat_tucookie_release_internal(tucookie, locked);
1436 }
1437
// Register state as a watcher of all TCP sockets and enumerate the ones
// that already exist. Returns the nstat_set_provider_filter() result.
static errno_t
nstat_tcp_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// There is a tricky issue around getting all TCP sockets added once
	// and only once. nstat_tcp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the tcbinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_tcp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(&tcbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);
	if (result == 0) {
		OSIncrementAtomic(&nstat_tcp_watchers);

		// Add all current tcp inpcbs. Ignore those in timewait
		struct inpcb *inp;
		struct nstat_tucookie *cookie;
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
		{
			// A NULL cookie means the PCB is being torn down; skip it.
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL) {
				continue;
			}
			// On add failure, drop the reference and stop scanning.
			if (nstat_control_source_add(0, state, &nstat_tcp_provider,
			    cookie) != 0) {
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(&tcbinfo.ipi_lock);

	return result;
}
1481
1482 static void
nstat_tcp_remove_watcher(__unused nstat_control_state * state)1483 nstat_tcp_remove_watcher(
1484 __unused nstat_control_state *state)
1485 {
1486 OSDecrementAtomic(&nstat_tcp_watchers);
1487 }
1488
// Notify all TCP watchers that a new PCB has been created, adding a source
// (with its own cookie reference) to each watching control.
__private_extern__ void
nstat_tcp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	// Stamp the connection start time even when nobody is watching.
	inp->inp_start_timestamp = mach_continuous_time();

	if (nstat_tcp_watchers == 0) {
		return;
	}

	// Lock order: socket lock first, then nstat_mtx.
	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0) {
			// this client is watching tcp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL) {
				continue;
			}
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_tcp_provider, cookie) != 0) {
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
1523
// Sever every nstat source that references this PCB. Matching sources are
// sent a goodbye and moved to a local dead list under the locks; the actual
// cleanup runs only after all locks are dropped (it may block).
__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	tailq_head_nstat_src dead_list;
	struct nstat_tucookie *tucookie;
	errno_t result;

	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
		return;
	}

	TAILQ_INIT(&dead_list);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		// Find the (at most one) source on this control that wraps inp.
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
			if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
				tucookie = (struct nstat_tucookie *)src->cookie;
				if (tucookie->inp == inp) {
					break;
				}
			}
		}

		if (src) {
			// Best effort: a failure to enqueue the goodbye is ignored.
			result = nstat_control_send_goodbye(state, src);

			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);

	// Clean up the collected sources with no locks held.
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
1567
// Deliver an event for this PCB to every control whose TCP or UDP provider
// filter subscribed to that event.
__private_extern__ void
nstat_pcb_event(struct inpcb *inp, u_int64_t event)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;
	errno_t result;
	nstat_provider_id_t provider_id;

	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
		return;
	}

	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		// Skip controls that subscribed to this event for neither provider.
		if (((state->ncs_provider_filters[NSTAT_PROVIDER_TCP_KERNEL].npf_events & event) == 0) &&
		    ((state->ncs_provider_filters[NSTAT_PROVIDER_UDP_KERNEL].npf_events & event) == 0)) {
			continue;
		}
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			provider_id = src->provider->nstat_provider_id;
			if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
				tucookie = (struct nstat_tucookie *)src->cookie;
				if (tucookie->inp == inp) {
					break;
				}
			}
		}

		// Re-check against the matched source's own provider filter
		// (provider_id is only meaningful when src != NULL). Send
		// failures are ignored: delivery is best effort.
		if (src && ((state->ncs_provider_filters[provider_id].npf_events & event) != 0)) {
			result = nstat_control_send_event(state, src, event);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
1606
1607
// Snapshot this UDP PCB's connection tuples, last output interface and
// interface properties into every watcher's cookie, since the tuples are
// lost from the inpcb once the socket disconnects (see the nstat_tucookie
// comment above).
__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	// Only UDP sockets hold inp_nstat_refcnt, so a zero count means no
	// cookie needs updating.
	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0) {
		return;
	}
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp) {
				// Snapshot the endpoints by address family.
				if (inp->inp_vflag & INP_IPV6) {
					in6_ip6_to_sockaddr(&inp->in6p_laddr,
					    inp->inp_lport,
					    inp->inp_lifscope,
					    &tucookie->local.v6,
					    sizeof(tucookie->local));
					in6_ip6_to_sockaddr(&inp->in6p_faddr,
					    inp->inp_fport,
					    inp->inp_fifscope,
					    &tucookie->remote.v6,
					    sizeof(tucookie->remote));
				} else if (inp->inp_vflag & INP_IPV4) {
					nstat_ip_to_sockaddr(&inp->inp_laddr,
					    inp->inp_lport,
					    &tucookie->local.v4,
					    sizeof(tucookie->local));
					nstat_ip_to_sockaddr(&inp->inp_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v4,
					    sizeof(tucookie->remote));
				}
				if (inp->inp_last_outifp) {
					tucookie->if_index =
					    inp->inp_last_outifp->if_index;
				}

				tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
				// Mark the cached fields as valid for later reads.
				tucookie->cached = true;
				break;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
1662
1663 __private_extern__ void
nstat_pcb_invalidate_cache(struct inpcb * inp)1664 nstat_pcb_invalidate_cache(struct inpcb *inp)
1665 {
1666 nstat_control_state *state;
1667 nstat_src *src;
1668 struct nstat_tucookie *tucookie;
1669
1670 if (inp == NULL || nstat_udp_watchers == 0 ||
1671 inp->inp_nstat_refcnt == 0) {
1672 return;
1673 }
1674 VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
1675 lck_mtx_lock(&nstat_mtx);
1676 for (state = nstat_controls; state; state = state->ncs_next) {
1677 lck_mtx_lock(&state->ncs_mtx);
1678 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1679 {
1680 tucookie = (struct nstat_tucookie *)src->cookie;
1681 if (tucookie->inp == inp) {
1682 tucookie->cached = false;
1683 break;
1684 }
1685 }
1686 lck_mtx_unlock(&state->ncs_mtx);
1687 }
1688 lck_mtx_unlock(&nstat_mtx);
1689 }
1690
// Fill a nstat_tcp_descriptor for this source: endpoints, TCP state,
// window/cwnd, congestion-control algorithm, owning process identity and
// socket buffer usage.
static errno_t
nstat_tcp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	size_t len)
{
	if (len < sizeof(nstat_tcp_descriptor)) {
		return EINVAL;
	}

	if (nstat_tcp_gone(cookie)) {
		return EINVAL;
	}

	nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data;
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);
	bzero(desc, sizeof(*desc));

	// Local/remote endpoints, selected by address family.
	if (inp->inp_vflag & INP_IPV6) {
		in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope,
		    &desc->local.v6, sizeof(desc->local));
		in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope,
		    &desc->remote.v6, sizeof(desc->remote));
	} else if (inp->inp_vflag & INP_IPV4) {
		nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
		    &desc->local.v4, sizeof(desc->local));
		nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
		    &desc->remote.v4, sizeof(desc->remote));
	}

	desc->state = intotcpcb(inp)->t_state;
	desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
	    inp->inp_last_outifp->if_index;

	// danger - not locked, values could be bogus
	desc->txunacked = tp->snd_max - tp->snd_una;
	desc->txwindow = tp->snd_wnd;
	desc->txcwindow = tp->snd_cwnd;

	if (CC_ALGO(tp)->name != NULL) {
		strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
		    sizeof(desc->cc_algo));
	}

	struct socket *so = inp->inp_socket;
	if (so) {
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		desc->traffic_class = so->so_traffic_class;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND)) {
			desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
		}
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG)) {
			desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
		}
		// Prefer the live process name; if the process is already
		// gone, fall back to the name cached in the cookie, otherwise
		// refresh the cookie's cached copy.
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		// For delegated sockets report the effective identity;
		// otherwise mirror the primary identity.
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		uuid_copy(desc->fuuid, inp->necp_client_uuid);
		desc->sndbufsize = so->so_snd.sb_hiwat;
		desc->sndbufused = so->so_snd.sb_cc;
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
	}

	tcp_get_connectivity_status(tp, &desc->connstatus);
	desc->ifnet_properties = (uint16_t)nstat_inpcb_to_flags(inp);
	inp_get_activity_bitmap(inp, &desc->activity_bitmap);
	desc->start_timestamp = inp->inp_start_timestamp;
	desc->timestamp = mach_continuous_time();
	return 0;
}
1785
1786 static bool
nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,bool is_UDP)1787 nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
1788 {
1789 bool retval = true;
1790
1791 if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS | NSTAT_FILTER_SPECIFIC_USER)) != 0) {
1792 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1793 struct inpcb *inp = tucookie->inp;
1794
1795 /* Only apply interface filter if at least one is allowed. */
1796 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
1797 uint32_t interface_properties = nstat_inpcb_to_flags(inp);
1798
1799 if ((filter->npf_flags & interface_properties) == 0) {
1800 // For UDP, we could have an undefined interface and yet transfers may have occurred.
1801 // We allow reporting if there have been transfers of the requested kind.
1802 // This is imperfect as we cannot account for the expensive attribute over wifi.
1803 // We also assume that cellular is expensive and we have no way to select for AWDL
1804 if (is_UDP) {
1805 do{
1806 if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR | NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
1807 (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes)) {
1808 break;
1809 }
1810 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
1811 (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes)) {
1812 break;
1813 }
1814 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
1815 (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes)) {
1816 break;
1817 }
1818 return false;
1819 } while (0);
1820 } else {
1821 return false;
1822 }
1823 }
1824 }
1825
1826 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval)) {
1827 struct socket *so = inp->inp_socket;
1828 retval = false;
1829
1830 if (so) {
1831 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
1832 (filter->npf_pid == so->last_pid)) {
1833 retval = true;
1834 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
1835 (filter->npf_pid == (so->so_flags & SOF_DELEGATED)? so->e_upid : so->last_pid)) {
1836 retval = true;
1837 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
1838 (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0)) {
1839 retval = true;
1840 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
1841 (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
1842 sizeof(so->last_uuid)) == 0)) {
1843 retval = true;
1844 }
1845 }
1846 }
1847 }
1848 return retval;
1849 }
1850
1851 static bool
nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)1852 nstat_tcp_reporting_allowed(
1853 nstat_provider_cookie_t cookie,
1854 nstat_provider_filter *filter,
1855 __unused u_int64_t suppression_flags)
1856 {
1857 return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
1858 }
1859
1860 static size_t
nstat_tcp_extensions(nstat_provider_cookie_t cookie,u_int32_t extension_id,void * buf,size_t len)1861 nstat_tcp_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
1862 {
1863 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1864 struct inpcb *inp = tucookie->inp;
1865
1866 if (nstat_tcp_gone(cookie)) {
1867 return 0;
1868 }
1869
1870 switch (extension_id) {
1871 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
1872 return nstat_inp_domain_info(inp, (nstat_domain_info *)buf, len);
1873
1874 case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV:
1875 default:
1876 break;
1877 }
1878 return 0;
1879 }
1880
1881 static void
nstat_init_tcp_provider(void)1882 nstat_init_tcp_provider(void)
1883 {
1884 bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
1885 nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
1886 nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
1887 nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
1888 nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
1889 nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
1890 nstat_tcp_provider.nstat_release = nstat_tcp_release;
1891 nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
1892 nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
1893 nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
1894 nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
1895 nstat_tcp_provider.nstat_copy_extension = nstat_tcp_extensions;
1896 nstat_tcp_provider.next = nstat_providers;
1897 nstat_providers = &nstat_tcp_provider;
1898 }
1899
1900 #pragma mark -- UDP Provider --
1901
1902 static nstat_provider nstat_udp_provider;
1903
1904 static errno_t
nstat_udp_lookup(const void * data,u_int32_t length,nstat_provider_cookie_t * out_cookie)1905 nstat_udp_lookup(
1906 const void *data,
1907 u_int32_t length,
1908 nstat_provider_cookie_t *out_cookie)
1909 {
1910 return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
1911 }
1912
1913 static int
nstat_udp_gone(nstat_provider_cookie_t cookie)1914 nstat_udp_gone(
1915 nstat_provider_cookie_t cookie)
1916 {
1917 struct nstat_tucookie *tucookie =
1918 (struct nstat_tucookie *)cookie;
1919 struct inpcb *inp;
1920
1921 return (!(inp = tucookie->inp) ||
1922 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1923 }
1924
// Snapshot the traffic counters for a kernel UDP source.
// Sets *out_gone non-zero when the pcb is dead; counters are still copied
// in that case as long as the pcb pointer itself remains valid.
static errno_t
nstat_udp_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts *out_counts,
	int *out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	if (out_gone) {
		*out_gone = 0;
	}

	// if the pcb is in the dead state, we should stop using it
	if (nstat_udp_gone(cookie)) {
		if (out_gone) {
			*out_gone = 1;
		}
		// Pcb pointer already cleared: nothing left to read at all.
		if (!tucookie->inp) {
			return EINVAL;
		}
	}
	struct inpcb *inp = tucookie->inp;

	// Each counter is read with a 64-bit atomic snapshot; the set as a
	// whole is not taken atomically.
	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}
1962
1963 static void
nstat_udp_release(nstat_provider_cookie_t cookie,int locked)1964 nstat_udp_release(
1965 nstat_provider_cookie_t cookie,
1966 int locked)
1967 {
1968 struct nstat_tucookie *tucookie =
1969 (struct nstat_tucookie *)cookie;
1970
1971 nstat_tucookie_release_internal(tucookie, locked);
1972 }
1973
// Register <state> as a watcher of all kernel UDP sources, then enumerate
// the current UDP pcb list under udbinfo.ipi_lock, adding each pcb as a
// source.  Returns 0 on success or the error from filter setup.
static errno_t
nstat_udp_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// There is a tricky issue around getting all UDP sockets added once
	// and only once. nstat_udp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the udpinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_udp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(&udbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);

	if (result == 0) {
		struct inpcb *inp;
		struct nstat_tucookie *cookie;

		OSIncrementAtomic(&nstat_udp_watchers);

		// Add all current UDP inpcbs.
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
		{
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL) {
				// Could not take a reference on this pcb; skip it.
				continue;
			}
			if (nstat_control_source_add(0, state, &nstat_udp_provider,
			    cookie) != 0) {
				// Undo the reference taken above and stop scanning.
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(&udbinfo.ipi_lock);

	return result;
}
2019
2020 static void
nstat_udp_remove_watcher(__unused nstat_control_state * state)2021 nstat_udp_remove_watcher(
2022 __unused nstat_control_state *state)
2023 {
2024 OSDecrementAtomic(&nstat_udp_watchers);
2025 }
2026
// Notification that a new UDP pcb has been created.  Always records the
// creation timestamp; if any clients are watching all UDP sources, the pcb
// is added as a source to each watching control state.
__private_extern__ void
nstat_udp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	// Recorded unconditionally; reported later as start_timestamp.
	inp->inp_start_timestamp = mach_continuous_time();

	if (nstat_udp_watchers == 0) {
		return;
	}

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0) {
			// this client is watching udp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL) {
				continue;
			}
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_udp_provider, cookie) != 0) {
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
2061
// Fill a nstat_udp_descriptor for a kernel UDP source: addresses,
// interface, owning process identity, delegation info and buffer stats.
// Returns EINVAL if the buffer is too small or the pcb is gone.
static errno_t
nstat_udp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	size_t len)
{
	if (len < sizeof(nstat_udp_descriptor)) {
		return EINVAL;
	}

	if (nstat_udp_gone(cookie)) {
		return EINVAL;
	}

	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data;
	struct inpcb *inp = tucookie->inp;

	bzero(desc, sizeof(*desc));

	// Addresses/properties come live from the pcb unless the cookie has
	// cached them (e.g. captured before the pcb started going away).
	if (tucookie->cached == false) {
		if (inp->inp_vflag & INP_IPV6) {
			in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope,
			    &desc->local.v6, sizeof(desc->local.v6));
			in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope,
			    &desc->remote.v6, sizeof(desc->remote.v6));
		} else if (inp->inp_vflag & INP_IPV4) {
			nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
			    &desc->local.v4, sizeof(desc->local.v4));
			nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
			    &desc->remote.v4, sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = (uint16_t)nstat_inpcb_to_flags(inp);
	} else {
		if (inp->inp_vflag & INP_IPV6) {
			memcpy(&desc->local.v6, &tucookie->local.v6,
			    sizeof(desc->local.v6));
			memcpy(&desc->remote.v6, &tucookie->remote.v6,
			    sizeof(desc->remote.v6));
		} else if (inp->inp_vflag & INP_IPV4) {
			memcpy(&desc->local.v4, &tucookie->local.v4,
			    sizeof(desc->local.v4));
			memcpy(&desc->remote.v4, &tucookie->remote.v4,
			    sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = tucookie->ifnet_properties;
	}

	// Prefer the last output interface; fall back to the cached index.
	if (inp->inp_last_outifp) {
		desc->ifindex = inp->inp_last_outifp->if_index;
	} else {
		desc->ifindex = tucookie->if_index;
	}

	struct socket *so = inp->inp_socket;
	if (so) {
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		// Use the cookie's cached name if proc_name() finds nothing;
		// otherwise refresh the cache with the current name.
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			// Delegated socket: report effective identity separately.
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		uuid_copy(desc->fuuid, inp->necp_client_uuid);
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
		desc->traffic_class = so->so_traffic_class;
		inp_get_activity_bitmap(inp, &desc->activity_bitmap);
		desc->start_timestamp = inp->inp_start_timestamp;
		desc->timestamp = mach_continuous_time();
	}

	return 0;
}
2154
2155 static bool
nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)2156 nstat_udp_reporting_allowed(
2157 nstat_provider_cookie_t cookie,
2158 nstat_provider_filter *filter,
2159 __unused u_int64_t suppression_flags)
2160 {
2161 return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
2162 }
2163
2164
2165 static size_t
nstat_udp_extensions(nstat_provider_cookie_t cookie,u_int32_t extension_id,void * buf,size_t len)2166 nstat_udp_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
2167 {
2168 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
2169 struct inpcb *inp = tucookie->inp;
2170 if (nstat_udp_gone(cookie)) {
2171 return 0;
2172 }
2173
2174 switch (extension_id) {
2175 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
2176 return nstat_inp_domain_info(inp, (nstat_domain_info *)buf, len);
2177
2178 default:
2179 break;
2180 }
2181 return 0;
2182 }
2183
2184
2185 static void
nstat_init_udp_provider(void)2186 nstat_init_udp_provider(void)
2187 {
2188 bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
2189 nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
2190 nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
2191 nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
2192 nstat_udp_provider.nstat_gone = nstat_udp_gone;
2193 nstat_udp_provider.nstat_counts = nstat_udp_counts;
2194 nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
2195 nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
2196 nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
2197 nstat_udp_provider.nstat_release = nstat_udp_release;
2198 nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
2199 nstat_udp_provider.nstat_copy_extension = nstat_udp_extensions;
2200 nstat_udp_provider.next = nstat_providers;
2201 nstat_providers = &nstat_udp_provider;
2202 }
2203
#if SKYWALK

#pragma mark -- TCP/UDP/QUIC Userland --

// Almost all of this infrastucture is common to both TCP and UDP

// Counts of clients watching all sources of each userland protocol.
static u_int32_t nstat_userland_quic_watchers = 0;
static u_int32_t nstat_userland_udp_watchers = 0;
static u_int32_t nstat_userland_tcp_watchers = 0;

// Counts of live shadow structures per userland protocol (see
// ntstat_userland_stats_open/close); updated under nstat_mtx.
static u_int32_t nstat_userland_quic_shadows = 0;
static u_int32_t nstat_userland_udp_shadows = 0;
static u_int32_t nstat_userland_tcp_shadows = 0;

// Provider descriptors for the three userland protocols; populated by the
// corresponding nstat_init_userland_*_provider() functions.
static nstat_provider nstat_userland_quic_provider;
static nstat_provider nstat_userland_udp_provider;
static nstat_provider nstat_userland_tcp_provider;
2221
// Kernel-side shadow of one userland TCP/UDP/QUIC flow.  Created by
// ntstat_userland_stats_open(), linked on nstat_userprot_shad_head, and
// used as the provider cookie for userland sources.
struct nstat_tu_shadow {
	tailq_entry_tu_shadow shad_link;                          // linkage on nstat_userprot_shad_head
	userland_stats_request_vals_fn *shad_getvals_fn;          // callback to fetch flags/counts/descriptor
	userland_stats_request_extension_fn *shad_get_extension_fn; // callback to fetch extension data
	userland_stats_provider_context *shad_provider_context;   // opaque context passed to the callbacks
	u_int64_t shad_properties;                                // properties supplied at open time
	u_int64_t shad_start_timestamp;                           // mach_continuous_time() at open
	nstat_provider_id_t shad_provider;                        // NSTAT_PROVIDER_{TCP,UDP,QUIC}_USERLAND
	struct nstat_procdetails *shad_procdetails;               // retained details of the opening process
	bool shad_live; // false if defunct
	uint32_t shad_magic;                                      // TU_SHADOW_MAGIC while valid
};

// Magic number checking should remain in place until the userland provider has been fully proven
#define TU_SHADOW_MAGIC 0xfeedf00d
#define TU_SHADOW_UNMAGIC 0xdeaddeed

// Global list of all userland-protocol shadows; protected by nstat_mtx.
static tailq_head_tu_shadow nstat_userprot_shad_head = TAILQ_HEAD_INITIALIZER(nstat_userprot_shad_head);
2240
2241 static errno_t
nstat_userland_tu_lookup(__unused const void * data,__unused u_int32_t length,__unused nstat_provider_cookie_t * out_cookie)2242 nstat_userland_tu_lookup(
2243 __unused const void *data,
2244 __unused u_int32_t length,
2245 __unused nstat_provider_cookie_t *out_cookie)
2246 {
2247 // Looking up a specific connection is not supported
2248 return ENOTSUP;
2249 }
2250
2251 static int
nstat_userland_tu_gone(__unused nstat_provider_cookie_t cookie)2252 nstat_userland_tu_gone(
2253 __unused nstat_provider_cookie_t cookie)
2254 {
2255 // Returns non-zero if the source has gone.
2256 // We don't keep a source hanging around, so the answer is always 0
2257 return 0;
2258 }
2259
2260 static errno_t
nstat_userland_tu_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)2261 nstat_userland_tu_counts(
2262 nstat_provider_cookie_t cookie,
2263 struct nstat_counts *out_counts,
2264 int *out_gone)
2265 {
2266 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2267 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2268 assert(shad->shad_live);
2269
2270 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, out_counts, NULL);
2271
2272 if (out_gone) {
2273 *out_gone = 0;
2274 }
2275
2276 return (result)? 0 : EIO;
2277 }
2278
2279
// Fill a protocol-appropriate descriptor for a userland source.  The
// provider callback fills the protocol fields; the process identity and
// timestamps are then overlaid from the shadow's cached procdetails.
static errno_t
nstat_userland_tu_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	__unused size_t len)
{
	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
	assert(shad->shad_magic == TU_SHADOW_MAGIC);
	assert(shad->shad_live);
	struct nstat_procdetails *procdetails = shad->shad_procdetails;
	assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);

	// NOTE(review): the identity fields below are written even when the
	// callback reports failure; only the return value reflects <result>.
	bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, data);

	// The three descriptor layouts differ by protocol, so the common
	// process-identity fields must be set per-case.
	switch (shad->shad_provider) {
	case NSTAT_PROVIDER_TCP_USERLAND:
	{
		nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)data;
		desc->pid = procdetails->pdet_pid;
		desc->upid = procdetails->pdet_upid;
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
		desc->start_timestamp = shad->shad_start_timestamp;
		desc->timestamp = mach_continuous_time();
	}
	break;
	case NSTAT_PROVIDER_UDP_USERLAND:
	{
		nstat_udp_descriptor *desc = (nstat_udp_descriptor *)data;
		desc->pid = procdetails->pdet_pid;
		desc->upid = procdetails->pdet_upid;
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
		desc->start_timestamp = shad->shad_start_timestamp;
		desc->timestamp = mach_continuous_time();
	}
	break;
	case NSTAT_PROVIDER_QUIC_USERLAND:
	{
		nstat_quic_descriptor *desc = (nstat_quic_descriptor *)data;
		desc->pid = procdetails->pdet_pid;
		desc->upid = procdetails->pdet_upid;
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
		desc->start_timestamp = shad->shad_start_timestamp;
		desc->timestamp = mach_continuous_time();
	}
	break;
	default:
		break;
	}
	return (result)? 0 : EIO;
}
2333
2334 static void
nstat_userland_tu_release(__unused nstat_provider_cookie_t cookie,__unused int locked)2335 nstat_userland_tu_release(
2336 __unused nstat_provider_cookie_t cookie,
2337 __unused int locked)
2338 {
2339 // Called when a nstat_src is detached.
2340 // We don't reference count or ask for delayed release so nothing to do here.
2341 // Note that any associated nstat_tu_shadow may already have been released.
2342 }
2343
2344 static bool
check_reporting_for_user(nstat_provider_filter * filter,pid_t pid,pid_t epid,uuid_t * uuid,uuid_t * euuid)2345 check_reporting_for_user(nstat_provider_filter *filter, pid_t pid, pid_t epid, uuid_t *uuid, uuid_t *euuid)
2346 {
2347 bool retval = true;
2348
2349 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2350 retval = false;
2351
2352 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
2353 (filter->npf_pid == pid)) {
2354 retval = true;
2355 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
2356 (filter->npf_pid == epid)) {
2357 retval = true;
2358 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
2359 (memcmp(filter->npf_uuid, uuid, sizeof(*uuid)) == 0)) {
2360 retval = true;
2361 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
2362 (memcmp(filter->npf_uuid, euuid, sizeof(*euuid)) == 0)) {
2363 retval = true;
2364 }
2365 }
2366 return retval;
2367 }
2368
2369 static bool
nstat_userland_tcp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)2370 nstat_userland_tcp_reporting_allowed(
2371 nstat_provider_cookie_t cookie,
2372 nstat_provider_filter *filter,
2373 __unused u_int64_t suppression_flags)
2374 {
2375 bool retval = true;
2376 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2377
2378 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2379
2380 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
2381 u_int16_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
2382
2383 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) {
2384 u_int32_t extended_ifflags = extend_ifnet_flags(ifflags);
2385 if ((filter->npf_flags & extended_ifflags) == 0) {
2386 return false;
2387 }
2388 }
2389 }
2390
2391 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2392 nstat_tcp_descriptor tcp_desc; // Stack allocation - OK or pushing the limits too far?
2393 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &tcp_desc)) {
2394 retval = check_reporting_for_user(filter, (pid_t)tcp_desc.pid, (pid_t)tcp_desc.epid,
2395 &tcp_desc.uuid, &tcp_desc.euuid);
2396 } else {
2397 retval = false; // No further information, so might as well give up now.
2398 }
2399 }
2400 return retval;
2401 }
2402
2403 static size_t
nstat_userland_extensions(nstat_provider_cookie_t cookie,u_int32_t extension_id,void * buf,size_t len)2404 nstat_userland_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
2405 {
2406 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2407 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2408 assert(shad->shad_live);
2409 assert(shad->shad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
2410
2411 return shad->shad_get_extension_fn(shad->shad_provider_context, extension_id, buf, len);
2412 }
2413
2414
2415 static bool
nstat_userland_udp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)2416 nstat_userland_udp_reporting_allowed(
2417 nstat_provider_cookie_t cookie,
2418 nstat_provider_filter *filter,
2419 __unused u_int64_t suppression_flags)
2420 {
2421 bool retval = true;
2422 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2423
2424 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2425
2426 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
2427 u_int16_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
2428
2429 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) {
2430 u_int32_t extended_ifflags = extend_ifnet_flags(ifflags);
2431 if ((filter->npf_flags & extended_ifflags) == 0) {
2432 return false;
2433 }
2434 }
2435 }
2436 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2437 nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far?
2438 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &udp_desc)) {
2439 retval = check_reporting_for_user(filter, (pid_t)udp_desc.pid, (pid_t)udp_desc.epid,
2440 &udp_desc.uuid, &udp_desc.euuid);
2441 } else {
2442 retval = false; // No further information, so might as well give up now.
2443 }
2444 }
2445 return retval;
2446 }
2447
2448 static bool
nstat_userland_quic_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)2449 nstat_userland_quic_reporting_allowed(
2450 nstat_provider_cookie_t cookie,
2451 nstat_provider_filter *filter,
2452 __unused u_int64_t suppression_flags)
2453 {
2454 bool retval = true;
2455 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2456
2457 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2458
2459 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
2460 u_int16_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
2461
2462 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) {
2463 u_int32_t extended_ifflags = extend_ifnet_flags(ifflags);
2464 if ((filter->npf_flags & extended_ifflags) == 0) {
2465 return false;
2466 }
2467 }
2468 }
2469 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2470 nstat_quic_descriptor quic_desc; // Stack allocation - OK or pushing the limits too far?
2471 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &quic_desc)) {
2472 retval = check_reporting_for_user(filter, (pid_t)quic_desc.pid, (pid_t)quic_desc.epid,
2473 &quic_desc.uuid, &quic_desc.euuid);
2474 } else {
2475 retval = false; // No further information, so might as well give up now.
2476 }
2477 }
2478 return retval;
2479 }
2480
// Common watcher-registration path for the three userland providers.
// Under nstat_mtx: applies the filter, bumps the per-protocol watcher
// count, and adds every live shadow of the matching provider type as a
// source on <state>.
static errno_t
nstat_userland_protocol_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req,
	nstat_provider_type_t nstat_provider_type,
	nstat_provider *nstat_provider,
	u_int32_t *proto_watcher_cnt)
{
	errno_t result;

	lck_mtx_lock(&nstat_mtx);
	result = nstat_set_provider_filter(state, req);

	if (result == 0) {
		struct nstat_tu_shadow *shad;

		OSIncrementAtomic(proto_watcher_cnt);

		TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
			assert(shad->shad_magic == TU_SHADOW_MAGIC);

			// Only live shadows of the requested protocol are added.
			if ((shad->shad_provider == nstat_provider_type) && (shad->shad_live)) {
				result = nstat_control_source_add(0, state, nstat_provider, shad);
				if (result != 0) {
					// Abandon the scan; <result> reports the failure.
					printf("%s - nstat_control_source_add returned %d for "
					    "provider type: %d\n", __func__, result, nstat_provider_type);
					break;
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return result;
}
2516
2517 static errno_t
nstat_userland_tcp_add_watcher(nstat_control_state * state,nstat_msg_add_all_srcs * req)2518 nstat_userland_tcp_add_watcher(
2519 nstat_control_state *state,
2520 nstat_msg_add_all_srcs *req)
2521 {
2522 return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_TCP_USERLAND,
2523 &nstat_userland_tcp_provider, &nstat_userland_tcp_watchers);
2524 }
2525
2526 static errno_t
nstat_userland_udp_add_watcher(nstat_control_state * state,nstat_msg_add_all_srcs * req)2527 nstat_userland_udp_add_watcher(
2528 nstat_control_state *state,
2529 nstat_msg_add_all_srcs *req)
2530 {
2531 return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_UDP_USERLAND,
2532 &nstat_userland_udp_provider, &nstat_userland_udp_watchers);
2533 }
2534
2535 static errno_t
nstat_userland_quic_add_watcher(nstat_control_state * state,nstat_msg_add_all_srcs * req)2536 nstat_userland_quic_add_watcher(
2537 nstat_control_state *state,
2538 nstat_msg_add_all_srcs *req)
2539 {
2540 return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_QUIC_USERLAND,
2541 &nstat_userland_quic_provider, &nstat_userland_quic_watchers);
2542 }
2543
2544 static void
nstat_userland_tcp_remove_watcher(__unused nstat_control_state * state)2545 nstat_userland_tcp_remove_watcher(
2546 __unused nstat_control_state *state)
2547 {
2548 OSDecrementAtomic(&nstat_userland_tcp_watchers);
2549 }
2550
2551 static void
nstat_userland_udp_remove_watcher(__unused nstat_control_state * state)2552 nstat_userland_udp_remove_watcher(
2553 __unused nstat_control_state *state)
2554 {
2555 OSDecrementAtomic(&nstat_userland_udp_watchers);
2556 }
2557
2558 static void
nstat_userland_quic_remove_watcher(__unused nstat_control_state * state)2559 nstat_userland_quic_remove_watcher(
2560 __unused nstat_control_state *state)
2561 {
2562 OSDecrementAtomic(&nstat_userland_quic_watchers);
2563 }
2564
2565
2566 static void
nstat_init_userland_tcp_provider(void)2567 nstat_init_userland_tcp_provider(void)
2568 {
2569 bzero(&nstat_userland_tcp_provider, sizeof(nstat_userland_tcp_provider));
2570 nstat_userland_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
2571 nstat_userland_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_USERLAND;
2572 nstat_userland_tcp_provider.nstat_lookup = nstat_userland_tu_lookup;
2573 nstat_userland_tcp_provider.nstat_gone = nstat_userland_tu_gone;
2574 nstat_userland_tcp_provider.nstat_counts = nstat_userland_tu_counts;
2575 nstat_userland_tcp_provider.nstat_release = nstat_userland_tu_release;
2576 nstat_userland_tcp_provider.nstat_watcher_add = nstat_userland_tcp_add_watcher;
2577 nstat_userland_tcp_provider.nstat_watcher_remove = nstat_userland_tcp_remove_watcher;
2578 nstat_userland_tcp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2579 nstat_userland_tcp_provider.nstat_reporting_allowed = nstat_userland_tcp_reporting_allowed;
2580 nstat_userland_tcp_provider.nstat_copy_extension = nstat_userland_extensions;
2581 nstat_userland_tcp_provider.next = nstat_providers;
2582 nstat_providers = &nstat_userland_tcp_provider;
2583 }
2584
2585
2586 static void
nstat_init_userland_udp_provider(void)2587 nstat_init_userland_udp_provider(void)
2588 {
2589 bzero(&nstat_userland_udp_provider, sizeof(nstat_userland_udp_provider));
2590 nstat_userland_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
2591 nstat_userland_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_USERLAND;
2592 nstat_userland_udp_provider.nstat_lookup = nstat_userland_tu_lookup;
2593 nstat_userland_udp_provider.nstat_gone = nstat_userland_tu_gone;
2594 nstat_userland_udp_provider.nstat_counts = nstat_userland_tu_counts;
2595 nstat_userland_udp_provider.nstat_release = nstat_userland_tu_release;
2596 nstat_userland_udp_provider.nstat_watcher_add = nstat_userland_udp_add_watcher;
2597 nstat_userland_udp_provider.nstat_watcher_remove = nstat_userland_udp_remove_watcher;
2598 nstat_userland_udp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2599 nstat_userland_udp_provider.nstat_reporting_allowed = nstat_userland_udp_reporting_allowed;
2600 nstat_userland_udp_provider.nstat_copy_extension = nstat_userland_extensions;
2601 nstat_userland_udp_provider.next = nstat_providers;
2602 nstat_providers = &nstat_userland_udp_provider;
2603 }
2604
2605 static void
nstat_init_userland_quic_provider(void)2606 nstat_init_userland_quic_provider(void)
2607 {
2608 bzero(&nstat_userland_quic_provider, sizeof(nstat_userland_quic_provider));
2609 nstat_userland_quic_provider.nstat_descriptor_length = sizeof(nstat_quic_descriptor);
2610 nstat_userland_quic_provider.nstat_provider_id = NSTAT_PROVIDER_QUIC_USERLAND;
2611 nstat_userland_quic_provider.nstat_lookup = nstat_userland_tu_lookup;
2612 nstat_userland_quic_provider.nstat_gone = nstat_userland_tu_gone;
2613 nstat_userland_quic_provider.nstat_counts = nstat_userland_tu_counts;
2614 nstat_userland_quic_provider.nstat_release = nstat_userland_tu_release;
2615 nstat_userland_quic_provider.nstat_watcher_add = nstat_userland_quic_add_watcher;
2616 nstat_userland_quic_provider.nstat_watcher_remove = nstat_userland_quic_remove_watcher;
2617 nstat_userland_quic_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2618 nstat_userland_quic_provider.nstat_reporting_allowed = nstat_userland_quic_reporting_allowed;
2619 nstat_userland_quic_provider.nstat_copy_extension = nstat_userland_extensions;
2620 nstat_userland_quic_provider.next = nstat_providers;
2621 nstat_providers = &nstat_userland_quic_provider;
2622 }
2623
2624
2625 // Things get started with a call to netstats to say that there’s a new connection:
__private_extern__ nstat_userland_context
ntstat_userland_stats_open(userland_stats_provider_context *ctx,
    int provider_id,
    u_int64_t properties,
    userland_stats_request_vals_fn req_fn,
    userland_stats_request_extension_fn req_extension_fn)
{
	// Create a shadow for a new userland TCP/UDP/QUIC flow, link it on
	// the global shadow list, and add it as a source to every control
	// state currently watching that provider.  Returns the shadow as an
	// opaque context, or NULL on bad provider id / missing proc details.
	struct nstat_tu_shadow *shad;
	struct nstat_procdetails *procdetails;
	nstat_provider *provider;

	if ((provider_id != NSTAT_PROVIDER_TCP_USERLAND) &&
	    (provider_id != NSTAT_PROVIDER_UDP_USERLAND) &&
	    (provider_id != NSTAT_PROVIDER_QUIC_USERLAND)) {
		printf("%s - incorrect provider is supplied, %d\n", __func__, provider_id);
		return NULL;
	}

	shad = kalloc_type(struct nstat_tu_shadow, Z_WAITOK | Z_NOFAIL);

	// Take a reference on the calling process's details; released in
	// ntstat_userland_stats_close().
	procdetails = nstat_retain_curprocdetails();

	if (procdetails == NULL) {
		kfree_type(struct nstat_tu_shadow, shad);
		return NULL;
	}

	shad->shad_getvals_fn = req_fn;
	shad->shad_get_extension_fn = req_extension_fn;
	shad->shad_provider_context = ctx;
	shad->shad_provider = provider_id;
	shad->shad_properties = properties;
	shad->shad_procdetails = procdetails;
	shad->shad_start_timestamp = mach_continuous_time();
	shad->shad_live = true;
	shad->shad_magic = TU_SHADOW_MAGIC;

	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;

	// Even if there are no watchers, we save the shadow structure
	TAILQ_INSERT_HEAD(&nstat_userprot_shad_head, shad, shad_link);

	// Bump the per-protocol shadow count and pick the matching provider.
	if (provider_id == NSTAT_PROVIDER_TCP_USERLAND) {
		nstat_userland_tcp_shadows++;
		provider = &nstat_userland_tcp_provider;
	} else if (provider_id == NSTAT_PROVIDER_UDP_USERLAND) {
		nstat_userland_udp_shadows++;
		provider = &nstat_userland_udp_provider;
	} else {
		nstat_userland_quic_shadows++;
		provider = &nstat_userland_quic_provider;
	}

	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << provider_id)) != 0) {
			// this client is watching tcp/udp/quic userland
			// Link to it.
			int result = nstat_control_source_add(0, state, provider, shad);
			if (result != 0) {
				// Source-add failure is deliberately tolerated here.
				// There should be some kind of statistics for failures like this.
				// <rdar://problem/31377195> The kernel ntstat component should keep some
				// internal counters reflecting operational state for eventual AWD reporting
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return (nstat_userland_context)shad;
}
2696
2697
__private_extern__ void
ntstat_userland_stats_close(nstat_userland_context nstat_ctx)
{
	// Tear down a userland flow's shadow: send goodbyes to any watching
	// control states, unlink the shadow, and free it.  Sources are moved
	// to a local dead list so their cleanup runs after nstat_mtx and the
	// per-state locks are dropped.
	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
	tailq_head_nstat_src dead_list;
	nstat_src *src;

	if (shad == NULL) {
		return;
	}

	assert(shad->shad_magic == TU_SHADOW_MAGIC);
	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);
	// Only scan the control states when at least one userland watcher exists.
	if (nstat_userland_udp_watchers != 0 ||
	    nstat_userland_tcp_watchers != 0 ||
	    nstat_userland_quic_watchers != 0) {
		nstat_control_state *state;
		errno_t result;

		for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
			// Find this state's source (if any) backed by our shadow.
			TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
			{
				if (shad == (struct nstat_tu_shadow *)src->cookie) {
					nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
					if (provider_id == NSTAT_PROVIDER_TCP_USERLAND ||
					    provider_id == NSTAT_PROVIDER_UDP_USERLAND ||
					    provider_id == NSTAT_PROVIDER_QUIC_USERLAND) {
						break;
					}
				}
			}

			if (src) {
				// Tell the client the source is going away, then
				// defer the actual cleanup to the dead list.
				result = nstat_control_send_goodbye(state, src);

				TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
				TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
			}
			lck_mtx_unlock(&state->ncs_mtx);
		}
	}
	TAILQ_REMOVE(&nstat_userprot_shad_head, shad, shad_link);

	// Defunct shadows were already uncounted when they went defunct.
	if (shad->shad_live) {
		if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) {
			nstat_userland_tcp_shadows--;
		} else if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND) {
			nstat_userland_udp_shadows--;
		} else {
			nstat_userland_quic_shadows--;
		}
	}

	lck_mtx_unlock(&nstat_mtx);

	// Clean up detached sources without any locks held.
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
	nstat_release_procdetails(shad->shad_procdetails);
	// Poison the magic before freeing to catch use-after-free.
	shad->shad_magic = TU_SHADOW_UNMAGIC;

	kfree_type(struct nstat_tu_shadow, shad);
}
2765
2766
/*
 * Report a significant event on a userland flow shadow.
 * Currently only NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT is acted upon: it
 * early-closes the corresponding sources for clients that have not opted
 * out via NSTAT_FILTER_TCP_NO_EARLY_CLOSE.  Safe to call with NULL context.
 */
__private_extern__ void
ntstat_userland_stats_event(
	nstat_userland_context nstat_ctx,
	uint64_t event)
{
	// This will need refinement for when we do genuine stats filtering
	// See <rdar://problem/23022832> NetworkStatistics should provide opt-in notifications
	// For now it deals only with events that potentially cause any traditional netstat sources to be closed

	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
	tailq_head_nstat_src dead_list;
	nstat_src *src;

	if (shad == NULL) {
		return;
	}

	assert(shad->shad_magic == TU_SHADOW_MAGIC);

	if (event & NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT) {
		TAILQ_INIT(&dead_list);

		lck_mtx_lock(&nstat_mtx);
		if (nstat_userland_udp_watchers != 0 ||
		    nstat_userland_tcp_watchers != 0 ||
		    nstat_userland_quic_watchers != 0) {
			nstat_control_state *state;
			errno_t result;

			for (state = nstat_controls; state; state = state->ncs_next) {
				lck_mtx_lock(&state->ncs_mtx);
				TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
				{
					if (shad == (struct nstat_tu_shadow *)src->cookie) {
						break;
					}
				}

				if (src) {
					// Clients may opt out of early close for TIME_WAIT flows.
					if (!(src->filter & NSTAT_FILTER_TCP_NO_EARLY_CLOSE)) {
						// Send failure is not recoverable; result is unused by design.
						result = nstat_control_send_goodbye(state, src);

						TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
						TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
					}
				}
				lck_mtx_unlock(&state->ncs_mtx);
			}
		}
		lck_mtx_unlock(&nstat_mtx);

		// Clean up detached sources outside of all locks.
		while ((src = TAILQ_FIRST(&dead_list))) {
			TAILQ_REMOVE(&dead_list, src, ns_control_link);
			nstat_control_cleanup_source(NULL, src, TRUE);
		}
	}
}
2824
/*
 * Mark every userland flow shadow owned by `pid` as defunct.
 * Sends goodbyes and detaches all matching sources from all clients, then
 * flips the shadows to non-live (decrementing the per-protocol live
 * counters).  Idempotent: already-defunct shadows are skipped via shad_live.
 */
__private_extern__ void
nstats_userland_stats_defunct_for_process(int pid)
{
	// Note that this can be called multiple times for the same process
	tailq_head_nstat_src dead_list;
	nstat_src *src, *tmpsrc;
	struct nstat_tu_shadow *shad;

	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);

	if (nstat_userland_udp_watchers != 0 ||
	    nstat_userland_tcp_watchers != 0 ||
	    nstat_userland_quic_watchers != 0) {
		nstat_control_state *state;
		errno_t result;

		for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
			// _SAFE variant: entries may be removed while iterating.
			TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
			{
				nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
				if (provider_id == NSTAT_PROVIDER_TCP_USERLAND ||
				    provider_id == NSTAT_PROVIDER_UDP_USERLAND ||
				    provider_id == NSTAT_PROVIDER_QUIC_USERLAND) {
					shad = (struct nstat_tu_shadow *)src->cookie;
					if (shad->shad_procdetails->pdet_pid == pid) {
						// Send failure is not recoverable; result is unused by design.
						result = nstat_control_send_goodbye(state, src);

						TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
						TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
					}
				}
			}
			lck_mtx_unlock(&state->ncs_mtx);
		}
	}

	// Second pass: retire the process's shadows and fix the live counters.
	TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if (shad->shad_live) {
			if (shad->shad_procdetails->pdet_pid == pid) {
				shad->shad_live = false;
				if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) {
					nstat_userland_tcp_shadows--;
				} else if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND) {
					nstat_userland_udp_shadows--;
				} else {
					nstat_userland_quic_shadows--;
				}
			}
		}
	}

	lck_mtx_unlock(&nstat_mtx);

	// Clean up detached sources outside of all locks.
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
2888
2889
2890 #pragma mark -- Generic Providers --
2891
2892 static nstat_provider nstat_userland_conn_provider;
2893 static nstat_provider nstat_udp_subflow_provider;
2894
2895 static u_int32_t nstat_generic_provider_watchers[NSTAT_PROVIDER_COUNT];
2896
/*
 * Shadow object tracking one generic provider source (userland connection
 * or UDP subflow).  Reference-counted: one reference for the provider plus
 * one per attached nstat_src.  Freed by nstat_release_gshad().
 */
struct nstat_generic_shadow {
	tailq_entry_generic_shadow gshad_link;                        // linkage on nstat_gshad_head
	nstat_provider_context gshad_provider_context;                // opaque context passed back to the provider callbacks
	nstat_provider_request_vals_fn *gshad_getvals_fn;             // callback to fetch flags/counts/descriptor
	nstat_provider_request_extensions_fn *gshad_getextensions_fn; // optional callback for extension TLVs, may be NULL
	u_int64_t gshad_properties;                                   // property flags used for initial-filter matching
	u_int64_t gshad_start_timestamp;                              // mach_continuous_time() at open
	struct nstat_procdetails *gshad_procdetails;                  // retained details of the owning process
	nstat_provider_id_t gshad_provider;                           // NSTAT_PROVIDER_CONN_USERLAND or NSTAT_PROVIDER_UDP_SUBFLOW
	int32_t gshad_refcnt;                                         // manipulated via OSIncrement/DecrementAtomic
	uint32_t gshad_magic;                                         // integrity check, see NSTAT_GENERIC_SHADOW_MAGIC
};
2909
2910 // Magic number checking should remain in place until the userland provider has been fully proven
2911 #define NSTAT_GENERIC_SHADOW_MAGIC 0xfadef00d
2912 #define NSTAT_GENERIC_SHADOW_UNMAGIC 0xfadedead
2913
2914 static tailq_head_generic_shadow nstat_gshad_head = TAILQ_HEAD_INITIALIZER(nstat_gshad_head);
2915
2916 static void
nstat_release_gshad(struct nstat_generic_shadow * gshad)2917 nstat_release_gshad(
2918 struct nstat_generic_shadow *gshad)
2919 {
2920 assert(gshad->gshad_magic = NSTAT_GENERIC_SHADOW_MAGIC);
2921
2922 if (OSDecrementAtomic(&gshad->gshad_refcnt) == 1) {
2923 nstat_release_procdetails(gshad->gshad_procdetails);
2924 gshad->gshad_magic = NSTAT_GENERIC_SHADOW_UNMAGIC;
2925 kfree_type(struct nstat_generic_shadow, gshad);
2926 }
2927 }
2928
2929 static errno_t
nstat_generic_provider_lookup(__unused const void * data,__unused u_int32_t length,__unused nstat_provider_cookie_t * out_cookie)2930 nstat_generic_provider_lookup(
2931 __unused const void *data,
2932 __unused u_int32_t length,
2933 __unused nstat_provider_cookie_t *out_cookie)
2934 {
2935 // Looking up a specific connection is not supported
2936 return ENOTSUP;
2937 }
2938
2939 static int
nstat_generic_provider_gone(__unused nstat_provider_cookie_t cookie)2940 nstat_generic_provider_gone(
2941 __unused nstat_provider_cookie_t cookie)
2942 {
2943 // Returns non-zero if the source has gone.
2944 // We don't keep a source hanging around, so the answer is always 0
2945 return 0;
2946 }
2947
2948 static errno_t
nstat_generic_provider_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)2949 nstat_generic_provider_counts(
2950 nstat_provider_cookie_t cookie,
2951 struct nstat_counts *out_counts,
2952 int *out_gone)
2953 {
2954 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
2955 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
2956
2957 memset(out_counts, 0, sizeof(*out_counts));
2958
2959 bool result = (*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, out_counts, NULL);
2960
2961 if (out_gone) {
2962 *out_gone = 0;
2963 }
2964 return (result)? 0 : EIO;
2965 }
2966
2967
2968 static errno_t
nstat_generic_provider_copy_descriptor(nstat_provider_cookie_t cookie,void * data,__unused size_t len)2969 nstat_generic_provider_copy_descriptor(
2970 nstat_provider_cookie_t cookie,
2971 void *data,
2972 __unused size_t len)
2973 {
2974 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
2975 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
2976 struct nstat_procdetails *procdetails = gshad->gshad_procdetails;
2977 assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
2978
2979 bool result = (*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, NULL, data);
2980
2981 switch (gshad->gshad_provider) {
2982 case NSTAT_PROVIDER_CONN_USERLAND:
2983 {
2984 nstat_connection_descriptor *desc = (nstat_connection_descriptor *)data;
2985 desc->pid = procdetails->pdet_pid;
2986 desc->upid = procdetails->pdet_upid;
2987 uuid_copy(desc->uuid, procdetails->pdet_uuid);
2988 strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
2989 desc->start_timestamp = gshad->gshad_start_timestamp;
2990 desc->timestamp = mach_continuous_time();
2991 break;
2992 }
2993 case NSTAT_PROVIDER_UDP_SUBFLOW:
2994 {
2995 nstat_udp_descriptor *desc = (nstat_udp_descriptor *)data;
2996 desc->pid = procdetails->pdet_pid;
2997 desc->upid = procdetails->pdet_upid;
2998 uuid_copy(desc->uuid, procdetails->pdet_uuid);
2999 strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
3000 desc->start_timestamp = gshad->gshad_start_timestamp;
3001 desc->timestamp = mach_continuous_time();
3002 break;
3003 }
3004 default:
3005 break;
3006 }
3007 return (result)? 0 : EIO;
3008 }
3009
3010 static void
nstat_generic_provider_release(__unused nstat_provider_cookie_t cookie,__unused int locked)3011 nstat_generic_provider_release(
3012 __unused nstat_provider_cookie_t cookie,
3013 __unused int locked)
3014 {
3015 // Called when a nstat_src is detached.
3016 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3017
3018 nstat_release_gshad(gshad);
3019 }
3020
/*
 * Decide whether a generic source should be reported to a client, given
 * that client's provider filter.  Checks, in order: boring-flag
 * suppression, interface/connection flag matching, then per-user (pid /
 * uuid / effective pid / effective uuid) matching.  Note that an
 * unsupported effective-id filter on the conn provider is cleared from the
 * client's filter in place as a side effect.
 */
static bool
nstat_generic_provider_reporting_allowed(
	nstat_provider_cookie_t cookie,
	nstat_provider_filter *filter,
	u_int64_t suppression_flags)
{
	struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;

	assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

	// Suppress reports the client has flagged as uninteresting ("boring").
	if ((filter->npf_flags & NSTAT_FILTER_SUPPRESS_BORING_FLAGS) != 0) {
		if ((filter->npf_flags & suppression_flags) != 0) {
			return false;
		}
	}

	// Filter based on interface and connection flags
	// If a provider doesn't support flags, a client shouldn't attempt to use filtering
	if ((filter->npf_flags & NSTAT_FILTER_IFNET_AND_CONN_FLAGS) != 0) {
		u_int32_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;

		if ((*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, &ifflags, NULL, NULL)) {
			if ((filter->npf_flags & ifflags) == 0) {
				return false;
			}
		}
	}

	if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
		struct nstat_procdetails *procdetails = gshad->gshad_procdetails;
		assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);

		// Check details that we have readily to hand before asking the provider for descriptor items
		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
		    (filter->npf_pid == procdetails->pdet_pid)) {
			return true;
		}
		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
		    (memcmp(filter->npf_uuid, &procdetails->pdet_uuid, sizeof(filter->npf_uuid)) == 0)) {
			return true;
		}
		if ((filter->npf_flags & (NSTAT_FILTER_SPECIFIC_USER_BY_EPID | NSTAT_FILTER_SPECIFIC_USER_BY_EUUID)) != 0) {
			nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far?
			switch (gshad->gshad_provider) {
			case NSTAT_PROVIDER_CONN_USERLAND:
				// Filtering by effective uuid or effective pid is currently not supported
				// NOTE: mutates the client's filter so the unsupported bits are not re-checked.
				filter->npf_flags &= ~((uint64_t)(NSTAT_FILTER_SPECIFIC_USER_BY_EPID | NSTAT_FILTER_SPECIFIC_USER_BY_EUUID));
				printf("%s - attempt to filter conn provider by effective pid/uuid, not supported\n", __func__);
				return true;

			case NSTAT_PROVIDER_UDP_SUBFLOW:
				// Ask the provider for a descriptor to obtain the effective pid/uuid.
				if ((*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, NULL, &udp_desc)) {
					if (check_reporting_for_user(filter, procdetails->pdet_pid, (pid_t)udp_desc.epid,
					    &procdetails->pdet_uuid, &udp_desc.euuid)) {
						return true;
					}
				}
				break;
			default:
				break;
			}
		}
		// Per-user filtering was requested and nothing matched.
		return false;
	}
	return true;
}
3087
3088 static size_t
nstat_generic_extensions(nstat_provider_cookie_t cookie,u_int32_t extension_id,void * buf,size_t len)3089 nstat_generic_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
3090 {
3091 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3092 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
3093 assert(gshad->gshad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
3094
3095 if (gshad->gshad_getextensions_fn == NULL) {
3096 return 0;
3097 }
3098 return gshad->gshad_getextensions_fn(gshad->gshad_provider_context, extension_id, buf, len);
3099 }
3100
/*
 * Attach a client as a watcher of a generic provider (userland connection
 * or UDP subflow).  Installs the client's provider filter, bumps the
 * watcher count, and adds a source for every existing shadow that passes
 * the client's optional initial-property filter.  Each successful source
 * add takes a reference on the shadow.  Returns ENOTSUP for any other
 * provider id.
 */
static errno_t
nstat_generic_provider_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	errno_t result;
	nstat_provider_id_t provider_id = req->provider;
	nstat_provider *provider;

	switch (provider_id) {
	case NSTAT_PROVIDER_CONN_USERLAND:
		provider = &nstat_userland_conn_provider;
		break;
	case NSTAT_PROVIDER_UDP_SUBFLOW:
		provider = &nstat_udp_subflow_provider;
		break;
	default:
		return ENOTSUP;
	}

	lck_mtx_lock(&nstat_mtx);
	result = nstat_set_provider_filter(state, req);

	if (result == 0) {
		struct nstat_generic_shadow *gshad;
		nstat_provider_filter *filter = &state->ncs_provider_filters[provider_id];

		OSIncrementAtomic(&nstat_generic_provider_watchers[provider_id]);

		// Hook the new watcher up to every pre-existing shadow of this provider.
		TAILQ_FOREACH(gshad, &nstat_gshad_head, gshad_link) {
			assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

			if (gshad->gshad_provider == provider_id) {
				if (filter->npf_flags & NSTAT_FILTER_INITIAL_PROPERTIES) {
					u_int64_t npf_flags = filter->npf_flags & NSTAT_FILTER_IFNET_AND_CONN_FLAGS;
					if ((npf_flags != 0) && ((npf_flags & gshad->gshad_properties) == 0)) {
						// Skip this one
						// Note - no filtering by pid or UUID supported at this point, for simplicity
						continue;
					}
				}
				result = nstat_control_source_add(0, state, provider, gshad);
				if (result != 0) {
					printf("%s - nstat_control_source_add returned %d for "
					    "provider type: %d\n", __func__, result, provider_id);
					break;
				} else {
					// The new source holds its own reference on the shadow.
					OSIncrementAtomic(&gshad->gshad_refcnt);
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return result;
}
3157
3158 static void
nstat_userland_conn_remove_watcher(__unused nstat_control_state * state)3159 nstat_userland_conn_remove_watcher(
3160 __unused nstat_control_state *state)
3161 {
3162 OSDecrementAtomic(&nstat_generic_provider_watchers[NSTAT_PROVIDER_CONN_USERLAND]);
3163 }
3164
3165 static void
nstat_udp_subflow_remove_watcher(__unused nstat_control_state * state)3166 nstat_udp_subflow_remove_watcher(
3167 __unused nstat_control_state *state)
3168 {
3169 OSDecrementAtomic(&nstat_generic_provider_watchers[NSTAT_PROVIDER_UDP_SUBFLOW]);
3170 }
3171
3172 static void
nstat_init_userland_conn_provider(void)3173 nstat_init_userland_conn_provider(void)
3174 {
3175 bzero(&nstat_userland_conn_provider, sizeof(nstat_userland_conn_provider));
3176 nstat_userland_conn_provider.nstat_descriptor_length = sizeof(nstat_connection_descriptor);
3177 nstat_userland_conn_provider.nstat_provider_id = NSTAT_PROVIDER_CONN_USERLAND;
3178 nstat_userland_conn_provider.nstat_lookup = nstat_generic_provider_lookup;
3179 nstat_userland_conn_provider.nstat_gone = nstat_generic_provider_gone;
3180 nstat_userland_conn_provider.nstat_counts = nstat_generic_provider_counts;
3181 nstat_userland_conn_provider.nstat_release = nstat_generic_provider_release;
3182 nstat_userland_conn_provider.nstat_watcher_add = nstat_generic_provider_add_watcher;
3183 nstat_userland_conn_provider.nstat_watcher_remove = nstat_userland_conn_remove_watcher;
3184 nstat_userland_conn_provider.nstat_copy_descriptor = nstat_generic_provider_copy_descriptor;
3185 nstat_userland_conn_provider.nstat_reporting_allowed = nstat_generic_provider_reporting_allowed;
3186 nstat_userland_conn_provider.nstat_copy_extension = nstat_generic_extensions;
3187 nstat_userland_conn_provider.next = nstat_providers;
3188 nstat_providers = &nstat_userland_conn_provider;
3189 }
3190
3191 static void
nstat_init_udp_subflow_provider(void)3192 nstat_init_udp_subflow_provider(void)
3193 {
3194 bzero(&nstat_udp_subflow_provider, sizeof(nstat_udp_subflow_provider));
3195 nstat_udp_subflow_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
3196 nstat_udp_subflow_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_SUBFLOW;
3197 nstat_udp_subflow_provider.nstat_lookup = nstat_generic_provider_lookup;
3198 nstat_udp_subflow_provider.nstat_gone = nstat_generic_provider_gone;
3199 nstat_udp_subflow_provider.nstat_counts = nstat_generic_provider_counts;
3200 nstat_udp_subflow_provider.nstat_release = nstat_generic_provider_release;
3201 nstat_udp_subflow_provider.nstat_watcher_add = nstat_generic_provider_add_watcher;
3202 nstat_udp_subflow_provider.nstat_watcher_remove = nstat_udp_subflow_remove_watcher;
3203 nstat_udp_subflow_provider.nstat_copy_descriptor = nstat_generic_provider_copy_descriptor;
3204 nstat_udp_subflow_provider.nstat_reporting_allowed = nstat_generic_provider_reporting_allowed;
3205 nstat_udp_subflow_provider.nstat_copy_extension = nstat_generic_extensions;
3206 nstat_udp_subflow_provider.next = nstat_providers;
3207 nstat_providers = &nstat_udp_subflow_provider;
3208 }
3209
3210 // Things get started with a call from the provider to netstats to say that there’s a new source
/*
 * Open a new generic-provider source.  Allocates a shadow carrying the
 * provider's context/callbacks and the current process details, links it
 * on the global shadow list, and adds a source for every client already
 * watching this provider (subject to its initial-property filter).
 * Returns the shadow as an opaque nstat_context, or NULL if process
 * details could not be retained.
 */
__private_extern__ nstat_context
nstat_provider_stats_open(nstat_provider_context ctx,
    int provider_id,
    u_int64_t properties,
    nstat_provider_request_vals_fn req_fn,
    nstat_provider_request_extensions_fn req_extensions_fn)
{
	struct nstat_generic_shadow *gshad;
	struct nstat_procdetails *procdetails;
	nstat_provider *provider = nstat_find_provider_by_id(provider_id);

	// Z_NOFAIL: allocation cannot return NULL, so no check is needed.
	gshad = kalloc_type(struct nstat_generic_shadow, Z_WAITOK | Z_NOFAIL);

	procdetails = nstat_retain_curprocdetails();

	if (procdetails == NULL) {
		kfree_type(struct nstat_generic_shadow, gshad);
		return NULL;
	}

	gshad->gshad_getvals_fn = req_fn;
	gshad->gshad_getextensions_fn = req_extensions_fn;
	gshad->gshad_provider_context = ctx;
	gshad->gshad_properties = properties;
	gshad->gshad_procdetails = procdetails;
	gshad->gshad_provider = provider_id;
	gshad->gshad_start_timestamp = mach_continuous_time();
	gshad->gshad_refcnt = 1;   // the provider's own reference
	gshad->gshad_magic = NSTAT_GENERIC_SHADOW_MAGIC;

	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;

	// Even if there are no watchers, we save the shadow structure
	TAILQ_INSERT_HEAD(&nstat_gshad_head, gshad, gshad_link);

	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << provider_id)) != 0) {
			// Does this client want an initial filtering to be made?
			u_int64_t npf_flags = state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
			if (npf_flags & NSTAT_FILTER_INITIAL_PROPERTIES) {
				npf_flags &= NSTAT_FILTER_IFNET_AND_CONN_FLAGS;
				if ((npf_flags != 0) && ((npf_flags & properties) == 0)) {
					// Skip this one
					// Note - no filtering by pid or UUID supported at this point, for simplicity
					continue;
				}
			}
			// this client is watching, so link to it.
			int result = nstat_control_source_add(0, state, provider, gshad);
			if (result != 0) {
				// There should be some kind of statistics for failures like this.
				// <rdar://problem/31377195> The kernel ntstat component should keep some
				// internal counters reflecting operational state for eventual AWD reporting
			} else {
				// Each attached source holds its own reference on the shadow.
				OSIncrementAtomic(&gshad->gshad_refcnt);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return (nstat_context) gshad;
}
3274
3275
3276 // When the source is closed, netstats will make one last call on the request functions to retrieve final values
/*
 * Close a generic-provider source.  Unlinks the shadow from the global
 * list, sends goodbyes and detaches the matching source from every
 * watching client, then drops the provider's reference (the per-source
 * references are dropped via nstat_generic_provider_release as each
 * source is cleaned up).  Safe to call with NULL.
 */
// When the source is closed, netstats will make one last call on the request functions to retrieve final values
__private_extern__ void
nstat_provider_stats_close(nstat_context nstat_ctx)
{
	tailq_head_nstat_src dead_list;
	nstat_src *src;
	struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)nstat_ctx;

	if (gshad == NULL) {
		printf("%s - called with null reference", __func__);
		return;
	}

	assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

	// On release builds the assert is compiled out; log instead of panicking.
	if (gshad->gshad_magic != NSTAT_GENERIC_SHADOW_MAGIC) {
		printf("%s - called with incorrect shadow magic 0x%x", __func__, gshad->gshad_magic);
	}

	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);

	TAILQ_REMOVE(&nstat_gshad_head, gshad, gshad_link);

	// refcnt minus the provider's own reference == number of attached sources.
	int32_t num_srcs = gshad->gshad_refcnt - 1;
	if ((nstat_generic_provider_watchers[gshad->gshad_provider] != 0) && (num_srcs > 0)) {
		nstat_control_state *state;
		errno_t result;

		for (state = nstat_controls; state; state = state->ncs_next) {
			// Only scan further if this client is watching
			if ((state->ncs_watching & (1 << gshad->gshad_provider)) != 0) {
				lck_mtx_lock(&state->ncs_mtx);
				TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
				{
					if ((gshad == (struct nstat_generic_shadow *)src->cookie) &&
					    (gshad->gshad_provider == src->provider->nstat_provider_id)) {
						break;
					}
				}
				if (src) {
					result = nstat_control_send_goodbye(state, src);
					// There is currently no recovery possible from failure to send,
					// so no need to check the return code.
					// rdar://28312774 (Scalability and resilience issues in ntstat.c)

					TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
					TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
					--num_srcs;
				}
				lck_mtx_unlock(&state->ncs_mtx);

				// Performance optimization, don't scan full lists if no chance of presence
				if (num_srcs == 0) {
					break;
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	// Clean up detached sources outside of all locks; each cleanup drops
	// that source's reference on the shadow.
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
	nstat_release_gshad(gshad);
}
3344
3345 // Events that cause a significant change may be reported via a flags word
/*
 * Deliver an event on a generic-provider source to interested clients.
 * For each watching client whose filter requests this event (or requests
 * boring-flag suppression), the source's reported flag is reset; the event
 * message itself is only sent to clients whose npf_events mask includes
 * the event.  Safe to call with NULL.
 */
// Events that cause a significant change may be reported via a flags word
void
nstat_provider_stats_event(__unused nstat_context nstat_ctx, __unused uint64_t event)
{
	nstat_src *src;
	struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)nstat_ctx;

	if (gshad == NULL) {
		printf("%s - called with null reference", __func__);
		return;
	}

	assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

	// On release builds the assert is compiled out; log instead of panicking.
	if (gshad->gshad_magic != NSTAT_GENERIC_SHADOW_MAGIC) {
		printf("%s - called with incorrect shadow magic 0x%x", __func__, gshad->gshad_magic);
	}

	lck_mtx_lock(&nstat_mtx);

	if (nstat_generic_provider_watchers[gshad->gshad_provider] != 0) {
		nstat_control_state *state;
		errno_t result;
		nstat_provider_id_t provider_id = gshad->gshad_provider;

		for (state = nstat_controls; state; state = state->ncs_next) {
			// Only scan further if this client is watching and has interest in the event
			// or the client has requested "boring" unchanged status to be ignored
			if (((state->ncs_watching & (1 << provider_id)) != 0) &&
			    (((state->ncs_provider_filters[provider_id].npf_events & event) != 0) ||
			    ((state->ncs_provider_filters[provider_id].npf_flags & NSTAT_FILTER_SUPPRESS_BORING_FLAGS) != 0))) {
				lck_mtx_lock(&state->ncs_mtx);
				TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
				{
					if (gshad == (struct nstat_generic_shadow *)src->cookie) {
						break;
					}
				}

				if (src) {
					// Force the source to be re-reported on the next update cycle.
					src->ns_reported = false;
					if ((state->ncs_provider_filters[provider_id].npf_events & event) != 0) {
						result = nstat_control_send_event(state, src, event);
						// There is currently no recovery possible from failure to send,
						// so no need to check the return code.
						// rdar://28312774 (Scalability and resilience issues in ntstat.c)
					}
				}
				lck_mtx_unlock(&state->ncs_mtx);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}
3399
3400 #endif /* SKYWALK */
3401
3402
3403 #pragma mark -- ifnet Provider --
3404
3405 static nstat_provider nstat_ifnet_provider;
3406
3407 /*
3408 * We store a pointer to the ifnet and the original threshold
3409 * requested by the client.
3410 */
struct nstat_ifnet_cookie {
	struct ifnet *ifp;   // referenced ifnet this source reports on (ifnet_reference held)
	uint64_t threshold;  // byte threshold this client originally requested
};
3415
/*
 * Look up an ifnet source by interface index.  Validates the request
 * (minimum 1MB threshold, optional privilege check), takes a reference on
 * the matching ifnet, and lowers the interface's if_data_threshold if this
 * client asked for a smaller one.  When the threshold shrinks, all other
 * clients with ifnet sources are notified via a description message.
 * Returns EINVAL if no interface matches.
 */
static errno_t
nstat_ifnet_lookup(
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
	struct ifnet *ifp;
	boolean_t changed = FALSE;
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_ifnet_cookie *cookie;

	// Reject undersized requests and thresholds below 1MB.
	if (length < sizeof(*param) || param->threshold < 1024 * 1024) {
		return EINVAL;
	}
	if (nstat_privcheck != 0) {
		errno_t result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0) {
			return result;
		}
	}
	cookie = kalloc_type(struct nstat_ifnet_cookie,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
	{
		// Take an io ref to keep the ifnet from detaching during the check.
		if (!ifnet_is_attached(ifp, 1)) {
			continue;
		}
		ifnet_lock_exclusive(ifp);
		if (ifp->if_index == param->ifindex) {
			cookie->ifp = ifp;
			cookie->threshold = param->threshold;
			*out_cookie = cookie;
			// Lower the interface threshold if this client wants finer granularity.
			if (!ifp->if_data_threshold ||
			    ifp->if_data_threshold > param->threshold) {
				changed = TRUE;
				ifp->if_data_threshold = param->threshold;
			}
			ifnet_lock_done(ifp);
			// Swap the transient io ref for a long-lived ifnet reference.
			ifnet_reference(ifp);
			ifnet_decr_iorefcnt(ifp);
			break;
		}
		ifnet_lock_done(ifp);
		ifnet_decr_iorefcnt(ifp);
	}
	ifnet_head_done();

	/*
	 * When we change the threshold to something smaller, we notify
	 * all of our clients with a description message.
	 * We won't send a message to the client we are currently serving
	 * because it has no `ifnet source' yet.
	 */
	if (changed) {
		lck_mtx_lock(&nstat_mtx);
		for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
			TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
			{
				if (src->provider != &nstat_ifnet_provider) {
					continue;
				}
				nstat_control_send_description(state, src, 0, 0);
			}
			lck_mtx_unlock(&state->ncs_mtx);
		}
		lck_mtx_unlock(&nstat_mtx);
	}
	// No matching interface: the zeroed cookie was never handed out; free it.
	if (cookie->ifp == NULL) {
		kfree_type(struct nstat_ifnet_cookie, cookie);
	}

	return ifp ? 0 : EINVAL;
}
3495
3496 static int
nstat_ifnet_gone(nstat_provider_cookie_t cookie)3497 nstat_ifnet_gone(
3498 nstat_provider_cookie_t cookie)
3499 {
3500 struct ifnet *ifp;
3501 struct nstat_ifnet_cookie *ifcookie =
3502 (struct nstat_ifnet_cookie *)cookie;
3503
3504 ifnet_head_lock_shared();
3505 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
3506 {
3507 if (ifp == ifcookie->ifp) {
3508 break;
3509 }
3510 }
3511 ifnet_head_done();
3512
3513 return ifp ? 0 : 1;
3514 }
3515
3516 static errno_t
nstat_ifnet_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)3517 nstat_ifnet_counts(
3518 nstat_provider_cookie_t cookie,
3519 struct nstat_counts *out_counts,
3520 int *out_gone)
3521 {
3522 struct nstat_ifnet_cookie *ifcookie =
3523 (struct nstat_ifnet_cookie *)cookie;
3524 struct ifnet *ifp = ifcookie->ifp;
3525
3526 if (out_gone) {
3527 *out_gone = 0;
3528 }
3529
3530 // if the ifnet is gone, we should stop using it
3531 if (nstat_ifnet_gone(cookie)) {
3532 if (out_gone) {
3533 *out_gone = 1;
3534 }
3535 return EINVAL;
3536 }
3537
3538 bzero(out_counts, sizeof(*out_counts));
3539 out_counts->nstat_rxpackets = ifp->if_ipackets;
3540 out_counts->nstat_rxbytes = ifp->if_ibytes;
3541 out_counts->nstat_txpackets = ifp->if_opackets;
3542 out_counts->nstat_txbytes = ifp->if_obytes;
3543 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
3544 return 0;
3545 }
3546
/*
 * Release an ifnet source.  Recomputes the interface's if_data_threshold
 * from the smallest threshold requested by any remaining client (disabling
 * it when none remain), drops the ifnet reference taken at lookup, and
 * frees the cookie.
 */
static void
nstat_ifnet_release(
	nstat_provider_cookie_t cookie,
	__unused int locked)
{
	struct nstat_ifnet_cookie *ifcookie;
	struct ifnet *ifp;
	nstat_control_state *state;
	nstat_src *src;
	uint64_t minthreshold = UINT64_MAX;

	/*
	 * Find all the clients that requested a threshold
	 * for this ifnet and re-calculate if_data_threshold.
	 */
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			/* Skip the provider we are about to detach. */
			if (src->provider != &nstat_ifnet_provider ||
			    src->cookie == cookie) {
				continue;
			}
			// NOTE(review): this considers thresholds from sources on ANY ifnet,
			// not just this one — presumably a deliberate simplification; confirm.
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			if (ifcookie->threshold < minthreshold) {
				minthreshold = ifcookie->threshold;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
	/*
	 * Reset if_data_threshold or disable it.
	 */
	ifcookie = (struct nstat_ifnet_cookie *)cookie;
	ifp = ifcookie->ifp;
	// Only touch the ifnet if it is still attached (takes a transient io ref).
	if (ifnet_is_attached(ifp, 1)) {
		ifnet_lock_exclusive(ifp);
		if (minthreshold == UINT64_MAX) {
			ifp->if_data_threshold = 0;
		} else {
			ifp->if_data_threshold = minthreshold;
		}
		ifnet_lock_done(ifp);
		ifnet_decr_iorefcnt(ifp);
	}
	// Drop the long-lived reference taken in nstat_ifnet_lookup.
	ifnet_release(ifp);
	kfree_type(struct nstat_ifnet_cookie, ifcookie);
}
3598
/*
 * Copy the interface's cached link-status report (cellular or wifi)
 * into the nstat descriptor, translating each driver "valid" bit into
 * the corresponding NSTAT_IFNET_DESC_* flag and copying the value.
 *
 * link_status_type is left as NONE when the interface has no report or
 * the report version is not one we understand.  Called with the ifnet
 * lock held (see nstat_ifnet_copy_descriptor); takes
 * if_link_status_lock shared around the copy.
 */
static void
nstat_ifnet_copy_link_status(
	struct ifnet *ifp,
	struct nstat_ifnet_descriptor *desc)
{
	struct if_link_status *ifsr = ifp->if_link_status;
	nstat_ifnet_desc_link_status *link_status = &desc->link_status;

	link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
	if (ifsr == NULL) {
		return;
	}

	lck_rw_lock_shared(&ifp->if_link_status_lock);

	if (ifp->if_type == IFT_CELLULAR) {
		/* Cellular: only version-1 status reports are understood. */
		nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
		struct if_cellular_status_v1 *if_cell_sr =
		    &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

		if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1) {
			goto done;
		}

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;

		/* Field-by-field copy of every value the driver marked valid. */
		if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
			cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
			cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
			cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
			cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
			cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
		}
		/* Retransmit level is an enum: map one-for-one; an unknown
		 * value clears the valid bit again rather than reporting junk. */
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
			if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE) {
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
			} else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW) {
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
			} else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM) {
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
			} else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH) {
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
			} else {
				cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
			}
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
			cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
			cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
			cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
			cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
			cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
			cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
			cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
			cell_status->mss_recommended = if_cell_sr->mss_recommended;
		}
	} else if (IFNET_IS_WIFI(ifp)) {
		/* Wifi: same translation scheme, version-1 reports only. */
		nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
		struct if_wifi_status_v1 *if_wifi_sr =
		    &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;

		if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1) {
			goto done;
		}

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;

		if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
			wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
			wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
			wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
			wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
			wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
		}
		/* Enumerated retransmit level; unknown values clear the bit. */
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
			if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE) {
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
			} else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW) {
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
			} else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM) {
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
			} else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH) {
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
			} else {
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
			}
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
			wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
			wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
			wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
			wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
			wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
			wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
			wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
		}
		/* Config frequency is a two-value enum; anything else clears the bit. */
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
			if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ) {
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
			} else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ) {
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
			} else {
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
			}
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
			wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
			wifi_status->scan_count = if_wifi_sr->scan_count;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
			wifi_status->scan_duration = if_wifi_sr->scan_duration;
		}
	}

done:
	lck_rw_done(&ifp->if_link_status_lock);
}
3807
/* Uptime (seconds) at which ECN stats were last reported; used to rate-limit reports. */
static u_int64_t nstat_ifnet_last_report_time = 0;
/* Minimum seconds between ECN stat reports; owned by the TCP subsystem. */
extern int tcp_report_stats_interval;
3810
3811 static void
nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat * ifst)3812 nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
3813 {
3814 /* Retransmit percentage */
3815 if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
3816 /* shift by 10 for precision */
3817 ifst->rxmit_percent =
3818 ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
3819 } else {
3820 ifst->rxmit_percent = 0;
3821 }
3822
3823 /* Out-of-order percentage */
3824 if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
3825 /* shift by 10 for precision */
3826 ifst->oo_percent =
3827 ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
3828 } else {
3829 ifst->oo_percent = 0;
3830 }
3831
3832 /* Reorder percentage */
3833 if (ifst->total_reorderpkts > 0 &&
3834 (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
3835 /* shift by 10 for precision */
3836 ifst->reorder_percent =
3837 ((ifst->total_reorderpkts << 10) * 100) /
3838 (ifst->total_txpkts + ifst->total_rxpkts);
3839 } else {
3840 ifst->reorder_percent = 0;
3841 }
3842 }
3843
3844 static void
nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat * if_st)3845 nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
3846 {
3847 u_int64_t ecn_on_conn, ecn_off_conn;
3848
3849 if (if_st == NULL) {
3850 return;
3851 }
3852 ecn_on_conn = if_st->ecn_client_success +
3853 if_st->ecn_server_success;
3854 ecn_off_conn = if_st->ecn_off_conn +
3855 (if_st->ecn_client_setup - if_st->ecn_client_success) +
3856 (if_st->ecn_server_setup - if_st->ecn_server_success);
3857
3858 /*
3859 * report sack episodes, rst_drop and rxmit_drop
3860 * as a ratio per connection, shift by 10 for precision
3861 */
3862 if (ecn_on_conn > 0) {
3863 if_st->ecn_on.sack_episodes =
3864 (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
3865 if_st->ecn_on.rst_drop =
3866 (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
3867 if_st->ecn_on.rxmit_drop =
3868 (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
3869 } else {
3870 /* set to zero, just in case */
3871 if_st->ecn_on.sack_episodes = 0;
3872 if_st->ecn_on.rst_drop = 0;
3873 if_st->ecn_on.rxmit_drop = 0;
3874 }
3875
3876 if (ecn_off_conn > 0) {
3877 if_st->ecn_off.sack_episodes =
3878 (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
3879 if_st->ecn_off.rst_drop =
3880 (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
3881 if_st->ecn_off.rxmit_drop =
3882 (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
3883 } else {
3884 if_st->ecn_off.sack_episodes = 0;
3885 if_st->ecn_off.rst_drop = 0;
3886 if_st->ecn_off.rxmit_drop = 0;
3887 }
3888 if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
3889 }
3890
/*
 * Periodically push per-interface TCP ECN statistics (IPv4 then IPv6)
 * to sysinfo listeners and reset the interface counters afterwards.
 * Rate-limited to one pass per tcp_report_stats_interval seconds.
 * Walks the interface list under the shared ifnet head lock.
 */
static void
nstat_ifnet_report_ecn_stats(void)
{
	u_int64_t uptime, last_report_time;
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_ifnet_ecn_stats *st;
	struct ifnet *ifp;

	uptime = net_uptime();

	/* Nothing to do until the reporting interval has elapsed. */
	if ((int)(uptime - nstat_ifnet_last_report_time) <
	    tcp_report_stats_interval) {
		return;
	}

	last_report_time = nstat_ifnet_last_report_time;
	nstat_ifnet_last_report_time = uptime;
	data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
	st = &data.u.ifnet_ecn_stats;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		/* Both protocol stat blocks must exist to report either. */
		if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL) {
			continue;
		}

		if (!IF_FULLY_ATTACHED(ifp)) {
			continue;
		}

		/* Limit reporting to Wifi, Ethernet and cellular. */
		if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
			continue;
		}

		bzero(st, sizeof(*st));
		if (IFNET_IS_CELLULAR(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
		} else if (IFNET_IS_WIFI(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
		} else {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
		}
		data.unsent_data_cnt = ifp->if_unsent_data_cnt;
		/* skip if there was no update since last report */
		if (ifp->if_ipv4_stat->timestamp <= 0 ||
		    ifp->if_ipv4_stat->timestamp < last_report_time) {
			goto v6;
		}
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
		bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);
		/* Counters are cumulative since last report; reset after send. */
		bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));

v6:
		/* skip if there was no update since last report */
		if (ifp->if_ipv6_stat->timestamp <= 0 ||
		    ifp->if_ipv6_stat->timestamp < last_report_time) {
			continue;
		}
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;

		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
		bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);

		/* Zero the stats in ifp */
		bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
	}
	ifnet_head_done();
}
3971
/* Some thresholds to determine Low Internet mode */
3973 #define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD 1000000 /* 1 Mbps */
3974 #define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD 500000 /* 500 Kbps */
3975 #define NSTAT_LIM_UL_MIN_RTT_THRESHOLD 1000 /* 1 second */
3976 #define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */
3977 #define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD (2 << 10) /* 2 percent packet loss rate */
3978
3979 static boolean_t
nstat_lim_activity_check(struct if_lim_perf_stat * st)3980 nstat_lim_activity_check(struct if_lim_perf_stat *st)
3981 {
3982 /* check that the current activity is enough to report stats */
3983 if (st->lim_total_txpkts < nstat_lim_min_tx_pkts ||
3984 st->lim_total_rxpkts < nstat_lim_min_rx_pkts ||
3985 st->lim_conn_attempts == 0) {
3986 return FALSE;
3987 }
3988
3989 /*
3990 * Compute percentages if there was enough activity. Use
3991 * shift-left by 10 to preserve precision.
3992 */
3993 st->lim_packet_loss_percent = ((st->lim_total_retxpkts << 10) /
3994 st->lim_total_txpkts) * 100;
3995
3996 st->lim_packet_ooo_percent = ((st->lim_total_oopkts << 10) /
3997 st->lim_total_rxpkts) * 100;
3998
3999 st->lim_conn_timeout_percent = ((st->lim_conn_timeouts << 10) /
4000 st->lim_conn_attempts) * 100;
4001
4002 /*
4003 * Is Low Internet detected? First order metrics are bandwidth
4004 * and RTT. If these metrics are below the minimum thresholds
4005 * defined then the network attachment can be classified as
4006 * having Low Internet capacity.
4007 *
4008 * High connection timeout rate also indicates Low Internet
4009 * capacity.
4010 */
4011 if (st->lim_dl_max_bandwidth > 0 &&
4012 st->lim_dl_max_bandwidth <= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD) {
4013 st->lim_dl_detected = 1;
4014 }
4015
4016 if ((st->lim_ul_max_bandwidth > 0 &&
4017 st->lim_ul_max_bandwidth <= NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD) ||
4018 st->lim_rtt_min >= NSTAT_LIM_UL_MIN_RTT_THRESHOLD) {
4019 st->lim_ul_detected = 1;
4020 }
4021
4022 if (st->lim_conn_attempts > 20 &&
4023 st->lim_conn_timeout_percent >=
4024 NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD) {
4025 st->lim_ul_detected = 1;
4026 }
4027 /*
4028 * Second order metrics: If there was high packet loss even after
4029 * using delay based algorithms then we classify it as Low Internet
4030 * again
4031 */
4032 if (st->lim_bk_txpkts >= nstat_lim_min_tx_pkts &&
4033 st->lim_packet_loss_percent >=
4034 NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD) {
4035 st->lim_ul_detected = 1;
4036 }
4037 return TRUE;
4038 }
4039
/* Uptime (seconds) of the last LIM stats report; gates report frequency. */
static u_int64_t nstat_lim_last_report_time = 0;
/*
 * Periodically push Low Internet mode (LIM) statistics for eligible
 * interfaces to sysinfo listeners and reset the per-interface counters.
 * Rate-limited to one pass per nstat_lim_interval seconds; only
 * interfaces with enough activity (nstat_lim_activity_check) and a
 * network signature are reported.
 */
static void
nstat_ifnet_report_lim_stats(void)
{
	u_int64_t uptime;
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_lim_stats *st;
	struct ifnet *ifp;
	int err;

	uptime = net_uptime();

	/* Nothing to do until the reporting interval has elapsed. */
	if ((u_int32_t)(uptime - nstat_lim_last_report_time) <
	    nstat_lim_interval) {
		return;
	}

	nstat_lim_last_report_time = uptime;
	data.flags = NSTAT_SYSINFO_LIM_STATS;
	st = &data.u.lim_stats;
	data.unsent_data_cnt = 0;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (!IF_FULLY_ATTACHED(ifp)) {
			continue;
		}

		/* Limit reporting to Wifi, Ethernet and cellular */
		if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
			continue;
		}

		/* Also computes the percentages reported below. */
		if (!nstat_lim_activity_check(&ifp->if_lim_stat)) {
			continue;
		}

		bzero(st, sizeof(*st));
		st->ifnet_siglen = sizeof(st->ifnet_signature);
		/* Try the IPv4 network signature first, fall back to IPv6;
		 * skip interfaces with neither. */
		err = ifnet_get_netsignature(ifp, AF_INET,
		    (u_int8_t *)&st->ifnet_siglen, NULL,
		    st->ifnet_signature);
		if (err != 0) {
			err = ifnet_get_netsignature(ifp, AF_INET6,
			    (u_int8_t *)&st->ifnet_siglen, NULL,
			    st->ifnet_signature);
			if (err != 0) {
				continue;
			}
		}
		ifnet_lock_shared(ifp);
		if (IFNET_IS_CELLULAR(ifp)) {
			st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
		} else if (IFNET_IS_WIFI(ifp)) {
			st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
		} else {
			st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET;
		}
		bcopy(&ifp->if_lim_stat, &st->lim_stat,
		    sizeof(st->lim_stat));

		/* Zero the stats in ifp */
		bzero(&ifp->if_lim_stat, sizeof(ifp->if_lim_stat));
		ifnet_lock_done(ifp);
		nstat_sysinfo_send_data(&data);
	}
	ifnet_head_done();
}
4108
/*
 * Provider callback: fill a caller-supplied nstat_ifnet_descriptor for
 * the interface referenced by the source cookie (name, index, data
 * threshold, type, description and link status).
 * Returns EINVAL when the buffer is too small or the ifnet is gone.
 */
static errno_t
nstat_ifnet_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	size_t len)
{
	nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;
	struct ifnet *ifp = ifcookie->ifp;

	if (len < sizeof(nstat_ifnet_descriptor)) {
		return EINVAL;
	}

	if (nstat_ifnet_gone(cookie)) {
		return EINVAL;
	}

	bzero(desc, sizeof(*desc));
	ifnet_lock_shared(ifp);
	strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
	desc->ifindex = ifp->if_index;
	desc->threshold = ifp->if_data_threshold;
	desc->type = ifp->if_type;
	/* Copy the interface description only when it fits the field. */
	if (ifp->if_desc.ifd_len < sizeof(desc->description)) {
		memcpy(desc->description, ifp->if_desc.ifd_desc,
		    sizeof(desc->description));
	}
	nstat_ifnet_copy_link_status(ifp, desc);
	ifnet_lock_done(ifp);
	return 0;
}
4142
4143 static void
nstat_init_ifnet_provider(void)4144 nstat_init_ifnet_provider(void)
4145 {
4146 bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
4147 nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
4148 nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
4149 nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
4150 nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
4151 nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
4152 nstat_ifnet_provider.nstat_watcher_add = NULL;
4153 nstat_ifnet_provider.nstat_watcher_remove = NULL;
4154 nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
4155 nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
4156 nstat_ifnet_provider.next = nstat_providers;
4157 nstat_providers = &nstat_ifnet_provider;
4158 }
4159
4160 __private_extern__ void
nstat_ifnet_threshold_reached(unsigned int ifindex)4161 nstat_ifnet_threshold_reached(unsigned int ifindex)
4162 {
4163 nstat_control_state *state;
4164 nstat_src *src;
4165 struct ifnet *ifp;
4166 struct nstat_ifnet_cookie *ifcookie;
4167
4168 lck_mtx_lock(&nstat_mtx);
4169 for (state = nstat_controls; state; state = state->ncs_next) {
4170 lck_mtx_lock(&state->ncs_mtx);
4171 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4172 {
4173 if (src->provider != &nstat_ifnet_provider) {
4174 continue;
4175 }
4176 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
4177 ifp = ifcookie->ifp;
4178 if (ifp->if_index != ifindex) {
4179 continue;
4180 }
4181 nstat_control_send_counts(state, src, 0, 0, NULL);
4182 }
4183 lck_mtx_unlock(&state->ncs_mtx);
4184 }
4185 lck_mtx_unlock(&nstat_mtx);
4186 }
4187
4188 #pragma mark -- Sysinfo --
4189 static void
nstat_set_keyval_scalar(nstat_sysinfo_keyval * kv,int key,u_int32_t val)4190 nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
4191 {
4192 kv->nstat_sysinfo_key = key;
4193 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
4194 kv->u.nstat_sysinfo_scalar = val;
4195 kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
4196 }
4197
4198 static void
nstat_set_keyval_u64_scalar(nstat_sysinfo_keyval * kv,int key,u_int64_t val)4199 nstat_set_keyval_u64_scalar(nstat_sysinfo_keyval *kv, int key, u_int64_t val)
4200 {
4201 kv->nstat_sysinfo_key = key;
4202 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
4203 kv->u.nstat_sysinfo_scalar = val;
4204 kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
4205 }
4206
4207 static void
nstat_set_keyval_string(nstat_sysinfo_keyval * kv,int key,u_int8_t * buf,u_int32_t len)4208 nstat_set_keyval_string(nstat_sysinfo_keyval *kv, int key, u_int8_t *buf,
4209 u_int32_t len)
4210 {
4211 kv->nstat_sysinfo_key = key;
4212 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_STRING;
4213 kv->nstat_sysinfo_valsize = min(len,
4214 NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE);
4215 bcopy(buf, kv->u.nstat_sysinfo_string, kv->nstat_sysinfo_valsize);
4216 }
4217
4218 static void
nstat_sysinfo_send_data_internal(nstat_control_state * control,nstat_sysinfo_data * data)4219 nstat_sysinfo_send_data_internal(
4220 nstat_control_state *control,
4221 nstat_sysinfo_data *data)
4222 {
4223 nstat_msg_sysinfo_counts *syscnt = NULL;
4224 size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
4225 nstat_sysinfo_keyval *kv;
4226 errno_t result = 0;
4227 size_t i = 0;
4228
4229 allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
4230 countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
4231 finalsize = allocsize;
4232
4233 /* get number of key-vals for each kind of stat */
4234 switch (data->flags) {
4235 case NSTAT_SYSINFO_MBUF_STATS:
4236 nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
4237 sizeof(u_int32_t);
4238 break;
4239 case NSTAT_SYSINFO_TCP_STATS:
4240 nkeyvals = NSTAT_SYSINFO_TCP_STATS_COUNT;
4241 break;
4242 case NSTAT_SYSINFO_IFNET_ECN_STATS:
4243 nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
4244 sizeof(u_int64_t));
4245
4246 /* Two more keys for ifnet type and proto */
4247 nkeyvals += 2;
4248
4249 /* One key for unsent data. */
4250 nkeyvals++;
4251 break;
4252 case NSTAT_SYSINFO_LIM_STATS:
4253 nkeyvals = NSTAT_LIM_STAT_KEYVAL_COUNT;
4254 break;
4255 case NSTAT_SYSINFO_NET_API_STATS:
4256 nkeyvals = NSTAT_NET_API_STAT_KEYVAL_COUNT;
4257 break;
4258 default:
4259 return;
4260 }
4261 countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
4262 allocsize += countsize;
4263
4264 syscnt = (nstat_msg_sysinfo_counts *) kalloc_data(allocsize,
4265 Z_WAITOK | Z_ZERO);
4266 if (syscnt == NULL) {
4267 return;
4268 }
4269
4270 kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
4271 switch (data->flags) {
4272 case NSTAT_SYSINFO_MBUF_STATS:
4273 {
4274 nstat_set_keyval_scalar(&kv[i++],
4275 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
4276 data->u.mb_stats.total_256b);
4277 nstat_set_keyval_scalar(&kv[i++],
4278 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
4279 data->u.mb_stats.total_2kb);
4280 nstat_set_keyval_scalar(&kv[i++],
4281 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
4282 data->u.mb_stats.total_4kb);
4283 nstat_set_keyval_scalar(&kv[i++],
4284 NSTAT_SYSINFO_MBUF_16KB_TOTAL,
4285 data->u.mb_stats.total_16kb);
4286 nstat_set_keyval_scalar(&kv[i++],
4287 NSTAT_SYSINFO_KEY_SOCK_MBCNT,
4288 data->u.mb_stats.sbmb_total);
4289 nstat_set_keyval_scalar(&kv[i++],
4290 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
4291 data->u.mb_stats.sb_atmbuflimit);
4292 nstat_set_keyval_scalar(&kv[i++],
4293 NSTAT_SYSINFO_MBUF_DRAIN_CNT,
4294 data->u.mb_stats.draincnt);
4295 nstat_set_keyval_scalar(&kv[i++],
4296 NSTAT_SYSINFO_MBUF_MEM_RELEASED,
4297 data->u.mb_stats.memreleased);
4298 nstat_set_keyval_scalar(&kv[i++],
4299 NSTAT_SYSINFO_KEY_SOCK_MBFLOOR,
4300 data->u.mb_stats.sbmb_floor);
4301 VERIFY(i == nkeyvals);
4302 break;
4303 }
4304 case NSTAT_SYSINFO_TCP_STATS:
4305 {
4306 nstat_set_keyval_scalar(&kv[i++],
4307 NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
4308 data->u.tcp_stats.ipv4_avgrtt);
4309 nstat_set_keyval_scalar(&kv[i++],
4310 NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
4311 data->u.tcp_stats.ipv6_avgrtt);
4312 nstat_set_keyval_scalar(&kv[i++],
4313 NSTAT_SYSINFO_KEY_SEND_PLR,
4314 data->u.tcp_stats.send_plr);
4315 nstat_set_keyval_scalar(&kv[i++],
4316 NSTAT_SYSINFO_KEY_RECV_PLR,
4317 data->u.tcp_stats.recv_plr);
4318 nstat_set_keyval_scalar(&kv[i++],
4319 NSTAT_SYSINFO_KEY_SEND_TLRTO,
4320 data->u.tcp_stats.send_tlrto_rate);
4321 nstat_set_keyval_scalar(&kv[i++],
4322 NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
4323 data->u.tcp_stats.send_reorder_rate);
4324 nstat_set_keyval_scalar(&kv[i++],
4325 NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
4326 data->u.tcp_stats.connection_attempts);
4327 nstat_set_keyval_scalar(&kv[i++],
4328 NSTAT_SYSINFO_CONNECTION_ACCEPTS,
4329 data->u.tcp_stats.connection_accepts);
4330 nstat_set_keyval_scalar(&kv[i++],
4331 NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
4332 data->u.tcp_stats.ecn_client_enabled);
4333 nstat_set_keyval_scalar(&kv[i++],
4334 NSTAT_SYSINFO_ECN_SERVER_ENABLED,
4335 data->u.tcp_stats.ecn_server_enabled);
4336 nstat_set_keyval_scalar(&kv[i++],
4337 NSTAT_SYSINFO_ECN_CLIENT_SETUP,
4338 data->u.tcp_stats.ecn_client_setup);
4339 nstat_set_keyval_scalar(&kv[i++],
4340 NSTAT_SYSINFO_ECN_SERVER_SETUP,
4341 data->u.tcp_stats.ecn_server_setup);
4342 nstat_set_keyval_scalar(&kv[i++],
4343 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
4344 data->u.tcp_stats.ecn_client_success);
4345 nstat_set_keyval_scalar(&kv[i++],
4346 NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
4347 data->u.tcp_stats.ecn_server_success);
4348 nstat_set_keyval_scalar(&kv[i++],
4349 NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
4350 data->u.tcp_stats.ecn_not_supported);
4351 nstat_set_keyval_scalar(&kv[i++],
4352 NSTAT_SYSINFO_ECN_LOST_SYN,
4353 data->u.tcp_stats.ecn_lost_syn);
4354 nstat_set_keyval_scalar(&kv[i++],
4355 NSTAT_SYSINFO_ECN_LOST_SYNACK,
4356 data->u.tcp_stats.ecn_lost_synack);
4357 nstat_set_keyval_scalar(&kv[i++],
4358 NSTAT_SYSINFO_ECN_RECV_CE,
4359 data->u.tcp_stats.ecn_recv_ce);
4360 nstat_set_keyval_scalar(&kv[i++],
4361 NSTAT_SYSINFO_ECN_RECV_ECE,
4362 data->u.tcp_stats.ecn_recv_ece);
4363 nstat_set_keyval_scalar(&kv[i++],
4364 NSTAT_SYSINFO_ECN_SENT_ECE,
4365 data->u.tcp_stats.ecn_sent_ece);
4366 nstat_set_keyval_scalar(&kv[i++],
4367 NSTAT_SYSINFO_ECN_CONN_RECV_CE,
4368 data->u.tcp_stats.ecn_conn_recv_ce);
4369 nstat_set_keyval_scalar(&kv[i++],
4370 NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
4371 data->u.tcp_stats.ecn_conn_recv_ece);
4372 nstat_set_keyval_scalar(&kv[i++],
4373 NSTAT_SYSINFO_ECN_CONN_PLNOCE,
4374 data->u.tcp_stats.ecn_conn_plnoce);
4375 nstat_set_keyval_scalar(&kv[i++],
4376 NSTAT_SYSINFO_ECN_CONN_PL_CE,
4377 data->u.tcp_stats.ecn_conn_pl_ce);
4378 nstat_set_keyval_scalar(&kv[i++],
4379 NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
4380 data->u.tcp_stats.ecn_conn_nopl_ce);
4381 nstat_set_keyval_scalar(&kv[i++],
4382 NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
4383 data->u.tcp_stats.ecn_fallback_synloss);
4384 nstat_set_keyval_scalar(&kv[i++],
4385 NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
4386 data->u.tcp_stats.ecn_fallback_reorder);
4387 nstat_set_keyval_scalar(&kv[i++],
4388 NSTAT_SYSINFO_ECN_FALLBACK_CE,
4389 data->u.tcp_stats.ecn_fallback_ce);
4390 nstat_set_keyval_scalar(&kv[i++],
4391 NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
4392 data->u.tcp_stats.tfo_syn_data_rcv);
4393 nstat_set_keyval_scalar(&kv[i++],
4394 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
4395 data->u.tcp_stats.tfo_cookie_req_rcv);
4396 nstat_set_keyval_scalar(&kv[i++],
4397 NSTAT_SYSINFO_TFO_COOKIE_SENT,
4398 data->u.tcp_stats.tfo_cookie_sent);
4399 nstat_set_keyval_scalar(&kv[i++],
4400 NSTAT_SYSINFO_TFO_COOKIE_INVALID,
4401 data->u.tcp_stats.tfo_cookie_invalid);
4402 nstat_set_keyval_scalar(&kv[i++],
4403 NSTAT_SYSINFO_TFO_COOKIE_REQ,
4404 data->u.tcp_stats.tfo_cookie_req);
4405 nstat_set_keyval_scalar(&kv[i++],
4406 NSTAT_SYSINFO_TFO_COOKIE_RCV,
4407 data->u.tcp_stats.tfo_cookie_rcv);
4408 nstat_set_keyval_scalar(&kv[i++],
4409 NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
4410 data->u.tcp_stats.tfo_syn_data_sent);
4411 nstat_set_keyval_scalar(&kv[i++],
4412 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
4413 data->u.tcp_stats.tfo_syn_data_acked);
4414 nstat_set_keyval_scalar(&kv[i++],
4415 NSTAT_SYSINFO_TFO_SYN_LOSS,
4416 data->u.tcp_stats.tfo_syn_loss);
4417 nstat_set_keyval_scalar(&kv[i++],
4418 NSTAT_SYSINFO_TFO_BLACKHOLE,
4419 data->u.tcp_stats.tfo_blackhole);
4420 nstat_set_keyval_scalar(&kv[i++],
4421 NSTAT_SYSINFO_TFO_COOKIE_WRONG,
4422 data->u.tcp_stats.tfo_cookie_wrong);
4423 nstat_set_keyval_scalar(&kv[i++],
4424 NSTAT_SYSINFO_TFO_NO_COOKIE_RCV,
4425 data->u.tcp_stats.tfo_no_cookie_rcv);
4426 nstat_set_keyval_scalar(&kv[i++],
4427 NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE,
4428 data->u.tcp_stats.tfo_heuristics_disable);
4429 nstat_set_keyval_scalar(&kv[i++],
4430 NSTAT_SYSINFO_TFO_SEND_BLACKHOLE,
4431 data->u.tcp_stats.tfo_sndblackhole);
4432 nstat_set_keyval_scalar(&kv[i++],
4433 NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT,
4434 data->u.tcp_stats.mptcp_handover_attempt);
4435 nstat_set_keyval_scalar(&kv[i++],
4436 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT,
4437 data->u.tcp_stats.mptcp_interactive_attempt);
4438 nstat_set_keyval_scalar(&kv[i++],
4439 NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT,
4440 data->u.tcp_stats.mptcp_aggregate_attempt);
4441 nstat_set_keyval_scalar(&kv[i++],
4442 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT,
4443 data->u.tcp_stats.mptcp_fp_handover_attempt);
4444 nstat_set_keyval_scalar(&kv[i++],
4445 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT,
4446 data->u.tcp_stats.mptcp_fp_interactive_attempt);
4447 nstat_set_keyval_scalar(&kv[i++],
4448 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT,
4449 data->u.tcp_stats.mptcp_fp_aggregate_attempt);
4450 nstat_set_keyval_scalar(&kv[i++],
4451 NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK,
4452 data->u.tcp_stats.mptcp_heuristic_fallback);
4453 nstat_set_keyval_scalar(&kv[i++],
4454 NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK,
4455 data->u.tcp_stats.mptcp_fp_heuristic_fallback);
4456 nstat_set_keyval_scalar(&kv[i++],
4457 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI,
4458 data->u.tcp_stats.mptcp_handover_success_wifi);
4459 nstat_set_keyval_scalar(&kv[i++],
4460 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL,
4461 data->u.tcp_stats.mptcp_handover_success_cell);
4462 nstat_set_keyval_scalar(&kv[i++],
4463 NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS,
4464 data->u.tcp_stats.mptcp_interactive_success);
4465 nstat_set_keyval_scalar(&kv[i++],
4466 NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS,
4467 data->u.tcp_stats.mptcp_aggregate_success);
4468 nstat_set_keyval_scalar(&kv[i++],
4469 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI,
4470 data->u.tcp_stats.mptcp_fp_handover_success_wifi);
4471 nstat_set_keyval_scalar(&kv[i++],
4472 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL,
4473 data->u.tcp_stats.mptcp_fp_handover_success_cell);
4474 nstat_set_keyval_scalar(&kv[i++],
4475 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS,
4476 data->u.tcp_stats.mptcp_fp_interactive_success);
4477 nstat_set_keyval_scalar(&kv[i++],
4478 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS,
4479 data->u.tcp_stats.mptcp_fp_aggregate_success);
4480 nstat_set_keyval_scalar(&kv[i++],
4481 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI,
4482 data->u.tcp_stats.mptcp_handover_cell_from_wifi);
4483 nstat_set_keyval_scalar(&kv[i++],
4484 NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL,
4485 data->u.tcp_stats.mptcp_handover_wifi_from_cell);
4486 nstat_set_keyval_scalar(&kv[i++],
4487 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI,
4488 data->u.tcp_stats.mptcp_interactive_cell_from_wifi);
4489 nstat_set_keyval_u64_scalar(&kv[i++],
4490 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES,
4491 data->u.tcp_stats.mptcp_handover_cell_bytes);
4492 nstat_set_keyval_u64_scalar(&kv[i++],
4493 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES,
4494 data->u.tcp_stats.mptcp_interactive_cell_bytes);
4495 nstat_set_keyval_u64_scalar(&kv[i++],
4496 NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES,
4497 data->u.tcp_stats.mptcp_aggregate_cell_bytes);
4498 nstat_set_keyval_u64_scalar(&kv[i++],
4499 NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES,
4500 data->u.tcp_stats.mptcp_handover_all_bytes);
4501 nstat_set_keyval_u64_scalar(&kv[i++],
4502 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES,
4503 data->u.tcp_stats.mptcp_interactive_all_bytes);
4504 nstat_set_keyval_u64_scalar(&kv[i++],
4505 NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES,
4506 data->u.tcp_stats.mptcp_aggregate_all_bytes);
4507 nstat_set_keyval_scalar(&kv[i++],
4508 NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI,
4509 data->u.tcp_stats.mptcp_back_to_wifi);
4510 nstat_set_keyval_scalar(&kv[i++],
4511 NSTAT_SYSINFO_MPTCP_WIFI_PROXY,
4512 data->u.tcp_stats.mptcp_wifi_proxy);
4513 nstat_set_keyval_scalar(&kv[i++],
4514 NSTAT_SYSINFO_MPTCP_CELL_PROXY,
4515 data->u.tcp_stats.mptcp_cell_proxy);
4516 nstat_set_keyval_scalar(&kv[i++],
4517 NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL,
4518 data->u.tcp_stats.mptcp_triggered_cell);
4519 VERIFY(i == nkeyvals);
4520 break;
4521 }
4522 case NSTAT_SYSINFO_IFNET_ECN_STATS:
4523 {
4524 nstat_set_keyval_scalar(&kv[i++],
4525 NSTAT_SYSINFO_ECN_IFNET_TYPE,
4526 data->u.ifnet_ecn_stats.ifnet_type);
4527 nstat_set_keyval_scalar(&kv[i++],
4528 NSTAT_SYSINFO_ECN_IFNET_PROTO,
4529 data->u.ifnet_ecn_stats.ifnet_proto);
4530 nstat_set_keyval_u64_scalar(&kv[i++],
4531 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
4532 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
4533 nstat_set_keyval_u64_scalar(&kv[i++],
4534 NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
4535 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
4536 nstat_set_keyval_u64_scalar(&kv[i++],
4537 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
4538 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
4539 nstat_set_keyval_u64_scalar(&kv[i++],
4540 NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
4541 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
4542 nstat_set_keyval_u64_scalar(&kv[i++],
4543 NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
4544 data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
4545 nstat_set_keyval_u64_scalar(&kv[i++],
4546 NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
4547 data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
4548 nstat_set_keyval_u64_scalar(&kv[i++],
4549 NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
4550 data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
4551 nstat_set_keyval_u64_scalar(&kv[i++],
4552 NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
4553 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
4554 nstat_set_keyval_u64_scalar(&kv[i++],
4555 NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
4556 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
4557 nstat_set_keyval_u64_scalar(&kv[i++],
4558 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
4559 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
4560 nstat_set_keyval_u64_scalar(&kv[i++],
4561 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
4562 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
4563 nstat_set_keyval_u64_scalar(&kv[i++],
4564 NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
4565 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
4566 nstat_set_keyval_u64_scalar(&kv[i++],
4567 NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
4568 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
4569 nstat_set_keyval_u64_scalar(&kv[i++],
4570 NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
4571 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
4572 nstat_set_keyval_u64_scalar(&kv[i++],
4573 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
4574 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
4575 nstat_set_keyval_u64_scalar(&kv[i++],
4576 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
4577 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
4578 nstat_set_keyval_u64_scalar(&kv[i++],
4579 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
4580 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
4581 nstat_set_keyval_u64_scalar(&kv[i++],
4582 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
4583 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
4584 nstat_set_keyval_u64_scalar(&kv[i++],
4585 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
4586 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
4587 nstat_set_keyval_u64_scalar(&kv[i++],
4588 NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
4589 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
4590 nstat_set_keyval_u64_scalar(&kv[i++],
4591 NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
4592 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
4593 nstat_set_keyval_u64_scalar(&kv[i++],
4594 NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
4595 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
4596 nstat_set_keyval_u64_scalar(&kv[i++],
4597 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
4598 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
4599 nstat_set_keyval_u64_scalar(&kv[i++],
4600 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
4601 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
4602 nstat_set_keyval_u64_scalar(&kv[i++],
4603 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
4604 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
4605 nstat_set_keyval_u64_scalar(&kv[i++],
4606 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
4607 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
4608 nstat_set_keyval_u64_scalar(&kv[i++],
4609 NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
4610 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
4611 nstat_set_keyval_u64_scalar(&kv[i++],
4612 NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
4613 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
4614 nstat_set_keyval_u64_scalar(&kv[i++],
4615 NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
4616 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
4617 nstat_set_keyval_u64_scalar(&kv[i++],
4618 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
4619 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
4620 nstat_set_keyval_u64_scalar(&kv[i++],
4621 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
4622 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
4623 nstat_set_keyval_u64_scalar(&kv[i++],
4624 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
4625 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
4626 nstat_set_keyval_u64_scalar(&kv[i++],
4627 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
4628 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
4629 nstat_set_keyval_u64_scalar(&kv[i++],
4630 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
4631 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
4632 nstat_set_keyval_u64_scalar(&kv[i++],
4633 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
4634 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
4635 nstat_set_keyval_u64_scalar(&kv[i++],
4636 NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
4637 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
4638 nstat_set_keyval_u64_scalar(&kv[i++],
4639 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
4640 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
4641 nstat_set_keyval_u64_scalar(&kv[i++],
4642 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
4643 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
4644 nstat_set_keyval_u64_scalar(&kv[i++],
4645 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
4646 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
4647 nstat_set_keyval_u64_scalar(&kv[i++],
4648 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
4649 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
4650 nstat_set_keyval_u64_scalar(&kv[i++],
4651 NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
4652 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
4653 nstat_set_keyval_u64_scalar(&kv[i++],
4654 NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
4655 data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
4656 nstat_set_keyval_scalar(&kv[i++],
4657 NSTAT_SYSINFO_IFNET_UNSENT_DATA,
4658 data->unsent_data_cnt);
4659 nstat_set_keyval_u64_scalar(&kv[i++],
4660 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST,
4661 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
4662 nstat_set_keyval_u64_scalar(&kv[i++],
4663 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT,
4664 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
4665 nstat_set_keyval_u64_scalar(&kv[i++],
4666 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST,
4667 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst);
4668 break;
4669 }
4670 case NSTAT_SYSINFO_LIM_STATS:
4671 {
4672 nstat_set_keyval_string(&kv[i++],
4673 NSTAT_SYSINFO_LIM_IFNET_SIGNATURE,
4674 data->u.lim_stats.ifnet_signature,
4675 data->u.lim_stats.ifnet_siglen);
4676 nstat_set_keyval_u64_scalar(&kv[i++],
4677 NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH,
4678 data->u.lim_stats.lim_stat.lim_dl_max_bandwidth);
4679 nstat_set_keyval_u64_scalar(&kv[i++],
4680 NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH,
4681 data->u.lim_stats.lim_stat.lim_ul_max_bandwidth);
4682 nstat_set_keyval_u64_scalar(&kv[i++],
4683 NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT,
4684 data->u.lim_stats.lim_stat.lim_packet_loss_percent);
4685 nstat_set_keyval_u64_scalar(&kv[i++],
4686 NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT,
4687 data->u.lim_stats.lim_stat.lim_packet_ooo_percent);
4688 nstat_set_keyval_u64_scalar(&kv[i++],
4689 NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE,
4690 data->u.lim_stats.lim_stat.lim_rtt_variance);
4691 nstat_set_keyval_u64_scalar(&kv[i++],
4692 NSTAT_SYSINFO_LIM_IFNET_RTT_MIN,
4693 data->u.lim_stats.lim_stat.lim_rtt_min);
4694 nstat_set_keyval_u64_scalar(&kv[i++],
4695 NSTAT_SYSINFO_LIM_IFNET_RTT_AVG,
4696 data->u.lim_stats.lim_stat.lim_rtt_average);
4697 nstat_set_keyval_u64_scalar(&kv[i++],
4698 NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT,
4699 data->u.lim_stats.lim_stat.lim_conn_timeout_percent);
4700 nstat_set_keyval_scalar(&kv[i++],
4701 NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED,
4702 data->u.lim_stats.lim_stat.lim_dl_detected);
4703 nstat_set_keyval_scalar(&kv[i++],
4704 NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED,
4705 data->u.lim_stats.lim_stat.lim_ul_detected);
4706 nstat_set_keyval_scalar(&kv[i++],
4707 NSTAT_SYSINFO_LIM_IFNET_TYPE,
4708 data->u.lim_stats.ifnet_type);
4709 break;
4710 }
4711 case NSTAT_SYSINFO_NET_API_STATS:
4712 {
4713 nstat_set_keyval_u64_scalar(&kv[i++],
4714 NSTAT_SYSINFO_API_IF_FLTR_ATTACH,
4715 data->u.net_api_stats.net_api_stats.nas_iflt_attach_total);
4716 nstat_set_keyval_u64_scalar(&kv[i++],
4717 NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS,
4718 data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total);
4719 nstat_set_keyval_u64_scalar(&kv[i++],
4720 NSTAT_SYSINFO_API_IP_FLTR_ADD,
4721 data->u.net_api_stats.net_api_stats.nas_ipf_add_total);
4722 nstat_set_keyval_u64_scalar(&kv[i++],
4723 NSTAT_SYSINFO_API_IP_FLTR_ADD_OS,
4724 data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total);
4725 nstat_set_keyval_u64_scalar(&kv[i++],
4726 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH,
4727 data->u.net_api_stats.net_api_stats.nas_sfltr_register_total);
4728 nstat_set_keyval_u64_scalar(&kv[i++],
4729 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS,
4730 data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total);
4731
4732
4733 nstat_set_keyval_u64_scalar(&kv[i++],
4734 NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL,
4735 data->u.net_api_stats.net_api_stats.nas_socket_alloc_total);
4736 nstat_set_keyval_u64_scalar(&kv[i++],
4737 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL,
4738 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total);
4739 nstat_set_keyval_u64_scalar(&kv[i++],
4740 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS,
4741 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total);
4742 nstat_set_keyval_u64_scalar(&kv[i++],
4743 NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID,
4744 data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total);
4745
4746 nstat_set_keyval_u64_scalar(&kv[i++],
4747 NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL,
4748 data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total);
4749 nstat_set_keyval_u64_scalar(&kv[i++],
4750 NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE,
4751 data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total);
4752 nstat_set_keyval_u64_scalar(&kv[i++],
4753 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET,
4754 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total);
4755 nstat_set_keyval_u64_scalar(&kv[i++],
4756 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6,
4757 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total);
4758 nstat_set_keyval_u64_scalar(&kv[i++],
4759 NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM,
4760 data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total);
4761 nstat_set_keyval_u64_scalar(&kv[i++],
4762 NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH,
4763 data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total);
4764 nstat_set_keyval_u64_scalar(&kv[i++],
4765 NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY,
4766 data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total);
4767 nstat_set_keyval_u64_scalar(&kv[i++],
4768 NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV,
4769 data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total);
4770 nstat_set_keyval_u64_scalar(&kv[i++],
4771 NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER,
4772 data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total);
4773
4774 nstat_set_keyval_u64_scalar(&kv[i++],
4775 NSTAT_SYSINFO_API_SOCK_INET_STREAM,
4776 data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total);
4777 nstat_set_keyval_u64_scalar(&kv[i++],
4778 NSTAT_SYSINFO_API_SOCK_INET_DGRAM,
4779 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total);
4780 nstat_set_keyval_u64_scalar(&kv[i++],
4781 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED,
4782 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected);
4783 nstat_set_keyval_u64_scalar(&kv[i++],
4784 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS,
4785 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns);
4786 nstat_set_keyval_u64_scalar(&kv[i++],
4787 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA,
4788 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data);
4789
4790 nstat_set_keyval_u64_scalar(&kv[i++],
4791 NSTAT_SYSINFO_API_SOCK_INET6_STREAM,
4792 data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total);
4793 nstat_set_keyval_u64_scalar(&kv[i++],
4794 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM,
4795 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total);
4796 nstat_set_keyval_u64_scalar(&kv[i++],
4797 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED,
4798 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected);
4799 nstat_set_keyval_u64_scalar(&kv[i++],
4800 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS,
4801 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns);
4802 nstat_set_keyval_u64_scalar(&kv[i++],
4803 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA,
4804 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data);
4805
4806 nstat_set_keyval_u64_scalar(&kv[i++],
4807 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN,
4808 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total);
4809 nstat_set_keyval_u64_scalar(&kv[i++],
4810 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS,
4811 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total);
4812
4813 nstat_set_keyval_u64_scalar(&kv[i++],
4814 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM,
4815 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total);
4816 nstat_set_keyval_u64_scalar(&kv[i++],
4817 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM,
4818 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total);
4819
4820 nstat_set_keyval_u64_scalar(&kv[i++],
4821 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM,
4822 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total);
4823 nstat_set_keyval_u64_scalar(&kv[i++],
4824 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM,
4825 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total);
4826
4827 nstat_set_keyval_u64_scalar(&kv[i++],
4828 NSTAT_SYSINFO_API_IFNET_ALLOC,
4829 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total);
4830 nstat_set_keyval_u64_scalar(&kv[i++],
4831 NSTAT_SYSINFO_API_IFNET_ALLOC_OS,
4832 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total);
4833
4834 nstat_set_keyval_u64_scalar(&kv[i++],
4835 NSTAT_SYSINFO_API_PF_ADDRULE,
4836 data->u.net_api_stats.net_api_stats.nas_pf_addrule_total);
4837 nstat_set_keyval_u64_scalar(&kv[i++],
4838 NSTAT_SYSINFO_API_PF_ADDRULE_OS,
4839 data->u.net_api_stats.net_api_stats.nas_pf_addrule_os);
4840
4841 nstat_set_keyval_u64_scalar(&kv[i++],
4842 NSTAT_SYSINFO_API_VMNET_START,
4843 data->u.net_api_stats.net_api_stats.nas_vmnet_total);
4844
4845 #if SKYWALK
4846 nstat_set_keyval_scalar(&kv[i++],
4847 NSTAT_SYSINFO_API_IF_NETAGENT_ENABLED,
4848 if_is_fsw_transport_netagent_enabled());
4849 #endif /* SKYWALK */
4850
4851 nstat_set_keyval_scalar(&kv[i++],
4852 NSTAT_SYSINFO_API_REPORT_INTERVAL,
4853 data->u.net_api_stats.report_interval);
4854
4855 break;
4856 }
4857 }
4858 if (syscnt != NULL) {
4859 VERIFY(i > 0 && i <= nkeyvals);
4860 countsize = offsetof(nstat_sysinfo_counts,
4861 nstat_sysinfo_keyvals) +
4862 sizeof(nstat_sysinfo_keyval) * i;
4863 finalsize += countsize;
4864 syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
4865 assert(finalsize <= MAX_NSTAT_MSG_HDR_LENGTH);
4866 syscnt->hdr.length = (u_int16_t)finalsize;
4867 syscnt->counts.nstat_sysinfo_len = (u_int32_t)countsize;
4868
4869 result = ctl_enqueuedata(control->ncs_kctl,
4870 control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
4871 if (result != 0) {
4872 nstat_stats.nstat_sysinfofailures += 1;
4873 }
4874 kfree_data(syscnt, allocsize);
4875 }
4876 return;
4877 }
4878
4879 __private_extern__ void
nstat_sysinfo_send_data(nstat_sysinfo_data * data)4880 nstat_sysinfo_send_data(
4881 nstat_sysinfo_data *data)
4882 {
4883 nstat_control_state *control;
4884
4885 lck_mtx_lock(&nstat_mtx);
4886 for (control = nstat_controls; control; control = control->ncs_next) {
4887 lck_mtx_lock(&control->ncs_mtx);
4888 if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0) {
4889 nstat_sysinfo_send_data_internal(control, data);
4890 }
4891 lck_mtx_unlock(&control->ncs_mtx);
4892 }
4893 lck_mtx_unlock(&nstat_mtx);
4894 }
4895
/*
 * Trigger the periodic system-level statistics reports.  Called from
 * nstat_idle_check(); each callee decides for itself whether enough
 * time has elapsed to emit a new report (e.g. nstat_net_api_report_stats
 * rate-limits on net_api_stats_report_interval).
 */
static void
nstat_sysinfo_generate_report(void)
{
	mbuf_report_peak_usage();
	tcp_report_stats();
	nstat_ifnet_report_ecn_stats();
	nstat_ifnet_report_lim_stats();
	nstat_net_api_report_stats();
}
4905
4906 #pragma mark -- net_api --
4907
/*
 * Snapshot of net_api_stats taken at the end of the previous report;
 * used by STATDIFF in nstat_net_api_report_stats() to report deltas
 * for the monotonically increasing per-flow counters.
 */
static struct net_api_stats net_api_stats_before;
/* net_uptime() timestamp of the last report, for rate limiting. */
static u_int64_t net_api_stats_last_report_time;
4910
/*
 * Build and send the NSTAT_SYSINFO_NET_API_STATS report.
 *
 * Rate-limited: returns without reporting unless at least
 * net_api_stats_report_interval seconds of net uptime have elapsed
 * since the last report.
 */
static void
nstat_net_api_report_stats(void)
{
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_net_api_stats *st = &data.u.net_api_stats;
	u_int64_t uptime;

	uptime = net_uptime();

	/* Too soon since the last report; try again on a later idle pass. */
	if ((u_int32_t)(uptime - net_api_stats_last_report_time) <
	    net_api_stats_report_interval) {
		return;
	}

	st->report_interval = (u_int32_t)(uptime - net_api_stats_last_report_time);
	net_api_stats_last_report_time = uptime;

	data.flags = NSTAT_SYSINFO_NET_API_STATS;
	data.unsent_data_cnt = 0;

	/*
	 * Some of the fields in the report are the current value and
	 * other fields are the delta from the last report:
	 * - Report difference for the per flow counters as they increase
	 *   with time
	 * - Report current value for other counters as they tend not to change
	 *   much with time
	 */
#define STATCOPY(f) \
	(st->net_api_stats.f = net_api_stats.f)
#define STATDIFF(f) \
	(st->net_api_stats.f = net_api_stats.f - net_api_stats_before.f)

	STATCOPY(nas_iflt_attach_count);
	STATCOPY(nas_iflt_attach_total);
	STATCOPY(nas_iflt_attach_os_total);

	STATCOPY(nas_ipf_add_count);
	STATCOPY(nas_ipf_add_total);
	STATCOPY(nas_ipf_add_os_total);

	STATCOPY(nas_sfltr_register_count);
	STATCOPY(nas_sfltr_register_total);
	STATCOPY(nas_sfltr_register_os_total);

	STATDIFF(nas_socket_alloc_total);
	STATDIFF(nas_socket_in_kernel_total);
	STATDIFF(nas_socket_in_kernel_os_total);
	STATDIFF(nas_socket_necp_clientuuid_total);

	STATDIFF(nas_socket_domain_local_total);
	STATDIFF(nas_socket_domain_route_total);
	STATDIFF(nas_socket_domain_inet_total);
	STATDIFF(nas_socket_domain_inet6_total);
	STATDIFF(nas_socket_domain_system_total);
	STATDIFF(nas_socket_domain_multipath_total);
	STATDIFF(nas_socket_domain_key_total);
	STATDIFF(nas_socket_domain_ndrv_total);
	STATDIFF(nas_socket_domain_other_total);

	STATDIFF(nas_socket_inet_stream_total);
	STATDIFF(nas_socket_inet_dgram_total);
	STATDIFF(nas_socket_inet_dgram_connected);
	STATDIFF(nas_socket_inet_dgram_dns);
	STATDIFF(nas_socket_inet_dgram_no_data);

	STATDIFF(nas_socket_inet6_stream_total);
	STATDIFF(nas_socket_inet6_dgram_total);
	STATDIFF(nas_socket_inet6_dgram_connected);
	STATDIFF(nas_socket_inet6_dgram_dns);
	STATDIFF(nas_socket_inet6_dgram_no_data);

	STATDIFF(nas_socket_mcast_join_total);
	STATDIFF(nas_socket_mcast_join_os_total);

	STATDIFF(nas_sock_inet6_stream_exthdr_in);
	STATDIFF(nas_sock_inet6_stream_exthdr_out);
	STATDIFF(nas_sock_inet6_dgram_exthdr_in);
	STATDIFF(nas_sock_inet6_dgram_exthdr_out);

	STATDIFF(nas_nx_flow_inet_stream_total);
	STATDIFF(nas_nx_flow_inet_dgram_total);

	STATDIFF(nas_nx_flow_inet6_stream_total);
	STATDIFF(nas_nx_flow_inet6_dgram_total);

	STATCOPY(nas_ifnet_alloc_count);
	STATCOPY(nas_ifnet_alloc_total);
	STATCOPY(nas_ifnet_alloc_os_count);
	STATCOPY(nas_ifnet_alloc_os_total);

	STATCOPY(nas_pf_addrule_total);
	STATCOPY(nas_pf_addrule_os);

	STATCOPY(nas_vmnet_total);

#undef STATCOPY
#undef STATDIFF

	nstat_sysinfo_send_data(&data);

	/*
	 * Save a copy of the current fields so we can diff them the next time
	 */
	memcpy(&net_api_stats_before, &net_api_stats,
	    sizeof(struct net_api_stats));
	_CASSERT(sizeof(net_api_stats_before) == sizeof(net_api_stats));
}
5019
5020
5021 #pragma mark -- Kernel Control Socket --
5022
/* Reference to the registered network-statistics kernel control. */
static kern_ctl_ref nstat_ctlref = NULL;

/* Kernel control callbacks, wired up in nstat_control_register(). */
static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
5028
5029 static errno_t
nstat_enqueue_success(uint64_t context,nstat_control_state * state,u_int16_t flags)5030 nstat_enqueue_success(
5031 uint64_t context,
5032 nstat_control_state *state,
5033 u_int16_t flags)
5034 {
5035 nstat_msg_hdr success;
5036 errno_t result;
5037
5038 bzero(&success, sizeof(success));
5039 success.context = context;
5040 success.type = NSTAT_MSG_TYPE_SUCCESS;
5041 success.length = sizeof(success);
5042 success.flags = flags;
5043 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
5044 sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
5045 if (result != 0) {
5046 if (nstat_debug != 0) {
5047 printf("%s: could not enqueue success message %d\n",
5048 __func__, result);
5049 }
5050 nstat_stats.nstat_successmsgfailures += 1;
5051 }
5052 return result;
5053 }
5054
5055 static errno_t
nstat_control_send_event(nstat_control_state * state,nstat_src * src,u_int64_t event)5056 nstat_control_send_event(
5057 nstat_control_state *state,
5058 nstat_src *src,
5059 u_int64_t event)
5060 {
5061 errno_t result = 0;
5062 int failed = 0;
5063
5064 if (nstat_control_reporting_allowed(state, src, 0)) {
5065 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
5066 result = nstat_control_send_update(state, src, 0, event, 0, NULL);
5067 if (result != 0) {
5068 failed = 1;
5069 if (nstat_debug != 0) {
5070 printf("%s - nstat_control_send_event() %d\n", __func__, result);
5071 }
5072 }
5073 } else {
5074 if (nstat_debug != 0) {
5075 printf("%s - nstat_control_send_event() used when updates not supported\n", __func__);
5076 }
5077 }
5078 }
5079 return result;
5080 }
5081
5082 static errno_t
nstat_control_send_goodbye(nstat_control_state * state,nstat_src * src)5083 nstat_control_send_goodbye(
5084 nstat_control_state *state,
5085 nstat_src *src)
5086 {
5087 errno_t result = 0;
5088 int failed = 0;
5089 u_int16_t hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_FILTER;
5090
5091 if (nstat_control_reporting_allowed(state, src, (src->ns_reported)? NSTAT_FILTER_SUPPRESS_BORING_CLOSE: 0)) {
5092 hdr_flags = 0;
5093 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
5094 result = nstat_control_send_update(state, src, 0, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
5095 if (result != 0) {
5096 failed = 1;
5097 hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_DROP;
5098 if (nstat_debug != 0) {
5099 printf("%s - nstat_control_send_update() %d\n", __func__, result);
5100 }
5101 }
5102 } else {
5103 // send one last counts notification
5104 result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
5105 if (result != 0) {
5106 failed = 1;
5107 hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_DROP;
5108 if (nstat_debug != 0) {
5109 printf("%s - nstat_control_send_counts() %d\n", __func__, result);
5110 }
5111 }
5112
5113 // send a last description
5114 result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
5115 if (result != 0) {
5116 failed = 1;
5117 hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_DROP;
5118 if (nstat_debug != 0) {
5119 printf("%s - nstat_control_send_description() %d\n", __func__, result);
5120 }
5121 }
5122 }
5123 }
5124
5125 // send the source removed notification
5126 result = nstat_control_send_removed(state, src, hdr_flags);
5127 if (result != 0 && nstat_debug) {
5128 failed = 1;
5129 if (nstat_debug != 0) {
5130 printf("%s - nstat_control_send_removed() %d\n", __func__, result);
5131 }
5132 }
5133
5134 if (failed != 0) {
5135 nstat_stats.nstat_control_send_goodbye_failures++;
5136 }
5137
5138
5139 return result;
5140 }
5141
5142 static errno_t
nstat_flush_accumulated_msgs(nstat_control_state * state)5143 nstat_flush_accumulated_msgs(
5144 nstat_control_state *state)
5145 {
5146 errno_t result = 0;
5147 if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0) {
5148 mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
5149 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
5150 if (result != 0) {
5151 nstat_stats.nstat_flush_accumulated_msgs_failures++;
5152 if (nstat_debug != 0) {
5153 printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
5154 }
5155 mbuf_freem(state->ncs_accumulated);
5156 }
5157 state->ncs_accumulated = NULL;
5158 }
5159 return result;
5160 }
5161
/*
 * Append a message to the per-client accumulation mbuf so that several
 * small messages can be delivered as one enqueue.  If the current mbuf
 * is too full it is flushed first; if accumulation fails for any reason
 * the message is sent directly with ctl_enqueuedata() as a fallback.
 */
static errno_t
nstat_accumulate_msg(
	nstat_control_state *state,
	nstat_msg_hdr *hdr,
	size_t length)
{
	assert(length <= MAX_NSTAT_MSG_HDR_LENGTH);

	/* Not enough room left in the current mbuf: flush it and start fresh. */
	if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length) {
		// Will send the current mbuf
		nstat_flush_accumulated_msgs(state);
	}

	errno_t result = 0;

	if (state->ncs_accumulated == NULL) {
		/* Request a single (non-chained) packet of the maximum message size. */
		unsigned int one = 1;
		if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0) {
			if (nstat_debug != 0) {
				printf("%s - mbuf_allocpacket failed\n", __func__);
			}
			result = ENOMEM;
		} else {
			mbuf_setlen(state->ncs_accumulated, 0);
		}
	}

	if (result == 0) {
		/* Stamp the actual length into the header, then append the
		 * whole message at the current end of the accumulation mbuf. */
		hdr->length = (u_int16_t)length;
		result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
		    length, hdr, MBUF_DONTWAIT);
	}

	if (result != 0) {
		/* Accumulation failed: flush whatever we have and try to
		 * deliver this one message directly. */
		nstat_flush_accumulated_msgs(state);
		if (nstat_debug != 0) {
			printf("%s - resorting to ctl_enqueuedata\n", __func__);
		}
		result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
	}

	if (result != 0) {
		nstat_stats.nstat_accumulate_msg_failures++;
	}

	return result;
}
5209
/*
 * Periodic (60 s) thread-call: sweep every control's source list for
 * sources whose provider reports them gone, send their goodbye
 * messages, and release them.  Also drives the system-level sysinfo
 * reports.  Gone sources are first moved to a local dead_list and only
 * cleaned up after all locks are dropped, to avoid calling provider
 * release callbacks while holding nstat_mtx / ncs_mtx.
 */
static void*
nstat_idle_check(
	__unused thread_call_param_t p0,
	__unused thread_call_param_t p1)
{
	nstat_control_state *control;
	nstat_src *src, *tmpsrc;
	tailq_head_nstat_src dead_list;
	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);

	/* Clear the pending-deadline marker; rearmed below if clients remain. */
	nstat_idle_time = 0;

	for (control = nstat_controls; control; control = control->ncs_next) {
		lck_mtx_lock(&control->ncs_mtx);
		/* Skip the sweep for clients with counts requests in flight
		 * this interval (NSTAT_FLAG_REQCOUNTS); the flag is cleared
		 * below so they are swept next time. */
		if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS)) {
			TAILQ_FOREACH_SAFE(src, &control->ncs_src_queue, ns_control_link, tmpsrc)
			{
				if (src->provider->nstat_gone(src->cookie)) {
					errno_t result;

					// Pull it off the list
					TAILQ_REMOVE(&control->ncs_src_queue, src, ns_control_link);

					/* Best effort: failures are counted inside
					 * nstat_control_send_goodbye itself. */
					result = nstat_control_send_goodbye(control, src);

					// Put this on the list to release later
					TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
				}
			}
		}
		control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
		lck_mtx_unlock(&control->ncs_mtx);
	}

	/* Rearm the idle check only while at least one client is connected. */
	if (nstat_controls) {
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	/* Generate any system level reports, if needed */
	nstat_sysinfo_generate_report();

	// Release the sources now that we aren't holding lots of locks
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, FALSE);
	}

	nstat_prune_procdetails();

	return NULL;
}
5266
5267 static void
nstat_control_register(void)5268 nstat_control_register(void)
5269 {
5270 // Register the control
5271 struct kern_ctl_reg nstat_control;
5272 bzero(&nstat_control, sizeof(nstat_control));
5273 strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
5274 nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
5275 nstat_control.ctl_sendsize = nstat_sendspace;
5276 nstat_control.ctl_recvsize = nstat_recvspace;
5277 nstat_control.ctl_connect = nstat_control_connect;
5278 nstat_control.ctl_disconnect = nstat_control_disconnect;
5279 nstat_control.ctl_send = nstat_control_send;
5280
5281 ctl_register(&nstat_control, &nstat_ctlref);
5282 }
5283
5284 static void
nstat_control_cleanup_source(nstat_control_state * state,struct nstat_src * src,boolean_t locked)5285 nstat_control_cleanup_source(
5286 nstat_control_state *state,
5287 struct nstat_src *src,
5288 boolean_t locked)
5289 {
5290 errno_t result;
5291
5292 if (state) {
5293 result = nstat_control_send_removed(state, src, 0);
5294 if (result != 0) {
5295 nstat_stats.nstat_control_cleanup_source_failures++;
5296 if (nstat_debug != 0) {
5297 printf("%s - nstat_control_send_removed() %d\n",
5298 __func__, result);
5299 }
5300 }
5301 }
5302 // Cleanup the source if we found it.
5303 src->provider->nstat_release(src->cookie, locked);
5304 kfree_type(struct nstat_src, src);
5305 }
5306
5307
5308 static bool
nstat_control_reporting_allowed(nstat_control_state * state,nstat_src * src,u_int64_t suppression_flags)5309 nstat_control_reporting_allowed(
5310 nstat_control_state *state,
5311 nstat_src *src,
5312 u_int64_t suppression_flags)
5313 {
5314 if (src->provider->nstat_reporting_allowed == NULL) {
5315 return TRUE;
5316 }
5317
5318 return src->provider->nstat_reporting_allowed(src->cookie,
5319 &state->ncs_provider_filters[src->provider->nstat_provider_id], suppression_flags);
5320 }
5321
5322
// Kernel-control connect callback: allocates the per-client control state,
// links it onto the global list, and arms the idle-check thread call if no
// deadline is currently pending.  The state pointer is handed back through
// *uinfo for use by the other control callbacks.
static errno_t
nstat_control_connect(
	kern_ctl_ref kctl,
	struct sockaddr_ctl *sac,
	void **uinfo)
{
	nstat_control_state *state = kalloc_type(nstat_control_state,
	    Z_WAITOK | Z_ZERO);
	if (state == NULL) {
		return ENOMEM;
	}

	lck_mtx_init(&state->ncs_mtx, &nstat_lck_grp, NULL);
	state->ncs_kctl = kctl;
	state->ncs_unit = sac->sc_unit;
	// Setting NSTAT_FLAG_REQCOUNTS defers idle-time reaping for this new
	// client for one cycle (see nstat_idle_check).
	state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
	state->ncs_procdetails = nstat_retain_curprocdetails();
	*uinfo = state;

	lck_mtx_lock(&nstat_mtx);
	// Push onto the head of the global singly-linked control list.
	state->ncs_next = nstat_controls;
	nstat_controls = state;

	// Arm the 60 second idle check only when no deadline is pending;
	// nstat_idle_check zeroes nstat_idle_time when it fires.
	if (nstat_idle_time == 0) {
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	return 0;
}
5355
// Kernel-control disconnect callback: unlinks the control state from the
// global list, detaches it from all watched providers, frees any pending
// accumulated message data, cleans up every source, and finally frees the
// state itself.  Sources are cleaned up after dropping ncs_mtx to avoid
// holding the lock across provider release callbacks.
static errno_t
nstat_control_disconnect(
	__unused kern_ctl_ref kctl,
	__unused u_int32_t unit,
	void *uinfo)
{
	u_int32_t watching;
	nstat_control_state *state = (nstat_control_state*)uinfo;
	tailq_head_nstat_src cleanup_list;
	nstat_src *src;

	TAILQ_INIT(&cleanup_list);

	// pull it out of the global list of states
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state **statepp;
	for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next) {
		if (*statepp == state) {
			*statepp = state->ncs_next;
			break;
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	lck_mtx_lock(&state->ncs_mtx);
	// Stop watching for sources
	nstat_provider *provider;
	watching = state->ncs_watching;
	state->ncs_watching = 0;
	// Walk the provider list, removing this state from each provider whose
	// bit is set in the watching mask; stop early once the mask is empty.
	for (provider = nstat_providers; provider && watching; provider = provider->next) {
		if ((watching & (1 << provider->nstat_provider_id)) != 0) {
			watching &= ~(1 << provider->nstat_provider_id);
			provider->nstat_watcher_remove(state);
		}
	}

	// set cleanup flags
	state->ncs_flags |= NSTAT_FLAG_CLEANUP;

	// Drop any partially accumulated message batch.
	if (state->ncs_accumulated) {
		mbuf_freem(state->ncs_accumulated);
		state->ncs_accumulated = NULL;
	}

	// Copy out the list of sources
	TAILQ_CONCAT(&cleanup_list, &state->ncs_src_queue, ns_control_link);
	lck_mtx_unlock(&state->ncs_mtx);

	// Release each source without notifying the (now gone) client.
	while ((src = TAILQ_FIRST(&cleanup_list))) {
		TAILQ_REMOVE(&cleanup_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, FALSE);
	}

	lck_mtx_destroy(&state->ncs_mtx, &nstat_lck_grp);
	nstat_release_procdetails(state->ncs_procdetails);
	kfree_type(struct nstat_control_state, state);

	return 0;
}
5415
5416 static nstat_src_ref_t
nstat_control_next_src_ref(nstat_control_state * state)5417 nstat_control_next_src_ref(
5418 nstat_control_state *state)
5419 {
5420 return ++state->ncs_next_srcref;
5421 }
5422
5423 static errno_t
nstat_control_send_counts(nstat_control_state * state,nstat_src * src,unsigned long long context,u_int16_t hdr_flags,int * gone)5424 nstat_control_send_counts(
5425 nstat_control_state *state,
5426 nstat_src *src,
5427 unsigned long long context,
5428 u_int16_t hdr_flags,
5429 int *gone)
5430 {
5431 nstat_msg_src_counts counts;
5432 errno_t result = 0;
5433
5434 /* Some providers may not have any counts to send */
5435 if (src->provider->nstat_counts == NULL) {
5436 return 0;
5437 }
5438
5439 bzero(&counts, sizeof(counts));
5440 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
5441 counts.hdr.length = sizeof(counts);
5442 counts.hdr.flags = hdr_flags;
5443 counts.hdr.context = context;
5444 counts.srcref = src->srcref;
5445 counts.event_flags = 0;
5446
5447 if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0) {
5448 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
5449 counts.counts.nstat_rxbytes == 0 &&
5450 counts.counts.nstat_txbytes == 0) {
5451 result = EAGAIN;
5452 } else {
5453 result = ctl_enqueuedata(state->ncs_kctl,
5454 state->ncs_unit, &counts, sizeof(counts),
5455 CTL_DATA_EOR);
5456 if (result != 0) {
5457 nstat_stats.nstat_sendcountfailures += 1;
5458 }
5459 }
5460 }
5461 return result;
5462 }
5463
5464 static errno_t
nstat_control_append_counts(nstat_control_state * state,nstat_src * src,int * gone)5465 nstat_control_append_counts(
5466 nstat_control_state *state,
5467 nstat_src *src,
5468 int *gone)
5469 {
5470 /* Some providers may not have any counts to send */
5471 if (!src->provider->nstat_counts) {
5472 return 0;
5473 }
5474
5475 nstat_msg_src_counts counts;
5476 bzero(&counts, sizeof(counts));
5477 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
5478 counts.hdr.length = sizeof(counts);
5479 counts.srcref = src->srcref;
5480 counts.event_flags = 0;
5481
5482 errno_t result = 0;
5483 result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
5484 if (result != 0) {
5485 return result;
5486 }
5487
5488 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
5489 counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0) {
5490 return EAGAIN;
5491 }
5492
5493 return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
5494 }
5495
// Build and enqueue a SRC_DESC message carrying the provider-specific
// descriptor for one source.  The message is allocated as a single
// contiguous mbuf.  Returns EOPNOTSUPP when the provider has no descriptor
// support, ENOMEM on allocation failure, the provider's copy error, or the
// enqueue result.
static int
nstat_control_send_description(
	nstat_control_state *state,
	nstat_src *src,
	u_int64_t context,
	u_int16_t hdr_flags)
{
	// Provider doesn't support getting the descriptor? Done.
	if (src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) {
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t msg;
	unsigned int one = 1; // maxchunks = 1: request one contiguous segment
	size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	assert(size <= MAX_NSTAT_MSG_HDR_LENGTH);

	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
		return ENOMEM;
	}

	nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
	bzero(desc, size);
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	// Query the provider for the provider specific bits
	errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);

	if (result != 0) {
		mbuf_freem(msg);
		return result;
	}

	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = (u_int16_t)size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
	if (result != 0) {
		nstat_stats.nstat_descriptionfailures += 1;
		// Enqueue failed, so the mbuf is still ours to free.
		mbuf_freem(msg);
	}

	return result;
}
5548
// Build a SRC_DESC message for one source in a stack buffer and append it to
// the control's accumulation mbuf.  Returns EOPNOTSUPP when the provider has
// no descriptor support or the message would exceed 512 bytes (bounding the
// stack VLA below), otherwise the provider's copy error or the accumulation
// result.
static errno_t
nstat_control_append_description(
	nstat_control_state *state,
	nstat_src *src)
{
	size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) {
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = (u_int16_t)size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t result = 0;
	// Fill in the description
	// Query the provider for the provider specific bits
	result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
	    src->provider->nstat_descriptor_length);
	if (result != 0) {
		return result;
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
5582
5583 static uint64_t
nstat_extension_flags_for_source(nstat_control_state * state,nstat_src * src)5584 nstat_extension_flags_for_source(
5585 nstat_control_state *state,
5586 nstat_src *src)
5587 {
5588 VERIFY(state != NULL & src != NULL);
5589 nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
5590
5591 return state->ncs_provider_filters[provider_id].npf_extensions;
5592 }
5593
// Build and enqueue a SRC_UPDATE (or SRC_EXTENDED_UPDATE when extensions are
// requested) message for one source.  The message layout is:
//   [nstat_msg_src_update hdr + descriptor][ext hdr][ext payload]...,
// with each extension payload padded to 8-byte alignment.  Returns
// EOPNOTSUPP when the provider supports neither descriptors nor counts,
// ENOMEM on allocation failure, EAGAIN when NOZEROBYTES filters the source,
// or the provider/enqueue error.  Marks the source reported on success.
static int
nstat_control_send_update(
	nstat_control_state *state,
	nstat_src *src,
	u_int64_t context,
	u_int64_t event,
	u_int16_t hdr_flags,
	int *gone)
{
	// Provider doesn't support getting the descriptor or counts? Done.
	if ((src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) &&
	    src->provider->nstat_counts == NULL) {
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t msg;
	unsigned int one = 1;
	size_t size = offsetof(nstat_msg_src_update, data) +
	    src->provider->nstat_descriptor_length;
	size_t total_extension_size = 0;
	u_int32_t num_extensions = 0;
	u_int64_t extension_mask = nstat_extension_flags_for_source(state, src);

	// First pass: size each requested extension (a NULL buffer asks the
	// provider for the required length) and drop any the provider declines.
	if ((extension_mask != 0) && (src->provider->nstat_copy_extension != NULL)) {
		uint32_t extension_id = 0;
		for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) {
			if ((extension_mask & (1ull << extension_id)) != 0) {
				size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, NULL, 0);
				if (extension_size == 0) {
					extension_mask &= ~(1ull << extension_id);
				} else {
					num_extensions++;
					total_extension_size += ROUNDUP64(extension_size);
				}
			}
		}
		size += total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions);
	}
	assert(size <= MAX_NSTAT_MSG_HDR_LENGTH);

	/*
	 * XXX Would be interesting to see how extended updates affect mbuf
	 * allocations, given the max segments defined as 1, one may get
	 * allocations with higher fragmentation.
	 */
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
		return ENOMEM;
	}

	nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
	bzero(desc, size);
	desc->hdr.context = context;
	desc->hdr.type = (num_extensions == 0) ? NSTAT_MSG_TYPE_SRC_UPDATE :
	    NSTAT_MSG_TYPE_SRC_EXTENDED_UPDATE;
	desc->hdr.length = (u_int16_t)size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = event;
	desc->provider = src->provider->nstat_provider_id;

	/*
	 * XXX The following two lines are only valid when max-segments is passed
	 * as one.
	 * Other computations with offset also depend on that being true.
	 * Be aware of that before making any modifications that changes that
	 * behavior.
	 */
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	errno_t result = 0;
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
		    src->provider->nstat_descriptor_length);
		if (result != 0) {
			mbuf_freem(msg);
			return result;
		}
	}

	if (num_extensions > 0) {
		// Second pass: lay down [header][payload] pairs immediately after
		// the descriptor area.
		nstat_msg_src_extended_item_hdr *p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)mbuf_data(msg) +
		    sizeof(nstat_msg_src_update_hdr) + src->provider->nstat_descriptor_length);
		uint32_t extension_id = 0;

		bzero(p_extension_hdr, total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions));

		for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) {
			if ((extension_mask & (1ull << extension_id)) != 0) {
				void *buf = (void *)(p_extension_hdr + 1);
				size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, buf, total_extension_size);
				if ((extension_size == 0) || (extension_size > total_extension_size)) {
					// Something has gone wrong. Instead of attempting to wind back the excess buffer space, mark it as unused
					p_extension_hdr->type = NSTAT_EXTENDED_UPDATE_TYPE_UNKNOWN;
					p_extension_hdr->length = total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * (num_extensions - 1));
					break;
				} else {
					// The extension may be of any size alignment, reported as such in the extension header,
					// but we pad to ensure that whatever comes next is suitably aligned
					p_extension_hdr->type = extension_id;
					p_extension_hdr->length = extension_size;
					extension_size = ROUNDUP64(extension_size);
					total_extension_size -= extension_size;
					p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)buf + extension_size);
					num_extensions--;
				}
			}
		}
	}

	if (src->provider->nstat_counts) {
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result == 0) {
			if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
			    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
				result = EAGAIN;
			} else {
				result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
			}
		}
	}

	if (result != 0) {
		nstat_stats.nstat_srcupatefailures += 1;
		// Enqueue failed or message filtered: the mbuf is still ours to free.
		mbuf_freem(msg);
	} else {
		src->ns_reported = true;
	}

	return result;
}
5728
5729 static errno_t
nstat_control_append_update(nstat_control_state * state,nstat_src * src,int * gone)5730 nstat_control_append_update(
5731 nstat_control_state *state,
5732 nstat_src *src,
5733 int *gone)
5734 {
5735 if ((src->provider->nstat_descriptor_length == 0 ||
5736 src->provider->nstat_copy_descriptor == NULL) &&
5737 src->provider->nstat_counts == NULL) {
5738 return EOPNOTSUPP;
5739 }
5740
5741 size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
5742 size_t total_extension_size = 0;
5743 u_int32_t num_extensions = 0;
5744 u_int64_t extension_mask = nstat_extension_flags_for_source(state, src);
5745
5746 if ((extension_mask != 0) && (src->provider->nstat_copy_extension != NULL)) {
5747 uint32_t extension_id = 0;
5748 for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) {
5749 if ((extension_mask & (1ull << extension_id)) != 0) {
5750 size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, NULL, 0);
5751 if (extension_size == 0) {
5752 extension_mask &= ~(1ull << extension_id);
5753 } else {
5754 num_extensions++;
5755 total_extension_size += ROUNDUP64(extension_size);
5756 }
5757 }
5758 }
5759 size += total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions);
5760 }
5761
5762 /*
5763 * This kind of limits extensions.
5764 * The optimization is around being able to deliver multiple
5765 * of updates bundled together.
5766 * Increasing the size runs the risk of too much stack usage.
5767 * One could potentially changed the allocation below to be on heap.
5768 * For now limiting it to half of NSTAT_MAX_MSG_SIZE.
5769 */
5770 if (size > (NSTAT_MAX_MSG_SIZE >> 1)) {
5771 return EOPNOTSUPP;
5772 }
5773
5774 // Fill out a buffer on the stack, we will copy to the mbuf later
5775 u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
5776 bzero(buffer, size);
5777
5778 nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer;
5779 desc->hdr.type = (num_extensions == 0) ? NSTAT_MSG_TYPE_SRC_UPDATE :
5780 NSTAT_MSG_TYPE_SRC_EXTENDED_UPDATE;
5781 desc->hdr.length = (u_int16_t)size;
5782 desc->srcref = src->srcref;
5783 desc->event_flags = 0;
5784 desc->provider = src->provider->nstat_provider_id;
5785
5786 errno_t result = 0;
5787 // Fill in the description
5788 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
5789 // Query the provider for the provider specific bits
5790 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
5791 src->provider->nstat_descriptor_length);
5792 if (result != 0) {
5793 nstat_stats.nstat_copy_descriptor_failures++;
5794 if (nstat_debug != 0) {
5795 printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
5796 }
5797 return result;
5798 }
5799 }
5800
5801 if (num_extensions > 0) {
5802 nstat_msg_src_extended_item_hdr *p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)buffer +
5803 sizeof(nstat_msg_src_update_hdr) + src->provider->nstat_descriptor_length);
5804 uint32_t extension_id = 0;
5805 bzero(p_extension_hdr, total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions));
5806
5807 for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) {
5808 if ((extension_mask & (1ull << extension_id)) != 0) {
5809 void *buf = (void *)(p_extension_hdr + 1);
5810 size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, buf, total_extension_size);
5811 if ((extension_size == 0) || (extension_size > total_extension_size)) {
5812 // Something has gone wrong. Instead of attempting to wind back the excess buffer space, mark it as unused
5813 p_extension_hdr->type = NSTAT_EXTENDED_UPDATE_TYPE_UNKNOWN;
5814 p_extension_hdr->length = total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * (num_extensions - 1));
5815 break;
5816 } else {
5817 extension_size = ROUNDUP64(extension_size);
5818 p_extension_hdr->type = extension_id;
5819 p_extension_hdr->length = extension_size;
5820 total_extension_size -= extension_size;
5821 p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)buf + extension_size);
5822 num_extensions--;
5823 }
5824 }
5825 }
5826 }
5827
5828 if (src->provider->nstat_counts) {
5829 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
5830 if (result != 0) {
5831 nstat_stats.nstat_provider_counts_failures++;
5832 if (nstat_debug != 0) {
5833 printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
5834 }
5835 return result;
5836 }
5837
5838 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
5839 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
5840 return EAGAIN;
5841 }
5842 }
5843
5844 result = nstat_accumulate_msg(state, &desc->hdr, size);
5845 if (result == 0) {
5846 src->ns_reported = true;
5847 }
5848 return result;
5849 }
5850
5851 static errno_t
nstat_control_send_removed(nstat_control_state * state,nstat_src * src,u_int16_t hdr_flags)5852 nstat_control_send_removed(
5853 nstat_control_state *state,
5854 nstat_src *src,
5855 u_int16_t hdr_flags)
5856 {
5857 nstat_msg_src_removed removed;
5858 errno_t result;
5859
5860 bzero(&removed, sizeof(removed));
5861 removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
5862 removed.hdr.length = sizeof(removed);
5863 removed.hdr.context = 0;
5864 removed.hdr.flags = hdr_flags;
5865 removed.srcref = src->srcref;
5866 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
5867 sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
5868 if (result != 0) {
5869 nstat_stats.nstat_msgremovedfailures += 1;
5870 }
5871
5872 return result;
5873 }
5874
// Handle an ADD_SRC request from the client: validate sizes, look up the
// provider entry described by the request's parameter blob, and register the
// resulting cookie as a new source on this control.  On failure after the
// lookup succeeded, the provider reference is released here.
static errno_t
nstat_control_handle_add_request(
	nstat_control_state *state,
	mbuf_t m)
{
	errno_t result;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param)) {
		return EINVAL;
	}

	// Calculate the length of the parameter field
	ssize_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
	// Reject negative (truncated packet) or oversized (>2KiB) parameters.
	if (paramlength < 0 || paramlength > 2 * 1024) {
		return EINVAL;
	}

	nstat_provider *provider = NULL;
	nstat_provider_cookie_t cookie = NULL;
	nstat_msg_add_src_req *req = mbuf_data(m);
	if (mbuf_pkthdr_len(m) > mbuf_len(m)) {
		// parameter is too large, we need to make a contiguous copy
		void *data = (void *) kalloc_data(paramlength, Z_WAITOK);

		if (!data) {
			return ENOMEM;
		}
		result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
		if (result == 0) {
			result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
		}
		kfree_data(data, paramlength);
	} else {
		// The whole request is contiguous in the first mbuf; use it in place.
		result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
	}

	if (result != 0) {
		return result;
	}

	result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
	if (result != 0) {
		// Source registration failed; drop the cookie obtained by lookup.
		provider->nstat_release(cookie, 0);
	}

	return result;
}
5923
5924 static errno_t
nstat_set_provider_filter(nstat_control_state * state,nstat_msg_add_all_srcs * req)5925 nstat_set_provider_filter(
5926 nstat_control_state *state,
5927 nstat_msg_add_all_srcs *req)
5928 {
5929 nstat_provider_id_t provider_id = req->provider;
5930
5931 u_int32_t prev_ncs_watching = atomic_or_32_ov(&state->ncs_watching, (1 << provider_id));
5932
5933 if ((prev_ncs_watching & (1 << provider_id)) != 0) {
5934 return EALREADY;
5935 }
5936
5937 state->ncs_watching |= (1 << provider_id);
5938 state->ncs_provider_filters[provider_id].npf_events = req->events;
5939 state->ncs_provider_filters[provider_id].npf_flags = req->filter;
5940
5941 // The extensions should be populated by a more direct mechanism
5942 // Using the top 32 bits of the filter flags reduces the namespace of both,
5943 // but is a convenient workaround that avoids ntstat.h changes that would require rebuild of all clients
5944 state->ncs_provider_filters[provider_id].npf_extensions = (req->filter >> NSTAT_FILTER_ALLOWED_EXTENSIONS_SHIFT) & NSTAT_EXTENDED_UPDATE_FLAG_MASK;
5945 state->ncs_provider_filters[provider_id].npf_pid = req->target_pid;
5946 uuid_copy(state->ncs_provider_filters[provider_id].npf_uuid, req->target_uuid);
5947 return 0;
5948 }
5949
// Handle an ADD_ALL_SRCS request: validate the provider, enforce the
// privilege check when enabled, record whether update messages are implied,
// and hand the request to the provider's watcher-add callback, which applies
// the filter atomically with the enumeration of existing entries.
static errno_t
nstat_control_handle_add_all(
	nstat_control_state *state,
	mbuf_t m)
{
	errno_t result = 0;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs)) {
		return EINVAL;
	}

	nstat_msg_add_all_srcs *req = mbuf_data(m);
	if (req->provider > NSTAT_PROVIDER_LAST) {
		return ENOENT;
	}

	nstat_provider *provider = nstat_find_provider_by_id(req->provider);

	if (!provider) {
		return ENOENT;
	}
	// Providers without watcher support cannot satisfy an add-all request.
	if (provider->nstat_watcher_add == NULL) {
		return ENOTSUP;
	}

	// When the privilege check is enabled, require the caller to hold the
	// privileged-network-statistics entitlement.
	if (nstat_privcheck != 0) {
		result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0) {
			return result;
		}
	}

	lck_mtx_lock(&state->ncs_mtx);
	if (req->filter & NSTAT_FILTER_SUPPRESS_SRC_ADDED) {
		// Suppression of source messages implicitly requires the use of update messages
		state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
	}
	lck_mtx_unlock(&state->ncs_mtx);

	// rdar://problem/30301300 Different providers require different synchronization
	// to ensure that a new entry does not get double counted due to being added prior
	// to all current provider entries being added. Hence pass the provider the details
	// in the original request for this to be applied atomically

	result = provider->nstat_watcher_add(state, req);

	if (result == 0) {
		// Acknowledge the request to the client.
		nstat_enqueue_success(req->hdr.context, state, 0);
	}

	return result;
}
6004
// Register a provider cookie as a new source on a control.  Optionally
// (unless the client's filter suppresses it) pre-builds a SRC_ADDED message
// before taking the lock, then fills in the source under ncs_mtx, sends the
// message, and links the source into the control's queue.  On any failure
// the source struct and message are freed; the caller still owns the cookie.
static errno_t
nstat_control_source_add(
	u_int64_t context,
	nstat_control_state *state,
	nstat_provider *provider,
	nstat_provider_cookie_t cookie)
{
	// Fill out source added message if appropriate
	mbuf_t msg = NULL;
	nstat_src_ref_t *srcrefp = NULL;

	u_int64_t provider_filter_flags =
	    state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
	boolean_t tell_user =
	    ((provider_filter_flags & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
	// Translate provider-level filter flags into per-source filter bits.
	u_int32_t src_filter =
	    (provider_filter_flags & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
	    ? NSTAT_FILTER_NOZEROBYTES : 0;

	if (provider_filter_flags & NSTAT_FILTER_TCP_NO_EARLY_CLOSE) {
		src_filter |= NSTAT_FILTER_TCP_NO_EARLY_CLOSE;
	}

	if (tell_user) {
		unsigned int one = 1;

		// Allocate the SRC_ADDED message up front, before taking the lock.
		if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
		    &one, &msg) != 0) {
			return ENOMEM;
		}

		mbuf_setlen(msg, sizeof(nstat_msg_src_added));
		mbuf_pkthdr_setlen(msg, mbuf_len(msg));
		nstat_msg_src_added *add = mbuf_data(msg);
		bzero(add, sizeof(*add));
		add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
		assert(mbuf_len(msg) <= MAX_NSTAT_MSG_HDR_LENGTH);
		add->hdr.length = (u_int16_t)mbuf_len(msg);
		add->hdr.context = context;
		add->provider = provider->nstat_provider_id;
		// The srcref is not known yet; remember where to write it later.
		srcrefp = &add->srcref;
	}

	// Allocate storage for the source
	nstat_src *src = kalloc_type(struct nstat_src, Z_WAITOK);
	if (src == NULL) {
		if (msg) {
			mbuf_freem(msg);
		}
		return ENOMEM;
	}

	// Fill in the source, including picking an unused source ref
	lck_mtx_lock(&state->ncs_mtx);

	src->srcref = nstat_control_next_src_ref(state);
	if (srcrefp) {
		*srcrefp = src->srcref;
	}

	// Refuse new sources while the control is tearing down, or if the
	// srcref counter produced the invalid sentinel.
	if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID) {
		lck_mtx_unlock(&state->ncs_mtx);
		kfree_type(struct nstat_src, src);
		if (msg) {
			mbuf_freem(msg);
		}
		return EINVAL;
	}
	src->provider = provider;
	src->cookie = cookie;
	src->filter = src_filter;
	src->seq = 0;

	if (msg) {
		// send the source added message if appropriate
		errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
		    CTL_DATA_EOR);
		if (result != 0) {
			nstat_stats.nstat_srcaddedfailures += 1;
			lck_mtx_unlock(&state->ncs_mtx);
			kfree_type(struct nstat_src, src);
			mbuf_freem(msg);
			return result;
		}
	}
	// Put the source in the list
	TAILQ_INSERT_HEAD(&state->ncs_src_queue, src, ns_control_link);
	src->ns_control = state;

	lck_mtx_unlock(&state->ncs_mtx);

	return 0;
}
6098
6099 static errno_t
nstat_control_handle_remove_request(nstat_control_state * state,mbuf_t m)6100 nstat_control_handle_remove_request(
6101 nstat_control_state *state,
6102 mbuf_t m)
6103 {
6104 nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
6105 nstat_src *src;
6106
6107 if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0) {
6108 return EINVAL;
6109 }
6110
6111 lck_mtx_lock(&state->ncs_mtx);
6112
6113 // Remove this source as we look for it
6114 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
6115 {
6116 if (src->srcref == srcref) {
6117 break;
6118 }
6119 }
6120 if (src) {
6121 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
6122 }
6123
6124 lck_mtx_unlock(&state->ncs_mtx);
6125
6126 if (src) {
6127 nstat_control_cleanup_source(state, src, FALSE);
6128 }
6129
6130 return src ? 0 : ENOENT;
6131 }
6132
/*
 * Handle NSTAT_MSG_TYPE_QUERY_SRC: enqueue counts for one source
 * (req.srcref) or for all sources (NSTAT_SRC_REF_ALL).  Supports
 * partial/continuation queries via the sequence-number scheme managed
 * by nstat_control_begin_query()/nstat_control_end_query().  Sources
 * found to be gone are unlinked under the lock and cleaned up after
 * the lock is dropped.
 *
 * Returns 0 on success (or when success was reported to the client),
 * EINVAL on a malformed request, or the last enqueue error.
 */
static errno_t
nstat_control_handle_query_request(
	nstat_control_state     *state,
	mbuf_t                  m)
{
	// TBD: handle this from another thread so we can enqueue a lot of data
	// As written, if a client requests query all, this function will be
	// called from their send of the request message. We will attempt to write
	// responses and succeed until the buffer fills up. Since the clients thread
	// is blocked on send, it won't be reading unless the client has two threads
	// using this socket, one for read and one for write. Two threads probably
	// won't work with this code anyhow since we don't have proper locking in
	// place yet.
	tailq_head_nstat_src    dead_list;
	errno_t                 result = ENOENT;
	nstat_msg_query_src_req req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}

	TAILQ_INIT(&dead_list);
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	lck_mtx_lock(&state->ncs_mtx);

	if (all_srcs) {
		state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
	}
	nstat_src       *src, *tmpsrc;
	u_int64_t       src_count = 0;
	boolean_t       partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);


	TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
	{
		int     gone = 0;

		// XXX ignore IFACE types?
		if (all_srcs || src->srcref == req.srcref) {
			// On a continuation sweep, src->seq == state->ncs_seq marks
			// a source already reported this round; skip it.
			if (nstat_control_reporting_allowed(state, src, 0)
			    && (!partial || !all_srcs || src->seq != state->ncs_seq)) {
				if (all_srcs &&
				    (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0) {
					result = nstat_control_append_counts(state, src, &gone);
				} else {
					result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
				}

				if (ENOMEM == result || ENOBUFS == result) {
					/*
					 * If the counts message failed to
					 * enqueue then we should clear our flag so
					 * that a client doesn't miss anything on
					 * idle cleanup. We skip the "gone"
					 * processing in the hope that we may
					 * catch it another time.
					 */
					state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
					break;
				}
				if (partial) {
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
		}

		if (gone) {
			// send one last descriptor message so client may see last state
			// If we can't send the notification now, it
			// will be sent in the idle cleanup.
			result = nstat_control_send_description(state, src, 0, 0);
			if (result != 0) {
				nstat_stats.nstat_control_send_description_failures++;
				if (nstat_debug != 0) {
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
				}
				state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
				break;
			}

			// pull src out of the list
			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}

		if (all_srcs) {
			// Cap the work done per request; a continuation resumes here.
			if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
				break;
			}
		} else if (req.srcref == src->srcref) {
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL) {
		flags = nstat_control_end_query(state, src, partial);
	}

	lck_mtx_unlock(&state->ncs_mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result) {
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	// Release dead sources outside the lock (cleanup may notify providers).
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
6265
/*
 * Handle NSTAT_MSG_TYPE_GET_SRC_DESC: enqueue the descriptor for one
 * source (req.srcref) or for all sources (NSTAT_SRC_REF_ALL), with
 * partial/continuation support mirroring the query-counts path.
 *
 * Returns 0 on success (or when success was reported to the client),
 * EINVAL on a malformed request, or the last enqueue error.
 */
static errno_t
nstat_control_handle_get_src_description(
	nstat_control_state     *state,
	mbuf_t                  m)
{
	nstat_msg_get_src_description  req;
	errno_t result = ENOENT;
	nstat_src *src;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}

	lck_mtx_lock(&state->ncs_mtx);
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
	{
		if (all_srcs || src->srcref == req.srcref) {
			// On a continuation sweep, skip sources already reported
			// this round (src->seq == state->ncs_seq).
			if (nstat_control_reporting_allowed(state, src, 0)
			    && (!all_srcs || !partial || src->seq != state->ncs_seq)) {
				if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs) {
					result = nstat_control_append_description(state, src);
				} else {
					result = nstat_control_send_description(state, src, req.hdr.context, 0);
				}

				if (ENOMEM == result || ENOBUFS == result) {
					/*
					 * If the description message failed to
					 * enqueue then we give up for now.
					 */
					break;
				}
				if (partial) {
					/*
					 * Note, we skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
					// Cap per-request work; a continuation resumes here.
					if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
						break;
					}
				}
			}

			if (!all_srcs) {
				break;
			}
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL) {
		flags = nstat_control_end_query(state, src, partial);
	}

	lck_mtx_unlock(&state->ncs_mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result) {
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	return result;
}
6346
6347 static errno_t
nstat_control_handle_set_filter(nstat_control_state * state,mbuf_t m)6348 nstat_control_handle_set_filter(
6349 nstat_control_state *state,
6350 mbuf_t m)
6351 {
6352 nstat_msg_set_filter req;
6353 nstat_src *src;
6354
6355 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
6356 return EINVAL;
6357 }
6358 if (req.srcref == NSTAT_SRC_REF_ALL ||
6359 req.srcref == NSTAT_SRC_REF_INVALID) {
6360 return EINVAL;
6361 }
6362
6363 lck_mtx_lock(&state->ncs_mtx);
6364 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
6365 {
6366 if (req.srcref == src->srcref) {
6367 src->filter = req.filter;
6368 break;
6369 }
6370 }
6371 lck_mtx_unlock(&state->ncs_mtx);
6372 if (src == NULL) {
6373 return ENOENT;
6374 }
6375
6376 return 0;
6377 }
6378
6379 static void
nstat_send_error(nstat_control_state * state,u_int64_t context,u_int32_t error)6380 nstat_send_error(
6381 nstat_control_state *state,
6382 u_int64_t context,
6383 u_int32_t error)
6384 {
6385 errno_t result;
6386 struct nstat_msg_error err;
6387
6388 bzero(&err, sizeof(err));
6389 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
6390 err.hdr.length = sizeof(err);
6391 err.hdr.context = context;
6392 err.error = error;
6393
6394 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
6395 sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
6396 if (result != 0) {
6397 nstat_stats.nstat_msgerrorfailures++;
6398 }
6399 }
6400
6401 static boolean_t
nstat_control_begin_query(nstat_control_state * state,const nstat_msg_hdr * hdrp)6402 nstat_control_begin_query(
6403 nstat_control_state *state,
6404 const nstat_msg_hdr *hdrp)
6405 {
6406 boolean_t partial = FALSE;
6407
6408 if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) {
6409 /* A partial query all has been requested. */
6410 partial = TRUE;
6411
6412 if (state->ncs_context != hdrp->context) {
6413 if (state->ncs_context != 0) {
6414 nstat_send_error(state, state->ncs_context, EAGAIN);
6415 }
6416
6417 /* Initialize state for a partial query all. */
6418 state->ncs_context = hdrp->context;
6419 state->ncs_seq++;
6420 }
6421 }
6422
6423 return partial;
6424 }
6425
6426 static u_int16_t
nstat_control_end_query(nstat_control_state * state,nstat_src * last_src,boolean_t partial)6427 nstat_control_end_query(
6428 nstat_control_state *state,
6429 nstat_src *last_src,
6430 boolean_t partial)
6431 {
6432 u_int16_t flags = 0;
6433
6434 if (last_src == NULL || !partial) {
6435 /*
6436 * We iterated through the entire srcs list or exited early
6437 * from the loop when a partial update was not requested (an
6438 * error occurred), so clear context to indicate internally
6439 * that the query is finished.
6440 */
6441 state->ncs_context = 0;
6442 } else {
6443 /*
6444 * Indicate to userlevel to make another partial request as
6445 * there are still sources left to be reported.
6446 */
6447 flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
6448 }
6449
6450 return flags;
6451 }
6452
/*
 * Handle NSTAT_MSG_TYPE_GET_UPDATE: enqueue combined update messages
 * for one source or all sources, with partial/continuation support.
 * Also marks the client as update-capable (NSTAT_FLAG_SUPPORTS_UPDATES).
 * Sources found to be gone are unlinked under the lock and cleaned up
 * after it is dropped.
 *
 * Returns 0 on success (or when success was reported to the client),
 * EINVAL on a malformed request, or the last enqueue error.
 */
static errno_t
nstat_control_handle_get_update(
	nstat_control_state     *state,
	mbuf_t                  m)
{
	nstat_msg_query_src_req req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}

	lck_mtx_lock(&state->ncs_mtx);

	// The client asked for updates, remember that it supports them.
	state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;

	errno_t         result = ENOENT;
	nstat_src       *src, *tmpsrc;
	tailq_head_nstat_src dead_list;
	u_int64_t       src_count = 0;
	boolean_t       partial = FALSE;
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
	TAILQ_INIT(&dead_list);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc) {
		int gone = 0;
		if (all_srcs) {
			// Check to see if we should handle this source or if we're still skipping to find where to continue
			if ((FALSE == partial || src->seq != state->ncs_seq)) {
				// Sources already reported once may suppress "boring" polls.
				u_int64_t suppression_flags = (src->ns_reported)? NSTAT_FILTER_SUPPRESS_BORING_POLL: 0;
				if (nstat_control_reporting_allowed(state, src, suppression_flags)) {
					result = nstat_control_append_update(state, src, &gone);
					if (ENOMEM == result || ENOBUFS == result) {
						/*
						 * If the update message failed to
						 * enqueue then give up.
						 */
						break;
					}
					if (partial) {
						/*
						 * We skip over hard errors and
						 * filtered sources.
						 */
						src->seq = state->ncs_seq;
						src_count++;
					}
				}
			}
		} else if (src->srcref == req.srcref) {
			if (nstat_control_reporting_allowed(state, src, 0)) {
				result = nstat_control_send_update(state, src, req.hdr.context, 0, 0, &gone);
			}
		}

		if (gone) {
			// pull src out of the list
			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}

		if (!all_srcs && req.srcref == src->srcref) {
			break;
		}
		// Cap per-request work; a continuation resumes here.
		if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);


	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL) {
		flags = nstat_control_end_query(state, src, partial);
	}

	lck_mtx_unlock(&state->ncs_mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result) {
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
6554
6555 static errno_t
nstat_control_handle_subscribe_sysinfo(nstat_control_state * state)6556 nstat_control_handle_subscribe_sysinfo(
6557 nstat_control_state *state)
6558 {
6559 errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
6560
6561 if (result != 0) {
6562 return result;
6563 }
6564
6565 lck_mtx_lock(&state->ncs_mtx);
6566 state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
6567 lck_mtx_unlock(&state->ncs_mtx);
6568
6569 return 0;
6570 }
6571
/*
 * Kernel-control send handler: validates and dispatches one request
 * message from a client to the matching NSTAT_MSG_TYPE_* handler.
 * On handler failure an NSTAT_MSG_TYPE_ERROR reply is enqueued back to
 * the client, with the original request appended when it can be
 * prepended to.  The mbuf is consumed on all paths.
 */
static errno_t
nstat_control_send(
	kern_ctl_ref    kctl,
	u_int32_t       unit,
	void            *uinfo,
	mbuf_t          m,
	__unused int    flags)
{
	nstat_control_state     *state = (nstat_control_state*)uinfo;
	struct nstat_msg_hdr    *hdr;
	struct nstat_msg_hdr    storage;
	errno_t                 result = 0;

	if (mbuf_pkthdr_len(m) < sizeof(*hdr)) {
		// Is this the right thing to do?
		mbuf_freem(m);
		return EINVAL;
	}

	// Use the header in place when it is contiguous in the first mbuf;
	// otherwise copy it out to local storage.
	if (mbuf_len(m) >= sizeof(*hdr)) {
		hdr = mbuf_data(m);
	} else {
		mbuf_copydata(m, 0, sizeof(storage), &storage);
		hdr = &storage;
	}

	// Legacy clients may not set the length
	// Those clients are likely not setting the flags either
	// Fix everything up so old clients continue to work
	if (hdr->length != mbuf_pkthdr_len(m)) {
		hdr->flags = 0;
		assert(mbuf_pkthdr_len(m) <= MAX_NSTAT_MSG_HDR_LENGTH);
		hdr->length = (u_int16_t)mbuf_pkthdr_len(m);
		if (hdr == &storage) {
			// Header was a local copy; write the fixups back into the mbuf.
			mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
		}
	}

	switch (hdr->type) {
	case NSTAT_MSG_TYPE_ADD_SRC:
		result = nstat_control_handle_add_request(state, m);
		break;

	case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
		result = nstat_control_handle_add_all(state, m);
		break;

	case NSTAT_MSG_TYPE_REM_SRC:
		result = nstat_control_handle_remove_request(state, m);
		break;

	case NSTAT_MSG_TYPE_QUERY_SRC:
		result = nstat_control_handle_query_request(state, m);
		break;

	case NSTAT_MSG_TYPE_GET_SRC_DESC:
		result = nstat_control_handle_get_src_description(state, m);
		break;

	case NSTAT_MSG_TYPE_SET_FILTER:
		result = nstat_control_handle_set_filter(state, m);
		break;

	case NSTAT_MSG_TYPE_GET_UPDATE:
		result = nstat_control_handle_get_update(state, m);
		break;

	case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
		result = nstat_control_handle_subscribe_sysinfo(state);
		break;

	default:
		result = EINVAL;
		break;
	}

	if (result != 0) {
		struct nstat_msg_error  err;

		bzero(&err, sizeof(err));
		err.hdr.type = NSTAT_MSG_TYPE_ERROR;
		// Error reply wraps the original request to aid diagnosis.
		err.hdr.length = (u_int16_t)(sizeof(err) + mbuf_pkthdr_len(m));
		err.hdr.context = hdr->context;
		err.error = result;

		if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
		    mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0) {
			result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0) {
				mbuf_freem(m);
			}
			// mbuf ownership transferred (enqueued or freed above).
			m = NULL;
		}

		if (result != 0) {
			// Unable to prepend the error to the request - just send the error
			err.hdr.length = sizeof(err);
			result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
			    CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0) {
				nstat_stats.nstat_msgerrorfailures += 1;
			}
		}
		nstat_stats.nstat_handle_msg_failures += 1;
	}

	if (m) {
		mbuf_freem(m);
	}

	return result;
}
6684
6685
6686 /* Performs interface matching based on NSTAT_IFNET_IS… filter flags provided by an external caller */
6687 static bool
nstat_interface_matches_filter_flag(uint32_t filter_flags,struct ifnet * ifp)6688 nstat_interface_matches_filter_flag(uint32_t filter_flags, struct ifnet *ifp)
6689 {
6690 bool result = false;
6691
6692 if (ifp) {
6693 uint32_t flag_mask = (NSTAT_FILTER_IFNET_FLAGS & ~(NSTAT_IFNET_IS_NON_LOCAL | NSTAT_IFNET_IS_LOCAL));
6694 filter_flags &= flag_mask;
6695
6696 uint32_t flags = nstat_ifnet_to_flags_extended(ifp);
6697 if (filter_flags & flags) {
6698 result = true;
6699 }
6700 }
6701 return result;
6702 }
6703
6704
6705 static int
tcp_progress_indicators_for_interface(unsigned int ifindex,uint64_t recentflow_maxduration,uint32_t filter_flags,struct xtcpprogress_indicators * indicators)6706 tcp_progress_indicators_for_interface(unsigned int ifindex, uint64_t recentflow_maxduration, uint32_t filter_flags, struct xtcpprogress_indicators *indicators)
6707 {
6708 int error = 0;
6709 struct inpcb *inp;
6710 uint64_t min_recent_start_time;
6711 #if SKYWALK
6712 struct nstat_tu_shadow *shad;
6713 #endif /* SKYWALK */
6714
6715 min_recent_start_time = mach_continuous_time() - recentflow_maxduration;
6716 bzero(indicators, sizeof(*indicators));
6717
6718 #if NSTAT_DEBUG
6719 /* interface index -1 may be passed in to only match against the filters specified in the flags */
6720 if (ifindex < UINT_MAX) {
6721 printf("%s - for interface index %u with flags %x\n", __func__, ifindex, filter_flags);
6722 } else {
6723 printf("%s - for matching interface with flags %x\n", __func__, filter_flags);
6724 }
6725 #endif
6726
6727 lck_rw_lock_shared(&tcbinfo.ipi_lock);
6728 /*
6729 * For progress indicators we don't need to special case TCP to collect time wait connections
6730 */
6731 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
6732 {
6733 struct tcpcb *tp = intotcpcb(inp);
6734 /* radar://57100452
6735 * The conditional logic implemented below performs an *inclusive* match based on the desired interface index in addition to any filter values.
6736 * While the general expectation is that only one criteria normally is used for queries, the capability exists satisfy any eccentric future needs.
6737 */
6738 if (tp &&
6739 inp->inp_state != INPCB_STATE_DEAD &&
6740 inp->inp_last_outifp &&
6741 /* matches the given interface index, or against any provided filter flags */
6742 (((inp->inp_last_outifp->if_index == ifindex) ||
6743 nstat_interface_matches_filter_flag(filter_flags, inp->inp_last_outifp)) &&
6744 /* perform flow state matching based any provided filter flags */
6745 (((filter_flags & (NSTAT_IFNET_IS_NON_LOCAL | NSTAT_IFNET_IS_LOCAL)) == 0) ||
6746 ((filter_flags & NSTAT_IFNET_IS_NON_LOCAL) && !(tp->t_flags & TF_LOCAL)) ||
6747 ((filter_flags & NSTAT_IFNET_IS_LOCAL) && (tp->t_flags & TF_LOCAL))))) {
6748 struct tcp_conn_status connstatus;
6749 #if NSTAT_DEBUG
6750 printf("%s - *matched non-Skywalk* [filter match: %d]\n", __func__, nstat_interface_matches_filter_flag(filter_flags, inp->inp_last_outifp));
6751 #endif
6752 indicators->xp_numflows++;
6753 tcp_get_connectivity_status(tp, &connstatus);
6754 if (connstatus.write_probe_failed) {
6755 indicators->xp_write_probe_fails++;
6756 }
6757 if (connstatus.read_probe_failed) {
6758 indicators->xp_read_probe_fails++;
6759 }
6760 if (connstatus.conn_probe_failed) {
6761 indicators->xp_conn_probe_fails++;
6762 }
6763 if (inp->inp_start_timestamp > min_recent_start_time) {
6764 uint64_t flow_count;
6765
6766 indicators->xp_recentflows++;
6767 atomic_get_64(flow_count, &inp->inp_stat->rxbytes);
6768 indicators->xp_recentflows_rxbytes += flow_count;
6769 atomic_get_64(flow_count, &inp->inp_stat->txbytes);
6770 indicators->xp_recentflows_txbytes += flow_count;
6771
6772 indicators->xp_recentflows_rxooo += tp->t_stat.rxoutoforderbytes;
6773 indicators->xp_recentflows_rxdup += tp->t_stat.rxduplicatebytes;
6774 indicators->xp_recentflows_retx += tp->t_stat.txretransmitbytes;
6775 if (tp->snd_max - tp->snd_una) {
6776 indicators->xp_recentflows_unacked++;
6777 }
6778 }
6779 }
6780 }
6781 lck_rw_done(&tcbinfo.ipi_lock);
6782
6783 #if SKYWALK
6784 lck_mtx_lock(&nstat_mtx);
6785
6786 TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
6787 assert(shad->shad_magic == TU_SHADOW_MAGIC);
6788
6789 if ((shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) && (shad->shad_live)) {
6790 u_int16_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6791 u_int32_t extended_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6792 if (filter_flags != 0) {
6793 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL);
6794 error = (result)? 0 : EIO;
6795 if (error) {
6796 printf("%s - nstat get ifflags %d\n", __func__, error);
6797 continue;
6798 }
6799 extended_ifflags = extend_ifnet_flags(ifflags);
6800
6801 if ((extended_ifflags & filter_flags) == 0) {
6802 continue;
6803 }
6804 // Skywalk locality flags are not yet in place, see <rdar://problem/35607563>
6805 // Instead of checking flags with a simple logical and, check the inverse.
6806 // This allows for default action of fallthrough if the flags are not set.
6807 if ((filter_flags & NSTAT_IFNET_IS_NON_LOCAL) && (ifflags & NSTAT_IFNET_IS_LOCAL)) {
6808 continue;
6809 }
6810 if ((filter_flags & NSTAT_IFNET_IS_LOCAL) && (ifflags & NSTAT_IFNET_IS_NON_LOCAL)) {
6811 continue;
6812 }
6813 }
6814
6815 nstat_progress_digest digest;
6816 bzero(&digest, sizeof(digest));
6817 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &digest, NULL, NULL);
6818
6819 error = (result)? 0 : EIO;
6820 if (error) {
6821 printf("%s - nstat get progressdigest returned %d\n", __func__, error);
6822 continue;
6823 }
6824 if ((digest.ifindex == (u_int32_t)ifindex) ||
6825 (filter_flags & extended_ifflags)) {
6826 #if NSTAT_DEBUG
6827 printf("%s - *matched Skywalk* [filter match: %x %x]\n", __func__, filter_flags, extended_flags);
6828 #endif
6829 indicators->xp_numflows++;
6830 if (digest.connstatus.write_probe_failed) {
6831 indicators->xp_write_probe_fails++;
6832 }
6833 if (digest.connstatus.read_probe_failed) {
6834 indicators->xp_read_probe_fails++;
6835 }
6836 if (digest.connstatus.conn_probe_failed) {
6837 indicators->xp_conn_probe_fails++;
6838 }
6839 if (shad->shad_start_timestamp > min_recent_start_time) {
6840 indicators->xp_recentflows++;
6841 indicators->xp_recentflows_rxbytes += digest.rxbytes;
6842 indicators->xp_recentflows_txbytes += digest.txbytes;
6843 indicators->xp_recentflows_rxooo += digest.rxoutoforderbytes;
6844 indicators->xp_recentflows_rxdup += digest.rxduplicatebytes;
6845 indicators->xp_recentflows_retx += digest.txretransmit;
6846 if (digest.txunacked) {
6847 indicators->xp_recentflows_unacked++;
6848 }
6849 }
6850 }
6851 }
6852 }
6853
6854 lck_mtx_unlock(&nstat_mtx);
6855
6856 #endif /* SKYWALK */
6857 return error;
6858 }
6859
6860
6861 static int
tcp_progress_probe_enable_for_interface(unsigned int ifindex,uint32_t filter_flags,uint32_t enable_flags)6862 tcp_progress_probe_enable_for_interface(unsigned int ifindex, uint32_t filter_flags, uint32_t enable_flags)
6863 {
6864 int error = 0;
6865 struct ifnet *ifp;
6866
6867 #if NSTAT_DEBUG
6868 printf("%s - for interface index %u with flags %d\n", __func__, ifindex, filter_flags);
6869 #endif
6870
6871 ifnet_head_lock_shared();
6872 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
6873 {
6874 if ((ifp->if_index == ifindex) ||
6875 nstat_interface_matches_filter_flag(filter_flags, ifp)) {
6876 #if NSTAT_DEBUG
6877 printf("%s - *matched* interface index %d, enable: %d\n", __func__, ifp->if_index, enable_flags);
6878 #endif
6879 error = if_probe_connectivity(ifp, enable_flags);
6880 if (error) {
6881 printf("%s (%d) - nstat set tcp probe %d for interface index %d\n", __func__, error, enable_flags, ifp->if_index);
6882 }
6883 }
6884 }
6885 ifnet_head_done();
6886
6887 return error;
6888 }
6889
6890
6891 __private_extern__ int
ntstat_tcp_progress_indicators(struct sysctl_req * req)6892 ntstat_tcp_progress_indicators(struct sysctl_req *req)
6893 {
6894 struct xtcpprogress_indicators indicators = {};
6895 int error = 0;
6896 struct tcpprogressreq requested;
6897
6898 if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
6899 return EACCES;
6900 }
6901 if (req->newptr == USER_ADDR_NULL) {
6902 return EINVAL;
6903 }
6904 if (req->newlen < sizeof(req)) {
6905 return EINVAL;
6906 }
6907 error = SYSCTL_IN(req, &requested, sizeof(requested));
6908 if (error != 0) {
6909 return error;
6910 }
6911 error = tcp_progress_indicators_for_interface((unsigned int)requested.ifindex, requested.recentflow_maxduration, (uint32_t)requested.filter_flags, &indicators);
6912 if (error != 0) {
6913 return error;
6914 }
6915 error = SYSCTL_OUT(req, &indicators, sizeof(indicators));
6916
6917 return error;
6918 }
6919
6920
6921 __private_extern__ int
ntstat_tcp_progress_enable(struct sysctl_req * req)6922 ntstat_tcp_progress_enable(struct sysctl_req *req)
6923 {
6924 int error = 0;
6925 struct tcpprobereq requested;
6926
6927 if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
6928 return EACCES;
6929 }
6930 if (req->newptr == USER_ADDR_NULL) {
6931 return EINVAL;
6932 }
6933 if (req->newlen < sizeof(req)) {
6934 return EINVAL;
6935 }
6936 error = SYSCTL_IN(req, &requested, sizeof(requested));
6937 if (error != 0) {
6938 return error;
6939 }
6940 error = tcp_progress_probe_enable_for_interface((unsigned int)requested.ifindex, (uint32_t)requested.filter_flags, (uint32_t)requested.enable);
6941
6942 return error;
6943 }
6944
6945
6946 #if SKYWALK
6947
6948 #pragma mark -- netstat support for user level providers --
6949
/*
 * Snapshot of a single userland flow: its traffic counts plus a
 * protocol-specific descriptor.  The TCP and UDP descriptors share the
 * same storage; which member is valid depends on the provider id used
 * when the snapshot was gathered.
 */
typedef struct nstat_flow_data {
	nstat_counts            counts;         // rx/tx packet and byte counters
	union {
		nstat_udp_descriptor    udp_descriptor;
		nstat_tcp_descriptor    tcp_descriptor;
	} flow_descriptor;
} nstat_flow_data;
6957
/*
 * Fill flow_data with descriptor + counts for up to n live shadows of
 * the given provider.  Returns the number of entries actually filled.
 * NOTE(review): the only caller (nstat_userland_get_snapshot) invokes
 * this with nstat_mtx held; callers are presumed to do the same.
 */
static int
nstat_gather_flow_data(nstat_provider_id_t provider, nstat_flow_data *flow_data, int n)
{
	struct nstat_tu_shadow *shad;
	int prepared = 0;
	errno_t err;

	TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if ((shad->shad_provider == provider) && (shad->shad_live)) {
			// Stop once the caller-provided buffer is full.
			if (prepared >= n) {
				break;
			}
			err = nstat_userland_tu_copy_descriptor((nstat_provider_cookie_t) shad,
			    &flow_data->flow_descriptor, sizeof(flow_data->flow_descriptor));

			// Best effort: log failures but keep the slot and continue.
			if (err != 0) {
				printf("%s - nstat_userland_tu_copy_descriptor returned %d\n", __func__, err);
			}
			err = nstat_userland_tu_counts((nstat_provider_cookie_t) shad,
			    &flow_data->counts, NULL);
			if (err != 0) {
				printf("%s - nstat_userland_tu_counts returned %d\n", __func__, err);
			}
			flow_data++;
			prepared++;
		}
	}
	return prepared;
}
6989
6990 static void
nstat_userland_to_xinpcb_n(nstat_provider_id_t provider,nstat_flow_data * flow_data,struct xinpcb_n * xinp)6991 nstat_userland_to_xinpcb_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xinpcb_n *xinp)
6992 {
6993 xinp->xi_len = sizeof(struct xinpcb_n);
6994 xinp->xi_kind = XSO_INPCB;
6995
6996 if (provider == NSTAT_PROVIDER_TCP_USERLAND) {
6997 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
6998 struct sockaddr_in *sa = &desc->local.v4;
6999 if (sa->sin_family == AF_INET) {
7000 xinp->inp_vflag = INP_IPV4;
7001 xinp->inp_laddr = desc->local.v4.sin_addr;
7002 xinp->inp_lport = desc->local.v4.sin_port;
7003 xinp->inp_faddr = desc->remote.v4.sin_addr;
7004 xinp->inp_fport = desc->remote.v4.sin_port;
7005 } else if (sa->sin_family == AF_INET6) {
7006 xinp->inp_vflag = INP_IPV6;
7007 xinp->in6p_laddr = desc->local.v6.sin6_addr;
7008 xinp->in6p_lport = desc->local.v6.sin6_port;
7009 xinp->in6p_faddr = desc->remote.v6.sin6_addr;
7010 xinp->in6p_fport = desc->remote.v6.sin6_port;
7011 }
7012 } else if (provider == NSTAT_PROVIDER_UDP_USERLAND) {
7013 nstat_udp_descriptor *desc = &flow_data->flow_descriptor.udp_descriptor;
7014 struct sockaddr_in *sa = &desc->local.v4;
7015 if (sa->sin_family == AF_INET) {
7016 xinp->inp_vflag = INP_IPV4;
7017 xinp->inp_laddr = desc->local.v4.sin_addr;
7018 xinp->inp_lport = desc->local.v4.sin_port;
7019 xinp->inp_faddr = desc->remote.v4.sin_addr;
7020 xinp->inp_fport = desc->remote.v4.sin_port;
7021 } else if (sa->sin_family == AF_INET6) {
7022 xinp->inp_vflag = INP_IPV6;
7023 xinp->in6p_laddr = desc->local.v6.sin6_addr;
7024 xinp->in6p_lport = desc->local.v6.sin6_port;
7025 xinp->in6p_faddr = desc->remote.v6.sin6_addr;
7026 xinp->in6p_fport = desc->remote.v6.sin6_port;
7027 }
7028 }
7029 }
7030
7031 static void
nstat_userland_to_xsocket_n(nstat_provider_id_t provider,nstat_flow_data * flow_data,struct xsocket_n * xso)7032 nstat_userland_to_xsocket_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xsocket_n *xso)
7033 {
7034 xso->xso_len = sizeof(struct xsocket_n);
7035 xso->xso_kind = XSO_SOCKET;
7036
7037 if (provider == NSTAT_PROVIDER_TCP_USERLAND) {
7038 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7039 xso->xso_protocol = IPPROTO_TCP;
7040 xso->so_e_pid = desc->epid;
7041 xso->so_last_pid = desc->pid;
7042 } else {
7043 nstat_udp_descriptor *desc = &flow_data->flow_descriptor.udp_descriptor;
7044 xso->xso_protocol = IPPROTO_UDP;
7045 xso->so_e_pid = desc->epid;
7046 xso->so_last_pid = desc->pid;
7047 }
7048 }
7049
7050 static void
nstat_userland_to_rcv_xsockbuf_n(nstat_provider_id_t provider,nstat_flow_data * flow_data,struct xsockbuf_n * xsbrcv)7051 nstat_userland_to_rcv_xsockbuf_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xsockbuf_n *xsbrcv)
7052 {
7053 xsbrcv->xsb_len = sizeof(struct xsockbuf_n);
7054 xsbrcv->xsb_kind = XSO_RCVBUF;
7055
7056 if (provider == NSTAT_PROVIDER_TCP_USERLAND) {
7057 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7058 xsbrcv->sb_hiwat = desc->rcvbufsize;
7059 xsbrcv->sb_cc = desc->rcvbufused;
7060 } else {
7061 nstat_udp_descriptor *desc = &flow_data->flow_descriptor.udp_descriptor;
7062 xsbrcv->sb_hiwat = desc->rcvbufsize;
7063 xsbrcv->sb_cc = desc->rcvbufused;
7064 }
7065 }
7066
7067 static void
nstat_userland_to_snd_xsockbuf_n(nstat_provider_id_t provider,nstat_flow_data * flow_data,struct xsockbuf_n * xsbsnd)7068 nstat_userland_to_snd_xsockbuf_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xsockbuf_n *xsbsnd)
7069 {
7070 xsbsnd->xsb_len = sizeof(struct xsockbuf_n);
7071 xsbsnd->xsb_kind = XSO_SNDBUF;
7072
7073 if (provider == NSTAT_PROVIDER_TCP_USERLAND) {
7074 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7075 xsbsnd->sb_hiwat = desc->sndbufsize;
7076 xsbsnd->sb_cc = desc->sndbufused;
7077 } else {
7078 }
7079 }
7080
7081 static void
nstat_userland_to_xsockstat_n(nstat_flow_data * flow_data,struct xsockstat_n * xst)7082 nstat_userland_to_xsockstat_n(nstat_flow_data *flow_data, struct xsockstat_n *xst)
7083 {
7084 xst->xst_len = sizeof(struct xsockstat_n);
7085 xst->xst_kind = XSO_STATS;
7086
7087 // The kernel version supports an array of counts, here we only support one and map to first entry
7088 xst->xst_tc_stats[0].rxpackets = flow_data->counts.nstat_rxpackets;
7089 xst->xst_tc_stats[0].rxbytes = flow_data->counts.nstat_rxbytes;
7090 xst->xst_tc_stats[0].txpackets = flow_data->counts.nstat_txpackets;
7091 xst->xst_tc_stats[0].txbytes = flow_data->counts.nstat_txbytes;
7092 }
7093
7094 static void
nstat_userland_to_xtcpcb_n(nstat_flow_data * flow_data,struct xtcpcb_n * xt)7095 nstat_userland_to_xtcpcb_n(nstat_flow_data *flow_data, struct xtcpcb_n *xt)
7096 {
7097 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7098 xt->xt_len = sizeof(struct xtcpcb_n);
7099 xt->xt_kind = XSO_TCPCB;
7100 xt->t_state = desc->state;
7101 xt->snd_wnd = desc->txwindow;
7102 xt->snd_cwnd = desc->txcwindow;
7103 }
7104
7105
7106 __private_extern__ int
ntstat_userland_count(short proto)7107 ntstat_userland_count(short proto)
7108 {
7109 int n = 0;
7110 if (proto == IPPROTO_TCP) {
7111 n = nstat_userland_tcp_shadows;
7112 } else if (proto == IPPROTO_UDP) {
7113 n = nstat_userland_udp_shadows;
7114 }
7115 return n;
7116 }
7117
/*
 * Allocate and fill a heap snapshot of all live userland flows for the
 * given protocol.  On return *snapshotp is the snapshot buffer (NULL
 * when there were no flows or on allocation failure) and *countp the
 * number of entries filled.  Returns 0 or ENOMEM.
 * NOTE(review): the caller is presumably responsible for freeing the
 * snapshot (kfree_data) — confirm against nstat_userland_list_snapshot's
 * callers.
 */
__private_extern__ int
nstat_userland_get_snapshot(short proto, void **snapshotp, int *countp)
{
	int error = 0;
	int n = 0;
	nstat_provider_id_t provider;
	nstat_flow_data *flow_data = NULL;

	lck_mtx_lock(&nstat_mtx);
	if (proto == IPPROTO_TCP) {
		n = nstat_userland_tcp_shadows;
		provider = NSTAT_PROVIDER_TCP_USERLAND;
	} else if (proto == IPPROTO_UDP) {
		n = nstat_userland_udp_shadows;
		provider = NSTAT_PROVIDER_UDP_USERLAND;
	}
	// For an unsupported proto, n stays 0 and provider is never read.
	if (n == 0) {
		goto done;
	}

	flow_data = (nstat_flow_data *) kalloc_data(n * sizeof(*flow_data),
	    Z_WAITOK | Z_ZERO);
	if (flow_data) {
		// The shadow count may have changed; use the number actually gathered.
		n = nstat_gather_flow_data(provider, flow_data, n);
	} else {
		error = ENOMEM;
	}
done:
	lck_mtx_unlock(&nstat_mtx);
	*snapshotp = flow_data;
	*countp = n;
	return error;
}
7151
// nstat_userland_list_snapshot() does most of the work for a sysctl that uses a return format
// as per get_pcblist_n() even though the vast majority of fields are unused.
// Additional items are required in the sysctl output before and after the data added
// by this function.
//
// One fixed-size record is built per flow into a single scratch buffer laid out as a
// sequence of 64-bit-aligned sub-structures (xinpcb_n, xsocket_n, two xsockbuf_n,
// xsockstat_n, and — for TCP only — xtcpcb_n), then copied out via SYSCTL_OUT.
__private_extern__ int
nstat_userland_list_snapshot(short proto, struct sysctl_req *req, void *userlandsnapshot, int n)
{
	int error = 0;
	int i;
	nstat_provider_id_t provider;
	void *buf = NULL;
	nstat_flow_data *flow_data, *flow_data_array = NULL;
	// Size of the per-flow record common to TCP and UDP; each component is
	// rounded up so every sub-structure starts 64-bit aligned.
	size_t item_size = ROUNDUP64(sizeof(struct xinpcb_n)) +
	    ROUNDUP64(sizeof(struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
	    ROUNDUP64(sizeof(struct xsockstat_n));

	if ((n == 0) || (userlandsnapshot == NULL)) {
		// Nothing to emit; not an error.
		goto done;
	}

	if (proto == IPPROTO_TCP) {
		// TCP records carry an extra trailing xtcpcb_n.
		item_size += ROUNDUP64(sizeof(struct xtcpcb_n));
		provider = NSTAT_PROVIDER_TCP_USERLAND;
	} else if (proto == IPPROTO_UDP) {
		provider = NSTAT_PROVIDER_UDP_USERLAND;
	} else {
		error = EINVAL;
		goto done;
	}

	buf = (void *) kalloc_data(item_size, Z_WAITOK);
	if (buf) {
		// Carve the scratch buffer into its aligned sub-structures in
		// the exact order the record layout (and item_size) dictates.
		struct xinpcb_n *xi = (struct xinpcb_n *)buf;
		struct xsocket_n *xso = (struct xsocket_n *) ADVANCE64(xi, sizeof(*xi));
		struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *) ADVANCE64(xso, sizeof(*xso));
		struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *) ADVANCE64(xsbrcv, sizeof(*xsbrcv));
		struct xsockstat_n *xsostats = (struct xsockstat_n *) ADVANCE64(xsbsnd, sizeof(*xsbsnd));
		// NOTE: xt points one record-slot past xsostats; it is only
		// dereferenced for TCP, where item_size covers it.
		struct xtcpcb_n *xt = (struct xtcpcb_n *) ADVANCE64(xsostats, sizeof(*xsostats));

		flow_data_array = (nstat_flow_data *)userlandsnapshot;

		for (i = 0; i < n; i++) {
			flow_data = &flow_data_array[i];
			// Clear the whole record so unused fields read as zero.
			bzero(buf, item_size);

			nstat_userland_to_xinpcb_n(provider, flow_data, xi);
			nstat_userland_to_xsocket_n(provider, flow_data, xso);
			nstat_userland_to_rcv_xsockbuf_n(provider, flow_data, xsbrcv);
			nstat_userland_to_snd_xsockbuf_n(provider, flow_data, xsbsnd);
			nstat_userland_to_xsockstat_n(flow_data, xsostats);
			if (proto == IPPROTO_TCP) {
				nstat_userland_to_xtcpcb_n(flow_data, xt);
			}
			error = SYSCTL_OUT(req, buf, item_size);
			if (error) {
				break;
			}
		}
		kfree_data(buf, item_size);
	} else {
		error = ENOMEM;
	}
done:
	return error;
}
7218
7219 __private_extern__ void
nstat_userland_release_snapshot(void * snapshot,int nuserland)7220 nstat_userland_release_snapshot(void *snapshot, int nuserland)
7221 {
7222 if (snapshot != NULL) {
7223 kfree_data(snapshot, nuserland * sizeof(nstat_flow_data));
7224 }
7225 }
7226
7227 #if NTSTAT_SUPPORTS_STANDALONE_SYSCTL
7228
7229 __private_extern__ int
ntstat_userland_list_n(short proto,struct sysctl_req * req)7230 ntstat_userland_list_n(short proto, struct sysctl_req *req)
7231 {
7232 int error = 0;
7233 int n;
7234 struct xinpgen xig;
7235 void *snapshot = NULL;
7236 size_t item_size = ROUNDUP64(sizeof(struct xinpcb_n)) +
7237 ROUNDUP64(sizeof(struct xsocket_n)) +
7238 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
7239 ROUNDUP64(sizeof(struct xsockstat_n));
7240
7241 if (proto == IPPROTO_TCP) {
7242 item_size += ROUNDUP64(sizeof(struct xtcpcb_n));
7243 }
7244
7245 if (req->oldptr == USER_ADDR_NULL) {
7246 n = ntstat_userland_count(proto);
7247 req->oldidx = 2 * (sizeof(xig)) + (n + 1 + n / 8) * item_size;
7248 goto done;
7249 }
7250
7251 if (req->newptr != USER_ADDR_NULL) {
7252 error = EPERM;
7253 goto done;
7254 }
7255
7256 error = nstat_userland_get_snapshot(proto, &snapshot, &n);
7257
7258 if (error) {
7259 goto done;
7260 }
7261
7262 bzero(&xig, sizeof(xig));
7263 xig.xig_len = sizeof(xig);
7264 xig.xig_gen = 0;
7265 xig.xig_sogen = 0;
7266 xig.xig_count = n;
7267 error = SYSCTL_OUT(req, &xig, sizeof(xig));
7268 if (error) {
7269 goto done;
7270 }
7271 /*
7272 * We are done if there are no flows
7273 */
7274 if (n == 0) {
7275 goto done;
7276 }
7277
7278 error = nstat_userland_list_snapshot(proto, req, snapshot, n);
7279
7280 if (!error) {
7281 /*
7282 * Give the user an updated idea of our state,
7283 * which is unchanged
7284 */
7285 error = SYSCTL_OUT(req, &xig, sizeof(xig));
7286 }
7287 done:
7288 nstat_userland_release_snapshot(snapshot, n);
7289 return error;
7290 }
7291
7292 #endif /* NTSTAT_SUPPORTS_STANDALONE_SYSCTL */
7293 #endif /* SKYWALK */
7294