1 /*
2 * Copyright (c) 2010-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/kpi_mbuf.h>
32 #include <sys/socket.h>
33 #include <sys/kern_control.h>
34 #include <sys/mcache.h>
35 #include <sys/socketvar.h>
36 #include <sys/sysctl.h>
37 #include <sys/queue.h>
38 #include <sys/priv.h>
39 #include <sys/protosw.h>
40 #include <sys/persona.h>
41
42 #include <kern/clock.h>
43 #include <kern/debug.h>
44
45 #include <libkern/libkern.h>
46 #include <libkern/OSAtomic.h>
47 #include <libkern/locks.h>
48
49 #include <net/if.h>
50 #include <net/if_var.h>
51 #include <net/if_types.h>
52 #include <net/route.h>
53 #include <net/dlil.h>
54
55 // These includes appear in ntstat.h but we include them here first so they won't trigger
56 // any clang diagnostic errors.
57 #include <netinet/in.h>
58 #include <netinet/in_stat.h>
59 #include <netinet/tcp.h>
60
61 #pragma clang diagnostic push
62 #pragma clang diagnostic error "-Wpadded"
63 #pragma clang diagnostic error "-Wpacked"
64 // This header defines structures shared with user space, so we need to ensure there is
65 // no compiler inserted padding in case the user space process isn't using the same
66 // architecture as the kernel (example: i386 process with x86_64 kernel).
67 #include <net/ntstat.h>
68 #pragma clang diagnostic pop
69
70 #include <netinet/ip_var.h>
71 #include <netinet/in_pcb.h>
72 #include <netinet/in_var.h>
73 #include <netinet/tcp_var.h>
74 #include <netinet/tcp_fsm.h>
75 #include <netinet/tcp_cc.h>
76 #include <netinet/udp.h>
77 #include <netinet/udp_var.h>
78 #include <netinet6/in6_pcb.h>
79 #include <netinet6/in6_var.h>
80
81 __private_extern__ int nstat_collect = 1;
82
83 #if (DEBUG || DEVELOPMENT)
84 SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
85 &nstat_collect, 0, "Collect detailed statistics");
86 #endif /* (DEBUG || DEVELOPMENT) */
87
88 #if !XNU_TARGET_OS_OSX
89 static int nstat_privcheck = 1;
90 #else /* XNU_TARGET_OS_OSX */
91 static int nstat_privcheck = 0;
92 #endif /* XNU_TARGET_OS_OSX */
93 SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
94 &nstat_privcheck, 0, "Entitlement check");
95
96 SYSCTL_NODE(_net, OID_AUTO, stats,
97 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "network statistics");
98
99 static int nstat_debug = 0;
100 SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
101 &nstat_debug, 0, "");
102
103 static int nstat_debug_pid = 0; // Only log socket level debug for specified pid
104 SYSCTL_INT(_net_stats, OID_AUTO, debug_pid, CTLFLAG_RW | CTLFLAG_LOCKED,
105 &nstat_debug_pid, 0, "");
106
107 static int nstat_sendspace = 2048;
108 SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
109 &nstat_sendspace, 0, "");
110
111 static int nstat_recvspace = 8192;
112 SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
113 &nstat_recvspace, 0, "");
114
115 static struct nstat_stats nstat_stats;
116 SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
117 &nstat_stats, nstat_stats, "");
118
119 static u_int32_t nstat_lim_interval = 30 * 60; /* Report interval, seconds */
120 static u_int32_t nstat_lim_min_tx_pkts = 100;
121 static u_int32_t nstat_lim_min_rx_pkts = 100;
122 #if (DEBUG || DEVELOPMENT)
123 SYSCTL_INT(_net_stats, OID_AUTO, lim_report_interval,
124 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_interval, 0,
125 "Low internet stat report interval");
126
127 SYSCTL_INT(_net_stats, OID_AUTO, lim_min_tx_pkts,
128 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_tx_pkts, 0,
129 "Low Internet, min transmit packets threshold");
130
131 SYSCTL_INT(_net_stats, OID_AUTO, lim_min_rx_pkts,
132 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_rx_pkts, 0,
133 "Low Internet, min receive packets threshold");
134 #endif /* DEBUG || DEVELOPMENT */
135
136 static struct net_api_stats net_api_stats_before;
137 static u_int64_t net_api_stats_last_report_time;
138 #define NET_API_STATS_REPORT_INTERVAL (12 * 60 * 60) /* 12 hours, in seconds */
139 static u_int32_t net_api_stats_report_interval = NET_API_STATS_REPORT_INTERVAL;
140
141 #if (DEBUG || DEVELOPMENT)
142 SYSCTL_UINT(_net_stats, OID_AUTO, api_report_interval,
143 CTLFLAG_RW | CTLFLAG_LOCKED, &net_api_stats_report_interval, 0, "");
144 #endif /* DEBUG || DEVELOPMENT */
145
146 #define NSTAT_DEBUG_SOCKET_PID_MATCHED(so) \
147 (so && (nstat_debug_pid == (so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid)))
148
149 #define NSTAT_DEBUG_SOCKET_ON(so) \
150 ((nstat_debug && (!nstat_debug_pid || NSTAT_DEBUG_SOCKET_PID_MATCHED(so))) ? nstat_debug : 0)
151
152 #define NSTAT_DEBUG_SOCKET_LOG(so, fmt, ...) \
153 if (NSTAT_DEBUG_SOCKET_ON(so)) { \
154 printf("NSTAT_DEBUG_SOCKET <pid %d>: " fmt "\n", (so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid), ##__VA_ARGS__); \
155 }
156
157 enum{
158 NSTAT_FLAG_CLEANUP = (1 << 0),
159 NSTAT_FLAG_REQCOUNTS = (1 << 1),
160 NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2),
161 NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3),
162 };
163
164 #if !XNU_TARGET_OS_OSX
165 #define QUERY_CONTINUATION_SRC_COUNT 50
166 #else /* XNU_TARGET_OS_OSX */
167 #define QUERY_CONTINUATION_SRC_COUNT 100
168 #endif /* XNU_TARGET_OS_OSX */
169
170 #ifndef ROUNDUP64
171 #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
172 #endif
173
174 #ifndef ADVANCE64
175 #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
176 #endif
177
178 typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src;
179 typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src;
180
181 typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow;
182 typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow;
183
184 typedef TAILQ_HEAD(, nstat_generic_shadow) tailq_head_generic_shadow;
185 typedef TAILQ_ENTRY(nstat_generic_shadow) tailq_entry_generic_shadow;
186
187 typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails;
188 typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails;
189
/*
 * Cached identity of a process that interacts with ntstat.  Entries
 * live on nstat_procdetails_head, are refcounted via pdet_refcnt and
 * reaped by nstat_prune_procdetails() once the count drops to zero.
 */
struct nstat_procdetails {
	tailq_entry_procdetails pdet_link;      // linkage on nstat_procdetails_head
	int pdet_pid;                           // pid captured at creation time
	u_int64_t pdet_upid;                    // unique pid, used as the lookup key
	char pdet_procname[64];                 // process name snapshot
	uuid_t pdet_uuid;                       // executable UUID
	u_int32_t pdet_refcnt;                  // outstanding references
	u_int32_t pdet_magic;                   // NSTAT_PROCDETAILS_MAGIC while live
};
199
200 typedef struct nstat_provider_filter {
201 u_int64_t npf_flags;
202 u_int64_t npf_events;
203 u_int64_t npf_extensions;
204 pid_t npf_pid;
205 uuid_t npf_uuid;
206 } nstat_provider_filter;
207
208
209 typedef struct nstat_control_state {
210 struct nstat_control_state *ncs_next;
211 /* A bitmask to indicate whether a provider ever done NSTAT_MSG_TYPE_ADD_ALL_SRCS */
212 u_int32_t ncs_watching;
213 /* A bitmask to indicate whether a provider ever done NSTAT_MSG_TYPE_ADD_SRC */
214 u_int32_t ncs_added_src;
215 decl_lck_mtx_data(, ncs_mtx);
216 kern_ctl_ref ncs_kctl;
217 u_int32_t ncs_unit;
218 nstat_src_ref_t ncs_next_srcref;
219 tailq_head_nstat_src ncs_src_queue;
220 mbuf_t ncs_accumulated;
221 u_int32_t ncs_flags;
222 nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT];
223 /* state maintained for partial query requests */
224 u_int64_t ncs_context;
225 u_int64_t ncs_seq;
226 /* For ease of debugging with lldb macros */
227 struct nstat_procdetails *ncs_procdetails;
228 } nstat_control_state;
229
/*
 * A registered statistics provider (routes, TCP, UDP, ifnet, ...).
 * Providers are linked on the global nstat_providers list and found
 * by id via nstat_find_provider_by_id().  Each callback operates on
 * an opaque per-source cookie the provider handed out at
 * lookup/watch time.
 */
typedef struct nstat_provider {
	struct nstat_provider *next;            // next entry on the nstat_providers list
	nstat_provider_id_t nstat_provider_id;  // NSTAT_PROVIDER_* identifier
	size_t nstat_descriptor_length;         // size of this provider's descriptor struct
	errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
	int (*nstat_gone)(nstat_provider_cookie_t cookie);      // non-zero once the underlying object is gone
	errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
	errno_t (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req);
	void (*nstat_watcher_remove)(nstat_control_state *state);
	errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, size_t len);
	void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);  // drop the cookie's reference
	bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, u_int64_t suppression_flags);
	bool (*nstat_cookie_equal)(nstat_provider_cookie_t cookie1, nstat_provider_cookie_t cookie2);  // may be NULL (checked before use)
	size_t (*nstat_copy_extension)(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len);
} nstat_provider;
245
/*
 * One source being reported to one client: binds a provider cookie to
 * the owning nstat_control_state.  Queued on the client's
 * ncs_src_queue.
 */
typedef struct nstat_src {
	tailq_entry_nstat_src ns_control_link;  // All sources for the nstat_control_state, for iterating over.
	nstat_control_state *ns_control;        // The nstat_control_state that this is a source for
	nstat_src_ref_t srcref;                 // reference identifying this source to the client
	nstat_provider *provider;               // provider implementing this source's callbacks
	nstat_provider_cookie_t cookie;         // provider-private handle for the underlying object
	uint32_t filter;                        // per-source filter flags
	bool ns_reported;                       // At least one update/counts/desc message has been sent
	uint64_t seq;                           // presumably the query sequence last reported -- see ncs_seq; confirm
} nstat_src;
256
257 // The merge structures are intended to give a global picture of what may be asked for by the current set of clients
258 // This is to avoid taking locks to check them all individually
259 typedef struct nstat_merged_provider_filter {
260 u_int64_t mf_events; // So far we only merge the events portion of any filters
261 } nstat_merged_provider_filter;
262
263 typedef struct nstat_merged_provider_filters {
264 nstat_merged_provider_filter mpf_filters[NSTAT_PROVIDER_COUNT];
265 } nstat_merged_provider_filters;
266
267 static errno_t nstat_control_send_counts(nstat_control_state *, nstat_src *, unsigned long long, u_int16_t, int *);
268 static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
269 static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int64_t event, u_int16_t hdr_flags, int *gone);
270 static errno_t nstat_control_send_removed(nstat_control_state *state, nstat_src *src, u_int16_t hdr_flags);
271 static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
272 static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
273 static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src, u_int64_t suppression_flags);
274 static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
275 static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
276 static void nstat_ifnet_report_ecn_stats(void);
277 static void nstat_ifnet_report_lim_stats(void);
278 static void nstat_net_api_report_stats(void);
279 static errno_t nstat_set_provider_filter( nstat_control_state *state, nstat_msg_add_all_srcs *req);
280 static errno_t nstat_control_send_event(nstat_control_state *state, nstat_src *src, u_int64_t event);
281
282 static u_int32_t nstat_udp_watchers = 0;
283 static u_int32_t nstat_tcp_watchers = 0;
284 static nstat_merged_provider_filters merged_filters = {};
285
286 static void nstat_control_register(void);
287
288 /*
289 * The lock order is as follows:
290 *
291 * socket_lock (inpcb)
292 * nstat_mtx
293 * state->ncs_mtx
294 */
295 static nstat_control_state *nstat_controls = NULL;
296 static uint64_t nstat_idle_time = 0;
297 static LCK_GRP_DECLARE(nstat_lck_grp, "network statistics kctl");
298 static LCK_MTX_DECLARE(nstat_mtx, &nstat_lck_grp);
299
300
301 /* some extern definitions */
302 extern void mbuf_report_peak_usage(void);
303 extern void tcp_report_stats(void);
304
305 static void
nstat_copy_sa_out(const struct sockaddr * src,struct sockaddr * dst,int maxlen)306 nstat_copy_sa_out(
307 const struct sockaddr *src,
308 struct sockaddr *dst,
309 int maxlen)
310 {
311 if (src->sa_len > maxlen) {
312 return;
313 }
314
315 bcopy(src, dst, src->sa_len);
316 if (src->sa_family == AF_INET6 &&
317 src->sa_len >= sizeof(struct sockaddr_in6)) {
318 struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst;
319 if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
320 sin6->sin6_scope_id = ((const struct sockaddr_in6*)(const void*)(src))->sin6_scope_id;
321 if (in6_embedded_scope) {
322 in6_verify_ifscope(&sin6->sin6_addr, sin6->sin6_scope_id);
323 sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
324 sin6->sin6_addr.s6_addr16[1] = 0;
325 }
326 }
327 }
328 }
329
330 static void
nstat_ip_to_sockaddr(const struct in_addr * ip,u_int16_t port,struct sockaddr_in * sin,u_int32_t maxlen)331 nstat_ip_to_sockaddr(
332 const struct in_addr *ip,
333 u_int16_t port,
334 struct sockaddr_in *sin,
335 u_int32_t maxlen)
336 {
337 if (maxlen < sizeof(struct sockaddr_in)) {
338 return;
339 }
340
341 sin->sin_family = AF_INET;
342 sin->sin_len = sizeof(*sin);
343 sin->sin_port = port;
344 sin->sin_addr = *ip;
345 }
346
/*
 * Condense an interface's functional type and properties into the
 * 16-bit NSTAT_IFNET_* flag set.  The mapping is part of the
 * user-space-visible statistics contract.
 */
u_int16_t
nstat_ifnet_to_flags(
	struct ifnet *ifp)
{
	u_int16_t flags = 0;
	u_int32_t functional_type = if_functional_type(ifp, FALSE);

	/* Panic if someone adds a functional type without updating ntstat. */
	VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);

	switch (functional_type) {
	case IFRTYPE_FUNCTIONAL_UNKNOWN:
		flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
		break;
	case IFRTYPE_FUNCTIONAL_LOOPBACK:
		flags |= NSTAT_IFNET_IS_LOOPBACK;
		break;
	case IFRTYPE_FUNCTIONAL_WIRED:
	case IFRTYPE_FUNCTIONAL_INTCOPROC:
	case IFRTYPE_FUNCTIONAL_MANAGEMENT:
		// Coprocessor and management links are reported as wired.
		flags |= NSTAT_IFNET_IS_WIRED;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
		flags |= NSTAT_IFNET_IS_WIFI;
		break;
	case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
		flags |= NSTAT_IFNET_IS_WIFI;
		flags |= NSTAT_IFNET_IS_AWDL;
		break;
	case IFRTYPE_FUNCTIONAL_CELLULAR:
		flags |= NSTAT_IFNET_IS_CELLULAR;
		break;
	case IFRTYPE_FUNCTIONAL_COMPANIONLINK:
		flags |= NSTAT_IFNET_IS_COMPANIONLINK;
		break;
	}

	if (IFNET_IS_EXPENSIVE(ifp)) {
		flags |= NSTAT_IFNET_IS_EXPENSIVE;
	}
	if (IFNET_IS_CONSTRAINED(ifp)) {
		flags |= NSTAT_IFNET_IS_CONSTRAINED;
	}
	// Low-latency links are flagged as Wi-Fi with the LLW modifier.
	if (ifp->if_xflags & IFXF_LOW_LATENCY) {
		flags |= NSTAT_IFNET_IS_WIFI;
		flags |= NSTAT_IFNET_IS_LLW;
	}

	return flags;
}
397
398 static u_int32_t
extend_ifnet_flags(u_int16_t condensed_flags)399 extend_ifnet_flags(
400 u_int16_t condensed_flags)
401 {
402 u_int32_t extended_flags = (u_int32_t)condensed_flags;
403
404 if ((extended_flags & NSTAT_IFNET_IS_WIFI) && ((extended_flags & (NSTAT_IFNET_IS_AWDL | NSTAT_IFNET_IS_LLW)) == 0)) {
405 extended_flags |= NSTAT_IFNET_IS_WIFI_INFRA;
406 }
407 return extended_flags;
408 }
409
410 u_int32_t
nstat_ifnet_to_flags_extended(struct ifnet * ifp)411 nstat_ifnet_to_flags_extended(
412 struct ifnet *ifp)
413 {
414 u_int32_t flags = extend_ifnet_flags(nstat_ifnet_to_flags(ifp));
415
416 return flags;
417 }
418
419 static u_int32_t
nstat_inpcb_to_flags(const struct inpcb * inp)420 nstat_inpcb_to_flags(
421 const struct inpcb *inp)
422 {
423 u_int32_t flags = 0;
424
425 if (inp != NULL) {
426 if (inp->inp_last_outifp != NULL) {
427 struct ifnet *ifp = inp->inp_last_outifp;
428 flags = nstat_ifnet_to_flags_extended(ifp);
429
430 struct tcpcb *tp = intotcpcb(inp);
431 if (tp) {
432 if (tp->t_flags & TF_LOCAL) {
433 flags |= NSTAT_IFNET_IS_LOCAL;
434 } else {
435 flags |= NSTAT_IFNET_IS_NON_LOCAL;
436 }
437 }
438 } else {
439 flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
440 }
441 if (inp->inp_socket != NULL &&
442 (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK)) {
443 flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
444 }
445 }
446 return flags;
447 }
448
449 static void
merge_current_event_filters(void)450 merge_current_event_filters(void)
451 {
452 // The nstat_mtx is assumed locked
453 nstat_merged_provider_filters new_merge = {};
454 nstat_provider_type_t provider;
455 nstat_control_state *state;
456
457 for (state = nstat_controls; state; state = state->ncs_next) {
458 for (provider = NSTAT_PROVIDER_NONE; provider <= NSTAT_PROVIDER_LAST; provider++) {
459 new_merge.mpf_filters[provider].mf_events |= state->ncs_provider_filters[provider].npf_events;
460 }
461 }
462 for (provider = NSTAT_PROVIDER_NONE; provider <= NSTAT_PROVIDER_LAST; provider++) {
463 // This should do atomic updates of the 64 bit words, where memcpy would be undefined
464 merged_filters.mpf_filters[provider].mf_events = new_merge.mpf_filters[provider].mf_events;
465 }
466 }
467
468
469 #pragma mark -- Network Statistic Providers --
470
471 static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
472 struct nstat_provider *nstat_providers = NULL;
473
474 static struct nstat_provider*
nstat_find_provider_by_id(nstat_provider_id_t id)475 nstat_find_provider_by_id(
476 nstat_provider_id_t id)
477 {
478 struct nstat_provider *provider;
479
480 for (provider = nstat_providers; provider != NULL; provider = provider->next) {
481 if (provider->nstat_provider_id == id) {
482 break;
483 }
484 }
485
486 return provider;
487 }
488
489 static errno_t
nstat_lookup_entry(nstat_provider_id_t id,const void * data,u_int32_t length,nstat_provider ** out_provider,nstat_provider_cookie_t * out_cookie)490 nstat_lookup_entry(
491 nstat_provider_id_t id,
492 const void *data,
493 u_int32_t length,
494 nstat_provider **out_provider,
495 nstat_provider_cookie_t *out_cookie)
496 {
497 *out_provider = nstat_find_provider_by_id(id);
498 if (*out_provider == NULL) {
499 return ENOENT;
500 }
501
502 return (*out_provider)->nstat_lookup(data, length, out_cookie);
503 }
504
/*
 * Before adding a source, remove any existing source in this client
 * that matches the same provider id and cookie.  The duplicate gets a
 * goodbye message and is unlinked under the client mutex; the actual
 * cleanup runs after the mutex is dropped.
 */
static void
nstat_control_sanitize_cookie(
	nstat_control_state *state,
	nstat_provider_id_t id,
	nstat_provider_cookie_t cookie)
{
	nstat_src *src = NULL;

	// Scan the source list to find any duplicate entry and remove it.
	lck_mtx_lock(&state->ncs_mtx);
	TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
	{
		nstat_provider *sp = src->provider;
		if (sp->nstat_provider_id == id &&
		    sp->nstat_cookie_equal != NULL &&
		    sp->nstat_cookie_equal(src->cookie, cookie)) {
			break;
		}
	}
	if (src) {
		nstat_control_send_goodbye(state, src);
		TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
	}
	lck_mtx_unlock(&state->ncs_mtx);

	// NOTE(review): cleanup is deliberately performed outside ncs_mtx
	// and with a NULL state -- presumably to avoid re-entering the
	// client lock from the provider release path; confirm.
	if (src) {
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
534
535 static void nstat_init_route_provider(void);
536 static void nstat_init_tcp_provider(void);
537 static void nstat_init_udp_provider(void);
538 #if SKYWALK
539 static void nstat_init_userland_tcp_provider(void);
540 static void nstat_init_userland_udp_provider(void);
541 static void nstat_init_userland_quic_provider(void);
542 #endif /* SKYWALK */
543 static void nstat_init_userland_conn_provider(void);
544 static void nstat_init_udp_subflow_provider(void);
545 static void nstat_init_ifnet_provider(void);
546
/*
 * Module initialization: register every statistics provider, then
 * register the kernel control last, after the provider list is
 * complete.
 */
__private_extern__ void
nstat_init(void)
{
	nstat_init_route_provider();
	nstat_init_tcp_provider();
	nstat_init_udp_provider();
#if SKYWALK
	nstat_init_userland_tcp_provider();
	nstat_init_userland_udp_provider();
	nstat_init_userland_quic_provider();
#endif /* SKYWALK */
	nstat_init_userland_conn_provider();
	nstat_init_udp_subflow_provider();
	nstat_init_ifnet_provider();
	nstat_control_register();
}
563
564 #pragma mark -- Aligned Buffer Allocation --
565
/*
 * Bookkeeping stored immediately before each pointer handed out by
 * nstat_malloc_aligned(): offset is the distance from the start of
 * the underlying buffer to the aligned pointer, length is the full
 * allocated size (both needed to free the buffer).
 */
struct align_header {
	u_int32_t offset;
	u_int32_t length;
};
570
571 static void*
nstat_malloc_aligned(size_t length,u_int8_t alignment,zalloc_flags_t flags)572 nstat_malloc_aligned(
573 size_t length,
574 u_int8_t alignment,
575 zalloc_flags_t flags)
576 {
577 struct align_header *hdr = NULL;
578 size_t size = length + sizeof(*hdr) + alignment - 1;
579
580 // Arbitrary limit to prevent abuse
581 if (length > (64 * 1024)) {
582 return NULL;
583 }
584 u_int8_t *buffer = (u_int8_t *)kalloc_data(size, flags);
585 if (buffer == NULL) {
586 return NULL;
587 }
588
589 u_int8_t *aligned = buffer + sizeof(*hdr);
590 aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);
591
592 hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
593 hdr->offset = aligned - buffer;
594 hdr->length = size;
595
596 return aligned;
597 }
598
599 static void
nstat_free_aligned(void * buffer)600 nstat_free_aligned(
601 void *buffer)
602 {
603 struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
604 char *offset_buffer = (char *)buffer - hdr->offset;
605 kfree_data(offset_buffer, hdr->length);
606 }
607
608 #pragma mark -- Utilities --
609
610 #define NSTAT_PROCDETAILS_MAGIC 0xfeedc001
611 #define NSTAT_PROCDETAILS_UNMAGIC 0xdeadc001
612
613 static tailq_head_procdetails nstat_procdetails_head = TAILQ_HEAD_INITIALIZER(nstat_procdetails_head);
614
/*
 * Return a refcounted procdetails entry for the current process,
 * creating and listing one if no entry with the same unique pid
 * exists.  The allocation happens with nstat_mtx dropped, so two
 * racing threads may each insert an entry -- duplicates are tolerated
 * by design (see comment below).  Pair with
 * nstat_release_procdetails().
 */
static struct nstat_procdetails *
nstat_retain_curprocdetails(void)
{
	struct nstat_procdetails *procdetails = NULL;
	uint64_t upid = proc_uniqueid(current_proc());

	lck_mtx_lock(&nstat_mtx);

	TAILQ_FOREACH(procdetails, &nstat_procdetails_head, pdet_link) {
		assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);

		if (procdetails->pdet_upid == upid) {
			OSIncrementAtomic(&procdetails->pdet_refcnt);
			break;
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	if (!procdetails) {
		// No need for paranoia on locking, it would be OK if there are duplicate structs on the list
		procdetails = kalloc_type(struct nstat_procdetails,
		    Z_WAITOK | Z_NOFAIL);
		procdetails->pdet_pid = proc_selfpid();
		procdetails->pdet_upid = upid;
		proc_selfname(procdetails->pdet_procname, sizeof(procdetails->pdet_procname));
		proc_getexecutableuuid(current_proc(), procdetails->pdet_uuid, sizeof(uuid_t));
		procdetails->pdet_refcnt = 1;
		procdetails->pdet_magic = NSTAT_PROCDETAILS_MAGIC;
		lck_mtx_lock(&nstat_mtx);
		TAILQ_INSERT_HEAD(&nstat_procdetails_head, procdetails, pdet_link);
		lck_mtx_unlock(&nstat_mtx);
	}

	return procdetails;
}
649
/*
 * Drop a reference on a procdetails entry.  Zero-refcount entries are
 * not freed here; they are reclaimed later by
 * nstat_prune_procdetails().
 */
static void
nstat_release_procdetails(struct nstat_procdetails *procdetails)
{
	assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
	// These are harvested later to amortize costs
	OSDecrementAtomic(&procdetails->pdet_refcnt);
}
657
658 static void
nstat_prune_procdetails(void)659 nstat_prune_procdetails(void)
660 {
661 struct nstat_procdetails *procdetails;
662 struct nstat_procdetails *tmpdetails;
663 tailq_head_procdetails dead_list;
664
665 TAILQ_INIT(&dead_list);
666 lck_mtx_lock(&nstat_mtx);
667
668 TAILQ_FOREACH_SAFE(procdetails, &nstat_procdetails_head, pdet_link, tmpdetails)
669 {
670 assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
671 if (procdetails->pdet_refcnt == 0) {
672 // Pull it off the list
673 TAILQ_REMOVE(&nstat_procdetails_head, procdetails, pdet_link);
674 TAILQ_INSERT_TAIL(&dead_list, procdetails, pdet_link);
675 }
676 }
677 lck_mtx_unlock(&nstat_mtx);
678
679 while ((procdetails = TAILQ_FIRST(&dead_list))) {
680 TAILQ_REMOVE(&dead_list, procdetails, pdet_link);
681 procdetails->pdet_magic = NSTAT_PROCDETAILS_UNMAGIC;
682 kfree_type(struct nstat_procdetails, procdetails);
683 }
684 }
685
686 #pragma mark -- Route Provider --
687
688 static nstat_provider nstat_route_provider;
689
690 static errno_t
nstat_route_lookup(const void * data,u_int32_t length,nstat_provider_cookie_t * out_cookie)691 nstat_route_lookup(
692 const void *data,
693 u_int32_t length,
694 nstat_provider_cookie_t *out_cookie)
695 {
696 // rt_lookup doesn't take const params but it doesn't modify the parameters for
697 // the lookup. So...we use a union to eliminate the warning.
698 union{
699 struct sockaddr *sa;
700 const struct sockaddr *const_sa;
701 } dst, mask;
702
703 const nstat_route_add_param *param = (const nstat_route_add_param*)data;
704 *out_cookie = NULL;
705
706 if (length < sizeof(*param)) {
707 return EINVAL;
708 }
709
710 if (param->dst.v4.sin_family == 0 ||
711 param->dst.v4.sin_family > AF_MAX ||
712 (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family)) {
713 return EINVAL;
714 }
715
716 if (param->dst.v4.sin_len > sizeof(param->dst) ||
717 (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len))) {
718 return EINVAL;
719 }
720 if ((param->dst.v4.sin_family == AF_INET &&
721 param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
722 (param->dst.v6.sin6_family == AF_INET6 &&
723 param->dst.v6.sin6_len < sizeof(struct sockaddr_in6))) {
724 return EINVAL;
725 }
726
727 dst.const_sa = (const struct sockaddr*)¶m->dst;
728 mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)¶m->mask : NULL;
729
730 struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
731 if (rnh == NULL) {
732 return EAFNOSUPPORT;
733 }
734
735 lck_mtx_lock(rnh_lock);
736 struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
737 lck_mtx_unlock(rnh_lock);
738
739 if (rt) {
740 *out_cookie = (nstat_provider_cookie_t)rt;
741 }
742
743 return rt ? 0 : ENOENT;
744 }
745
746 static int
nstat_route_gone(nstat_provider_cookie_t cookie)747 nstat_route_gone(
748 nstat_provider_cookie_t cookie)
749 {
750 struct rtentry *rt = (struct rtentry*)cookie;
751 return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
752 }
753
754 static errno_t
nstat_route_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)755 nstat_route_counts(
756 nstat_provider_cookie_t cookie,
757 struct nstat_counts *out_counts,
758 int *out_gone)
759 {
760 struct rtentry *rt = (struct rtentry*)cookie;
761 struct nstat_counts *rt_stats = rt->rt_stats;
762
763 if (out_gone) {
764 *out_gone = 0;
765 }
766
767 if (out_gone && (rt->rt_flags & RTF_UP) == 0) {
768 *out_gone = 1;
769 }
770
771 if (rt_stats) {
772 out_counts->nstat_rxpackets = os_atomic_load(&rt_stats->nstat_rxpackets, relaxed);
773 out_counts->nstat_rxbytes = os_atomic_load(&rt_stats->nstat_rxbytes, relaxed);
774 out_counts->nstat_txpackets = os_atomic_load(&rt_stats->nstat_txpackets, relaxed);
775 out_counts->nstat_txbytes = os_atomic_load(&rt_stats->nstat_txbytes, relaxed);
776 out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
777 out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
778 out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
779 out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
780 out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
781 out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
782 out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
783 out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
784 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
785 } else {
786 bzero(out_counts, sizeof(*out_counts));
787 }
788
789 return 0;
790 }
791
/*
 * Release the rtentry reference held by a route provider cookie.
 */
static void
nstat_route_release(
	nstat_provider_cookie_t cookie,
	__unused int locked)
{
	rtfree((struct rtentry*)cookie);
}
799
800 static u_int32_t nstat_route_watchers = 0;
801
/*
 * rnh_walktree callback used when a client starts watching all
 * routes: adds each usable (RTF_UP and not condemned) route as a
 * source for the watching client.  A route reference is taken here;
 * it is released immediately on add failure, otherwise kept for the
 * new source (dropped later by nstat_route_release).  Called with
 * rnh_lock held.  A non-zero return aborts the walk.
 */
static int
nstat_route_walktree_add(
	struct radix_node *rn,
	void *context)
{
	errno_t result = 0;
	struct rtentry *rt = (struct rtentry *)rn;
	nstat_control_state *state = (nstat_control_state*)context;

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* RTF_UP can't change while rnh_lock is held */
	if ((rt->rt_flags & RTF_UP) != 0) {
		/* Clear RTPRF_OURS if the route is still usable */
		RT_LOCK(rt);
		if (rt_validate(rt)) {
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
		} else {
			RT_UNLOCK(rt);
			rt = NULL;
		}

		/* Otherwise if RTF_CONDEMNED, treat it as if it were down */
		if (rt == NULL) {
			return 0;
		}

		result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
		if (result != 0) {
			rtfree_locked(rt);
		}
	}

	return result;
}
838
/*
 * Handle an add-all-sources request for the route provider: record
 * the client's provider filter, account for the new watcher, then
 * walk every per-family routing table adding each existing route as a
 * source for this client.  Runs entirely under rnh_lock so the
 * tables cannot change mid-walk.  A mid-walk failure abandons the
 * add-all but leaves the watcher registered (see comment below).
 */
static errno_t
nstat_route_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	int i;
	errno_t result = 0;

	lck_mtx_lock(rnh_lock);

	result = nstat_set_provider_filter(state, req);
	if (result == 0) {
		OSIncrementAtomic(&nstat_route_watchers);

		// Address family 0 has no routing table; start at 1.
		for (i = 1; i < AF_MAX; i++) {
			struct radix_node_head *rnh;
			rnh = rt_tables[i];
			if (!rnh) {
				continue;
			}

			result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
			if (result != 0) {
				// This is probably resource exhaustion.
				// There currently isn't a good way to recover from this.
				// Least bad seems to be to give up on the add-all but leave
				// the watcher in place.
				break;
			}
		}
	}
	lck_mtx_unlock(rnh_lock);

	return result;
}
874
/*
 * Called when a new routing table entry is created.  Adds the route
 * as a source for every client currently watching routes, taking one
 * route reference per client (released again if the add fails).
 */
__private_extern__ void
nstat_route_new_entry(
	struct rtentry *rt)
{
	// Cheap unlocked check before taking nstat_mtx.
	if (nstat_route_watchers == 0) {
		return;
	}

	lck_mtx_lock(&nstat_mtx);
	if ((rt->rt_flags & RTF_UP) != 0) {
		nstat_control_state *state;
		for (state = nstat_controls; state; state = state->ncs_next) {
			if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0) {
				// this client is watching routes
				// acquire a reference for the route
				RT_ADDREF(rt);

				// add the source, if that fails, release the reference
				if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0) {
					RT_REMREF(rt);
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}
901
/*
 * A client stopped watching routes; drop the global watcher count
 * consulted by nstat_route_new_entry()'s fast path.
 */
static void
nstat_route_remove_watcher(
	__unused nstat_control_state *state)
{
	OSDecrementAtomic(&nstat_route_watchers);
}
908
909 static errno_t
nstat_route_copy_descriptor(nstat_provider_cookie_t cookie,void * data,size_t len)910 nstat_route_copy_descriptor(
911 nstat_provider_cookie_t cookie,
912 void *data,
913 size_t len)
914 {
915 nstat_route_descriptor *desc = (nstat_route_descriptor*)data;
916 if (len < sizeof(*desc)) {
917 return EINVAL;
918 }
919 bzero(desc, sizeof(*desc));
920
921 struct rtentry *rt = (struct rtentry*)cookie;
922 desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
923 desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
924 desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);
925
926
927 // key/dest
928 struct sockaddr *sa;
929 if ((sa = rt_key(rt))) {
930 nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));
931 }
932
933 // mask
934 if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask)) {
935 memcpy(&desc->mask, sa, sa->sa_len);
936 }
937
938 // gateway
939 if ((sa = rt->rt_gateway)) {
940 nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));
941 }
942
943 if (rt->rt_ifp) {
944 desc->ifindex = rt->rt_ifp->if_index;
945 }
946
947 desc->flags = rt->rt_flags;
948
949 return 0;
950 }
951
952 static bool
nstat_route_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)953 nstat_route_reporting_allowed(
954 nstat_provider_cookie_t cookie,
955 nstat_provider_filter *filter,
956 __unused u_int64_t suppression_flags)
957 {
958 bool retval = true;
959
960 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
961 struct rtentry *rt = (struct rtentry*)cookie;
962 struct ifnet *ifp = rt->rt_ifp;
963
964 if (ifp) {
965 uint32_t interface_properties = nstat_ifnet_to_flags_extended(ifp);
966
967 if ((filter->npf_flags & interface_properties) == 0) {
968 retval = false;
969 }
970 }
971 }
972 return retval;
973 }
974
975 static bool
nstat_route_cookie_equal(nstat_provider_cookie_t cookie1,nstat_provider_cookie_t cookie2)976 nstat_route_cookie_equal(
977 nstat_provider_cookie_t cookie1,
978 nstat_provider_cookie_t cookie2)
979 {
980 struct rtentry *rt1 = (struct rtentry *)cookie1;
981 struct rtentry *rt2 = (struct rtentry *)cookie2;
982
983 return (rt1 == rt2) ? true : false;
984 }
985
986 static void
nstat_init_route_provider(void)987 nstat_init_route_provider(void)
988 {
989 bzero(&nstat_route_provider, sizeof(nstat_route_provider));
990 nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
991 nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
992 nstat_route_provider.nstat_lookup = nstat_route_lookup;
993 nstat_route_provider.nstat_gone = nstat_route_gone;
994 nstat_route_provider.nstat_counts = nstat_route_counts;
995 nstat_route_provider.nstat_release = nstat_route_release;
996 nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
997 nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
998 nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
999 nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
1000 nstat_route_provider.nstat_cookie_equal = nstat_route_cookie_equal;
1001 nstat_route_provider.next = nstat_providers;
1002 nstat_providers = &nstat_route_provider;
1003 }
1004
1005 #pragma mark -- Route Collection --
1006
1007 __private_extern__ struct nstat_counts*
nstat_route_attach(struct rtentry * rte)1008 nstat_route_attach(
1009 struct rtentry *rte)
1010 {
1011 struct nstat_counts *result = rte->rt_stats;
1012 if (result) {
1013 return result;
1014 }
1015
1016 result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t),
1017 Z_WAITOK | Z_ZERO);
1018 if (!result) {
1019 return result;
1020 }
1021
1022 if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats)) {
1023 nstat_free_aligned(result);
1024 result = rte->rt_stats;
1025 }
1026
1027 return result;
1028 }
1029
1030 __private_extern__ void
nstat_route_detach(struct rtentry * rte)1031 nstat_route_detach(
1032 struct rtentry *rte)
1033 {
1034 if (rte->rt_stats) {
1035 nstat_free_aligned(rte->rt_stats);
1036 rte->rt_stats = NULL;
1037 }
1038 }
1039
1040 __private_extern__ void
nstat_route_connect_attempt(struct rtentry * rte)1041 nstat_route_connect_attempt(
1042 struct rtentry *rte)
1043 {
1044 while (rte) {
1045 struct nstat_counts* stats = nstat_route_attach(rte);
1046 if (stats) {
1047 OSIncrementAtomic(&stats->nstat_connectattempts);
1048 }
1049
1050 rte = rte->rt_parent;
1051 }
1052 }
1053
1054 __private_extern__ void
nstat_route_connect_success(struct rtentry * rte)1055 nstat_route_connect_success(
1056 struct rtentry *rte)
1057 {
1058 // This route
1059 while (rte) {
1060 struct nstat_counts* stats = nstat_route_attach(rte);
1061 if (stats) {
1062 OSIncrementAtomic(&stats->nstat_connectsuccesses);
1063 }
1064
1065 rte = rte->rt_parent;
1066 }
1067 }
1068
1069 __private_extern__ void
nstat_route_tx(struct rtentry * rte,u_int32_t packets,u_int32_t bytes,u_int32_t flags)1070 nstat_route_tx(
1071 struct rtentry *rte,
1072 u_int32_t packets,
1073 u_int32_t bytes,
1074 u_int32_t flags)
1075 {
1076 while (rte) {
1077 struct nstat_counts* stats = nstat_route_attach(rte);
1078 if (stats) {
1079 if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0) {
1080 OSAddAtomic(bytes, &stats->nstat_txretransmit);
1081 } else {
1082 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
1083 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
1084 }
1085 }
1086
1087 rte = rte->rt_parent;
1088 }
1089 }
1090
1091 __private_extern__ void
nstat_route_rx(struct rtentry * rte,u_int32_t packets,u_int32_t bytes,u_int32_t flags)1092 nstat_route_rx(
1093 struct rtentry *rte,
1094 u_int32_t packets,
1095 u_int32_t bytes,
1096 u_int32_t flags)
1097 {
1098 while (rte) {
1099 struct nstat_counts* stats = nstat_route_attach(rte);
1100 if (stats) {
1101 if (flags == 0) {
1102 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
1103 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
1104 } else {
1105 if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER) {
1106 OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
1107 }
1108 if (flags & NSTAT_RX_FLAG_DUPLICATE) {
1109 OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
1110 }
1111 }
1112 }
1113
1114 rte = rte->rt_parent;
1115 }
1116 }
1117
/*
 * Atomically fold _new_val into the exponentially weighted moving average
 * stored at _val_addr:  avg' = avg - avg/2^_decay + new/2^_decay.
 * A stored value of zero means "no sample yet", so the first sample is
 * taken as-is.  The compare-and-swap is retried until the update lands on
 * an unchanged value.
 *
 * Fixes vs. the previous version: the stray semicolon after the
 * do/while(0) wrapper is gone (it made the macro expand to two statements,
 * breaking use in an unbraced if/else), and the arguments are fully
 * parenthesized so expression arguments can't misparse.
 */
#define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \
	volatile uint32_t _old_val; \
	volatile uint32_t _avg; \
	do { \
	        _old_val = *(_val_addr); \
	        if (_old_val == 0) \
	        { \
	                _avg = (_new_val); \
	        } \
	        else \
	        { \
	                _avg = _old_val - (_old_val >> (_decay)) + ((_new_val) >> (_decay)); \
	        } \
	        if (_old_val == _avg) break; \
	} while (!OSCompareAndSwap(_old_val, _avg, (_val_addr))); \
} while (0)
1135
/*
 * Atomically store min(current, _new_val) at _val_addr, where a current
 * value of zero means "no sample yet" and is always replaced.  The
 * compare-and-swap is retried until the store is consistent.
 *
 * Fixes vs. the previous version: the stray semicolon after the
 * do/while(0) wrapper is gone (it broke use in an unbraced if/else), and
 * the arguments are parenthesized against misparsing.
 */
#define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \
	volatile uint32_t _old_val; \
	do { \
	        _old_val = *(_val_addr); \
	        if (_old_val != 0 && _old_val < (_new_val)) \
	        { \
	                break; \
	        } \
	} while (!OSCompareAndSwap(_old_val, (_new_val), (_val_addr))); \
} while (0)
1147
1148 __private_extern__ void
nstat_route_rtt(struct rtentry * rte,u_int32_t rtt,u_int32_t rtt_var)1149 nstat_route_rtt(
1150 struct rtentry *rte,
1151 u_int32_t rtt,
1152 u_int32_t rtt_var)
1153 {
1154 const uint32_t decay = 3;
1155
1156 while (rte) {
1157 struct nstat_counts* stats = nstat_route_attach(rte);
1158 if (stats) {
1159 NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
1160 NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
1161 NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
1162 }
1163 rte = rte->rt_parent;
1164 }
1165 }
1166
1167 __private_extern__ void
nstat_route_update(struct rtentry * rte,uint32_t connect_attempts,uint32_t connect_successes,uint32_t rx_packets,uint32_t rx_bytes,uint32_t rx_duplicatebytes,uint32_t rx_outoforderbytes,uint32_t tx_packets,uint32_t tx_bytes,uint32_t tx_retransmit,uint32_t rtt,uint32_t rtt_var)1168 nstat_route_update(
1169 struct rtentry *rte,
1170 uint32_t connect_attempts,
1171 uint32_t connect_successes,
1172 uint32_t rx_packets,
1173 uint32_t rx_bytes,
1174 uint32_t rx_duplicatebytes,
1175 uint32_t rx_outoforderbytes,
1176 uint32_t tx_packets,
1177 uint32_t tx_bytes,
1178 uint32_t tx_retransmit,
1179 uint32_t rtt,
1180 uint32_t rtt_var)
1181 {
1182 const uint32_t decay = 3;
1183
1184 while (rte) {
1185 struct nstat_counts* stats = nstat_route_attach(rte);
1186 if (stats) {
1187 OSAddAtomic(connect_attempts, &stats->nstat_connectattempts);
1188 OSAddAtomic(connect_successes, &stats->nstat_connectsuccesses);
1189 OSAddAtomic64((SInt64)tx_packets, (SInt64*)&stats->nstat_txpackets);
1190 OSAddAtomic64((SInt64)tx_bytes, (SInt64*)&stats->nstat_txbytes);
1191 OSAddAtomic(tx_retransmit, &stats->nstat_txretransmit);
1192 OSAddAtomic64((SInt64)rx_packets, (SInt64*)&stats->nstat_rxpackets);
1193 OSAddAtomic64((SInt64)rx_bytes, (SInt64*)&stats->nstat_rxbytes);
1194 OSAddAtomic(rx_outoforderbytes, &stats->nstat_rxoutoforderbytes);
1195 OSAddAtomic(rx_duplicatebytes, &stats->nstat_rxduplicatebytes);
1196
1197 if (rtt != 0) {
1198 NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
1199 NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
1200 NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
1201 }
1202 }
1203 rte = rte->rt_parent;
1204 }
1205 }
1206
1207 #pragma mark -- TCP Kernel Provider --
1208
1209 /*
1210 * Due to the way the kernel deallocates a process (the process structure
1211 * might be gone by the time we get the PCB detach notification),
1212 * we need to cache the process name. Without this, proc_name() would
1213 * return null and the process name would never be sent to userland.
1214 *
1215 * For UDP sockets, we also store the cached the connection tuples along with
1216 * the interface index. This is necessary because when UDP sockets are
1217 * disconnected, the connection tuples are forever lost from the inpcb, thus
1218 * we need to keep track of the last call to connect() in ntstat.
1219 */
/*
 * Per-source cookie for the TCP/UDP kernel-socket providers.  Caches
 * identity that can disappear from the inpcb/proc before the detach
 * notification arrives (see the block comment above).
 */
struct nstat_tucookie {
	struct inpcb *inp;              // underlying PCB; for UDP, pinned via inp_nstat_refcnt
	char pname[MAXCOMLEN + 1];      // cached process name (proc may be gone at detach time)
	bool cached;                    // true when the tuple fields below hold valid cached values
	union{
		struct sockaddr_in v4;
		struct sockaddr_in6 v6;
	} local;                        // cached local address (filled by nstat_pcb_cache, UDP only)
	union{
		struct sockaddr_in v4;
		struct sockaddr_in6 v6;
	} remote;                       // cached remote address (filled by nstat_pcb_cache, UDP only)
	unsigned int if_index;          // cached index of the last output interface
	uint32_t ifnet_properties;      // cached NSTAT_SOURCE_*/interface flag bits
};
1235
/*
 * Allocate a tucookie for `inp`.
 *   ref    — when true, take a WNT_ACQUIRE reference on the inpcb first;
 *            returns NULL (after freeing the cookie) if the PCB is already
 *            stopping (WNT_STOPUSING).
 *   locked — whether the caller already holds the PCB lock; passed through
 *            to in_pcb_checkstate.
 * The allocation itself cannot fail (Z_NOFAIL).
 */
static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
	struct inpcb *inp,
	bool ref,
	bool locked)
{
	struct nstat_tucookie *cookie;

	cookie = kalloc_type(struct nstat_tucookie,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
	// When the caller does not hold the PCB lock, it must not hold
	// nstat_mtx either (lock-ordering sanity check).
	if (!locked) {
		LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
	}
	// Take the inpcb reference before publishing it in the cookie;
	// if the PCB is going away, undo the allocation.
	if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
		kfree_type(struct nstat_tucookie, cookie);
		return NULL;
	}
	cookie->inp = inp;
	// Cache the owning process name now; the proc may be gone by the
	// time the PCB detaches (see the block comment above the struct).
	proc_name(inp->inp_socket->last_pid, cookie->pname,
	    sizeof(cookie->pname));
	/*
	 * We only increment the reference count for UDP sockets because we
	 * only cache UDP socket tuples.
	 */
	if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP) {
		OSIncrementAtomic(&inp->inp_nstat_refcnt);
	}

	return cookie;
}
1266
/*
 * Allocate a tucookie for `inp` without taking an inpcb reference.
 * Caller must not hold nstat_mtx.
 */
__unused static struct nstat_tucookie *
nstat_tucookie_alloc(
	struct inpcb *inp)
{
	return nstat_tucookie_alloc_internal(inp, false, false);
}
1273
/*
 * Allocate a tucookie for `inp`, taking a WNT_ACQUIRE reference on the
 * inpcb (unlocked variant).  Returns NULL if the PCB is stopping.
 */
static struct nstat_tucookie *
nstat_tucookie_alloc_ref(
	struct inpcb *inp)
{
	return nstat_tucookie_alloc_internal(inp, true, false);
}
1280
/*
 * Allocate a tucookie for `inp`, taking a WNT_ACQUIRE reference on the
 * inpcb, for callers that already hold the PCB lock.
 * Returns NULL if the PCB is stopping.
 */
static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(
	struct inpcb *inp)
{
	return nstat_tucookie_alloc_internal(inp, true, true);
}
1287
/*
 * Release a tucookie: drop the UDP nstat refcount taken at allocation,
 * return the WNT reference on the inpcb, and free the cookie.
 *   inplock — whether the caller holds the PCB lock; passed through to
 *             in_pcb_checkstate.
 */
static void
nstat_tucookie_release_internal(
	struct nstat_tucookie *cookie,
	int inplock)
{
	// Mirrors the UDP-only increment in nstat_tucookie_alloc_internal.
	if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP) {
		OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
	}
	in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
	kfree_type(struct nstat_tucookie, cookie);
}
1299
/*
 * Release a tucookie when the caller does not hold the PCB lock.
 */
static void
nstat_tucookie_release(
	struct nstat_tucookie *cookie)
{
	nstat_tucookie_release_internal(cookie, false);
}
1306
/*
 * Release a tucookie when the caller already holds the PCB lock.
 */
static void
nstat_tucookie_release_locked(
	struct nstat_tucookie *cookie)
{
	nstat_tucookie_release_internal(cookie, true);
}
1313
1314
/*
 * Copy NECP domain information for a socket into *domain_info.
 *   domain_info == NULL — size probe: returns the space required.
 *   len too small       — returns 0, nothing copied.
 * Otherwise fills the (caller-zeroed) buffer via necp_copy_inp_domain_info
 * and returns the number of bytes populated.  Returns 0 when the PCB has
 * no socket.
 */
static size_t
nstat_inp_domain_info(struct inpcb *inp, nstat_domain_info *domain_info, size_t len)
{
	// Note, the caller has guaranteed that the buffer has been zeroed, there is no need to clear it again
	struct socket *so = inp->inp_socket;

	if (so == NULL) {
		return 0;
	}

	NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: Collecting stats");

	// Size-probe path: report required length without copying anything.
	if (domain_info == NULL) {
		return sizeof(nstat_domain_info);
	}

	if (len < sizeof(nstat_domain_info)) {
		return 0;
	}

	necp_copy_inp_domain_info(inp, so, domain_info);

	NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: <pid %d> Collected stats - domain <%s> owner <%s> ctxt <%s> bundle id <%s> "
	    "is_tracker %d is_non_app_initiated %d is_silent %d",
	    so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid,
	    domain_info->domain_name,
	    domain_info->domain_owner,
	    domain_info->domain_tracker_ctxt,
	    domain_info->domain_attributed_bundle_id,
	    domain_info->is_tracker,
	    domain_info->is_non_app_initiated,
	    domain_info->is_silent);

	return sizeof(nstat_domain_info);
}
1350
1351
1352 static nstat_provider nstat_tcp_provider;
1353
/*
 * TCP sources are only discovered through the watcher mechanism; direct
 * lookup of a specific connection is intentionally unsupported.
 */
static errno_t
nstat_tcp_lookup(
	__unused const void *data,
	__unused u_int32_t length,
	__unused nstat_provider_cookie_t *out_cookie)
{
	// Looking up a specific connection is not supported.
	return ENOTSUP;
}
1363
1364 static int
nstat_tcp_gone(nstat_provider_cookie_t cookie)1365 nstat_tcp_gone(
1366 nstat_provider_cookie_t cookie)
1367 {
1368 struct nstat_tucookie *tucookie =
1369 (struct nstat_tucookie *)cookie;
1370 struct inpcb *inp;
1371 struct tcpcb *tp;
1372
1373 return (!(inp = tucookie->inp) ||
1374 !(tp = intotcpcb(inp)) ||
1375 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1376 }
1377
/*
 * Snapshot counters for a kernel TCP source into *out_counts.
 * Sets *out_gone (if provided) to 1 when the PCB is dead or detached.
 * A dead-but-still-attached PCB still reports its final counters; a fully
 * detached one returns EINVAL with nothing useful in *out_counts.
 */
static errno_t
nstat_tcp_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts *out_counts,
	int *out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	bzero(out_counts, sizeof(*out_counts));

	if (out_gone) {
		*out_gone = 0;
	}

	// if the pcb is in the dead state, we should stop using it
	if (nstat_tcp_gone(cookie)) {
		if (out_gone) {
			*out_gone = 1;
		}
		// Only bail out when the inpcb or tcpcb is actually
		// unreachable; a merely-dead PCB falls through below.
		if (!(inp = tucookie->inp) || !intotcpcb(inp)) {
			return EINVAL;
		}
	}
	inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);

	// inpcb byte/packet counters are read with relaxed atomics; the
	// tcpcb fields below are read unlocked and may be inconsistent.
	out_counts->nstat_rxpackets = os_atomic_load(&inp->inp_stat->rxpackets, relaxed);
	out_counts->nstat_rxbytes = os_atomic_load(&inp->inp_stat->rxbytes, relaxed);
	out_counts->nstat_txpackets = os_atomic_load(&inp->inp_stat->txpackets, relaxed);
	out_counts->nstat_txbytes = os_atomic_load(&inp->inp_stat->txbytes, relaxed);
	out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
	out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
	out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
	// Connection attempts/successes are derived from the TCP state machine.
	out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
	out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
	out_counts->nstat_avg_rtt = tp->t_srtt;
	out_counts->nstat_min_rtt = tp->t_rttbest;
	out_counts->nstat_var_rtt = tp->t_rttvar;
	// Keep the reported minimum consistent with the average.
	if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt) {
		out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
	}
	// Per-interface-class byte counters.
	out_counts->nstat_cell_rxbytes = os_atomic_load(&inp->inp_cstat->rxbytes, relaxed);
	out_counts->nstat_cell_txbytes = os_atomic_load(&inp->inp_cstat->txbytes, relaxed);
	out_counts->nstat_wifi_rxbytes = os_atomic_load(&inp->inp_wstat->rxbytes, relaxed);
	out_counts->nstat_wifi_txbytes = os_atomic_load(&inp->inp_wstat->txbytes, relaxed);
	out_counts->nstat_wired_rxbytes = os_atomic_load(&inp->inp_Wstat->rxbytes, relaxed);
	out_counts->nstat_wired_txbytes = os_atomic_load(&inp->inp_Wstat->txbytes, relaxed);

	return 0;
}
1430
/*
 * Release a TCP source cookie.  `locked` indicates whether the caller
 * holds the PCB lock and is passed through to the release helper.
 */
static void
nstat_tcp_release(
	nstat_provider_cookie_t cookie,
	int locked)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}
1441
/*
 * Register `state` as a TCP watcher and enumerate all existing TCP PCBs
 * as sources for it.  Performed under tcbinfo.ipi_lock so that each
 * existing socket is added exactly once relative to nstat_tcp_new_pcb.
 */
static errno_t
nstat_tcp_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// There is a tricky issue around getting all TCP sockets added once
	// and only once. nstat_tcp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the tcbinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_tcp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(&tcbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);
	if (result == 0) {
		OSIncrementAtomic(&nstat_tcp_watchers);

		// Add all current tcp inpcbs. Ignore those in timewait
		struct inpcb *inp;
		struct nstat_tucookie *cookie;
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
		{
			// Skip PCBs that are already stopping (alloc_ref fails).
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL) {
				continue;
			}
			// On failure, drop the cookie (and its PCB reference) and
			// stop scanning — likely resource exhaustion.
			if (nstat_control_source_add(0, state, &nstat_tcp_provider,
			    cookie) != 0) {
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(&tcbinfo.ipi_lock);

	return result;
}
1485
/*
 * A TCP watcher client is going away: atomically decrement the global
 * count of TCP watchers so the new-PCB path can fast-path out at zero.
 */
static void
nstat_tcp_remove_watcher(
	__unused nstat_control_state *state)
{
	OSDecrementAtomic(&nstat_tcp_watchers);
}
1492
/*
 * Called when a new TCP PCB is created: stamp its start time and, if any
 * clients are watching TCP, register the PCB as a source with each of
 * them.  Takes the socket lock before nstat_mtx.
 */
__private_extern__ void
nstat_tcp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	// Start timestamp is recorded unconditionally, watchers or not.
	inp->inp_start_timestamp = mach_continuous_time();

	if (nstat_tcp_watchers == 0) {
		return;
	}

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0) {
			// this client is watching tcp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL) {
				continue;
			}
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_tcp_provider, cookie) != 0) {
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
1527
/*
 * Called when an inpcb is being detached: find every nstat source (across
 * all clients) that references this PCB, send it a goodbye message, and
 * move it to a private list.  The actual cleanup happens after all nstat
 * locks are dropped.
 */
__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	tailq_head_nstat_src dead_list;
	struct nstat_tucookie *tucookie;
	errno_t result;

	// Fast path: no TCP or UDP watchers at all.
	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
		return;
	}

	TAILQ_INIT(&dead_list);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		// Find this client's source (if any) for the detaching PCB.
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
			if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
				tucookie = (struct nstat_tucookie *)src->cookie;
				if (tucookie->inp == inp) {
					break;
				}
			}
		}

		if (src) {
			result = nstat_control_send_goodbye(state, src);

			// Detach the source from the client and park it for
			// cleanup once the locks are released.
			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);

	// Clean up the parked sources outside nstat_mtx/ncs_mtx.
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
1571
/*
 * Deliver a PCB event (e.g. a connection state change) to every client
 * whose TCP or UDP provider filter is interested in it.  Pre-filters on
 * the merged event masks to avoid taking locks when nobody cares.
 */
__private_extern__ void
nstat_pcb_event(struct inpcb *inp, u_int64_t event)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;
	errno_t result;
	nstat_provider_id_t provider_id;

	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
		return;
	}
	if (((merged_filters.mpf_filters[NSTAT_PROVIDER_TCP_KERNEL].mf_events & event) == 0) &&
	    ((merged_filters.mpf_filters[NSTAT_PROVIDER_UDP_KERNEL].mf_events & event) == 0)) {
		// There are clients for TCP and UDP, but none are interested in the event
		// This check saves taking the mutex and scanning the list
		return;
	}
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		// Skip clients whose filters don't include this event.
		if (((state->ncs_provider_filters[NSTAT_PROVIDER_TCP_KERNEL].npf_events & event) == 0) &&
		    ((state->ncs_provider_filters[NSTAT_PROVIDER_UDP_KERNEL].npf_events & event) == 0)) {
			continue;
		}
		lck_mtx_lock(&state->ncs_mtx);
		// Find this client's source for the PCB, if any.
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			provider_id = src->provider->nstat_provider_id;
			if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
				tucookie = (struct nstat_tucookie *)src->cookie;
				if (tucookie->inp == inp) {
					break;
				}
			}
		}

		// provider_id is only meaningful here when src != NULL
		// (the && short-circuit guarantees it was set in the loop).
		if (src && ((state->ncs_provider_filters[provider_id].npf_events & event) != 0)) {
			result = nstat_control_send_event(state, src, event);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
1615
1616
/*
 * Cache a UDP socket's connection tuple and interface info on every nstat
 * cookie that references it.  Called before the tuple is lost from the
 * inpcb (e.g. on disconnect) — see the nstat_tucookie block comment.
 */
__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	// Nothing to do unless a UDP watcher holds a reference on this PCB.
	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0) {
		return;
	}
	// Tuple caching is a UDP-only mechanism.
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp) {
				// Snapshot the tuple in whichever family the PCB uses.
				if (inp->inp_vflag & INP_IPV6) {
					in6_ip6_to_sockaddr(&inp->in6p_laddr,
					    inp->inp_lport,
					    inp->inp_lifscope,
					    &tucookie->local.v6,
					    sizeof(tucookie->local));
					in6_ip6_to_sockaddr(&inp->in6p_faddr,
					    inp->inp_fport,
					    inp->inp_fifscope,
					    &tucookie->remote.v6,
					    sizeof(tucookie->remote));
				} else if (inp->inp_vflag & INP_IPV4) {
					nstat_ip_to_sockaddr(&inp->inp_laddr,
					    inp->inp_lport,
					    &tucookie->local.v4,
					    sizeof(tucookie->local));
					nstat_ip_to_sockaddr(&inp->inp_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v4,
					    sizeof(tucookie->remote));
				}
				if (inp->inp_last_outifp) {
					tucookie->if_index =
					    inp->inp_last_outifp->if_index;
				}

				tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
				tucookie->cached = true;
				// At most one source per client references this PCB.
				break;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
1671
/*
 * Mark any cached tuple for this UDP socket as stale (e.g. after it is
 * reconnected, so live inpcb data should be used again).
 * Counterpart of nstat_pcb_cache.
 */
__private_extern__ void
nstat_pcb_invalidate_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	// Nothing to do unless a UDP watcher holds a reference on this PCB.
	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0) {
		return;
	}
	// Tuple caching is a UDP-only mechanism.
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp) {
				tucookie->cached = false;
				// At most one source per client references this PCB.
				break;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
1699
/*
 * Fill out a TCP descriptor for a kernel-socket source.
 * Returns EINVAL when the buffer is too small or the PCB is gone.
 * NOTE: most fields are read without the socket/PCB locks held and may be
 * mutually inconsistent (see the inline "danger" comment below).
 */
static errno_t
nstat_tcp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	size_t len)
{
	if (len < sizeof(nstat_tcp_descriptor)) {
		return EINVAL;
	}

	if (nstat_tcp_gone(cookie)) {
		return EINVAL;
	}

	nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data;
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);
	bzero(desc, sizeof(*desc));

	// Local/remote endpoints, in whichever family the PCB uses.
	if (inp->inp_vflag & INP_IPV6) {
		in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope,
		    &desc->local.v6, sizeof(desc->local));
		in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope,
		    &desc->remote.v6, sizeof(desc->remote));
	} else if (inp->inp_vflag & INP_IPV4) {
		nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
		    &desc->local.v4, sizeof(desc->local));
		nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
		    &desc->remote.v4, sizeof(desc->remote));
	}

	desc->state = intotcpcb(inp)->t_state;
	desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
	    inp->inp_last_outifp->if_index;

	// danger - not locked, values could be bogus
	desc->txunacked = tp->snd_max - tp->snd_una;
	desc->txwindow = tp->snd_wnd;
	desc->txcwindow = tp->snd_cwnd;
	desc->ifnet_properties = nstat_inpcb_to_flags(inp);

	// Name of the congestion-control algorithm in use.
	if (CC_ALGO(tp)->name != NULL) {
		strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
		    sizeof(desc->cc_algo));
	}

	struct socket *so = inp->inp_socket;
	if (so) {
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		desc->traffic_class = so->so_traffic_class;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND)) {
			desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
		}
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG)) {
			desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
		}
		// Classify direction; listener/outbound classifications are also
		// cached on the cookie so later reports (after close) can reuse them.
		if (so->so_flags1 & SOF1_INBOUND) {
			desc->ifnet_properties |= NSTAT_SOURCE_IS_INBOUND;
		} else if (desc->state == TCPS_LISTEN) {
			desc->ifnet_properties |= NSTAT_SOURCE_IS_LISTENER;
			tucookie->ifnet_properties = NSTAT_SOURCE_IS_LISTENER;
		} else if (desc->state != TCPS_CLOSED) {
			desc->ifnet_properties |= NSTAT_SOURCE_IS_OUTBOUND;
			tucookie->ifnet_properties = NSTAT_SOURCE_IS_OUTBOUND;
		} else {
			desc->ifnet_properties |= tucookie->ifnet_properties;
		}
		// Prefer the live process name; fall back to the name cached at
		// cookie-allocation time if the proc is already gone, otherwise
		// refresh the cookie's cached copy.
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			// Delegated socket: report the effective identity too.
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			// Not delegated: effective identity equals the last user.
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		uuid_copy(desc->fuuid, inp->necp_client_uuid);
		desc->persona_id = so->so_persona_id;
		desc->uid = kauth_cred_getuid(so->so_cred);
		desc->sndbufsize = so->so_snd.sb_hiwat;
		desc->sndbufused = so->so_snd.sb_cc;
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
		desc->fallback_mode = so->so_fallback_mode;

		if (nstat_debug) {
			uuid_string_t euuid_str = { 0 };
			uuid_unparse(desc->euuid, euuid_str);
			NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: TCP - pid %d uid %d euuid %s persona id %d", desc->pid, desc->uid, euuid_str, desc->persona_id);
		}
	}

	tcp_get_connectivity_status(tp, &desc->connstatus);
	inp_get_activity_bitmap(inp, &desc->activity_bitmap);
	desc->start_timestamp = inp->inp_start_timestamp;
	desc->timestamp = mach_continuous_time();
	return 0;
}
1814
1815 static bool
nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,bool is_UDP)1816 nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
1817 {
1818 bool retval = true;
1819
1820 if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS | NSTAT_FILTER_SPECIFIC_USER)) != 0) {
1821 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1822 struct inpcb *inp = tucookie->inp;
1823
1824 /* Only apply interface filter if at least one is allowed. */
1825 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
1826 uint32_t interface_properties = nstat_inpcb_to_flags(inp);
1827
1828 if ((filter->npf_flags & interface_properties) == 0) {
1829 // For UDP, we could have an undefined interface and yet transfers may have occurred.
1830 // We allow reporting if there have been transfers of the requested kind.
1831 // This is imperfect as we cannot account for the expensive attribute over wifi.
1832 // We also assume that cellular is expensive and we have no way to select for AWDL
1833 if (is_UDP) {
1834 do{
1835 if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR | NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
1836 (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes)) {
1837 break;
1838 }
1839 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
1840 (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes)) {
1841 break;
1842 }
1843 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
1844 (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes)) {
1845 break;
1846 }
1847 return false;
1848 } while (0);
1849 } else {
1850 return false;
1851 }
1852 }
1853 }
1854
1855 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval)) {
1856 struct socket *so = inp->inp_socket;
1857 retval = false;
1858
1859 if (so) {
1860 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
1861 (filter->npf_pid == so->last_pid)) {
1862 retval = true;
1863 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
1864 (filter->npf_pid == (so->so_flags & SOF_DELEGATED)? so->e_upid : so->last_pid)) {
1865 retval = true;
1866 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
1867 (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0)) {
1868 retval = true;
1869 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
1870 (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
1871 sizeof(so->last_uuid)) == 0)) {
1872 retval = true;
1873 }
1874 }
1875 }
1876 }
1877 return retval;
1878 }
1879
static bool
nstat_tcp_reporting_allowed(
	nstat_provider_cookie_t cookie,
	nstat_provider_filter *filter,
	__unused u_int64_t suppression_flags)
{
	// Thin wrapper: run the shared TCP/UDP filter check with is_UDP == FALSE,
	// so the UDP-only "allow if any traffic of the requested kind" relaxation
	// does not apply to TCP sources.
	return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
}
1888
1889 static size_t
nstat_tcp_extensions(nstat_provider_cookie_t cookie,u_int32_t extension_id,void * buf,size_t len)1890 nstat_tcp_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
1891 {
1892 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1893 struct inpcb *inp = tucookie->inp;
1894
1895 if (nstat_tcp_gone(cookie)) {
1896 return 0;
1897 }
1898
1899 switch (extension_id) {
1900 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
1901 return nstat_inp_domain_info(inp, (nstat_domain_info *)buf, len);
1902
1903 case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV:
1904 default:
1905 break;
1906 }
1907 return 0;
1908 }
1909
1910 static void
nstat_init_tcp_provider(void)1911 nstat_init_tcp_provider(void)
1912 {
1913 bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
1914 nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
1915 nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
1916 nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
1917 nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
1918 nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
1919 nstat_tcp_provider.nstat_release = nstat_tcp_release;
1920 nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
1921 nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
1922 nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
1923 nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
1924 nstat_tcp_provider.nstat_copy_extension = nstat_tcp_extensions;
1925 nstat_tcp_provider.next = nstat_providers;
1926 nstat_providers = &nstat_tcp_provider;
1927 }
1928
1929 #pragma mark -- UDP Provider --
1930
1931 static nstat_provider nstat_udp_provider;
1932
static errno_t
nstat_udp_lookup(
	__unused const void *data,
	__unused u_int32_t length,
	__unused nstat_provider_cookie_t *out_cookie)
{
	// Looking up a specific connection is not supported.
	// UDP sources are only discovered through the watcher path.
	return ENOTSUP;
}
1942
1943 static int
nstat_udp_gone(nstat_provider_cookie_t cookie)1944 nstat_udp_gone(
1945 nstat_provider_cookie_t cookie)
1946 {
1947 struct nstat_tucookie *tucookie =
1948 (struct nstat_tucookie *)cookie;
1949 struct inpcb *inp;
1950
1951 return (!(inp = tucookie->inp) ||
1952 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1953 }
1954
1955 static errno_t
nstat_udp_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)1956 nstat_udp_counts(
1957 nstat_provider_cookie_t cookie,
1958 struct nstat_counts *out_counts,
1959 int *out_gone)
1960 {
1961 struct nstat_tucookie *tucookie =
1962 (struct nstat_tucookie *)cookie;
1963
1964 if (out_gone) {
1965 *out_gone = 0;
1966 }
1967
1968 // if the pcb is in the dead state, we should stop using it
1969 if (nstat_udp_gone(cookie)) {
1970 if (out_gone) {
1971 *out_gone = 1;
1972 }
1973 if (!tucookie->inp) {
1974 return EINVAL;
1975 }
1976 }
1977 struct inpcb *inp = tucookie->inp;
1978
1979 out_counts->nstat_rxpackets = os_atomic_load(&inp->inp_stat->rxpackets, relaxed);
1980 out_counts->nstat_rxbytes = os_atomic_load(&inp->inp_stat->rxbytes, relaxed);
1981 out_counts->nstat_txpackets = os_atomic_load(&inp->inp_stat->txpackets, relaxed);
1982 out_counts->nstat_txbytes = os_atomic_load(&inp->inp_stat->txbytes, relaxed);
1983 out_counts->nstat_cell_rxbytes = os_atomic_load(&inp->inp_cstat->rxbytes, relaxed);
1984 out_counts->nstat_cell_txbytes = os_atomic_load(&inp->inp_cstat->txbytes, relaxed);
1985 out_counts->nstat_wifi_rxbytes = os_atomic_load(&inp->inp_wstat->rxbytes, relaxed);
1986 out_counts->nstat_wifi_txbytes = os_atomic_load(&inp->inp_wstat->txbytes, relaxed);
1987 out_counts->nstat_wired_rxbytes = os_atomic_load(&inp->inp_Wstat->rxbytes, relaxed);
1988 out_counts->nstat_wired_txbytes = os_atomic_load(&inp->inp_Wstat->txbytes, relaxed);
1989
1990 return 0;
1991 }
1992
static void
nstat_udp_release(
	nstat_provider_cookie_t cookie,
	int locked)
{
	// Drop a reference on the shadow cookie. "locked" tells the release
	// helper whether the caller already holds the relevant lock.
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;

	nstat_tucookie_release_internal(tucookie, locked);
}
2003
2004 static errno_t
nstat_udp_add_watcher(nstat_control_state * state,nstat_msg_add_all_srcs * req)2005 nstat_udp_add_watcher(
2006 nstat_control_state *state,
2007 nstat_msg_add_all_srcs *req)
2008 {
2009 // There is a tricky issue around getting all UDP sockets added once
2010 // and only once. nstat_udp_new_pcb() is called prior to the new item
2011 // being placed on any lists where it might be found.
2012 // By locking the udpinfo.ipi_lock prior to marking the state as a watcher,
2013 // it should be impossible for a new socket to be added twice.
2014 // On the other hand, there is still a timing issue where a new socket
2015 // results in a call to nstat_udp_new_pcb() before this watcher
2016 // is instantiated and yet the socket doesn't make it into ipi_listhead
2017 // prior to the scan. <rdar://problem/30361716>
2018
2019 errno_t result;
2020
2021 lck_rw_lock_shared(&udbinfo.ipi_lock);
2022 result = nstat_set_provider_filter(state, req);
2023
2024 if (result == 0) {
2025 struct inpcb *inp;
2026 struct nstat_tucookie *cookie;
2027
2028 OSIncrementAtomic(&nstat_udp_watchers);
2029
2030 // Add all current UDP inpcbs.
2031 LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
2032 {
2033 cookie = nstat_tucookie_alloc_ref(inp);
2034 if (cookie == NULL) {
2035 continue;
2036 }
2037 if (nstat_control_source_add(0, state, &nstat_udp_provider,
2038 cookie) != 0) {
2039 nstat_tucookie_release(cookie);
2040 break;
2041 }
2042 }
2043 }
2044
2045 lck_rw_done(&udbinfo.ipi_lock);
2046
2047 return result;
2048 }
2049
static void
nstat_udp_remove_watcher(
	__unused nstat_control_state *state)
{
	// Just balance the watcher count; individual sources are released
	// through the provider's release callback.
	OSDecrementAtomic(&nstat_udp_watchers);
}
2056
2057 __private_extern__ void
nstat_udp_new_pcb(struct inpcb * inp)2058 nstat_udp_new_pcb(
2059 struct inpcb *inp)
2060 {
2061 struct nstat_tucookie *cookie;
2062
2063 inp->inp_start_timestamp = mach_continuous_time();
2064
2065 if (nstat_udp_watchers == 0) {
2066 return;
2067 }
2068
2069 socket_lock(inp->inp_socket, 0);
2070 lck_mtx_lock(&nstat_mtx);
2071 nstat_control_state *state;
2072 for (state = nstat_controls; state; state = state->ncs_next) {
2073 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0) {
2074 // this client is watching tcp
2075 // acquire a reference for it
2076 cookie = nstat_tucookie_alloc_ref_locked(inp);
2077 if (cookie == NULL) {
2078 continue;
2079 }
2080 // add the source, if that fails, release the reference
2081 if (nstat_control_source_add(0, state,
2082 &nstat_udp_provider, cookie) != 0) {
2083 nstat_tucookie_release_locked(cookie);
2084 break;
2085 }
2086 }
2087 }
2088 lck_mtx_unlock(&nstat_mtx);
2089 socket_unlock(inp->inp_socket, 0);
2090 }
2091
2092 static errno_t
nstat_udp_copy_descriptor(nstat_provider_cookie_t cookie,void * data,size_t len)2093 nstat_udp_copy_descriptor(
2094 nstat_provider_cookie_t cookie,
2095 void *data,
2096 size_t len)
2097 {
2098 if (len < sizeof(nstat_udp_descriptor)) {
2099 return EINVAL;
2100 }
2101
2102 if (nstat_udp_gone(cookie)) {
2103 return EINVAL;
2104 }
2105
2106 struct nstat_tucookie *tucookie =
2107 (struct nstat_tucookie *)cookie;
2108 nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data;
2109 struct inpcb *inp = tucookie->inp;
2110
2111 bzero(desc, sizeof(*desc));
2112
2113 if (tucookie->cached == false) {
2114 if (inp->inp_vflag & INP_IPV6) {
2115 in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope,
2116 &desc->local.v6, sizeof(desc->local.v6));
2117 in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope,
2118 &desc->remote.v6, sizeof(desc->remote.v6));
2119 } else if (inp->inp_vflag & INP_IPV4) {
2120 nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
2121 &desc->local.v4, sizeof(desc->local.v4));
2122 nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
2123 &desc->remote.v4, sizeof(desc->remote.v4));
2124 }
2125 desc->ifnet_properties = nstat_inpcb_to_flags(inp);
2126 } else {
2127 if (inp->inp_vflag & INP_IPV6) {
2128 memcpy(&desc->local.v6, &tucookie->local.v6,
2129 sizeof(desc->local.v6));
2130 memcpy(&desc->remote.v6, &tucookie->remote.v6,
2131 sizeof(desc->remote.v6));
2132 } else if (inp->inp_vflag & INP_IPV4) {
2133 memcpy(&desc->local.v4, &tucookie->local.v4,
2134 sizeof(desc->local.v4));
2135 memcpy(&desc->remote.v4, &tucookie->remote.v4,
2136 sizeof(desc->remote.v4));
2137 }
2138 desc->ifnet_properties = tucookie->ifnet_properties;
2139 }
2140
2141 if (inp->inp_last_outifp) {
2142 desc->ifindex = inp->inp_last_outifp->if_index;
2143 } else {
2144 desc->ifindex = tucookie->if_index;
2145 }
2146
2147 struct socket *so = inp->inp_socket;
2148 if (so) {
2149 // TBD - take the socket lock around these to make sure
2150 // they're in sync?
2151 desc->upid = so->last_upid;
2152 desc->pid = so->last_pid;
2153 proc_name(desc->pid, desc->pname, sizeof(desc->pname));
2154 if (desc->pname[0] == 0) {
2155 strlcpy(desc->pname, tucookie->pname,
2156 sizeof(desc->pname));
2157 } else {
2158 desc->pname[sizeof(desc->pname) - 1] = 0;
2159 strlcpy(tucookie->pname, desc->pname,
2160 sizeof(tucookie->pname));
2161 }
2162 memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
2163 memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
2164 if (so->so_flags & SOF_DELEGATED) {
2165 desc->eupid = so->e_upid;
2166 desc->epid = so->e_pid;
2167 memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
2168 } else {
2169 desc->eupid = desc->upid;
2170 desc->epid = desc->pid;
2171 memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
2172 }
2173 uuid_copy(desc->fuuid, inp->necp_client_uuid);
2174 desc->persona_id = so->so_persona_id;
2175 desc->uid = kauth_cred_getuid(so->so_cred);
2176 desc->rcvbufsize = so->so_rcv.sb_hiwat;
2177 desc->rcvbufused = so->so_rcv.sb_cc;
2178 desc->traffic_class = so->so_traffic_class;
2179 desc->fallback_mode = so->so_fallback_mode;
2180 inp_get_activity_bitmap(inp, &desc->activity_bitmap);
2181 desc->start_timestamp = inp->inp_start_timestamp;
2182 desc->timestamp = mach_continuous_time();
2183
2184 if (nstat_debug) {
2185 uuid_string_t euuid_str = { 0 };
2186 uuid_unparse(desc->euuid, euuid_str);
2187 NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: UDP - pid %d uid %d euuid %s persona id %d", desc->pid, desc->uid, euuid_str, desc->persona_id);
2188 }
2189 }
2190
2191 return 0;
2192 }
2193
static bool
nstat_udp_reporting_allowed(
	nstat_provider_cookie_t cookie,
	nstat_provider_filter *filter,
	__unused u_int64_t suppression_flags)
{
	// Thin wrapper: shared TCP/UDP filter logic with is_UDP == TRUE, which
	// enables the UDP-only "allow if traffic of the requested kind occurred"
	// relaxation.
	return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
}
2202
2203
2204 static size_t
nstat_udp_extensions(nstat_provider_cookie_t cookie,u_int32_t extension_id,void * buf,size_t len)2205 nstat_udp_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
2206 {
2207 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
2208 struct inpcb *inp = tucookie->inp;
2209 if (nstat_udp_gone(cookie)) {
2210 return 0;
2211 }
2212
2213 switch (extension_id) {
2214 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
2215 return nstat_inp_domain_info(inp, (nstat_domain_info *)buf, len);
2216
2217 default:
2218 break;
2219 }
2220 return 0;
2221 }
2222
2223
2224 static void
nstat_init_udp_provider(void)2225 nstat_init_udp_provider(void)
2226 {
2227 bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
2228 nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
2229 nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
2230 nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
2231 nstat_udp_provider.nstat_gone = nstat_udp_gone;
2232 nstat_udp_provider.nstat_counts = nstat_udp_counts;
2233 nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
2234 nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
2235 nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
2236 nstat_udp_provider.nstat_release = nstat_udp_release;
2237 nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
2238 nstat_udp_provider.nstat_copy_extension = nstat_udp_extensions;
2239 nstat_udp_provider.next = nstat_providers;
2240 nstat_providers = &nstat_udp_provider;
2241 }
2242
2243 #if SKYWALK
2244
2245 #pragma mark -- TCP/UDP/QUIC Userland
2246
2247 // Almost all of this infrastucture is common to both TCP and UDP
2248
2249 static u_int32_t nstat_userland_quic_watchers = 0;
2250 static u_int32_t nstat_userland_udp_watchers = 0;
2251 static u_int32_t nstat_userland_tcp_watchers = 0;
2252
2253 static u_int32_t nstat_userland_quic_shadows = 0;
2254 static u_int32_t nstat_userland_udp_shadows = 0;
2255 static u_int32_t nstat_userland_tcp_shadows = 0;
2256
2257 static nstat_provider nstat_userland_quic_provider;
2258 static nstat_provider nstat_userland_udp_provider;
2259 static nstat_provider nstat_userland_tcp_provider;
2260
// Tri-state override for how a userland source's cell-fallback status is
// reported: when not set, the provider's own descriptor values stand.
enum nstat_rnf_override {
	nstat_rnf_override_not_set,
	nstat_rnf_override_enabled,
	nstat_rnf_override_disabled
};
2266
// Kernel-side shadow of a userland (Skywalk) TCP/UDP/QUIC flow. Holds the
// callbacks and context needed to pull stats from the owning process on demand.
struct nstat_tu_shadow {
	tailq_entry_tu_shadow shad_link;                             // membership in nstat_userprot_shad_head
	userland_stats_request_vals_fn *shad_getvals_fn;             // pulls ifflags/counts/descriptor from userland
	userland_stats_request_extension_fn *shad_get_extension_fn;  // pulls extended-update content from userland
	userland_stats_provider_context *shad_provider_context;      // opaque context handed back to the callbacks
	u_int64_t shad_properties;                                   // provider-supplied ifnet property bits
	u_int64_t shad_start_timestamp;                              // mach_continuous_time() when the shadow was opened
	nstat_provider_id_t shad_provider;                           // TCP/UDP/QUIC userland provider id
	struct nstat_procdetails *shad_procdetails;                  // retained details of the owning process
	bool shad_live; // false if defunct
	enum nstat_rnf_override shad_rnf_override;                   // cell-fallback reporting override
	uint32_t shad_magic;                                         // TU_SHADOW_MAGIC while valid
};
2280
2281 // Magic number checking should remain in place until the userland provider has been fully proven
2282 #define TU_SHADOW_MAGIC 0xfeedf00d
2283 #define TU_SHADOW_UNMAGIC 0xdeaddeed
2284
2285 static tailq_head_tu_shadow nstat_userprot_shad_head = TAILQ_HEAD_INITIALIZER(nstat_userprot_shad_head);
2286
2287 static errno_t
nstat_userland_tu_lookup(__unused const void * data,__unused u_int32_t length,__unused nstat_provider_cookie_t * out_cookie)2288 nstat_userland_tu_lookup(
2289 __unused const void *data,
2290 __unused u_int32_t length,
2291 __unused nstat_provider_cookie_t *out_cookie)
2292 {
2293 // Looking up a specific connection is not supported
2294 return ENOTSUP;
2295 }
2296
2297 static int
nstat_userland_tu_gone(__unused nstat_provider_cookie_t cookie)2298 nstat_userland_tu_gone(
2299 __unused nstat_provider_cookie_t cookie)
2300 {
2301 // Returns non-zero if the source has gone.
2302 // We don't keep a source hanging around, so the answer is always 0
2303 return 0;
2304 }
2305
2306 static errno_t
nstat_userland_tu_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)2307 nstat_userland_tu_counts(
2308 nstat_provider_cookie_t cookie,
2309 struct nstat_counts *out_counts,
2310 int *out_gone)
2311 {
2312 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2313 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2314 assert(shad->shad_live);
2315
2316 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, out_counts, NULL);
2317
2318 if (out_gone) {
2319 *out_gone = 0;
2320 }
2321
2322 return (result)? 0 : EIO;
2323 }
2324
2325
2326 static errno_t
nstat_userland_tu_copy_descriptor(nstat_provider_cookie_t cookie,void * data,__unused size_t len)2327 nstat_userland_tu_copy_descriptor(
2328 nstat_provider_cookie_t cookie,
2329 void *data,
2330 __unused size_t len)
2331 {
2332 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2333 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2334 assert(shad->shad_live);
2335 struct nstat_procdetails *procdetails = shad->shad_procdetails;
2336 assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
2337
2338 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, data);
2339
2340 switch (shad->shad_provider) {
2341 case NSTAT_PROVIDER_TCP_USERLAND:
2342 {
2343 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)data;
2344 desc->pid = procdetails->pdet_pid;
2345 desc->upid = procdetails->pdet_upid;
2346 uuid_copy(desc->uuid, procdetails->pdet_uuid);
2347 strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
2348 if (shad->shad_rnf_override == nstat_rnf_override_enabled) {
2349 desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
2350 desc->fallback_mode = SO_FALLBACK_MODE_FAST;
2351 } else if (shad->shad_rnf_override == nstat_rnf_override_disabled) {
2352 desc->ifnet_properties &= ~NSTAT_IFNET_VIA_CELLFALLBACK;
2353 desc->fallback_mode = SO_FALLBACK_MODE_NONE;
2354 }
2355 desc->ifnet_properties |= (uint32_t)shad->shad_properties;
2356 desc->start_timestamp = shad->shad_start_timestamp;
2357 desc->timestamp = mach_continuous_time();
2358 }
2359 break;
2360 case NSTAT_PROVIDER_UDP_USERLAND:
2361 {
2362 nstat_udp_descriptor *desc = (nstat_udp_descriptor *)data;
2363 desc->pid = procdetails->pdet_pid;
2364 desc->upid = procdetails->pdet_upid;
2365 uuid_copy(desc->uuid, procdetails->pdet_uuid);
2366 strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
2367 if (shad->shad_rnf_override == nstat_rnf_override_enabled) {
2368 desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
2369 desc->fallback_mode = SO_FALLBACK_MODE_FAST;
2370 } else if (shad->shad_rnf_override == nstat_rnf_override_disabled) {
2371 desc->ifnet_properties &= ~NSTAT_IFNET_VIA_CELLFALLBACK;
2372 desc->fallback_mode = SO_FALLBACK_MODE_NONE;
2373 }
2374 desc->ifnet_properties |= (uint32_t)shad->shad_properties;
2375 desc->start_timestamp = shad->shad_start_timestamp;
2376 desc->timestamp = mach_continuous_time();
2377 }
2378 break;
2379 case NSTAT_PROVIDER_QUIC_USERLAND:
2380 {
2381 nstat_quic_descriptor *desc = (nstat_quic_descriptor *)data;
2382 desc->pid = procdetails->pdet_pid;
2383 desc->upid = procdetails->pdet_upid;
2384 uuid_copy(desc->uuid, procdetails->pdet_uuid);
2385 strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
2386 if (shad->shad_rnf_override == nstat_rnf_override_enabled) {
2387 desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
2388 desc->fallback_mode = SO_FALLBACK_MODE_FAST;
2389 } else if (shad->shad_rnf_override == nstat_rnf_override_disabled) {
2390 desc->ifnet_properties &= ~NSTAT_IFNET_VIA_CELLFALLBACK;
2391 desc->fallback_mode = SO_FALLBACK_MODE_NONE;
2392 }
2393 desc->ifnet_properties |= (uint32_t)shad->shad_properties;
2394 desc->start_timestamp = shad->shad_start_timestamp;
2395 desc->timestamp = mach_continuous_time();
2396 }
2397 break;
2398 default:
2399 break;
2400 }
2401 return (result)? 0 : EIO;
2402 }
2403
2404 static void
nstat_userland_tu_release(__unused nstat_provider_cookie_t cookie,__unused int locked)2405 nstat_userland_tu_release(
2406 __unused nstat_provider_cookie_t cookie,
2407 __unused int locked)
2408 {
2409 // Called when a nstat_src is detached.
2410 // We don't reference count or ask for delayed release so nothing to do here.
2411 // Note that any associated nstat_tu_shadow may already have been released.
2412 }
2413
2414 static bool
check_reporting_for_user(nstat_provider_filter * filter,pid_t pid,pid_t epid,uuid_t * uuid,uuid_t * euuid)2415 check_reporting_for_user(nstat_provider_filter *filter, pid_t pid, pid_t epid, uuid_t *uuid, uuid_t *euuid)
2416 {
2417 bool retval = true;
2418
2419 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2420 retval = false;
2421
2422 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
2423 (filter->npf_pid == pid)) {
2424 retval = true;
2425 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
2426 (filter->npf_pid == epid)) {
2427 retval = true;
2428 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
2429 (memcmp(filter->npf_uuid, uuid, sizeof(*uuid)) == 0)) {
2430 retval = true;
2431 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
2432 (memcmp(filter->npf_uuid, euuid, sizeof(*euuid)) == 0)) {
2433 retval = true;
2434 }
2435 }
2436 return retval;
2437 }
2438
2439 static bool
nstat_userland_tcp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)2440 nstat_userland_tcp_reporting_allowed(
2441 nstat_provider_cookie_t cookie,
2442 nstat_provider_filter *filter,
2443 __unused u_int64_t suppression_flags)
2444 {
2445 bool retval = true;
2446 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2447
2448 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2449
2450 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
2451 u_int16_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
2452
2453 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) {
2454 u_int32_t extended_ifflags = extend_ifnet_flags(ifflags);
2455 if ((filter->npf_flags & extended_ifflags) == 0) {
2456 return false;
2457 }
2458 }
2459 }
2460
2461 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2462 nstat_tcp_descriptor tcp_desc; // Stack allocation - OK or pushing the limits too far?
2463 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &tcp_desc)) {
2464 retval = check_reporting_for_user(filter, (pid_t)tcp_desc.pid, (pid_t)tcp_desc.epid,
2465 &tcp_desc.uuid, &tcp_desc.euuid);
2466 } else {
2467 retval = false; // No further information, so might as well give up now.
2468 }
2469 }
2470 return retval;
2471 }
2472
2473 static size_t
nstat_userland_extensions(nstat_provider_cookie_t cookie,u_int32_t extension_id,void * buf,size_t len)2474 nstat_userland_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
2475 {
2476 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2477 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2478 assert(shad->shad_live);
2479 assert(shad->shad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
2480
2481 return shad->shad_get_extension_fn(shad->shad_provider_context, extension_id, buf, len);
2482 }
2483
2484
2485 static bool
nstat_userland_udp_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)2486 nstat_userland_udp_reporting_allowed(
2487 nstat_provider_cookie_t cookie,
2488 nstat_provider_filter *filter,
2489 __unused u_int64_t suppression_flags)
2490 {
2491 bool retval = true;
2492 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2493
2494 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2495
2496 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
2497 u_int16_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
2498
2499 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) {
2500 u_int32_t extended_ifflags = extend_ifnet_flags(ifflags);
2501 if ((filter->npf_flags & extended_ifflags) == 0) {
2502 return false;
2503 }
2504 }
2505 }
2506 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2507 nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far?
2508 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &udp_desc)) {
2509 retval = check_reporting_for_user(filter, (pid_t)udp_desc.pid, (pid_t)udp_desc.epid,
2510 &udp_desc.uuid, &udp_desc.euuid);
2511 } else {
2512 retval = false; // No further information, so might as well give up now.
2513 }
2514 }
2515 return retval;
2516 }
2517
2518 static bool
nstat_userland_quic_reporting_allowed(nstat_provider_cookie_t cookie,nstat_provider_filter * filter,__unused u_int64_t suppression_flags)2519 nstat_userland_quic_reporting_allowed(
2520 nstat_provider_cookie_t cookie,
2521 nstat_provider_filter *filter,
2522 __unused u_int64_t suppression_flags)
2523 {
2524 bool retval = true;
2525 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2526
2527 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2528
2529 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
2530 u_int16_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
2531
2532 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) {
2533 u_int32_t extended_ifflags = extend_ifnet_flags(ifflags);
2534 if ((filter->npf_flags & extended_ifflags) == 0) {
2535 return false;
2536 }
2537 }
2538 }
2539 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2540 nstat_quic_descriptor quic_desc; // Stack allocation - OK or pushing the limits too far?
2541 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &quic_desc)) {
2542 retval = check_reporting_for_user(filter, (pid_t)quic_desc.pid, (pid_t)quic_desc.epid,
2543 &quic_desc.uuid, &quic_desc.euuid);
2544 } else {
2545 retval = false; // No further information, so might as well give up now.
2546 }
2547 }
2548 return retval;
2549 }
2550
2551 static errno_t
nstat_userland_protocol_add_watcher(nstat_control_state * state,nstat_msg_add_all_srcs * req,nstat_provider_type_t nstat_provider_type,nstat_provider * nstat_provider,u_int32_t * proto_watcher_cnt)2552 nstat_userland_protocol_add_watcher(
2553 nstat_control_state *state,
2554 nstat_msg_add_all_srcs *req,
2555 nstat_provider_type_t nstat_provider_type,
2556 nstat_provider *nstat_provider,
2557 u_int32_t *proto_watcher_cnt)
2558 {
2559 errno_t result;
2560
2561 lck_mtx_lock(&nstat_mtx);
2562 result = nstat_set_provider_filter(state, req);
2563
2564 if (result == 0) {
2565 struct nstat_tu_shadow *shad;
2566
2567 OSIncrementAtomic(proto_watcher_cnt);
2568
2569 TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
2570 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2571
2572 if ((shad->shad_provider == nstat_provider_type) && (shad->shad_live)) {
2573 result = nstat_control_source_add(0, state, nstat_provider, shad);
2574 if (result != 0) {
2575 printf("%s - nstat_control_source_add returned %d for "
2576 "provider type: %d\n", __func__, result, nstat_provider_type);
2577 break;
2578 }
2579 }
2580 }
2581 }
2582 lck_mtx_unlock(&nstat_mtx);
2583
2584 return result;
2585 }
2586
static errno_t
nstat_userland_tcp_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// Thin wrapper binding the common watcher path to the userland TCP provider.
	return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_TCP_USERLAND,
	           &nstat_userland_tcp_provider, &nstat_userland_tcp_watchers);
}
2595
static errno_t
nstat_userland_udp_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// Thin wrapper binding the common watcher path to the userland UDP provider.
	return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_UDP_USERLAND,
	           &nstat_userland_udp_provider, &nstat_userland_udp_watchers);
}
2604
static errno_t
nstat_userland_quic_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// Thin wrapper binding the common watcher path to the userland QUIC provider.
	return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_QUIC_USERLAND,
	           &nstat_userland_quic_provider, &nstat_userland_quic_watchers);
}
2613
static void
nstat_userland_tcp_remove_watcher(
	__unused nstat_control_state *state)
{
	// Just balance the userland-TCP watcher count.
	OSDecrementAtomic(&nstat_userland_tcp_watchers);
}
2620
static void
nstat_userland_udp_remove_watcher(
	__unused nstat_control_state *state)
{
	// Just balance the userland-UDP watcher count.
	OSDecrementAtomic(&nstat_userland_udp_watchers);
}
2627
static void
nstat_userland_quic_remove_watcher(
	__unused nstat_control_state *state)
{
	// Just balance the userland-QUIC watcher count.
	OSDecrementAtomic(&nstat_userland_quic_watchers);
}
2634
2635
2636 static void
nstat_init_userland_tcp_provider(void)2637 nstat_init_userland_tcp_provider(void)
2638 {
2639 bzero(&nstat_userland_tcp_provider, sizeof(nstat_userland_tcp_provider));
2640 nstat_userland_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
2641 nstat_userland_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_USERLAND;
2642 nstat_userland_tcp_provider.nstat_lookup = nstat_userland_tu_lookup;
2643 nstat_userland_tcp_provider.nstat_gone = nstat_userland_tu_gone;
2644 nstat_userland_tcp_provider.nstat_counts = nstat_userland_tu_counts;
2645 nstat_userland_tcp_provider.nstat_release = nstat_userland_tu_release;
2646 nstat_userland_tcp_provider.nstat_watcher_add = nstat_userland_tcp_add_watcher;
2647 nstat_userland_tcp_provider.nstat_watcher_remove = nstat_userland_tcp_remove_watcher;
2648 nstat_userland_tcp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2649 nstat_userland_tcp_provider.nstat_reporting_allowed = nstat_userland_tcp_reporting_allowed;
2650 nstat_userland_tcp_provider.nstat_copy_extension = nstat_userland_extensions;
2651 nstat_userland_tcp_provider.next = nstat_providers;
2652 nstat_providers = &nstat_userland_tcp_provider;
2653 }
2654
2655
2656 static void
nstat_init_userland_udp_provider(void)2657 nstat_init_userland_udp_provider(void)
2658 {
2659 bzero(&nstat_userland_udp_provider, sizeof(nstat_userland_udp_provider));
2660 nstat_userland_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
2661 nstat_userland_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_USERLAND;
2662 nstat_userland_udp_provider.nstat_lookup = nstat_userland_tu_lookup;
2663 nstat_userland_udp_provider.nstat_gone = nstat_userland_tu_gone;
2664 nstat_userland_udp_provider.nstat_counts = nstat_userland_tu_counts;
2665 nstat_userland_udp_provider.nstat_release = nstat_userland_tu_release;
2666 nstat_userland_udp_provider.nstat_watcher_add = nstat_userland_udp_add_watcher;
2667 nstat_userland_udp_provider.nstat_watcher_remove = nstat_userland_udp_remove_watcher;
2668 nstat_userland_udp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2669 nstat_userland_udp_provider.nstat_reporting_allowed = nstat_userland_udp_reporting_allowed;
2670 nstat_userland_udp_provider.nstat_copy_extension = nstat_userland_extensions;
2671 nstat_userland_udp_provider.next = nstat_providers;
2672 nstat_providers = &nstat_userland_udp_provider;
2673 }
2674
2675 static void
nstat_init_userland_quic_provider(void)2676 nstat_init_userland_quic_provider(void)
2677 {
2678 bzero(&nstat_userland_quic_provider, sizeof(nstat_userland_quic_provider));
2679 nstat_userland_quic_provider.nstat_descriptor_length = sizeof(nstat_quic_descriptor);
2680 nstat_userland_quic_provider.nstat_provider_id = NSTAT_PROVIDER_QUIC_USERLAND;
2681 nstat_userland_quic_provider.nstat_lookup = nstat_userland_tu_lookup;
2682 nstat_userland_quic_provider.nstat_gone = nstat_userland_tu_gone;
2683 nstat_userland_quic_provider.nstat_counts = nstat_userland_tu_counts;
2684 nstat_userland_quic_provider.nstat_release = nstat_userland_tu_release;
2685 nstat_userland_quic_provider.nstat_watcher_add = nstat_userland_quic_add_watcher;
2686 nstat_userland_quic_provider.nstat_watcher_remove = nstat_userland_quic_remove_watcher;
2687 nstat_userland_quic_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2688 nstat_userland_quic_provider.nstat_reporting_allowed = nstat_userland_quic_reporting_allowed;
2689 nstat_userland_quic_provider.nstat_copy_extension = nstat_userland_extensions;
2690 nstat_userland_quic_provider.next = nstat_providers;
2691 nstat_providers = &nstat_userland_quic_provider;
2692 }
2693
2694
// Things get started with a call to netstats to say that there’s a new connection:
// Allocates a shadow structure for the new userland flow, links it on the global
// shadow list, and attaches it as a source to every client already watching the
// given provider. Returns an opaque handle (the shadow) or NULL on failure.
__private_extern__ nstat_userland_context
ntstat_userland_stats_open(userland_stats_provider_context *ctx,
    int provider_id,
    u_int64_t properties,
    userland_stats_request_vals_fn req_fn,
    userland_stats_request_extension_fn req_extension_fn)
{
	struct nstat_tu_shadow *shad;
	struct nstat_procdetails *procdetails;
	nstat_provider *provider;

	// Only the userland TCP/UDP/QUIC providers may register shadows here
	if ((provider_id != NSTAT_PROVIDER_TCP_USERLAND) &&
	    (provider_id != NSTAT_PROVIDER_UDP_USERLAND) &&
	    (provider_id != NSTAT_PROVIDER_QUIC_USERLAND)) {
		printf("%s - incorrect provider is supplied, %d\n", __func__, provider_id);
		return NULL;
	}

	shad = kalloc_type(struct nstat_tu_shadow, Z_WAITOK | Z_NOFAIL);

	// Capture details of the calling process; failure aborts the open
	procdetails = nstat_retain_curprocdetails();

	if (procdetails == NULL) {
		kfree_type(struct nstat_tu_shadow, shad);
		return NULL;
	}

	shad->shad_getvals_fn = req_fn;
	shad->shad_get_extension_fn = req_extension_fn;
	shad->shad_provider_context = ctx;
	shad->shad_provider = provider_id;
	shad->shad_properties = properties;
	shad->shad_procdetails = procdetails;
	shad->shad_rnf_override = nstat_rnf_override_not_set;
	shad->shad_start_timestamp = mach_continuous_time();
	shad->shad_live = true;
	shad->shad_magic = TU_SHADOW_MAGIC;	// magic guards against stale/corrupt shadows

	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;

	// Even if there are no watchers, we save the shadow structure
	TAILQ_INSERT_HEAD(&nstat_userprot_shad_head, shad, shad_link);

	// Bump the per-protocol shadow count and pick the matching provider
	if (provider_id == NSTAT_PROVIDER_TCP_USERLAND) {
		nstat_userland_tcp_shadows++;
		provider = &nstat_userland_tcp_provider;
	} else if (provider_id == NSTAT_PROVIDER_UDP_USERLAND) {
		nstat_userland_udp_shadows++;
		provider = &nstat_userland_udp_provider;
	} else {
		nstat_userland_quic_shadows++;
		provider = &nstat_userland_quic_provider;
	}

	// Attach the shadow as a source for every client already watching this provider
	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << provider_id)) != 0) {
			// this client is watching tcp/udp/quic userland
			// Link to it.
			int result = nstat_control_source_add(0, state, provider, shad);
			if (result != 0) {
				// There should be some kind of statistics for failures like this.
				// <rdar://problem/31377195> The kernel ntstat component should keep some
				// internal counters reflecting operational state for eventual AWD reporting
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	// The opaque handle returned to the provider is the shadow itself
	return (nstat_userland_context)shad;
}
2767
2768
// Tears down a shadow created by ntstat_userland_stats_open: sends a goodbye to
// any watching clients, unlinks the shadow from the global list and frees it.
__private_extern__ void
ntstat_userland_stats_close(nstat_userland_context nstat_ctx)
{
	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
	tailq_head_nstat_src dead_list;
	nstat_src *src;

	if (shad == NULL) {
		return;
	}

	assert(shad->shad_magic == TU_SHADOW_MAGIC);
	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);
	// Only scan client source queues if someone is watching a userland provider
	if (nstat_userland_udp_watchers != 0 ||
	    nstat_userland_tcp_watchers != 0 ||
	    nstat_userland_quic_watchers != 0) {
		nstat_control_state *state;
		errno_t result;

		for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
			// Find this client's source (if any) that wraps our shadow
			TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
			{
				if (shad == (struct nstat_tu_shadow *)src->cookie) {
					nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
					if (provider_id == NSTAT_PROVIDER_TCP_USERLAND ||
					    provider_id == NSTAT_PROVIDER_UDP_USERLAND ||
					    provider_id == NSTAT_PROVIDER_QUIC_USERLAND) {
						break;
					}
				}
			}

			if (src) {
				// Send final counts, then park the source on the dead list
				// so cleanup happens after all locks are dropped
				result = nstat_control_send_goodbye(state, src);

				TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
				TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
			}
			lck_mtx_unlock(&state->ncs_mtx);
		}
	}
	TAILQ_REMOVE(&nstat_userprot_shad_head, shad, shad_link);

	// Shadows already marked defunct (shad_live == false) had their
	// per-protocol count decremented earlier; don't decrement twice
	if (shad->shad_live) {
		if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) {
			nstat_userland_tcp_shadows--;
		} else if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND) {
			nstat_userland_udp_shadows--;
		} else {
			nstat_userland_quic_shadows--;
		}
	}

	lck_mtx_unlock(&nstat_mtx);

	// Clean up detached sources outside of nstat_mtx
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
	nstat_release_procdetails(shad->shad_procdetails);
	shad->shad_magic = TU_SHADOW_UNMAGIC;	// poison before free to catch use-after-free

	kfree_type(struct nstat_tu_shadow, shad);
}
2836
// Delivers an event for a userland shadow to every client whose per-provider
// event filter matches. Caller must hold nstat_mtx (hence "_locked").
static void
ntstat_userland_stats_event_locked(
	struct nstat_tu_shadow *shad,
	uint64_t event)
{
	nstat_control_state *state;
	nstat_src *src;
	errno_t result;
	nstat_provider_id_t provider_id;

	if (nstat_userland_udp_watchers != 0 || nstat_userland_tcp_watchers != 0 || nstat_userland_quic_watchers != 0) {
		for (state = nstat_controls; state; state = state->ncs_next) {
			// Skip clients with no interest in this event for any userland provider
			if (((state->ncs_provider_filters[NSTAT_PROVIDER_TCP_USERLAND].npf_events & event) == 0) &&
			    ((state->ncs_provider_filters[NSTAT_PROVIDER_UDP_USERLAND].npf_events & event) == 0) &&
			    ((state->ncs_provider_filters[NSTAT_PROVIDER_QUIC_USERLAND].npf_events & event) == 0)) {
				continue;
			}
			lck_mtx_lock(&state->ncs_mtx);
			// Locate this client's source for the shadow; on break, provider_id
			// holds the matched source's provider id and is used below
			TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) {
				provider_id = src->provider->nstat_provider_id;
				if (provider_id == NSTAT_PROVIDER_TCP_USERLAND || provider_id == NSTAT_PROVIDER_UDP_USERLAND ||
				    provider_id == NSTAT_PROVIDER_QUIC_USERLAND) {
					if (shad == (struct nstat_tu_shadow *)src->cookie) {
						break;
					}
				}
			}
			// Send only if this client's filter for the specific provider matches
			if (src && ((state->ncs_provider_filters[provider_id].npf_events & event) != 0)) {
				result = nstat_control_send_event(state, src, event);
			}
			lck_mtx_unlock(&state->ncs_mtx);
		}
	}
}
2871
// Provider-facing entry point for flow events on a userland shadow.
__private_extern__ void
ntstat_userland_stats_event(
	nstat_userland_context nstat_ctx,
	uint64_t event)
{
	// This will need refinement for when we do genuine stats filtering
	// See <rdar://problem/23022832> NetworkStatistics should provide opt-in notifications
	// For now it deals only with events that potentially cause any traditional netstat sources to be closed

	struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
	tailq_head_nstat_src dead_list;
	nstat_src *src;

	if (shad == NULL) {
		return;
	}

	assert(shad->shad_magic == TU_SHADOW_MAGIC);

	// Only TIME_WAIT is acted on: matching sources get a goodbye and are detached
	if (event & NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT) {
		TAILQ_INIT(&dead_list);

		lck_mtx_lock(&nstat_mtx);
		if (nstat_userland_udp_watchers != 0 ||
		    nstat_userland_tcp_watchers != 0 ||
		    nstat_userland_quic_watchers != 0) {
			nstat_control_state *state;
			errno_t result;

			for (state = nstat_controls; state; state = state->ncs_next) {
				lck_mtx_lock(&state->ncs_mtx);
				// Find this client's source for the shadow, if any
				TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
				{
					if (shad == (struct nstat_tu_shadow *)src->cookie) {
						break;
					}
				}

				if (src) {
					// Clients may opt out of early close via NSTAT_FILTER_TCP_NO_EARLY_CLOSE
					if (!(src->filter & NSTAT_FILTER_TCP_NO_EARLY_CLOSE)) {
						result = nstat_control_send_goodbye(state, src);

						TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
						TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
					}
				}
				lck_mtx_unlock(&state->ncs_mtx);
			}
		}
		lck_mtx_unlock(&nstat_mtx);

		// Final cleanup of detached sources outside the locks
		while ((src = TAILQ_FIRST(&dead_list))) {
			TAILQ_REMOVE(&dead_list, src, ns_control_link);
			nstat_control_cleanup_source(NULL, src, TRUE);
		}
	}
}
2929
// Detaches all watcher sources belonging to the given process and marks its
// shadows as no longer live; the shadows themselves are freed later by the
// close path.
__private_extern__ void
nstats_userland_stats_defunct_for_process(int pid)
{
	// Note that this can be called multiple times for the same process
	tailq_head_nstat_src dead_list;
	nstat_src *src, *tmpsrc;
	struct nstat_tu_shadow *shad;

	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);

	if (nstat_userland_udp_watchers != 0 ||
	    nstat_userland_tcp_watchers != 0 ||
	    nstat_userland_quic_watchers != 0) {
		nstat_control_state *state;
		errno_t result;

		for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
			// _SAFE variant: entries are removed while iterating
			TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
			{
				nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
				if (provider_id == NSTAT_PROVIDER_TCP_USERLAND ||
				    provider_id == NSTAT_PROVIDER_UDP_USERLAND ||
				    provider_id == NSTAT_PROVIDER_QUIC_USERLAND) {
					shad = (struct nstat_tu_shadow *)src->cookie;
					if (shad->shad_procdetails->pdet_pid == pid) {
						result = nstat_control_send_goodbye(state, src);

						TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
						TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
					}
				}
			}
			lck_mtx_unlock(&state->ncs_mtx);
		}
	}

	// Mark the process's live shadows defunct and adjust per-protocol counts;
	// shad_live guards against double-decrement on repeat calls
	TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
		assert(shad->shad_magic == TU_SHADOW_MAGIC);

		if (shad->shad_live) {
			if (shad->shad_procdetails->pdet_pid == pid) {
				shad->shad_live = false;
				if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) {
					nstat_userland_tcp_shadows--;
				} else if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND) {
					nstat_userland_udp_shadows--;
				} else {
					nstat_userland_quic_shadows--;
				}
			}
		}
	}

	lck_mtx_unlock(&nstat_mtx);

	// Clean up detached sources outside of nstat_mtx
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
2993
// Sets or clears the "run on fallback network" override for the shadow whose
// flow uuid matches target_fuuid. Returns 0 on success, EPERM if the caller
// does not own the flow, EEXIST if no matching shadow was found.
errno_t
nstat_userland_mark_rnf_override(uuid_t target_fuuid, bool rnf_override)
{
	// Note that this can be called multiple times for the same process
	struct nstat_tu_shadow *shad;
	uuid_t fuuid;
	errno_t result;

	lck_mtx_lock(&nstat_mtx);
	// We set the fallback state regardless of watchers as there may be future ones that need to know
	TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
		assert(shad->shad_magic == TU_SHADOW_MAGIC);
		assert(shad->shad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
		// NOTE(review): shad_get_extension_fn is invoked unconditionally here;
		// this assumes every userland shadow registered a non-NULL extension
		// callback — confirm with the providers calling ntstat_userland_stats_open
		if (shad->shad_get_extension_fn(shad->shad_provider_context, NSTAT_EXTENDED_UPDATE_TYPE_FUUID, fuuid, sizeof(fuuid))) {
			if (uuid_compare(fuuid, target_fuuid) == 0) {
				break;
			}
		}
	}
	if (shad) {
		// Only the process that owns the flow may change its override state
		if (shad->shad_procdetails->pdet_pid != proc_selfpid()) {
			result = EPERM;
		} else {
			result = 0;
			// It would be possible but awkward to check the previous value
			// for RNF override, and send an event only if changed.
			// In practice it's fine to send an event regardless,
			// which "pushes" the last statistics for the previous mode
			shad->shad_rnf_override = rnf_override ? nstat_rnf_override_enabled
			    : nstat_rnf_override_disabled;
			ntstat_userland_stats_event_locked(shad,
			    rnf_override ? NSTAT_EVENT_SRC_ENTER_CELLFALLBACK
			    : NSTAT_EVENT_SRC_EXIT_CELLFALLBACK);
		}
	} else {
		result = EEXIST;	// no shadow matched the requested flow uuid
	}

	lck_mtx_unlock(&nstat_mtx);

	return result;
}
3036
3037 #pragma mark -- Generic Providers --
3038
3039 static nstat_provider nstat_userland_conn_provider;
3040 static nstat_provider nstat_udp_subflow_provider;
3041
3042 static u_int32_t nstat_generic_provider_watchers[NSTAT_PROVIDER_COUNT];
3043
// Kernel-side shadow of a provider-registered source (conn userland or UDP
// subflow). Reference counted: the opener holds one reference, and each
// attached nstat_src holds another.
struct nstat_generic_shadow {
	tailq_entry_generic_shadow gshad_link;          // linkage on nstat_gshad_head
	nstat_provider_context gshad_provider_context;  // opaque context passed back to the provider callbacks
	nstat_provider_request_vals_fn *gshad_getvals_fn;       // callback to fetch flags/counts/descriptor
	nstat_provider_request_extensions_fn *gshad_getextensions_fn;   // optional extensions callback, may be NULL
	u_int64_t gshad_properties;                     // property flags used for initial watcher filtering
	u_int64_t gshad_start_timestamp;                // mach_continuous_time() captured at open
	struct nstat_procdetails *gshad_procdetails;    // retained details of the opening process
	nstat_provider_id_t gshad_provider;             // NSTAT_PROVIDER_CONN_USERLAND or NSTAT_PROVIDER_UDP_SUBFLOW
	int32_t gshad_refcnt;                           // reference count; structure freed when last reference drops
	uint32_t gshad_magic;                           // NSTAT_GENERIC_SHADOW_MAGIC while valid
};
3056
3057 // Magic number checking should remain in place until the userland provider has been fully proven
3058 #define NSTAT_GENERIC_SHADOW_MAGIC 0xfadef00d
3059 #define NSTAT_GENERIC_SHADOW_UNMAGIC 0xfadedead
3060
3061 static tailq_head_generic_shadow nstat_gshad_head = TAILQ_HEAD_INITIALIZER(nstat_gshad_head);
3062
3063 static inline void
nstat_retain_gshad(struct nstat_generic_shadow * gshad)3064 nstat_retain_gshad(
3065 struct nstat_generic_shadow *gshad)
3066 {
3067 assert(gshad->gshad_magic = NSTAT_GENERIC_SHADOW_MAGIC);
3068
3069 OSIncrementAtomic(&gshad->gshad_refcnt);
3070 }
3071
3072 static void
nstat_release_gshad(struct nstat_generic_shadow * gshad)3073 nstat_release_gshad(
3074 struct nstat_generic_shadow *gshad)
3075 {
3076 assert(gshad->gshad_magic = NSTAT_GENERIC_SHADOW_MAGIC);
3077
3078 if (OSDecrementAtomic(&gshad->gshad_refcnt) == 1) {
3079 nstat_release_procdetails(gshad->gshad_procdetails);
3080 gshad->gshad_magic = NSTAT_GENERIC_SHADOW_UNMAGIC;
3081 kfree_type(struct nstat_generic_shadow, gshad);
3082 }
3083 }
3084
3085 static errno_t
nstat_generic_provider_lookup(__unused const void * data,__unused u_int32_t length,__unused nstat_provider_cookie_t * out_cookie)3086 nstat_generic_provider_lookup(
3087 __unused const void *data,
3088 __unused u_int32_t length,
3089 __unused nstat_provider_cookie_t *out_cookie)
3090 {
3091 // Looking up a specific connection is not supported
3092 return ENOTSUP;
3093 }
3094
3095 static int
nstat_generic_provider_gone(__unused nstat_provider_cookie_t cookie)3096 nstat_generic_provider_gone(
3097 __unused nstat_provider_cookie_t cookie)
3098 {
3099 // Returns non-zero if the source has gone.
3100 // We don't keep a source hanging around, so the answer is always 0
3101 return 0;
3102 }
3103
3104 static errno_t
nstat_generic_provider_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)3105 nstat_generic_provider_counts(
3106 nstat_provider_cookie_t cookie,
3107 struct nstat_counts *out_counts,
3108 int *out_gone)
3109 {
3110 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3111 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
3112
3113 memset(out_counts, 0, sizeof(*out_counts));
3114
3115 bool result = (*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, out_counts, NULL);
3116
3117 if (out_gone) {
3118 *out_gone = 0;
3119 }
3120 return (result)? 0 : EIO;
3121 }
3122
3123
3124 static errno_t
nstat_generic_provider_copy_descriptor(nstat_provider_cookie_t cookie,void * data,__unused size_t len)3125 nstat_generic_provider_copy_descriptor(
3126 nstat_provider_cookie_t cookie,
3127 void *data,
3128 __unused size_t len)
3129 {
3130 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3131 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
3132 struct nstat_procdetails *procdetails = gshad->gshad_procdetails;
3133 assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
3134
3135 bool result = (*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, NULL, data);
3136
3137 switch (gshad->gshad_provider) {
3138 case NSTAT_PROVIDER_CONN_USERLAND:
3139 {
3140 nstat_connection_descriptor *desc = (nstat_connection_descriptor *)data;
3141 desc->pid = procdetails->pdet_pid;
3142 desc->upid = procdetails->pdet_upid;
3143 uuid_copy(desc->uuid, procdetails->pdet_uuid);
3144 strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
3145 desc->start_timestamp = gshad->gshad_start_timestamp;
3146 desc->timestamp = mach_continuous_time();
3147 break;
3148 }
3149 case NSTAT_PROVIDER_UDP_SUBFLOW:
3150 {
3151 nstat_udp_descriptor *desc = (nstat_udp_descriptor *)data;
3152 desc->pid = procdetails->pdet_pid;
3153 desc->upid = procdetails->pdet_upid;
3154 uuid_copy(desc->uuid, procdetails->pdet_uuid);
3155 strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
3156 desc->start_timestamp = gshad->gshad_start_timestamp;
3157 desc->timestamp = mach_continuous_time();
3158 break;
3159 }
3160 default:
3161 break;
3162 }
3163 return (result)? 0 : EIO;
3164 }
3165
3166 static void
nstat_generic_provider_release(__unused nstat_provider_cookie_t cookie,__unused int locked)3167 nstat_generic_provider_release(
3168 __unused nstat_provider_cookie_t cookie,
3169 __unused int locked)
3170 {
3171 // Called when a nstat_src is detached.
3172 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3173
3174 nstat_release_gshad(gshad);
3175 }
3176
// Decides whether this source should be reported to a client, applying the
// client's filter: boring-source suppression, interface/connection flags,
// and pid/uuid (including effective pid/uuid) matching.
static bool
nstat_generic_provider_reporting_allowed(
	nstat_provider_cookie_t cookie,
	nstat_provider_filter *filter,
	u_int64_t suppression_flags)
{
	struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;

	assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

	// Clients may ask to suppress sources flagged as "boring" (unchanged)
	if ((filter->npf_flags & NSTAT_FILTER_SUPPRESS_BORING_FLAGS) != 0) {
		if ((filter->npf_flags & suppression_flags) != 0) {
			return false;
		}
	}

	// Filter based on interface and connection flags
	// If a provider doesn't support flags, a client shouldn't attempt to use filtering
	if ((filter->npf_flags & NSTAT_FILTER_IFNET_AND_CONN_FLAGS) != 0) {
		u_int32_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;

		// Ask the provider for its current flags; on success, require overlap
		if ((*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, &ifflags, NULL, NULL)) {
			if ((filter->npf_flags & ifflags) == 0) {
				return false;
			}
		}
	}

	if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
		struct nstat_procdetails *procdetails = gshad->gshad_procdetails;
		assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);

		// Check details that we have readily to hand before asking the provider for descriptor items
		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
		    (filter->npf_pid == procdetails->pdet_pid)) {
			return true;
		}
		if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
		    (memcmp(filter->npf_uuid, &procdetails->pdet_uuid, sizeof(filter->npf_uuid)) == 0)) {
			return true;
		}
		// Effective pid/uuid matching requires a descriptor fetch from the provider
		if ((filter->npf_flags & (NSTAT_FILTER_SPECIFIC_USER_BY_EPID | NSTAT_FILTER_SPECIFIC_USER_BY_EUUID)) != 0) {
			nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far?
			switch (gshad->gshad_provider) {
			case NSTAT_PROVIDER_CONN_USERLAND:
				// Filtering by effective uuid or effective pid is currently not supported
				// The unsupported bits are cleared in the client's filter, so
				// subsequent calls for this client skip this path (and the log line)
				filter->npf_flags &= ~((uint64_t)(NSTAT_FILTER_SPECIFIC_USER_BY_EPID | NSTAT_FILTER_SPECIFIC_USER_BY_EUUID));
				printf("%s - attempt to filter conn provider by effective pid/uuid, not supported\n", __func__);
				return true;

			case NSTAT_PROVIDER_UDP_SUBFLOW:
				if ((*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, NULL, &udp_desc)) {
					if (check_reporting_for_user(filter, procdetails->pdet_pid, (pid_t)udp_desc.epid,
					    &procdetails->pdet_uuid, &udp_desc.euuid)) {
						return true;
					}
				}
				break;
			default:
				break;
			}
		}
		return false;
	}
	return true;
}
3243
3244 static size_t
nstat_generic_extensions(nstat_provider_cookie_t cookie,u_int32_t extension_id,void * buf,size_t len)3245 nstat_generic_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
3246 {
3247 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3248 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
3249 assert(gshad->gshad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
3250
3251 if (gshad->gshad_getextensions_fn == NULL) {
3252 return 0;
3253 }
3254 return gshad->gshad_getextensions_fn(gshad->gshad_provider_context, extension_id, buf, len);
3255 }
3256
// Registers a client as a watcher of a generic provider and adds a source for
// every existing shadow of that provider, subject to optional initial filtering.
static errno_t
nstat_generic_provider_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	errno_t result;
	nstat_provider_id_t provider_id = req->provider;
	nstat_provider *provider;

	// Only the two generic provider types are supported here
	switch (provider_id) {
	case NSTAT_PROVIDER_CONN_USERLAND:
		provider = &nstat_userland_conn_provider;
		break;
	case NSTAT_PROVIDER_UDP_SUBFLOW:
		provider = &nstat_udp_subflow_provider;
		break;
	default:
		return ENOTSUP;
	}

	lck_mtx_lock(&nstat_mtx);
	// Record the client's filter before scanning the existing shadows
	result = nstat_set_provider_filter(state, req);

	if (result == 0) {
		struct nstat_generic_shadow *gshad;
		nstat_provider_filter *filter = &state->ncs_provider_filters[provider_id];

		OSIncrementAtomic(&nstat_generic_provider_watchers[provider_id]);

		TAILQ_FOREACH(gshad, &nstat_gshad_head, gshad_link) {
			assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

			if (gshad->gshad_provider == provider_id) {
				// Optional up-front filtering on interface/connection properties
				if (filter->npf_flags & NSTAT_FILTER_INITIAL_PROPERTIES) {
					u_int64_t npf_flags = filter->npf_flags & NSTAT_FILTER_IFNET_AND_CONN_FLAGS;
					if ((npf_flags != 0) && ((npf_flags & gshad->gshad_properties) == 0)) {
						// Skip this one
						// Note - no filtering by pid or UUID supported at this point, for simplicity
						continue;
					}
				}
				// Each attached source holds a reference on the shadow;
				// dropped again if the add fails
				nstat_retain_gshad(gshad);
				result = nstat_control_source_add(0, state, provider, gshad);
				if (result != 0) {
					printf("%s - nstat_control_source_add returned %d for "
					    "provider type: %d\n", __func__, result, provider_id);
					nstat_release_gshad(gshad);
					break;
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return result;
}
3313
3314 static void
nstat_userland_conn_remove_watcher(__unused nstat_control_state * state)3315 nstat_userland_conn_remove_watcher(
3316 __unused nstat_control_state *state)
3317 {
3318 OSDecrementAtomic(&nstat_generic_provider_watchers[NSTAT_PROVIDER_CONN_USERLAND]);
3319 }
3320
3321 static void
nstat_udp_subflow_remove_watcher(__unused nstat_control_state * state)3322 nstat_udp_subflow_remove_watcher(
3323 __unused nstat_control_state *state)
3324 {
3325 OSDecrementAtomic(&nstat_generic_provider_watchers[NSTAT_PROVIDER_UDP_SUBFLOW]);
3326 }
3327
3328 static void
nstat_init_userland_conn_provider(void)3329 nstat_init_userland_conn_provider(void)
3330 {
3331 bzero(&nstat_userland_conn_provider, sizeof(nstat_userland_conn_provider));
3332 nstat_userland_conn_provider.nstat_descriptor_length = sizeof(nstat_connection_descriptor);
3333 nstat_userland_conn_provider.nstat_provider_id = NSTAT_PROVIDER_CONN_USERLAND;
3334 nstat_userland_conn_provider.nstat_lookup = nstat_generic_provider_lookup;
3335 nstat_userland_conn_provider.nstat_gone = nstat_generic_provider_gone;
3336 nstat_userland_conn_provider.nstat_counts = nstat_generic_provider_counts;
3337 nstat_userland_conn_provider.nstat_release = nstat_generic_provider_release;
3338 nstat_userland_conn_provider.nstat_watcher_add = nstat_generic_provider_add_watcher;
3339 nstat_userland_conn_provider.nstat_watcher_remove = nstat_userland_conn_remove_watcher;
3340 nstat_userland_conn_provider.nstat_copy_descriptor = nstat_generic_provider_copy_descriptor;
3341 nstat_userland_conn_provider.nstat_reporting_allowed = nstat_generic_provider_reporting_allowed;
3342 nstat_userland_conn_provider.nstat_copy_extension = nstat_generic_extensions;
3343 nstat_userland_conn_provider.next = nstat_providers;
3344 nstat_providers = &nstat_userland_conn_provider;
3345 }
3346
3347 static void
nstat_init_udp_subflow_provider(void)3348 nstat_init_udp_subflow_provider(void)
3349 {
3350 bzero(&nstat_udp_subflow_provider, sizeof(nstat_udp_subflow_provider));
3351 nstat_udp_subflow_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
3352 nstat_udp_subflow_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_SUBFLOW;
3353 nstat_udp_subflow_provider.nstat_lookup = nstat_generic_provider_lookup;
3354 nstat_udp_subflow_provider.nstat_gone = nstat_generic_provider_gone;
3355 nstat_udp_subflow_provider.nstat_counts = nstat_generic_provider_counts;
3356 nstat_udp_subflow_provider.nstat_release = nstat_generic_provider_release;
3357 nstat_udp_subflow_provider.nstat_watcher_add = nstat_generic_provider_add_watcher;
3358 nstat_udp_subflow_provider.nstat_watcher_remove = nstat_udp_subflow_remove_watcher;
3359 nstat_udp_subflow_provider.nstat_copy_descriptor = nstat_generic_provider_copy_descriptor;
3360 nstat_udp_subflow_provider.nstat_reporting_allowed = nstat_generic_provider_reporting_allowed;
3361 nstat_udp_subflow_provider.nstat_copy_extension = nstat_generic_extensions;
3362 nstat_udp_subflow_provider.next = nstat_providers;
3363 nstat_providers = &nstat_udp_subflow_provider;
3364 }
3365
// Things get started with a call from the provider to netstats to say that there’s a new source
// Allocates a generic shadow for the new source, links it on the global list,
// and attaches it to every client already watching the provider (subject to
// each client's optional initial filtering). Returns an opaque handle or NULL.
__private_extern__ nstat_context
nstat_provider_stats_open(nstat_provider_context ctx,
    int provider_id,
    u_int64_t properties,
    nstat_provider_request_vals_fn req_fn,
    nstat_provider_request_extensions_fn req_extensions_fn)
{
	struct nstat_generic_shadow *gshad;
	struct nstat_procdetails *procdetails;
	// NOTE(review): unlike ntstat_userland_stats_open there is no validation of
	// provider_id here, and the lookup result is dereferenced below; this
	// assumes callers always pass a valid generic provider id — confirm
	nstat_provider *provider = nstat_find_provider_by_id(provider_id);

	gshad = kalloc_type(struct nstat_generic_shadow, Z_WAITOK | Z_NOFAIL);

	// Capture details of the calling process; failure aborts the open
	procdetails = nstat_retain_curprocdetails();

	if (procdetails == NULL) {
		kfree_type(struct nstat_generic_shadow, gshad);
		return NULL;
	}

	gshad->gshad_getvals_fn = req_fn;
	gshad->gshad_getextensions_fn = req_extensions_fn;
	gshad->gshad_provider_context = ctx;
	gshad->gshad_properties = properties;
	gshad->gshad_procdetails = procdetails;
	gshad->gshad_provider = provider_id;
	gshad->gshad_start_timestamp = mach_continuous_time();
	gshad->gshad_refcnt = 0;
	gshad->gshad_magic = NSTAT_GENERIC_SHADOW_MAGIC;
	// The caller's reference; released in nstat_provider_stats_close()
	nstat_retain_gshad(gshad);

	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;

	// Even if there are no watchers, we save the shadow structure
	TAILQ_INSERT_HEAD(&nstat_gshad_head, gshad, gshad_link);

	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << provider_id)) != 0) {
			// Does this client want an initial filtering to be made?
			u_int64_t npf_flags = state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
			if (npf_flags & NSTAT_FILTER_INITIAL_PROPERTIES) {
				npf_flags &= NSTAT_FILTER_IFNET_AND_CONN_FLAGS;
				if ((npf_flags != 0) && ((npf_flags & properties) == 0)) {
					// Skip this one
					// Note - no filtering by pid or UUID supported at this point, for simplicity
					continue;
				}
			}
			// this client is watching, so link to it.
			// Each attached source holds its own reference on the shadow
			nstat_retain_gshad(gshad);
			int result = nstat_control_source_add(0, state, provider, gshad);
			if (result != 0) {
				// There should be some kind of statistics for failures like this.
				// <rdar://problem/31377195> The kernel ntstat component should keep some
				// internal counters reflecting operational state for eventual AWD reporting
				nstat_release_gshad(gshad);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return (nstat_context) gshad;
}
3431
3432
// When the source is closed, netstats will make one last call on the request functions to retrieve final values
// Unlinks the shadow, says goodbye to every client that has a source attached
// to it, and drops the caller's reference (freeing the shadow once all attached
// sources have released theirs).
__private_extern__ void
nstat_provider_stats_close(nstat_context nstat_ctx)
{
	tailq_head_nstat_src dead_list;
	nstat_src *src;
	struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)nstat_ctx;

	if (gshad == NULL) {
		printf("%s - called with null reference", __func__);
		return;
	}

	assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

	// Defensive non-fatal check alongside the assert
	if (gshad->gshad_magic != NSTAT_GENERIC_SHADOW_MAGIC) {
		printf("%s - called with incorrect shadow magic 0x%x", __func__, gshad->gshad_magic);
	}

	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);

	TAILQ_REMOVE(&nstat_gshad_head, gshad, gshad_link);

	// Refcount minus the caller's own reference gives the number of
	// currently-attached sources still to be found
	int32_t num_srcs = gshad->gshad_refcnt - 1;
	if ((nstat_generic_provider_watchers[gshad->gshad_provider] != 0) && (num_srcs > 0)) {
		nstat_control_state *state;
		errno_t result;

		for (state = nstat_controls; state; state = state->ncs_next) {
			// Only scan further if this client is watching
			if ((state->ncs_watching & (1 << gshad->gshad_provider)) != 0) {
				lck_mtx_lock(&state->ncs_mtx);
				TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
				{
					if ((gshad == (struct nstat_generic_shadow *)src->cookie) &&
					    (gshad->gshad_provider == src->provider->nstat_provider_id)) {
						break;
					}
				}
				if (src) {
					result = nstat_control_send_goodbye(state, src);
					// There is currently no recovery possible from failure to send,
					// so no need to check the return code.
					// rdar://28312774 (Scalability and resilience issues in ntstat.c)

					TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
					TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
					--num_srcs;
				}
				lck_mtx_unlock(&state->ncs_mtx);

				// Performance optimization, don't scan full lists if no chance of presence
				if (num_srcs == 0) {
					break;
				}
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	// Clean up detached sources outside of nstat_mtx; each drops its
	// reference on the shadow via the provider release callback
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
	// Drop the caller's reference taken at open
	nstat_release_gshad(gshad);
}
3501
// Events that cause a significant change may be reported via a flags word
// Forwards a provider event to each watching client whose filter matches, and
// clears the per-source "reported" flag so suppressed-as-boring sources are
// reconsidered.
void
nstat_provider_stats_event(__unused nstat_context nstat_ctx, __unused uint64_t event)
{
	nstat_src *src;
	struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)nstat_ctx;

	if (gshad == NULL) {
		printf("%s - called with null reference", __func__);
		return;
	}

	assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

	// Defensive non-fatal check alongside the assert
	if (gshad->gshad_magic != NSTAT_GENERIC_SHADOW_MAGIC) {
		printf("%s - called with incorrect shadow magic 0x%x", __func__, gshad->gshad_magic);
	}

	lck_mtx_lock(&nstat_mtx);

	if (nstat_generic_provider_watchers[gshad->gshad_provider] != 0) {
		nstat_control_state *state;
		errno_t result;
		nstat_provider_id_t provider_id = gshad->gshad_provider;

		for (state = nstat_controls; state; state = state->ncs_next) {
			// Only scan further if this client is watching and has interest in the event
			// or the client has requested "boring" unchanged status to be ignored
			if (((state->ncs_watching & (1 << provider_id)) != 0) &&
			    (((state->ncs_provider_filters[provider_id].npf_events & event) != 0) ||
			    ((state->ncs_provider_filters[provider_id].npf_flags & NSTAT_FILTER_SUPPRESS_BORING_FLAGS) != 0))) {
				lck_mtx_lock(&state->ncs_mtx);
				// Find this client's source for the shadow
				TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
				{
					if (gshad == (struct nstat_generic_shadow *)src->cookie) {
						break;
					}
				}

				if (src) {
					// Reset the reported flag; the event itself is only sent
					// if the client's event filter matches
					src->ns_reported = false;
					if ((state->ncs_provider_filters[provider_id].npf_events & event) != 0) {
						result = nstat_control_send_event(state, src, event);
						// There is currently no recovery possible from failure to send,
						// so no need to check the return code.
						// rdar://28312774 (Scalability and resilience issues in ntstat.c)
					}
				}
				lck_mtx_unlock(&state->ncs_mtx);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}
3556
3557 #endif /* SKYWALK */
3558
3559
3560 #pragma mark -- ifnet Provider --
3561
3562 static nstat_provider nstat_ifnet_provider;
3563
3564 /*
3565 * We store a pointer to the ifnet and the original threshold
3566 * requested by the client.
3567 */
3568 struct nstat_ifnet_cookie {
3569 struct ifnet *ifp;
3570 uint64_t threshold;
3571 };
3572
/*
 * Provider lookup for the ifnet provider: resolve the client-supplied
 * ifindex to an ifnet, create a cookie holding a reference to it, and
 * lower the interface's if_data_threshold if this client asked for a
 * smaller value than is currently set.
 *
 * Returns 0 with *out_cookie set on success, EINVAL when the parameter
 * is malformed / no matching interface exists, or a priv_check error.
 */
static errno_t
nstat_ifnet_lookup(
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
	struct ifnet *ifp;
	boolean_t changed = FALSE;
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_ifnet_cookie *cookie;

	/* Thresholds below 1MB are rejected to bound notification frequency. */
	if (length < sizeof(*param) || param->threshold < 1024 * 1024) {
		return EINVAL;
	}
	if (nstat_privcheck != 0) {
		errno_t result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0) {
			return result;
		}
	}
	cookie = kalloc_type(struct nstat_ifnet_cookie,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
	{
		/* ifnet_is_attached(ifp, 1) takes an io refcnt on success. */
		if (!ifnet_is_attached(ifp, 1)) {
			continue;
		}
		ifnet_lock_exclusive(ifp);
		if (ifp->if_index == param->ifindex) {
			cookie->ifp = ifp;
			cookie->threshold = param->threshold;
			*out_cookie = cookie;
			/* Lower the interface threshold if this client asked for less. */
			if (!ifp->if_data_threshold ||
			    ifp->if_data_threshold > param->threshold) {
				changed = TRUE;
				ifp->if_data_threshold = param->threshold;
			}
			ifnet_lock_done(ifp);
			/* Swap the transient io refcnt for a long-lived ifnet reference. */
			ifnet_reference(ifp);
			ifnet_decr_iorefcnt(ifp);
			break;
		}
		ifnet_lock_done(ifp);
		ifnet_decr_iorefcnt(ifp);
	}
	ifnet_head_done();

	/*
	 * When we change the threshold to something smaller, we notify
	 * all of our clients with a description message.
	 * We won't send a message to the client we are currently serving
	 * because it has no `ifnet source' yet.
	 */
	if (changed) {
		lck_mtx_lock(&nstat_mtx);
		for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
			TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
			{
				if (src->provider != &nstat_ifnet_provider) {
					continue;
				}
				nstat_control_send_description(state, src, 0, 0);
			}
			lck_mtx_unlock(&state->ncs_mtx);
		}
		lck_mtx_unlock(&nstat_mtx);
	}
	/* No matching interface was found: free the unused cookie. */
	if (cookie->ifp == NULL) {
		kfree_type(struct nstat_ifnet_cookie, cookie);
	}

	return ifp ? 0 : EINVAL;
}
3652
3653 static int
nstat_ifnet_gone(nstat_provider_cookie_t cookie)3654 nstat_ifnet_gone(
3655 nstat_provider_cookie_t cookie)
3656 {
3657 struct ifnet *ifp;
3658 struct nstat_ifnet_cookie *ifcookie =
3659 (struct nstat_ifnet_cookie *)cookie;
3660
3661 ifnet_head_lock_shared();
3662 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
3663 {
3664 if (ifp == ifcookie->ifp) {
3665 break;
3666 }
3667 }
3668 ifnet_head_done();
3669
3670 return ifp ? 0 : 1;
3671 }
3672
3673 static errno_t
nstat_ifnet_counts(nstat_provider_cookie_t cookie,struct nstat_counts * out_counts,int * out_gone)3674 nstat_ifnet_counts(
3675 nstat_provider_cookie_t cookie,
3676 struct nstat_counts *out_counts,
3677 int *out_gone)
3678 {
3679 struct nstat_ifnet_cookie *ifcookie =
3680 (struct nstat_ifnet_cookie *)cookie;
3681 struct ifnet *ifp = ifcookie->ifp;
3682
3683 if (out_gone) {
3684 *out_gone = 0;
3685 }
3686
3687 // if the ifnet is gone, we should stop using it
3688 if (nstat_ifnet_gone(cookie)) {
3689 if (out_gone) {
3690 *out_gone = 1;
3691 }
3692 return EINVAL;
3693 }
3694
3695 bzero(out_counts, sizeof(*out_counts));
3696 out_counts->nstat_rxpackets = ifp->if_ipackets;
3697 out_counts->nstat_rxbytes = ifp->if_ibytes;
3698 out_counts->nstat_txpackets = ifp->if_opackets;
3699 out_counts->nstat_txbytes = ifp->if_obytes;
3700 out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
3701 return 0;
3702 }
3703
/*
 * Provider release callback for an ifnet source. Recomputes the
 * interface's if_data_threshold from the minimum threshold still
 * requested by any remaining client, drops the cookie's ifnet
 * reference, and frees the cookie.
 */
static void
nstat_ifnet_release(
	nstat_provider_cookie_t cookie,
	__unused int locked)
{
	struct nstat_ifnet_cookie *ifcookie;
	struct ifnet *ifp;
	nstat_control_state *state;
	nstat_src *src;
	uint64_t minthreshold = UINT64_MAX;

	/*
	 * Find all the clients that requested a threshold
	 * for this ifnet and re-calculate if_data_threshold.
	 */
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			/* Skip the provider we are about to detach. */
			if (src->provider != &nstat_ifnet_provider ||
			    src->cookie == cookie) {
				continue;
			}
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			if (ifcookie->threshold < minthreshold) {
				minthreshold = ifcookie->threshold;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
	/*
	 * Reset if_data_threshold or disable it.
	 */
	ifcookie = (struct nstat_ifnet_cookie *)cookie;
	ifp = ifcookie->ifp;
	/* ifnet_is_attached(ifp, 1) takes an io refcnt on success. */
	if (ifnet_is_attached(ifp, 1)) {
		ifnet_lock_exclusive(ifp);
		if (minthreshold == UINT64_MAX) {
			/* No remaining clients: disable threshold notifications. */
			ifp->if_data_threshold = 0;
		} else {
			ifp->if_data_threshold = minthreshold;
		}
		ifnet_lock_done(ifp);
		ifnet_decr_iorefcnt(ifp);
	}
	/* Drop the reference taken in nstat_ifnet_lookup. */
	ifnet_release(ifp);
	kfree_type(struct nstat_ifnet_cookie, ifcookie);
}
3755
/*
 * Fill in desc->link_status from the interface's current link-status
 * report. Only version-1 cellular and version-1 Wi-Fi reports are
 * understood; anything else leaves link_status_type as TYPE_NONE.
 * Each metric is copied only when its bit is set in the source
 * report's valid_bitmask, and the destination bitmask mirrors it.
 */
static void
nstat_ifnet_copy_link_status(
	struct ifnet *ifp,
	struct nstat_ifnet_descriptor *desc)
{
	/* NOTE(review): if_link_status is read before taking if_link_status_lock — confirm updates are serialized elsewhere. */
	struct if_link_status *ifsr = ifp->if_link_status;
	nstat_ifnet_desc_link_status *link_status = &desc->link_status;

	link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
	if (ifsr == NULL) {
		return;
	}

	lck_rw_lock_shared(&ifp->if_link_status_lock);

	if (ifp->if_type == IFT_CELLULAR) {
		nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
		struct if_cellular_status_v1 *if_cell_sr =
		    &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

		/* Only version 1 cellular reports are understood. */
		if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1) {
			goto done;
		}

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;

		if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
			cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
			cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
			cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
			cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
			cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
		}
		/* Retransmit level is an enum; unknown values invalidate the field. */
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
			if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE) {
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
			} else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW) {
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
			} else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM) {
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
			} else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH) {
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
			} else {
				cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
			}
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
			cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
			cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
			cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
			cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
			cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
			cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
			cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
			cell_status->mss_recommended = if_cell_sr->mss_recommended;
		}
	} else if (IFNET_IS_WIFI(ifp)) {
		nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
		struct if_wifi_status_v1 *if_wifi_sr =
		    &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;

		/* Only version 1 Wi-Fi reports are understood. */
		if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1) {
			goto done;
		}

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;

		if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
			wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
			wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
			wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
			wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
			wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
		}
		/* Retransmit level is an enum; unknown values invalidate the field. */
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
			if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE) {
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
			} else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW) {
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
			} else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM) {
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
			} else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH) {
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
			} else {
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
			}
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
			wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
			wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
			wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
			wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
			wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
			wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
			wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
		}
		/* Frequency band is an enum; unknown values invalidate the field. */
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
			if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ) {
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
			} else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ) {
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
			} else {
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
			}
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
			wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
			wifi_status->scan_count = if_wifi_sr->scan_count;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
			wifi_status->scan_duration = if_wifi_sr->scan_duration;
		}
	}

done:
	lck_rw_done(&ifp->if_link_status_lock);
}
3964
/* net_uptime() timestamp of the last ECN stats report; throttles reporting. */
static u_int64_t nstat_ifnet_last_report_time = 0;
extern int tcp_report_stats_interval;
3967
3968 static void
nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat * ifst)3969 nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
3970 {
3971 /* Retransmit percentage */
3972 if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
3973 /* shift by 10 for precision */
3974 ifst->rxmit_percent =
3975 ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
3976 } else {
3977 ifst->rxmit_percent = 0;
3978 }
3979
3980 /* Out-of-order percentage */
3981 if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
3982 /* shift by 10 for precision */
3983 ifst->oo_percent =
3984 ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
3985 } else {
3986 ifst->oo_percent = 0;
3987 }
3988
3989 /* Reorder percentage */
3990 if (ifst->total_reorderpkts > 0 &&
3991 (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
3992 /* shift by 10 for precision */
3993 ifst->reorder_percent =
3994 ((ifst->total_reorderpkts << 10) * 100) /
3995 (ifst->total_txpkts + ifst->total_rxpkts);
3996 } else {
3997 ifst->reorder_percent = 0;
3998 }
3999 }
4000
4001 static void
nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat * if_st)4002 nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
4003 {
4004 u_int64_t ecn_on_conn, ecn_off_conn;
4005
4006 if (if_st == NULL) {
4007 return;
4008 }
4009 ecn_on_conn = if_st->ecn_client_success +
4010 if_st->ecn_server_success;
4011 ecn_off_conn = if_st->ecn_off_conn +
4012 (if_st->ecn_client_setup - if_st->ecn_client_success) +
4013 (if_st->ecn_server_setup - if_st->ecn_server_success);
4014
4015 /*
4016 * report sack episodes, rst_drop and rxmit_drop
4017 * as a ratio per connection, shift by 10 for precision
4018 */
4019 if (ecn_on_conn > 0) {
4020 if_st->ecn_on.sack_episodes =
4021 (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
4022 if_st->ecn_on.rst_drop =
4023 (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
4024 if_st->ecn_on.rxmit_drop =
4025 (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
4026 } else {
4027 /* set to zero, just in case */
4028 if_st->ecn_on.sack_episodes = 0;
4029 if_st->ecn_on.rst_drop = 0;
4030 if_st->ecn_on.rxmit_drop = 0;
4031 }
4032
4033 if (ecn_off_conn > 0) {
4034 if_st->ecn_off.sack_episodes =
4035 (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
4036 if_st->ecn_off.rst_drop =
4037 (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
4038 if_st->ecn_off.rxmit_drop =
4039 (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
4040 } else {
4041 if_st->ecn_off.sack_episodes = 0;
4042 if_st->ecn_off.rst_drop = 0;
4043 if_st->ecn_off.rxmit_drop = 0;
4044 }
4045 if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
4046 }
4047
/*
 * Periodically report per-interface ECN statistics (IPv4 then IPv6) to
 * sysinfo listeners, then zero the interface counters. Throttled to at
 * most once per tcp_report_stats_interval seconds of net uptime.
 * Reporting is limited to Ethernet, Wi-Fi and cellular interfaces.
 */
static void
nstat_ifnet_report_ecn_stats(void)
{
	u_int64_t uptime, last_report_time;
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_ifnet_ecn_stats *st;
	struct ifnet *ifp;

	uptime = net_uptime();

	/* Too soon since the last report: do nothing. */
	if ((int)(uptime - nstat_ifnet_last_report_time) <
	    tcp_report_stats_interval) {
		return;
	}

	last_report_time = nstat_ifnet_last_report_time;
	nstat_ifnet_last_report_time = uptime;
	data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
	st = &data.u.ifnet_ecn_stats;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL) {
			continue;
		}

		if (!IF_FULLY_ATTACHED(ifp)) {
			continue;
		}

		/* Limit reporting to Wifi, Ethernet and cellular. */
		if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
			continue;
		}

		bzero(st, sizeof(*st));
		if (IFNET_IS_CELLULAR(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
		} else if (IFNET_IS_WIFI(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
		} else {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
		}
		data.unsent_data_cnt = ifp->if_unsent_data_cnt;
		/* skip if there was no update since last report */
		if (ifp->if_ipv4_stat->timestamp <= 0 ||
		    ifp->if_ipv4_stat->timestamp < last_report_time) {
			goto v6;
		}
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
		bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);
		/* Reset the IPv4 counters so the next interval starts fresh. */
		bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));

v6:
		/* skip if there was no update since last report */
		if (ifp->if_ipv6_stat->timestamp <= 0 ||
		    ifp->if_ipv6_stat->timestamp < last_report_time) {
			continue;
		}
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;

		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
		bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);

		/* Zero the stats in ifp */
		bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
	}
	ifnet_head_done();
}
4128
/* Some thresholds to determine Low Internet mode */
4130 #define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD 1000000 /* 1 Mbps */
4131 #define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD 500000 /* 500 Kbps */
4132 #define NSTAT_LIM_UL_MIN_RTT_THRESHOLD 1000 /* 1 second */
4133 #define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */
4134 #define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD (2 << 10) /* 2 percent packet loss rate */
4135
4136 static boolean_t
nstat_lim_activity_check(struct if_lim_perf_stat * st)4137 nstat_lim_activity_check(struct if_lim_perf_stat *st)
4138 {
4139 /* check that the current activity is enough to report stats */
4140 if (st->lim_total_txpkts < nstat_lim_min_tx_pkts ||
4141 st->lim_total_rxpkts < nstat_lim_min_rx_pkts ||
4142 st->lim_conn_attempts == 0) {
4143 return FALSE;
4144 }
4145
4146 /*
4147 * Compute percentages if there was enough activity. Use
4148 * shift-left by 10 to preserve precision.
4149 */
4150 st->lim_packet_loss_percent = ((st->lim_total_retxpkts << 10) /
4151 st->lim_total_txpkts) * 100;
4152
4153 st->lim_packet_ooo_percent = ((st->lim_total_oopkts << 10) /
4154 st->lim_total_rxpkts) * 100;
4155
4156 st->lim_conn_timeout_percent = ((st->lim_conn_timeouts << 10) /
4157 st->lim_conn_attempts) * 100;
4158
4159 /*
4160 * Is Low Internet detected? First order metrics are bandwidth
4161 * and RTT. If these metrics are below the minimum thresholds
4162 * defined then the network attachment can be classified as
4163 * having Low Internet capacity.
4164 *
4165 * High connection timeout rate also indicates Low Internet
4166 * capacity.
4167 */
4168 if (st->lim_dl_max_bandwidth > 0 &&
4169 st->lim_dl_max_bandwidth <= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD) {
4170 st->lim_dl_detected = 1;
4171 }
4172
4173 if ((st->lim_ul_max_bandwidth > 0 &&
4174 st->lim_ul_max_bandwidth <= NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD) ||
4175 st->lim_rtt_min >= NSTAT_LIM_UL_MIN_RTT_THRESHOLD) {
4176 st->lim_ul_detected = 1;
4177 }
4178
4179 if (st->lim_conn_attempts > 20 &&
4180 st->lim_conn_timeout_percent >=
4181 NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD) {
4182 st->lim_ul_detected = 1;
4183 }
4184 /*
4185 * Second order metrics: If there was high packet loss even after
4186 * using delay based algorithms then we classify it as Low Internet
4187 * again
4188 */
4189 if (st->lim_bk_txpkts >= nstat_lim_min_tx_pkts &&
4190 st->lim_packet_loss_percent >=
4191 NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD) {
4192 st->lim_ul_detected = 1;
4193 }
4194 return TRUE;
4195 }
4196
/* net_uptime() timestamp of the last LIM stats report; throttles reporting. */
static u_int64_t nstat_lim_last_report_time = 0;
/*
 * Periodically report Low Internet Mode statistics for each eligible
 * interface (Ethernet, Wi-Fi, cellular) to sysinfo listeners, then
 * zero the per-interface LIM counters. Throttled to at most once per
 * nstat_lim_interval seconds of net uptime.
 */
static void
nstat_ifnet_report_lim_stats(void)
{
	u_int64_t uptime;
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_lim_stats *st;
	struct ifnet *ifp;
	int err;

	uptime = net_uptime();

	/* Too soon since the last report: do nothing. */
	if ((u_int32_t)(uptime - nstat_lim_last_report_time) <
	    nstat_lim_interval) {
		return;
	}

	nstat_lim_last_report_time = uptime;
	data.flags = NSTAT_SYSINFO_LIM_STATS;
	st = &data.u.lim_stats;
	data.unsent_data_cnt = 0;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (!IF_FULLY_ATTACHED(ifp)) {
			continue;
		}

		/* Limit reporting to Wifi, Ethernet and cellular */
		if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) {
			continue;
		}

		if (!nstat_lim_activity_check(&ifp->if_lim_stat)) {
			continue;
		}

		bzero(st, sizeof(*st));
		st->ifnet_siglen = sizeof(st->ifnet_signature);
		/* Try the IPv4 network signature first, fall back to IPv6. */
		err = ifnet_get_netsignature(ifp, AF_INET,
		    (u_int8_t *)&st->ifnet_siglen, NULL,
		    st->ifnet_signature);
		if (err != 0) {
			err = ifnet_get_netsignature(ifp, AF_INET6,
			    (u_int8_t *)&st->ifnet_siglen, NULL,
			    st->ifnet_signature);
			if (err != 0) {
				continue;
			}
		}
		ifnet_lock_shared(ifp);
		if (IFNET_IS_CELLULAR(ifp)) {
			st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
		} else if (IFNET_IS_WIFI(ifp)) {
			st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
		} else {
			st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET;
		}
		bcopy(&ifp->if_lim_stat, &st->lim_stat,
		    sizeof(st->lim_stat));

		/* Zero the stats in ifp */
		bzero(&ifp->if_lim_stat, sizeof(ifp->if_lim_stat));
		ifnet_lock_done(ifp);
		nstat_sysinfo_send_data(&data);
	}
	ifnet_head_done();
}
4265
4266 static errno_t
nstat_ifnet_copy_descriptor(nstat_provider_cookie_t cookie,void * data,size_t len)4267 nstat_ifnet_copy_descriptor(
4268 nstat_provider_cookie_t cookie,
4269 void *data,
4270 size_t len)
4271 {
4272 nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
4273 struct nstat_ifnet_cookie *ifcookie =
4274 (struct nstat_ifnet_cookie *)cookie;
4275 struct ifnet *ifp = ifcookie->ifp;
4276
4277 if (len < sizeof(nstat_ifnet_descriptor)) {
4278 return EINVAL;
4279 }
4280
4281 if (nstat_ifnet_gone(cookie)) {
4282 return EINVAL;
4283 }
4284
4285 bzero(desc, sizeof(*desc));
4286 ifnet_lock_shared(ifp);
4287 strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
4288 desc->ifindex = ifp->if_index;
4289 desc->threshold = ifp->if_data_threshold;
4290 desc->type = ifp->if_type;
4291 if (ifp->if_desc.ifd_len < sizeof(desc->description)) {
4292 memcpy(desc->description, ifp->if_desc.ifd_desc,
4293 sizeof(desc->description));
4294 }
4295 nstat_ifnet_copy_link_status(ifp, desc);
4296 ifnet_lock_done(ifp);
4297 return 0;
4298 }
4299
4300 static bool
nstat_ifnet_cookie_equal(nstat_provider_cookie_t cookie1,nstat_provider_cookie_t cookie2)4301 nstat_ifnet_cookie_equal(
4302 nstat_provider_cookie_t cookie1,
4303 nstat_provider_cookie_t cookie2)
4304 {
4305 struct nstat_ifnet_cookie *c1 = (struct nstat_ifnet_cookie *)cookie1;
4306 struct nstat_ifnet_cookie *c2 = (struct nstat_ifnet_cookie *)cookie2;
4307
4308 return (c1->ifp->if_index == c2->ifp->if_index) ? true : false;
4309 }
4310
/*
 * Initialize the ifnet provider's callback table and push it onto the
 * global nstat_providers list. The ifnet provider supports lookup but
 * not watcher add/remove.
 */
static void
nstat_init_ifnet_provider(void)
{
	bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
	nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
	nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
	nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
	nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
	nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
	nstat_ifnet_provider.nstat_watcher_add = NULL;
	nstat_ifnet_provider.nstat_watcher_remove = NULL;
	nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
	nstat_ifnet_provider.nstat_cookie_equal = nstat_ifnet_cookie_equal;
	nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
	/* Link at the head of the global provider list. */
	nstat_ifnet_provider.next = nstat_providers;
	nstat_providers = &nstat_ifnet_provider;
}
4328
4329 __private_extern__ void
nstat_ifnet_threshold_reached(unsigned int ifindex)4330 nstat_ifnet_threshold_reached(unsigned int ifindex)
4331 {
4332 nstat_control_state *state;
4333 nstat_src *src;
4334 struct ifnet *ifp;
4335 struct nstat_ifnet_cookie *ifcookie;
4336
4337 lck_mtx_lock(&nstat_mtx);
4338 for (state = nstat_controls; state; state = state->ncs_next) {
4339 lck_mtx_lock(&state->ncs_mtx);
4340 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4341 {
4342 if (src->provider != &nstat_ifnet_provider) {
4343 continue;
4344 }
4345 ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
4346 ifp = ifcookie->ifp;
4347 if (ifp->if_index != ifindex) {
4348 continue;
4349 }
4350 nstat_control_send_counts(state, src, 0, 0, NULL);
4351 }
4352 lck_mtx_unlock(&state->ncs_mtx);
4353 }
4354 lck_mtx_unlock(&nstat_mtx);
4355 }
4356
4357 #pragma mark -- Sysinfo --
4358 static void
nstat_set_keyval_scalar(nstat_sysinfo_keyval * kv,int key,u_int32_t val)4359 nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
4360 {
4361 kv->nstat_sysinfo_key = key;
4362 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
4363 kv->u.nstat_sysinfo_scalar = val;
4364 kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
4365 }
4366
4367 static void
nstat_set_keyval_u64_scalar(nstat_sysinfo_keyval * kv,int key,u_int64_t val)4368 nstat_set_keyval_u64_scalar(nstat_sysinfo_keyval *kv, int key, u_int64_t val)
4369 {
4370 kv->nstat_sysinfo_key = key;
4371 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
4372 kv->u.nstat_sysinfo_scalar = val;
4373 kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
4374 }
4375
4376 static void
nstat_set_keyval_string(nstat_sysinfo_keyval * kv,int key,u_int8_t * buf,u_int32_t len)4377 nstat_set_keyval_string(nstat_sysinfo_keyval *kv, int key, u_int8_t *buf,
4378 u_int32_t len)
4379 {
4380 kv->nstat_sysinfo_key = key;
4381 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_STRING;
4382 kv->nstat_sysinfo_valsize = min(len,
4383 NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE);
4384 bcopy(buf, kv->u.nstat_sysinfo_string, kv->nstat_sysinfo_valsize);
4385 }
4386
/*
 * Build and enqueue a single NSTAT_MSG_TYPE_SYSINFO_COUNTS message for one
 * subscribed client. The message payload is a flat array of key/value slots
 * whose count and contents depend on data->flags; unknown flag values are
 * silently ignored. The buffer is heap-allocated, filled, enqueued on the
 * client's kernel-control socket, and freed before return.
 *
 * Caller must hold control->ncs_mtx (see nstat_sysinfo_send_data).
 */
static void
nstat_sysinfo_send_data_internal(
	nstat_control_state *control,
	nstat_sysinfo_data *data)
{
	nstat_msg_sysinfo_counts *syscnt = NULL;
	size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
	nstat_sysinfo_keyval *kv;
	errno_t result = 0;
	size_t i = 0;	/* number of key/value slots actually filled */

	/* Fixed header sizes; the keyval array is appended after these. */
	allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
	countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
	finalsize = allocsize;

	/* get number of key-vals for each kind of stat */
	switch (data->flags) {
	case NSTAT_SYSINFO_MBUF_STATS:
		/* one u_int32_t field per slot */
		nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
		    sizeof(u_int32_t);
		break;
	case NSTAT_SYSINFO_TCP_STATS:
		nkeyvals = NSTAT_SYSINFO_TCP_STATS_COUNT;
		break;
	case NSTAT_SYSINFO_IFNET_ECN_STATS:
		/* one u_int64_t field per slot */
		nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
		    sizeof(u_int64_t));

		/* Two more keys for ifnet type and proto */
		nkeyvals += 2;

		/* One key for unsent data. */
		nkeyvals++;
		break;
	case NSTAT_SYSINFO_LIM_STATS:
		nkeyvals = NSTAT_LIM_STAT_KEYVAL_COUNT;
		break;
	case NSTAT_SYSINFO_NET_API_STATS:
		nkeyvals = NSTAT_NET_API_STAT_KEYVAL_COUNT;
		break;
	default:
		/* unrecognized stat kind: nothing to send */
		return;
	}
	countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
	allocsize += countsize;

	syscnt = (nstat_msg_sysinfo_counts *) kalloc_data(allocsize,
	    Z_WAITOK | Z_ZERO);
	if (syscnt == NULL) {
		return;
	}

	kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
	/* Fill the keyval array. The key ordering below is part of the wire
	 * format consumed by user space; do not reorder entries. */
	switch (data->flags) {
	case NSTAT_SYSINFO_MBUF_STATS:
	{
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
		    data->u.mb_stats.total_256b);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
		    data->u.mb_stats.total_2kb);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
		    data->u.mb_stats.total_4kb);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MBUF_16KB_TOTAL,
		    data->u.mb_stats.total_16kb);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_KEY_SOCK_MBCNT,
		    data->u.mb_stats.sbmb_total);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
		    data->u.mb_stats.sb_atmbuflimit);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MBUF_DRAIN_CNT,
		    data->u.mb_stats.draincnt);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MBUF_MEM_RELEASED,
		    data->u.mb_stats.memreleased);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_KEY_SOCK_MBFLOOR,
		    data->u.mb_stats.sbmb_floor);
		/* mbuf stats must fill every computed slot exactly */
		VERIFY(i == nkeyvals);
		break;
	}
	case NSTAT_SYSINFO_TCP_STATS:
	{
		/* RTT / loss / reorder rates */
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
		    data->u.tcp_stats.ipv4_avgrtt);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
		    data->u.tcp_stats.ipv6_avgrtt);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_KEY_SEND_PLR,
		    data->u.tcp_stats.send_plr);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_KEY_RECV_PLR,
		    data->u.tcp_stats.recv_plr);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_KEY_SEND_TLRTO,
		    data->u.tcp_stats.send_tlrto_rate);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
		    data->u.tcp_stats.send_reorder_rate);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
		    data->u.tcp_stats.connection_attempts);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_CONNECTION_ACCEPTS,
		    data->u.tcp_stats.connection_accepts);
		/* ECN negotiation and signaling counters */
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
		    data->u.tcp_stats.ecn_client_enabled);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_SERVER_ENABLED,
		    data->u.tcp_stats.ecn_server_enabled);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_CLIENT_SETUP,
		    data->u.tcp_stats.ecn_client_setup);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_SERVER_SETUP,
		    data->u.tcp_stats.ecn_server_setup);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
		    data->u.tcp_stats.ecn_client_success);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
		    data->u.tcp_stats.ecn_server_success);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
		    data->u.tcp_stats.ecn_not_supported);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_LOST_SYN,
		    data->u.tcp_stats.ecn_lost_syn);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_LOST_SYNACK,
		    data->u.tcp_stats.ecn_lost_synack);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_RECV_CE,
		    data->u.tcp_stats.ecn_recv_ce);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_RECV_ECE,
		    data->u.tcp_stats.ecn_recv_ece);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_SENT_ECE,
		    data->u.tcp_stats.ecn_sent_ece);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_CONN_RECV_CE,
		    data->u.tcp_stats.ecn_conn_recv_ce);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
		    data->u.tcp_stats.ecn_conn_recv_ece);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_CONN_PLNOCE,
		    data->u.tcp_stats.ecn_conn_plnoce);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_CONN_PL_CE,
		    data->u.tcp_stats.ecn_conn_pl_ce);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
		    data->u.tcp_stats.ecn_conn_nopl_ce);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
		    data->u.tcp_stats.ecn_fallback_synloss);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
		    data->u.tcp_stats.ecn_fallback_reorder);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_FALLBACK_CE,
		    data->u.tcp_stats.ecn_fallback_ce);
		/* TCP Fast Open counters */
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
		    data->u.tcp_stats.tfo_syn_data_rcv);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
		    data->u.tcp_stats.tfo_cookie_req_rcv);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_COOKIE_SENT,
		    data->u.tcp_stats.tfo_cookie_sent);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_COOKIE_INVALID,
		    data->u.tcp_stats.tfo_cookie_invalid);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_COOKIE_REQ,
		    data->u.tcp_stats.tfo_cookie_req);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_COOKIE_RCV,
		    data->u.tcp_stats.tfo_cookie_rcv);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
		    data->u.tcp_stats.tfo_syn_data_sent);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
		    data->u.tcp_stats.tfo_syn_data_acked);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_SYN_LOSS,
		    data->u.tcp_stats.tfo_syn_loss);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_BLACKHOLE,
		    data->u.tcp_stats.tfo_blackhole);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_COOKIE_WRONG,
		    data->u.tcp_stats.tfo_cookie_wrong);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_NO_COOKIE_RCV,
		    data->u.tcp_stats.tfo_no_cookie_rcv);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE,
		    data->u.tcp_stats.tfo_heuristics_disable);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_TFO_SEND_BLACKHOLE,
		    data->u.tcp_stats.tfo_sndblackhole);
		/* MPTCP attempt/success/handover counters */
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT,
		    data->u.tcp_stats.mptcp_handover_attempt);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT,
		    data->u.tcp_stats.mptcp_interactive_attempt);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT,
		    data->u.tcp_stats.mptcp_aggregate_attempt);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT,
		    data->u.tcp_stats.mptcp_fp_handover_attempt);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT,
		    data->u.tcp_stats.mptcp_fp_interactive_attempt);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT,
		    data->u.tcp_stats.mptcp_fp_aggregate_attempt);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK,
		    data->u.tcp_stats.mptcp_heuristic_fallback);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK,
		    data->u.tcp_stats.mptcp_fp_heuristic_fallback);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI,
		    data->u.tcp_stats.mptcp_handover_success_wifi);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL,
		    data->u.tcp_stats.mptcp_handover_success_cell);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS,
		    data->u.tcp_stats.mptcp_interactive_success);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS,
		    data->u.tcp_stats.mptcp_aggregate_success);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI,
		    data->u.tcp_stats.mptcp_fp_handover_success_wifi);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL,
		    data->u.tcp_stats.mptcp_fp_handover_success_cell);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS,
		    data->u.tcp_stats.mptcp_fp_interactive_success);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS,
		    data->u.tcp_stats.mptcp_fp_aggregate_success);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI,
		    data->u.tcp_stats.mptcp_handover_cell_from_wifi);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL,
		    data->u.tcp_stats.mptcp_handover_wifi_from_cell);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI,
		    data->u.tcp_stats.mptcp_interactive_cell_from_wifi);
		/* byte counters are 64-bit */
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES,
		    data->u.tcp_stats.mptcp_handover_cell_bytes);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES,
		    data->u.tcp_stats.mptcp_interactive_cell_bytes);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES,
		    data->u.tcp_stats.mptcp_aggregate_cell_bytes);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES,
		    data->u.tcp_stats.mptcp_handover_all_bytes);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES,
		    data->u.tcp_stats.mptcp_interactive_all_bytes);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES,
		    data->u.tcp_stats.mptcp_aggregate_all_bytes);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI,
		    data->u.tcp_stats.mptcp_back_to_wifi);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_WIFI_PROXY,
		    data->u.tcp_stats.mptcp_wifi_proxy);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_CELL_PROXY,
		    data->u.tcp_stats.mptcp_cell_proxy);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL,
		    data->u.tcp_stats.mptcp_triggered_cell);
		/* TCP stats must fill every computed slot exactly */
		VERIFY(i == nkeyvals);
		break;
	}
	case NSTAT_SYSINFO_IFNET_ECN_STATS:
	{
		/* interface identity first, then the raw ECN stat struct */
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_TYPE,
		    data->u.ifnet_ecn_stats.ifnet_type);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_PROTO,
		    data->u.ifnet_ecn_stats.ifnet_proto);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
		/* per-connection TCP quality metrics, ECN-on vs ECN-off */
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
		/* the extra unsent-data key accounted for in the sizing above */
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_IFNET_UNSENT_DATA,
		    data->unsent_data_cnt);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST,
		    data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst);
		break;
	}
	case NSTAT_SYSINFO_LIM_STATS:
	{
		/* Low Internet Mode: identify the interface by signature blob */
		nstat_set_keyval_string(&kv[i++],
		    NSTAT_SYSINFO_LIM_IFNET_SIGNATURE,
		    data->u.lim_stats.ifnet_signature,
		    data->u.lim_stats.ifnet_siglen);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH,
		    data->u.lim_stats.lim_stat.lim_dl_max_bandwidth);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH,
		    data->u.lim_stats.lim_stat.lim_ul_max_bandwidth);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT,
		    data->u.lim_stats.lim_stat.lim_packet_loss_percent);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT,
		    data->u.lim_stats.lim_stat.lim_packet_ooo_percent);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE,
		    data->u.lim_stats.lim_stat.lim_rtt_variance);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_LIM_IFNET_RTT_MIN,
		    data->u.lim_stats.lim_stat.lim_rtt_min);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_LIM_IFNET_RTT_AVG,
		    data->u.lim_stats.lim_stat.lim_rtt_average);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT,
		    data->u.lim_stats.lim_stat.lim_conn_timeout_percent);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED,
		    data->u.lim_stats.lim_stat.lim_dl_detected);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED,
		    data->u.lim_stats.lim_stat.lim_ul_detected);
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_LIM_IFNET_TYPE,
		    data->u.lim_stats.ifnet_type);
		break;
	}
	case NSTAT_SYSINFO_NET_API_STATS:
	{
		/* filter registration counters */
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_IF_FLTR_ATTACH,
		    data->u.net_api_stats.net_api_stats.nas_iflt_attach_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS,
		    data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_IP_FLTR_ADD,
		    data->u.net_api_stats.net_api_stats.nas_ipf_add_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_IP_FLTR_ADD_OS,
		    data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH,
		    data->u.net_api_stats.net_api_stats.nas_sfltr_register_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS,
		    data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total);


		/* socket allocation counters */
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL,
		    data->u.net_api_stats.net_api_stats.nas_socket_alloc_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL,
		    data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS,
		    data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID,
		    data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total);

		/* per-domain socket counters */
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL,
		    data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE,
		    data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_DOMAIN_INET,
		    data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6,
		    data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM,
		    data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH,
		    data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY,
		    data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV,
		    data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER,
		    data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total);

		/* IPv4 socket usage */
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_INET_STREAM,
		    data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_INET_DGRAM,
		    data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED,
		    data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS,
		    data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA,
		    data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data);

		/* IPv6 socket usage */
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_INET6_STREAM,
		    data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_INET6_DGRAM,
		    data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED,
		    data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS,
		    data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA,
		    data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data);

		/* multicast joins */
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN,
		    data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS,
		    data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total);

		/* nexus (skywalk) flow counters */
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM,
		    data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM,
		    data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total);

		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM,
		    data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM,
		    data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total);

		/* interface allocation */
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_IFNET_ALLOC,
		    data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_IFNET_ALLOC_OS,
		    data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total);

		/* packet filter rules */
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_PF_ADDRULE,
		    data->u.net_api_stats.net_api_stats.nas_pf_addrule_total);
		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_PF_ADDRULE_OS,
		    data->u.net_api_stats.net_api_stats.nas_pf_addrule_os);

		nstat_set_keyval_u64_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_VMNET_START,
		    data->u.net_api_stats.net_api_stats.nas_vmnet_total);

#if SKYWALK
		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_IF_NETAGENT_ENABLED,
		    if_is_fsw_transport_netagent_enabled());
#endif /* SKYWALK */

		nstat_set_keyval_scalar(&kv[i++],
		    NSTAT_SYSINFO_API_REPORT_INTERVAL,
		    data->u.net_api_stats.report_interval);

		break;
	}
	}
	if (syscnt != NULL) {
		/* i may be < nkeyvals (e.g. ECN case fills fewer than sized);
		 * the message is trimmed to the slots actually written. */
		VERIFY(i > 0 && i <= nkeyvals);
		countsize = offsetof(nstat_sysinfo_counts,
		    nstat_sysinfo_keyvals) +
		    sizeof(nstat_sysinfo_keyval) * i;
		finalsize += countsize;
		syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
		assert(finalsize <= MAX_NSTAT_MSG_HDR_LENGTH);
		syscnt->hdr.length = (u_int16_t)finalsize;
		syscnt->counts.nstat_sysinfo_len = (u_int32_t)countsize;

		result = ctl_enqueuedata(control->ncs_kctl,
		    control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
		if (result != 0) {
			/* client's socket buffer was full; count the drop */
			nstat_stats.nstat_sysinfofailures += 1;
		}
		kfree_data(syscnt, allocsize);
	}
	return;
}
5047
5048 __private_extern__ void
nstat_sysinfo_send_data(nstat_sysinfo_data * data)5049 nstat_sysinfo_send_data(
5050 nstat_sysinfo_data *data)
5051 {
5052 nstat_control_state *control;
5053
5054 lck_mtx_lock(&nstat_mtx);
5055 for (control = nstat_controls; control; control = control->ncs_next) {
5056 lck_mtx_lock(&control->ncs_mtx);
5057 if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0) {
5058 nstat_sysinfo_send_data_internal(control, data);
5059 }
5060 lck_mtx_unlock(&control->ncs_mtx);
5061 }
5062 lck_mtx_unlock(&nstat_mtx);
5063 }
5064
/*
 * Trigger every periodic sysinfo report in one pass. Each callee gathers
 * its own statistics and funnels them to subscribed clients through
 * nstat_sysinfo_send_data().
 */
static void
nstat_sysinfo_generate_report(void)
{
	mbuf_report_peak_usage();       /* mbuf pool peaks */
	tcp_report_stats();             /* global TCP / ECN / TFO / MPTCP */
	nstat_ifnet_report_ecn_stats(); /* per-interface ECN */
	nstat_ifnet_report_lim_stats(); /* per-interface Low Internet Mode */
	nstat_net_api_report_stats();   /* kernel networking API usage */
}
5074
5075 #pragma mark -- net_api --
5076
/* Snapshot of net_api_stats taken at the previous report, used to compute
 * per-interval deltas for the STATDIFF fields. */
static struct net_api_stats net_api_stats_before;
/* net_uptime() value (seconds) when the last report was emitted. */
static u_int64_t net_api_stats_last_report_time;
5079
/*
 * Emit the periodic networking-API usage report, rate-limited to once per
 * net_api_stats_report_interval seconds of net uptime.
 *
 * Fields marked STATCOPY report the current absolute value; fields marked
 * STATDIFF report the delta since the previous report (taken against the
 * net_api_stats_before snapshot, which is refreshed at the end).
 */
static void
nstat_net_api_report_stats(void)
{
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_net_api_stats *st = &data.u.net_api_stats;
	u_int64_t uptime;

	uptime = net_uptime();

	/* rate limit: skip if the report interval has not elapsed yet */
	if ((u_int32_t)(uptime - net_api_stats_last_report_time) <
	    net_api_stats_report_interval) {
		return;
	}

	st->report_interval = (u_int32_t)(uptime - net_api_stats_last_report_time);
	net_api_stats_last_report_time = uptime;

	data.flags = NSTAT_SYSINFO_NET_API_STATS;
	data.unsent_data_cnt = 0;

	/*
	 * Some of the fields in the report are the current value and
	 * other fields are the delta from the last report:
	 * - Report difference for the per flow counters as they increase
	 *   with time
	 * - Report current value for other counters as they tend not to change
	 *   much with time
	 */
#define STATCOPY(f) \
	(st->net_api_stats.f = net_api_stats.f)
#define STATDIFF(f) \
	(st->net_api_stats.f = net_api_stats.f - net_api_stats_before.f)

	STATCOPY(nas_iflt_attach_count);
	STATCOPY(nas_iflt_attach_total);
	STATCOPY(nas_iflt_attach_os_total);

	STATCOPY(nas_ipf_add_count);
	STATCOPY(nas_ipf_add_total);
	STATCOPY(nas_ipf_add_os_total);

	STATCOPY(nas_sfltr_register_count);
	STATCOPY(nas_sfltr_register_total);
	STATCOPY(nas_sfltr_register_os_total);

	STATDIFF(nas_socket_alloc_total);
	STATDIFF(nas_socket_in_kernel_total);
	STATDIFF(nas_socket_in_kernel_os_total);
	STATDIFF(nas_socket_necp_clientuuid_total);

	STATDIFF(nas_socket_domain_local_total);
	STATDIFF(nas_socket_domain_route_total);
	STATDIFF(nas_socket_domain_inet_total);
	STATDIFF(nas_socket_domain_inet6_total);
	STATDIFF(nas_socket_domain_system_total);
	STATDIFF(nas_socket_domain_multipath_total);
	STATDIFF(nas_socket_domain_key_total);
	STATDIFF(nas_socket_domain_ndrv_total);
	STATDIFF(nas_socket_domain_other_total);

	STATDIFF(nas_socket_inet_stream_total);
	STATDIFF(nas_socket_inet_dgram_total);
	STATDIFF(nas_socket_inet_dgram_connected);
	STATDIFF(nas_socket_inet_dgram_dns);
	STATDIFF(nas_socket_inet_dgram_no_data);

	STATDIFF(nas_socket_inet6_stream_total);
	STATDIFF(nas_socket_inet6_dgram_total);
	STATDIFF(nas_socket_inet6_dgram_connected);
	STATDIFF(nas_socket_inet6_dgram_dns);
	STATDIFF(nas_socket_inet6_dgram_no_data);

	STATDIFF(nas_socket_mcast_join_total);
	STATDIFF(nas_socket_mcast_join_os_total);

	STATDIFF(nas_sock_inet6_stream_exthdr_in);
	STATDIFF(nas_sock_inet6_stream_exthdr_out);
	STATDIFF(nas_sock_inet6_dgram_exthdr_in);
	STATDIFF(nas_sock_inet6_dgram_exthdr_out);

	STATDIFF(nas_nx_flow_inet_stream_total);
	STATDIFF(nas_nx_flow_inet_dgram_total);

	STATDIFF(nas_nx_flow_inet6_stream_total);
	STATDIFF(nas_nx_flow_inet6_dgram_total);

	STATCOPY(nas_ifnet_alloc_count);
	STATCOPY(nas_ifnet_alloc_total);
	STATCOPY(nas_ifnet_alloc_os_count);
	STATCOPY(nas_ifnet_alloc_os_total);

	STATCOPY(nas_pf_addrule_total);
	STATCOPY(nas_pf_addrule_os);

	STATCOPY(nas_vmnet_total);

#undef STATCOPY
#undef STATDIFF

	nstat_sysinfo_send_data(&data);

	/*
	 * Save a copy of the current fields so we can diff them the next time
	 */
	memcpy(&net_api_stats_before, &net_api_stats,
	    sizeof(struct net_api_stats));
	_CASSERT(sizeof(net_api_stats_before) == sizeof(net_api_stats));
}
5188
5189
5190 #pragma mark -- Kernel Control Socket --
5191
5192 static kern_ctl_ref nstat_ctlref = NULL;
5193
5194 static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
5195 static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
5196 static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
5197
5198 static errno_t
nstat_enqueue_success(uint64_t context,nstat_control_state * state,u_int16_t flags)5199 nstat_enqueue_success(
5200 uint64_t context,
5201 nstat_control_state *state,
5202 u_int16_t flags)
5203 {
5204 nstat_msg_hdr success;
5205 errno_t result;
5206
5207 bzero(&success, sizeof(success));
5208 success.context = context;
5209 success.type = NSTAT_MSG_TYPE_SUCCESS;
5210 success.length = sizeof(success);
5211 success.flags = flags;
5212 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
5213 sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
5214 if (result != 0) {
5215 if (nstat_debug != 0) {
5216 printf("%s: could not enqueue success message %d\n",
5217 __func__, result);
5218 }
5219 nstat_stats.nstat_successmsgfailures += 1;
5220 }
5221 return result;
5222 }
5223
5224 static errno_t
nstat_control_send_event(nstat_control_state * state,nstat_src * src,u_int64_t event)5225 nstat_control_send_event(
5226 nstat_control_state *state,
5227 nstat_src *src,
5228 u_int64_t event)
5229 {
5230 errno_t result = 0;
5231 int failed = 0;
5232
5233 if (nstat_control_reporting_allowed(state, src, 0)) {
5234 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
5235 result = nstat_control_send_update(state, src, 0, event, 0, NULL);
5236 if (result != 0) {
5237 failed = 1;
5238 if (nstat_debug != 0) {
5239 printf("%s - nstat_control_send_event() %d\n", __func__, result);
5240 }
5241 }
5242 } else {
5243 if (nstat_debug != 0) {
5244 printf("%s - nstat_control_send_event() used when updates not supported\n", __func__);
5245 }
5246 }
5247 }
5248 return result;
5249 }
5250
5251 static errno_t
nstat_control_send_goodbye(nstat_control_state * state,nstat_src * src)5252 nstat_control_send_goodbye(
5253 nstat_control_state *state,
5254 nstat_src *src)
5255 {
5256 errno_t result = 0;
5257 int failed = 0;
5258 u_int16_t hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_FILTER;
5259
5260 if (nstat_control_reporting_allowed(state, src, (src->ns_reported)? NSTAT_FILTER_SUPPRESS_BORING_CLOSE: 0)) {
5261 hdr_flags = 0;
5262 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
5263 result = nstat_control_send_update(state, src, 0, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
5264 if (result != 0) {
5265 failed = 1;
5266 hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_DROP;
5267 if (nstat_debug != 0) {
5268 printf("%s - nstat_control_send_update() %d\n", __func__, result);
5269 }
5270 }
5271 } else {
5272 // send one last counts notification
5273 result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
5274 if (result != 0) {
5275 failed = 1;
5276 hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_DROP;
5277 if (nstat_debug != 0) {
5278 printf("%s - nstat_control_send_counts() %d\n", __func__, result);
5279 }
5280 }
5281
5282 // send a last description
5283 result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
5284 if (result != 0) {
5285 failed = 1;
5286 hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_DROP;
5287 if (nstat_debug != 0) {
5288 printf("%s - nstat_control_send_description() %d\n", __func__, result);
5289 }
5290 }
5291 }
5292 }
5293
5294 // send the source removed notification
5295 result = nstat_control_send_removed(state, src, hdr_flags);
5296 if (result != 0 && nstat_debug) {
5297 failed = 1;
5298 if (nstat_debug != 0) {
5299 printf("%s - nstat_control_send_removed() %d\n", __func__, result);
5300 }
5301 }
5302
5303 if (failed != 0) {
5304 nstat_stats.nstat_control_send_goodbye_failures++;
5305 }
5306
5307
5308 return result;
5309 }
5310
5311 static errno_t
nstat_flush_accumulated_msgs(nstat_control_state * state)5312 nstat_flush_accumulated_msgs(
5313 nstat_control_state *state)
5314 {
5315 errno_t result = 0;
5316 if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0) {
5317 mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
5318 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
5319 if (result != 0) {
5320 nstat_stats.nstat_flush_accumulated_msgs_failures++;
5321 if (nstat_debug != 0) {
5322 printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
5323 }
5324 mbuf_freem(state->ncs_accumulated);
5325 }
5326 state->ncs_accumulated = NULL;
5327 }
5328 return result;
5329 }
5330
/*
 * Append a message to the per-client accumulation mbuf so that several
 * messages can be delivered to userspace in one enqueue.  Flushes the
 * current mbuf when the new message does not fit, lazily allocates a
 * fresh single-segment mbuf, and falls back to a direct
 * ctl_enqueuedata() if accumulation fails.  Returns 0 on success or the
 * last allocation/enqueue error.
 */
static errno_t
nstat_accumulate_msg(
	nstat_control_state *state,
	nstat_msg_hdr *hdr,
	size_t length)
{
	assert(length <= MAX_NSTAT_MSG_HDR_LENGTH);

	// Not enough room left for this message: flush what we have so the
	// message starts in a fresh buffer.
	if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length) {
		// Will send the current mbuf
		nstat_flush_accumulated_msgs(state);
	}

	errno_t result = 0;

	// Lazily allocate the accumulation mbuf: one segment of up to
	// NSTAT_MAX_MSG_SIZE bytes, starting at length 0.
	if (state->ncs_accumulated == NULL) {
		unsigned int one = 1;
		if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0) {
			if (nstat_debug != 0) {
				printf("%s - mbuf_allocpacket failed\n", __func__);
			}
			result = ENOMEM;
		} else {
			mbuf_setlen(state->ncs_accumulated, 0);
		}
	}

	if (result == 0) {
		// Stamp the final length into the header, then append the whole
		// message after the data already accumulated.
		hdr->length = (u_int16_t)length;
		result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
		    length, hdr, MBUF_DONTWAIT);
	}

	if (result != 0) {
		// Accumulation failed: flush anything pending and try sending
		// this message directly on the control socket instead.
		nstat_flush_accumulated_msgs(state);
		if (nstat_debug != 0) {
			printf("%s - resorting to ctl_enqueuedata\n", __func__);
		}
		result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
	}

	if (result != 0) {
		nstat_stats.nstat_accumulate_msg_failures += 1;
	}

	return result;
}
5378
/*
 * Periodic housekeeping, run as a delayed thread call while any control
 * client is connected.  For clients that did not request counts since
 * the last pass (NSTAT_FLAG_REQCOUNTS clear), sources whose provider
 * reports them gone are sent a goodbye and collected on a local dead
 * list, which is released only after all locks are dropped.
 * Lock order: nstat_mtx is taken first, then each client's ncs_mtx.
 */
static void
nstat_idle_check(
	__unused thread_call_param_t p0,
	__unused thread_call_param_t p1)
{
	nstat_control_state *control;
	nstat_src *src, *tmpsrc;
	tailq_head_nstat_src dead_list;
	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);

	// Clear the pending deadline; it is re-armed below if clients remain.
	nstat_idle_time = 0;

	for (control = nstat_controls; control; control = control->ncs_next) {
		lck_mtx_lock(&control->ncs_mtx);
		if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS)) {
			TAILQ_FOREACH_SAFE(src, &control->ncs_src_queue, ns_control_link, tmpsrc)
			{
				if (src->provider->nstat_gone(src->cookie)) {
					errno_t result;

					// Pull it off the list
					TAILQ_REMOVE(&control->ncs_src_queue, src, ns_control_link);

					// NOTE(review): result is not examined here; send
					// failures are accounted inside the callee.
					result = nstat_control_send_goodbye(control, src);

					// Put this on the list to release later
					TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
				}
			}
		}
		control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
		lck_mtx_unlock(&control->ncs_mtx);
	}

	// Re-arm the 60-second idle check while clients remain connected.
	if (nstat_controls) {
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	/* Generate any system level reports, if needed */
	nstat_sysinfo_generate_report();

	// Release the sources now that we aren't holding lots of locks
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, FALSE);
	}

	nstat_prune_procdetails();
}
5433
5434 static void
nstat_control_register(void)5435 nstat_control_register(void)
5436 {
5437 // Register the control
5438 struct kern_ctl_reg nstat_control;
5439 bzero(&nstat_control, sizeof(nstat_control));
5440 strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
5441 nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
5442 nstat_control.ctl_sendsize = nstat_sendspace;
5443 nstat_control.ctl_recvsize = nstat_recvspace;
5444 nstat_control.ctl_connect = nstat_control_connect;
5445 nstat_control.ctl_disconnect = nstat_control_disconnect;
5446 nstat_control.ctl_send = nstat_control_send;
5447
5448 ctl_register(&nstat_control, &nstat_ctlref);
5449 }
5450
5451 static void
nstat_control_cleanup_source(nstat_control_state * state,struct nstat_src * src,boolean_t locked)5452 nstat_control_cleanup_source(
5453 nstat_control_state *state,
5454 struct nstat_src *src,
5455 boolean_t locked)
5456 {
5457 errno_t result;
5458
5459 if (state) {
5460 result = nstat_control_send_removed(state, src, 0);
5461 if (result != 0) {
5462 nstat_stats.nstat_control_cleanup_source_failures++;
5463 if (nstat_debug != 0) {
5464 printf("%s - nstat_control_send_removed() %d\n",
5465 __func__, result);
5466 }
5467 }
5468 }
5469 // Cleanup the source if we found it.
5470 src->provider->nstat_release(src->cookie, locked);
5471 kfree_type(struct nstat_src, src);
5472 }
5473
5474
5475 static bool
nstat_control_reporting_allowed(nstat_control_state * state,nstat_src * src,u_int64_t suppression_flags)5476 nstat_control_reporting_allowed(
5477 nstat_control_state *state,
5478 nstat_src *src,
5479 u_int64_t suppression_flags)
5480 {
5481 if (src->provider->nstat_reporting_allowed == NULL) {
5482 return TRUE;
5483 }
5484
5485 return src->provider->nstat_reporting_allowed(src->cookie,
5486 &state->ncs_provider_filters[src->provider->nstat_provider_id], suppression_flags);
5487 }
5488
5489
5490 static errno_t
nstat_control_connect(kern_ctl_ref kctl,struct sockaddr_ctl * sac,void ** uinfo)5491 nstat_control_connect(
5492 kern_ctl_ref kctl,
5493 struct sockaddr_ctl *sac,
5494 void **uinfo)
5495 {
5496 nstat_control_state *state = kalloc_type(nstat_control_state,
5497 Z_WAITOK | Z_ZERO);
5498 if (state == NULL) {
5499 return ENOMEM;
5500 }
5501
5502 lck_mtx_init(&state->ncs_mtx, &nstat_lck_grp, NULL);
5503 state->ncs_kctl = kctl;
5504 state->ncs_unit = sac->sc_unit;
5505 state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
5506 state->ncs_procdetails = nstat_retain_curprocdetails();
5507 *uinfo = state;
5508
5509 lck_mtx_lock(&nstat_mtx);
5510 state->ncs_next = nstat_controls;
5511 nstat_controls = state;
5512
5513 if (nstat_idle_time == 0) {
5514 clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
5515 thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
5516 }
5517
5518 merge_current_event_filters();
5519 lck_mtx_unlock(&nstat_mtx);
5520
5521 return 0;
5522 }
5523
/*
 * Kernel control disconnect handler: unlink the client state from the
 * global list, remove any provider watchers, release every source and
 * finally free the state.  Sources are moved onto a local list first so
 * they can be cleaned up without holding the client mutex.
 */
static errno_t
nstat_control_disconnect(
	__unused kern_ctl_ref kctl,
	__unused u_int32_t unit,
	void *uinfo)
{
	u_int32_t watching;
	nstat_control_state *state = (nstat_control_state*)uinfo;
	tailq_head_nstat_src cleanup_list;
	nstat_src *src;

	TAILQ_INIT(&cleanup_list);

	// pull it out of the global list of states
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state **statepp;
	for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next) {
		if (*statepp == state) {
			*statepp = state->ncs_next;
			break;
		}
	}
	merge_current_event_filters();
	lck_mtx_unlock(&nstat_mtx);

	lck_mtx_lock(&state->ncs_mtx);
	// Stop watching for sources
	nstat_provider *provider;
	watching = state->ncs_watching;
	state->ncs_watching = 0;
	// Walk providers until every watched bit has been handled.
	for (provider = nstat_providers; provider && watching; provider = provider->next) {
		if ((watching & (1 << provider->nstat_provider_id)) != 0) {
			watching &= ~(1 << provider->nstat_provider_id);
			provider->nstat_watcher_remove(state);
		}
	}

	// set cleanup flags
	state->ncs_flags |= NSTAT_FLAG_CLEANUP;

	// Drop any partially accumulated (unsent) message.
	if (state->ncs_accumulated) {
		mbuf_freem(state->ncs_accumulated);
		state->ncs_accumulated = NULL;
	}

	// Copy out the list of sources
	TAILQ_CONCAT(&cleanup_list, &state->ncs_src_queue, ns_control_link);
	lck_mtx_unlock(&state->ncs_mtx);

	// Release each source now that the client mutex is dropped.
	while ((src = TAILQ_FIRST(&cleanup_list))) {
		TAILQ_REMOVE(&cleanup_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, FALSE);
	}

	lck_mtx_destroy(&state->ncs_mtx, &nstat_lck_grp);
	nstat_release_procdetails(state->ncs_procdetails);
	kfree_type(struct nstat_control_state, state);

	return 0;
}
5584
5585 static nstat_src_ref_t
nstat_control_next_src_ref(nstat_control_state * state)5586 nstat_control_next_src_ref(
5587 nstat_control_state *state)
5588 {
5589 return ++state->ncs_next_srcref;
5590 }
5591
5592 static errno_t
nstat_control_send_counts(nstat_control_state * state,nstat_src * src,unsigned long long context,u_int16_t hdr_flags,int * gone)5593 nstat_control_send_counts(
5594 nstat_control_state *state,
5595 nstat_src *src,
5596 unsigned long long context,
5597 u_int16_t hdr_flags,
5598 int *gone)
5599 {
5600 nstat_msg_src_counts counts;
5601 errno_t result = 0;
5602
5603 /* Some providers may not have any counts to send */
5604 if (src->provider->nstat_counts == NULL) {
5605 return 0;
5606 }
5607
5608 bzero(&counts, sizeof(counts));
5609 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
5610 counts.hdr.length = sizeof(counts);
5611 counts.hdr.flags = hdr_flags;
5612 counts.hdr.context = context;
5613 counts.srcref = src->srcref;
5614 counts.event_flags = 0;
5615
5616 if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0) {
5617 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
5618 counts.counts.nstat_rxbytes == 0 &&
5619 counts.counts.nstat_txbytes == 0) {
5620 result = EAGAIN;
5621 } else {
5622 result = ctl_enqueuedata(state->ncs_kctl,
5623 state->ncs_unit, &counts, sizeof(counts),
5624 CTL_DATA_EOR);
5625 if (result != 0) {
5626 nstat_stats.nstat_sendcountfailures += 1;
5627 }
5628 }
5629 }
5630 return result;
5631 }
5632
5633 static errno_t
nstat_control_append_counts(nstat_control_state * state,nstat_src * src,int * gone)5634 nstat_control_append_counts(
5635 nstat_control_state *state,
5636 nstat_src *src,
5637 int *gone)
5638 {
5639 /* Some providers may not have any counts to send */
5640 if (!src->provider->nstat_counts) {
5641 return 0;
5642 }
5643
5644 nstat_msg_src_counts counts;
5645 bzero(&counts, sizeof(counts));
5646 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
5647 counts.hdr.length = sizeof(counts);
5648 counts.srcref = src->srcref;
5649 counts.event_flags = 0;
5650
5651 errno_t result = 0;
5652 result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
5653 if (result != 0) {
5654 return result;
5655 }
5656
5657 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
5658 counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0) {
5659 return EAGAIN;
5660 }
5661
5662 return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
5663 }
5664
/*
 * Build and enqueue a NSTAT_MSG_TYPE_SRC_DESC message for a source.
 * The provider-specific descriptor is copied into a freshly allocated
 * single-segment mbuf.  Returns EOPNOTSUPP if the provider has no
 * descriptor support, ENOMEM on allocation failure, or the provider /
 * enqueue error.  The mbuf is freed here on every failure path;
 * ctl_enqueuembuf owns it on success.
 */
static int
nstat_control_send_description(
	nstat_control_state *state,
	nstat_src *src,
	u_int64_t context,
	u_int16_t hdr_flags)
{
	// Provider doesn't support getting the descriptor? Done.
	if (src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) {
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t msg;
	unsigned int one = 1;
	size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	assert(size <= MAX_NSTAT_MSG_HDR_LENGTH);

	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
		return ENOMEM;
	}

	nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
	bzero(desc, size);
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	// Query the provider for the provider specific bits
	errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);

	if (result != 0) {
		mbuf_freem(msg);
		return result;
	}

	// Fill in the common message header after the provider data is in place.
	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = (u_int16_t)size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
	if (result != 0) {
		nstat_stats.nstat_descriptionfailures += 1;
		mbuf_freem(msg);
	}

	return result;
}
5717
/*
 * Append a NSTAT_MSG_TYPE_SRC_DESC message for a source to the client's
 * accumulation mbuf.  The message is staged in a stack buffer first, so
 * descriptors that would exceed 512 bytes (or providers without
 * descriptor support) are rejected with EOPNOTSUPP.
 */
static errno_t
nstat_control_append_description(
	nstat_control_state *state,
	nstat_src *src)
{
	size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	// 512 bounds the stack VLA below.
	if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) {
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size / sizeof(u_int64_t) + 1];      // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = (u_int16_t)size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t result = 0;
	// Fill in the description
	// Query the provider for the provider specific bits
	result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
	    src->provider->nstat_descriptor_length);
	if (result != 0) {
		return result;
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
5751
5752 static uint64_t
nstat_extension_flags_for_source(nstat_control_state * state,nstat_src * src)5753 nstat_extension_flags_for_source(
5754 nstat_control_state *state,
5755 nstat_src *src)
5756 {
5757 VERIFY(state != NULL & src != NULL);
5758 nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
5759
5760 return state->ncs_provider_filters[provider_id].npf_extensions;
5761 }
5762
/*
 * Build and enqueue an update message for a source: provider descriptor,
 * optional extension items, and counts, in a single mbuf.  The message
 * type is SRC_UPDATE, or SRC_EXTENDED_UPDATE when the client's filter
 * requested extensions the provider can supply.  Layout in the mbuf is
 * [update header | descriptor | (item hdr | payload)... ] with each
 * extension payload padded to 8 bytes.  On success the source is marked
 * as reported; on any failure the mbuf is freed here.
 */
static int
nstat_control_send_update(
	nstat_control_state *state,
	nstat_src *src,
	u_int64_t context,
	u_int64_t event,
	u_int16_t hdr_flags,
	int *gone)
{
	// Provider doesn't support getting the descriptor or counts? Done.
	if ((src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) &&
	    src->provider->nstat_counts == NULL) {
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t msg;
	unsigned int one = 1;
	size_t size = offsetof(nstat_msg_src_update, data) +
	    src->provider->nstat_descriptor_length;
	size_t total_extension_size = 0;
	u_int32_t num_extensions = 0;
	u_int64_t extension_mask = nstat_extension_flags_for_source(state, src);

	// Sizing pass: ask the provider (buf == NULL) how large each requested
	// extension is; drop mask bits for extensions it cannot supply.
	if ((extension_mask != 0) && (src->provider->nstat_copy_extension != NULL)) {
		uint32_t extension_id = 0;
		for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) {
			if ((extension_mask & (1ull << extension_id)) != 0) {
				size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, NULL, 0);
				if (extension_size == 0) {
					extension_mask &= ~(1ull << extension_id);
				} else {
					num_extensions++;
					// Payloads are 8-byte padded so following items stay aligned.
					total_extension_size += ROUNDUP64(extension_size);
				}
			}
		}
		size += total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions);
	}
	assert(size <= MAX_NSTAT_MSG_HDR_LENGTH);

	/*
	 * XXX Would be interesting to see how extended updates affect mbuf
	 * allocations, given the max segments defined as 1, one may get
	 * allocations with higher fragmentation.
	 */
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
		return ENOMEM;
	}

	nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
	bzero(desc, size);
	desc->hdr.context = context;
	desc->hdr.type = (num_extensions == 0) ? NSTAT_MSG_TYPE_SRC_UPDATE :
	    NSTAT_MSG_TYPE_SRC_EXTENDED_UPDATE;
	desc->hdr.length = (u_int16_t)size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = event;
	desc->provider = src->provider->nstat_provider_id;

	/*
	 * XXX The following two lines are only valid when max-segments is passed
	 * as one.
	 * Other computations with offset also depend on that being true.
	 * Be aware of that before making any modifications that changes that
	 * behavior.
	 */
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	errno_t result = 0;
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
		    src->provider->nstat_descriptor_length);
		if (result != 0) {
			mbuf_freem(msg);
			return result;
		}
	}

	// Fill pass: lay each extension (item header + padded payload) after
	// the descriptor, walking the same mask as the sizing pass above.
	if (num_extensions > 0) {
		nstat_msg_src_extended_item_hdr *p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)mbuf_data(msg) +
		    sizeof(nstat_msg_src_update_hdr) + src->provider->nstat_descriptor_length);
		uint32_t extension_id = 0;

		bzero(p_extension_hdr, total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions));

		for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) {
			if ((extension_mask & (1ull << extension_id)) != 0) {
				void *buf = (void *)(p_extension_hdr + 1);
				size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, buf, total_extension_size);
				if ((extension_size == 0) || (extension_size > total_extension_size)) {
					// Something has gone wrong. Instead of attempting to wind back the excess buffer space, mark it as unused
					p_extension_hdr->type = NSTAT_EXTENDED_UPDATE_TYPE_UNKNOWN;
					p_extension_hdr->length = total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * (num_extensions - 1));
					break;
				} else {
					// The extension may be of any size alignment, reported as such in the extension header,
					// but we pad to ensure that whatever comes next is suitably aligned
					p_extension_hdr->type = extension_id;
					p_extension_hdr->length = extension_size;
					extension_size = ROUNDUP64(extension_size);
					// total_extension_size now tracks the space remaining.
					total_extension_size -= extension_size;
					p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)buf + extension_size);
					num_extensions--;
				}
			}
		}
	}

	if (src->provider->nstat_counts) {
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result == 0) {
			// Suppress all-zero traffic reports when the client asked for that.
			if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
			    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
				result = EAGAIN;
			} else {
				result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
			}
		}
	}

	if (result != 0) {
		nstat_stats.nstat_srcupatefailures += 1;
		mbuf_freem(msg);
	} else {
		src->ns_reported = true;
	}

	return result;
}
5897
/*
 * Append an update message for a source (descriptor, optional extension
 * items, counts) to the client's accumulation mbuf.  Mirrors
 * nstat_control_send_update(), but stages the message in a stack buffer
 * so its size is capped at half of NSTAT_MAX_MSG_SIZE.  Marks the
 * source as reported on success.
 */
static errno_t
nstat_control_append_update(
	nstat_control_state *state,
	nstat_src *src,
	int *gone)
{
	// Provider supports neither descriptors nor counts? Done.
	if ((src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) &&
	    src->provider->nstat_counts == NULL) {
		return EOPNOTSUPP;
	}

	size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
	size_t total_extension_size = 0;
	u_int32_t num_extensions = 0;
	u_int64_t extension_mask = nstat_extension_flags_for_source(state, src);

	// Sizing pass: ask the provider (buf == NULL) how large each requested
	// extension is; drop mask bits for extensions it cannot supply.
	if ((extension_mask != 0) && (src->provider->nstat_copy_extension != NULL)) {
		uint32_t extension_id = 0;
		for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) {
			if ((extension_mask & (1ull << extension_id)) != 0) {
				size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, NULL, 0);
				if (extension_size == 0) {
					extension_mask &= ~(1ull << extension_id);
				} else {
					num_extensions++;
					// Payloads are 8-byte padded so following items stay aligned.
					total_extension_size += ROUNDUP64(extension_size);
				}
			}
		}
		size += total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions);
	}

	/*
	 * This kind of limits extensions.
	 * The optimization is around being able to deliver multiple
	 * of updates bundled together.
	 * Increasing the size runs the risk of too much stack usage.
	 * One could potentially changed the allocation below to be on heap.
	 * For now limiting it to half of NSTAT_MAX_MSG_SIZE.
	 */
	if (size > (NSTAT_MAX_MSG_SIZE >> 1)) {
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size / sizeof(u_int64_t) + 1];      // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer;
	desc->hdr.type = (num_extensions == 0) ? NSTAT_MSG_TYPE_SRC_UPDATE :
	    NSTAT_MSG_TYPE_SRC_EXTENDED_UPDATE;
	desc->hdr.length = (u_int16_t)size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t result = 0;
	// Fill in the description
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) {
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
		    src->provider->nstat_descriptor_length);
		if (result != 0) {
			nstat_stats.nstat_copy_descriptor_failures++;
			if (nstat_debug != 0) {
				printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
			}
			return result;
		}
	}

	// Fill pass: lay each extension (item header + padded payload) after
	// the descriptor, walking the same mask as the sizing pass above.
	if (num_extensions > 0) {
		nstat_msg_src_extended_item_hdr *p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)buffer +
		    sizeof(nstat_msg_src_update_hdr) + src->provider->nstat_descriptor_length);
		uint32_t extension_id = 0;
		bzero(p_extension_hdr, total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions));

		for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) {
			if ((extension_mask & (1ull << extension_id)) != 0) {
				void *buf = (void *)(p_extension_hdr + 1);
				size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, buf, total_extension_size);
				if ((extension_size == 0) || (extension_size > total_extension_size)) {
					// Something has gone wrong. Instead of attempting to wind back the excess buffer space, mark it as unused
					p_extension_hdr->type = NSTAT_EXTENDED_UPDATE_TYPE_UNKNOWN;
					p_extension_hdr->length = total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * (num_extensions - 1));
					break;
				} else {
					// NOTE(review): unlike nstat_control_send_update(), the header
					// length here is the padded size — confirm this asymmetry is intended.
					extension_size = ROUNDUP64(extension_size);
					p_extension_hdr->type = extension_id;
					p_extension_hdr->length = extension_size;
					// total_extension_size now tracks the space remaining.
					total_extension_size -= extension_size;
					p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)buf + extension_size);
					num_extensions--;
				}
			}
		}
	}

	if (src->provider->nstat_counts) {
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result != 0) {
			nstat_stats.nstat_provider_counts_failures++;
			if (nstat_debug != 0) {
				printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
			}
			return result;
		}

		// Suppress all-zero traffic reports when the client asked for that.
		if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
		    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) {
			return EAGAIN;
		}
	}

	result = nstat_accumulate_msg(state, &desc->hdr, size);
	if (result == 0) {
		src->ns_reported = true;
	}
	return result;
}
6019
/*
 * Enqueue a NSTAT_MSG_TYPE_SRC_REMOVED notification for a source.
 * hdr_flags conveys why the source went away (filtered, clean close, or
 * after a dropped message).  Failures are counted in
 * nstat_stats.nstat_msgremovedfailures and returned to the caller.
 */
static errno_t
nstat_control_send_removed(
	nstat_control_state *state,
	nstat_src *src,
	u_int16_t hdr_flags)
{
	nstat_msg_src_removed removed;
	errno_t result;

	// bzero (not an initializer) so struct padding is zeroed before the
	// message is copied out to userspace.
	bzero(&removed, sizeof(removed));
	removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
	removed.hdr.length = sizeof(removed);
	removed.hdr.context = 0;
	removed.hdr.flags = hdr_flags;
	removed.srcref = src->srcref;
	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
	    sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
	if (result != 0) {
		nstat_stats.nstat_msgremovedfailures += 1;
	}

	return result;
}
6043
/*
 * Handle a client request to add a single source.  The fixed request
 * header must fit in the first mbuf; the provider-specific parameter
 * (up to 2KB) may span mbufs, in which case it is copied into a
 * contiguous heap buffer before the provider lookup.  On lookup success
 * the cookie is sanitized and a source is created for this client; the
 * cookie is released if source creation fails.
 */
static errno_t
nstat_control_handle_add_request(
	nstat_control_state *state,
	mbuf_t m)
{
	errno_t result;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param)) {
		return EINVAL;
	}

	// Calculate the length of the parameter field
	ssize_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
	if (paramlength < 0 || paramlength > 2 * 1024) {
		return EINVAL;
	}

	nstat_provider *provider = NULL;
	nstat_provider_cookie_t cookie = NULL;
	nstat_msg_add_src_req *req = mbuf_data(m);
	if (mbuf_pkthdr_len(m) > mbuf_len(m)) {
		// parameter is too large, we need to make a contiguous copy
		void *data = (void *) kalloc_data(paramlength, Z_WAITOK);

		if (!data) {
			return ENOMEM;
		}
		result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
		if (result == 0) {
			result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
		}
		kfree_data(data, paramlength);
	} else {
		// Parameter is contiguous in the first mbuf; use it in place.
		result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
	}

	if (result != 0) {
		return result;
	}

	// sanitize cookie
	nstat_control_sanitize_cookie(state, provider->nstat_provider_id, cookie);

	result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
	if (result != 0) {
		// Source creation failed: drop the provider's cookie reference.
		provider->nstat_release(cookie, 0);
	}

	// Set the flag if a provider added a single source
	os_atomic_or(&state->ncs_added_src, (1 << provider->nstat_provider_id), relaxed);

	return result;
}
6098
6099 static errno_t
nstat_set_provider_filter(nstat_control_state * state,nstat_msg_add_all_srcs * req)6100 nstat_set_provider_filter(
6101 nstat_control_state *state,
6102 nstat_msg_add_all_srcs *req)
6103 {
6104 nstat_provider_id_t provider_id = req->provider;
6105
6106 u_int32_t prev_ncs_watching = os_atomic_or_orig(&state->ncs_watching, (1 << provider_id), relaxed);
6107
6108 // Reject it if the client is already watching all the sources.
6109 if ((prev_ncs_watching & (1 << provider_id)) != 0) {
6110 return EALREADY;
6111 }
6112
6113 // Reject it if any single source has already been added.
6114 u_int32_t ncs_added_src = os_atomic_load(&state->ncs_added_src, relaxed);
6115 if ((ncs_added_src & (1 << provider_id)) != 0) {
6116 return EALREADY;
6117 }
6118
6119 state->ncs_watching |= (1 << provider_id);
6120 state->ncs_provider_filters[provider_id].npf_events = req->events;
6121 state->ncs_provider_filters[provider_id].npf_flags = req->filter;
6122 state->ncs_provider_filters[provider_id].npf_pid = req->target_pid;
6123 uuid_copy(state->ncs_provider_filters[provider_id].npf_uuid, req->target_uuid);
6124
6125 // The extensions should be populated by a more direct mechanism
6126 // Using the top 32 bits of the filter flags reduces the namespace of both,
6127 // but is a convenient workaround that avoids ntstat.h changes that would require rebuild of all clients
6128 // Extensions give away additional privacy information and are subject to unconditional privilege check,
6129 // unconstrained by the value of nstat_privcheck
6130 if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) == 0) {
6131 state->ncs_provider_filters[provider_id].npf_extensions = (req->filter >> NSTAT_FILTER_ALLOWED_EXTENSIONS_SHIFT) & NSTAT_EXTENDED_UPDATE_FLAG_MASK;
6132 }
6133 return 0;
6134 }
6135
/*
 * Handle NSTAT_MSG_TYPE_ADD_ALL_SRCS: register this client as a watcher of
 * every source a provider has, now and in the future.  Performs privilege
 * checks, then hands the request to the provider's watcher_add callback so
 * existing and newly-created entries are captured atomically.
 */
static errno_t
nstat_control_handle_add_all(
	nstat_control_state *state,
	mbuf_t m)
{
	errno_t result = 0;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs)) {
		return EINVAL;
	}

	nstat_msg_add_all_srcs *req = mbuf_data(m);
	if (req->provider > NSTAT_PROVIDER_LAST) {
		return ENOENT;
	}

	nstat_provider *provider = nstat_find_provider_by_id(req->provider);

	if (!provider) {
		return ENOENT;
	}
	// Providers without watcher support cannot satisfy an "add all" request
	if (provider->nstat_watcher_add == NULL) {
		return ENOTSUP;
	}

	// Traditionally the nstat_privcheck value allowed for easy access to ntstat on the Mac.
	// Keep backwards compatibility while being more stringent with recent providers
	if ((nstat_privcheck != 0) || (req->provider == NSTAT_PROVIDER_UDP_SUBFLOW) || (req->provider == NSTAT_PROVIDER_CONN_USERLAND)) {
		result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0) {
			return result;
		}
	}

	lck_mtx_lock(&state->ncs_mtx);
	if (req->filter & NSTAT_FILTER_SUPPRESS_SRC_ADDED) {
		// Suppression of source messages implicitly requires the use of update messages
		state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
	}
	lck_mtx_unlock(&state->ncs_mtx);

	// rdar://problem/30301300 Different providers require different synchronization
	// to ensure that a new entry does not get double counted due to being added prior
	// to all current provider entries being added. Hence pass the provider the details
	// in the original request for this to be applied atomically

	result = provider->nstat_watcher_add(state, req);

	if (result == 0) {
		// Acknowledge the request to the client
		nstat_enqueue_success(req->hdr.context, state, 0);
	}

	return result;
}
6192
/*
 * Create a new nstat_src binding (provider, cookie) to this client, pick an
 * unused source reference, optionally notify the client with a SRC_ADDED
 * message (unless suppressed by the provider filter), and insert the source
 * into the client's source list.  On failure the caller remains responsible
 * for releasing the provider cookie.
 */
static errno_t
nstat_control_source_add(
	u_int64_t context,
	nstat_control_state *state,
	nstat_provider *provider,
	nstat_provider_cookie_t cookie)
{
	// Fill out source added message if appropriate
	mbuf_t msg = NULL;
	nstat_src_ref_t *srcrefp = NULL;

	u_int64_t provider_filter_flags =
	    state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
	// SRC_ADDED notifications may be suppressed by the provider-wide filter
	boolean_t tell_user =
	    ((provider_filter_flags & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
	// Seed the per-source filter from the provider-wide filter settings
	u_int32_t src_filter =
	    (provider_filter_flags & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
	    ? NSTAT_FILTER_NOZEROBYTES : 0;

	if (provider_filter_flags & NSTAT_FILTER_TCP_NO_EARLY_CLOSE) {
		src_filter |= NSTAT_FILTER_TCP_NO_EARLY_CLOSE;
	}

	if (tell_user) {
		unsigned int one = 1;

		// Pre-allocate the notification so later failures are limited
		if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
		    &one, &msg) != 0) {
			return ENOMEM;
		}

		mbuf_setlen(msg, sizeof(nstat_msg_src_added));
		mbuf_pkthdr_setlen(msg, mbuf_len(msg));
		nstat_msg_src_added *add = mbuf_data(msg);
		bzero(add, sizeof(*add));
		add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
		assert(mbuf_len(msg) <= MAX_NSTAT_MSG_HDR_LENGTH);
		add->hdr.length = (u_int16_t)mbuf_len(msg);
		add->hdr.context = context;
		add->provider = provider->nstat_provider_id;
		// srcref is filled in below once the reference has been assigned
		srcrefp = &add->srcref;
	}

	// Allocate storage for the source
	nstat_src *src = kalloc_type(struct nstat_src, Z_WAITOK);
	if (src == NULL) {
		if (msg) {
			mbuf_freem(msg);
		}
		return ENOMEM;
	}

	// Fill in the source, including picking an unused source ref
	lck_mtx_lock(&state->ncs_mtx);

	src->srcref = nstat_control_next_src_ref(state);
	if (srcrefp) {
		*srcrefp = src->srcref;
	}

	// Bail out if the control is tearing down or source refs are exhausted
	if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID) {
		lck_mtx_unlock(&state->ncs_mtx);
		kfree_type(struct nstat_src, src);
		if (msg) {
			mbuf_freem(msg);
		}
		return EINVAL;
	}
	src->provider = provider;
	src->cookie = cookie;
	src->filter = src_filter;
	src->seq = 0;

	if (msg) {
		// send the source added message if appropriate
		errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
		    CTL_DATA_EOR);
		if (result != 0) {
			nstat_stats.nstat_srcaddedfailures += 1;
			lck_mtx_unlock(&state->ncs_mtx);
			kfree_type(struct nstat_src, src);
			// enqueue failed, so the mbuf is still ours to free
			mbuf_freem(msg);
			return result;
		}
	}
	// Put the source in the list
	TAILQ_INSERT_HEAD(&state->ncs_src_queue, src, ns_control_link);
	src->ns_control = state;

	lck_mtx_unlock(&state->ncs_mtx);

	return 0;
}
6286
6287 static errno_t
nstat_control_handle_remove_request(nstat_control_state * state,mbuf_t m)6288 nstat_control_handle_remove_request(
6289 nstat_control_state *state,
6290 mbuf_t m)
6291 {
6292 nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
6293 nstat_src *src;
6294
6295 if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0) {
6296 return EINVAL;
6297 }
6298
6299 lck_mtx_lock(&state->ncs_mtx);
6300
6301 // Remove this source as we look for it
6302 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
6303 {
6304 if (src->srcref == srcref) {
6305 break;
6306 }
6307 }
6308 if (src) {
6309 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
6310 }
6311
6312 lck_mtx_unlock(&state->ncs_mtx);
6313
6314 if (src) {
6315 nstat_control_cleanup_source(state, src, FALSE);
6316 }
6317
6318 return src ? 0 : ENOENT;
6319 }
6320
/*
 * Handle NSTAT_MSG_TYPE_QUERY_SRC: enqueue count messages for the single
 * source named by req.srcref, or for every source when it is
 * NSTAT_SRC_REF_ALL.  "Query all" requests may be served in partial runs of
 * QUERY_CONTINUATION_SRC_COUNT sources, resumed via the continuation
 * context/sequence tracked in state (see nstat_control_begin_query /
 * nstat_control_end_query).  Sources discovered to be gone are unlinked and
 * cleaned up after the lock is released.
 */
static errno_t
nstat_control_handle_query_request(
	nstat_control_state *state,
	mbuf_t m)
{
	// TBD: handle this from another thread so we can enqueue a lot of data
	// As written, if a client requests query all, this function will be
	// called from their send of the request message. We will attempt to write
	// responses and succeed until the buffer fills up. Since the clients thread
	// is blocked on send, it won't be reading unless the client has two threads
	// using this socket, one for read and one for write. Two threads probably
	// won't work with this code anyhow since we don't have proper locking in
	// place yet.
	tailq_head_nstat_src dead_list;
	errno_t result = ENOENT;
	nstat_msg_query_src_req req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}

	TAILQ_INIT(&dead_list);
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	lck_mtx_lock(&state->ncs_mtx);

	if (all_srcs) {
		state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
	}
	nstat_src *src, *tmpsrc;
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);


	TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
	{
		int gone = 0;

		// XXX ignore IFACE types?
		if (all_srcs || src->srcref == req.srcref) {
			// On a continued partial query, skip sources already reported
			// in this sequence (src->seq == ncs_seq)
			if (nstat_control_reporting_allowed(state, src, 0)
			    && (!partial || !all_srcs || src->seq != state->ncs_seq)) {
				if (all_srcs &&
				    (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0) {
					result = nstat_control_append_counts(state, src, &gone);
				} else {
					result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
				}

				if (ENOMEM == result || ENOBUFS == result) {
					/*
					 * If the counts message failed to
					 * enqueue then we should clear our flag so
					 * that a client doesn't miss anything on
					 * idle cleanup. We skip the "gone"
					 * processing in the hope that we may
					 * catch it another time.
					 */
					state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
					break;
				}
				if (partial) {
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
		}

		if (gone) {
			// send one last descriptor message so client may see last state
			// If we can't send the notification now, it
			// will be sent in the idle cleanup.
			result = nstat_control_send_description(state, src, 0, 0);
			if (result != 0) {
				nstat_stats.nstat_control_send_description_failures++;
				if (nstat_debug != 0) {
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
				}
				state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
				break;
			}

			// pull src out of the list
			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}

		if (all_srcs) {
			// Bound the amount of work done per request
			if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
				break;
			}
		} else if (req.srcref == src->srcref) {
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL) {
		// src is NULL here when the loop ran to completion (TAILQ_FOREACH_SAFE)
		flags = nstat_control_end_query(state, src, partial);
	}

	lck_mtx_unlock(&state->ncs_mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result) {
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	// Clean up sources that disappeared, outside the state lock
	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
6453
/*
 * Handle NSTAT_MSG_TYPE_GET_SRC_DESC: enqueue descriptor messages for one
 * source, or for all sources when req.srcref is NSTAT_SRC_REF_ALL.  Like
 * query, "all" requests may be served in partial continuation runs tracked
 * by ncs_context/ncs_seq.
 */
static errno_t
nstat_control_handle_get_src_description(
	nstat_control_state *state,
	mbuf_t m)
{
	nstat_msg_get_src_description req;
	errno_t result = ENOENT;
	nstat_src *src;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}

	lck_mtx_lock(&state->ncs_mtx);
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
	{
		if (all_srcs || src->srcref == req.srcref) {
			// On a continued partial query, skip sources already reported
			// in this sequence (src->seq == ncs_seq)
			if (nstat_control_reporting_allowed(state, src, 0)
			    && (!all_srcs || !partial || src->seq != state->ncs_seq)) {
				if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs) {
					result = nstat_control_append_description(state, src);
				} else {
					result = nstat_control_send_description(state, src, req.hdr.context, 0);
				}

				if (ENOMEM == result || ENOBUFS == result) {
					/*
					 * If the description message failed to
					 * enqueue then we give up for now.
					 */
					break;
				}
				if (partial) {
					/*
					 * Note, we skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
					// Bound the amount of work done per request
					if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
						break;
					}
				}
			}

			if (!all_srcs) {
				break;
			}
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL) {
		// src is NULL here when the loop ran to completion
		flags = nstat_control_end_query(state, src, partial);
	}

	lck_mtx_unlock(&state->ncs_mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result) {
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	return result;
}
6534
6535 static errno_t
nstat_control_handle_set_filter(nstat_control_state * state,mbuf_t m)6536 nstat_control_handle_set_filter(
6537 nstat_control_state *state,
6538 mbuf_t m)
6539 {
6540 nstat_msg_set_filter req;
6541 nstat_src *src;
6542
6543 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
6544 return EINVAL;
6545 }
6546 if (req.srcref == NSTAT_SRC_REF_ALL ||
6547 req.srcref == NSTAT_SRC_REF_INVALID) {
6548 return EINVAL;
6549 }
6550
6551 lck_mtx_lock(&state->ncs_mtx);
6552 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
6553 {
6554 if (req.srcref == src->srcref) {
6555 src->filter = req.filter;
6556 break;
6557 }
6558 }
6559 lck_mtx_unlock(&state->ncs_mtx);
6560 if (src == NULL) {
6561 return ENOENT;
6562 }
6563
6564 return 0;
6565 }
6566
6567 static void
nstat_send_error(nstat_control_state * state,u_int64_t context,u_int32_t error)6568 nstat_send_error(
6569 nstat_control_state *state,
6570 u_int64_t context,
6571 u_int32_t error)
6572 {
6573 errno_t result;
6574 struct nstat_msg_error err;
6575
6576 bzero(&err, sizeof(err));
6577 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
6578 err.hdr.length = sizeof(err);
6579 err.hdr.context = context;
6580 err.error = error;
6581
6582 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
6583 sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
6584 if (result != 0) {
6585 nstat_stats.nstat_msgerrorfailures++;
6586 }
6587 }
6588
6589 static boolean_t
nstat_control_begin_query(nstat_control_state * state,const nstat_msg_hdr * hdrp)6590 nstat_control_begin_query(
6591 nstat_control_state *state,
6592 const nstat_msg_hdr *hdrp)
6593 {
6594 boolean_t partial = FALSE;
6595
6596 if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) {
6597 /* A partial query all has been requested. */
6598 partial = TRUE;
6599
6600 if (state->ncs_context != hdrp->context) {
6601 if (state->ncs_context != 0) {
6602 nstat_send_error(state, state->ncs_context, EAGAIN);
6603 }
6604
6605 /* Initialize state for a partial query all. */
6606 state->ncs_context = hdrp->context;
6607 state->ncs_seq++;
6608 }
6609 }
6610
6611 return partial;
6612 }
6613
6614 static u_int16_t
nstat_control_end_query(nstat_control_state * state,nstat_src * last_src,boolean_t partial)6615 nstat_control_end_query(
6616 nstat_control_state *state,
6617 nstat_src *last_src,
6618 boolean_t partial)
6619 {
6620 u_int16_t flags = 0;
6621
6622 if (last_src == NULL || !partial) {
6623 /*
6624 * We iterated through the entire srcs list or exited early
6625 * from the loop when a partial update was not requested (an
6626 * error occurred), so clear context to indicate internally
6627 * that the query is finished.
6628 */
6629 state->ncs_context = 0;
6630 } else {
6631 /*
6632 * Indicate to userlevel to make another partial request as
6633 * there are still sources left to be reported.
6634 */
6635 flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
6636 }
6637
6638 return flags;
6639 }
6640
/*
 * Handle NSTAT_MSG_TYPE_GET_UPDATE: enqueue combined update messages
 * (counts + descriptor) for one source or for all sources.  Marks the
 * client as update-capable.  "All" requests use the same partial
 * continuation machinery as query; sources found to be gone are unlinked
 * and cleaned up after the lock is dropped.
 */
static errno_t
nstat_control_handle_get_update(
	nstat_control_state *state,
	mbuf_t m)
{
	nstat_msg_query_src_req req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
		return EINVAL;
	}

	lck_mtx_lock(&state->ncs_mtx);

	// Requesting updates implies the client understands update messages
	state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;

	errno_t result = ENOENT;
	nstat_src *src, *tmpsrc;
	tailq_head_nstat_src dead_list;
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
	TAILQ_INIT(&dead_list);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc) {
		int gone = 0;
		if (all_srcs) {
			// Check to see if we should handle this source or if we're still skipping to find where to continue
			if ((FALSE == partial || src->seq != state->ncs_seq)) {
				// Previously-reported sources may suppress "boring" polls
				u_int64_t suppression_flags = (src->ns_reported)? NSTAT_FILTER_SUPPRESS_BORING_POLL: 0;
				if (nstat_control_reporting_allowed(state, src, suppression_flags)) {
					result = nstat_control_append_update(state, src, &gone);
					if (ENOMEM == result || ENOBUFS == result) {
						/*
						 * If the update message failed to
						 * enqueue then give up.
						 */
						break;
					}
					if (partial) {
						/*
						 * We skip over hard errors and
						 * filtered sources.
						 */
						src->seq = state->ncs_seq;
						src_count++;
					}
				}
			}
		} else if (src->srcref == req.srcref) {
			if (nstat_control_reporting_allowed(state, src, 0)) {
				result = nstat_control_send_update(state, src, req.hdr.context, 0, 0, &gone);
			}
		}

		if (gone) {
			// pull src out of the list
			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}

		if (!all_srcs && req.srcref == src->srcref) {
			break;
		}
		// Bound the amount of work done per request
		if (src_count >= QUERY_CONTINUATION_SRC_COUNT) {
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);


	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL) {
		// src is NULL here when the loop ran to completion (TAILQ_FOREACH_SAFE)
		flags = nstat_control_end_query(state, src, partial);
	}

	lck_mtx_unlock(&state->ncs_mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result) {
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	while ((src = TAILQ_FIRST(&dead_list))) {
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
6742
6743 static errno_t
nstat_control_handle_subscribe_sysinfo(nstat_control_state * state)6744 nstat_control_handle_subscribe_sysinfo(
6745 nstat_control_state *state)
6746 {
6747 errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
6748
6749 if (result != 0) {
6750 return result;
6751 }
6752
6753 lck_mtx_lock(&state->ncs_mtx);
6754 state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
6755 lck_mtx_unlock(&state->ncs_mtx);
6756
6757 return 0;
6758 }
6759
/*
 * Kernel control send handler: dispatch one client request message.
 * Always consumes m.  On a handler failure, the original request is echoed
 * back to the client prepended with an nstat_msg_error header; if that
 * cannot be enqueued, a bare error message is sent instead.
 */
static errno_t
nstat_control_send(
	kern_ctl_ref kctl,
	u_int32_t unit,
	void *uinfo,
	mbuf_t m,
	__unused int flags)
{
	nstat_control_state *state = (nstat_control_state*)uinfo;
	struct nstat_msg_hdr *hdr;
	struct nstat_msg_hdr storage;
	errno_t result = 0;

	if (mbuf_pkthdr_len(m) < sizeof(*hdr)) {
		// Is this the right thing to do?
		mbuf_freem(m);
		return EINVAL;
	}

	if (mbuf_len(m) >= sizeof(*hdr)) {
		// Header is contiguous in the first mbuf; use it in place
		hdr = mbuf_data(m);
	} else {
		// Header spans mbufs; work from a local copy
		mbuf_copydata(m, 0, sizeof(storage), &storage);
		hdr = &storage;
	}

	// Legacy clients may not set the length
	// Those clients are likely not setting the flags either
	// Fix everything up so old clients continue to work
	if (hdr->length != mbuf_pkthdr_len(m)) {
		hdr->flags = 0;
		assert(mbuf_pkthdr_len(m) <= MAX_NSTAT_MSG_HDR_LENGTH);
		hdr->length = (u_int16_t)mbuf_pkthdr_len(m);
		if (hdr == &storage) {
			// Write the fixed-up header back into the mbuf
			mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
		}
	}

	switch (hdr->type) {
	case NSTAT_MSG_TYPE_ADD_SRC:
		result = nstat_control_handle_add_request(state, m);
		break;

	case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
		result = nstat_control_handle_add_all(state, m);
		break;

	case NSTAT_MSG_TYPE_REM_SRC:
		result = nstat_control_handle_remove_request(state, m);
		break;

	case NSTAT_MSG_TYPE_QUERY_SRC:
		result = nstat_control_handle_query_request(state, m);
		break;

	case NSTAT_MSG_TYPE_GET_SRC_DESC:
		result = nstat_control_handle_get_src_description(state, m);
		break;

	case NSTAT_MSG_TYPE_SET_FILTER:
		result = nstat_control_handle_set_filter(state, m);
		break;

	case NSTAT_MSG_TYPE_GET_UPDATE:
		result = nstat_control_handle_get_update(state, m);
		break;

	case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
		result = nstat_control_handle_subscribe_sysinfo(state);
		break;

	default:
		result = EINVAL;
		break;
	}

	if (result != 0) {
		struct nstat_msg_error err;

		bzero(&err, sizeof(err));
		err.hdr.type = NSTAT_MSG_TYPE_ERROR;
		// The error reply carries the original request as its payload
		err.hdr.length = (u_int16_t)(sizeof(err) + mbuf_pkthdr_len(m));
		err.hdr.context = hdr->context;
		err.error = result;

		if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
		    mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0) {
			result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0) {
				// enqueue failed; the mbuf is still ours to free
				mbuf_freem(m);
			}
			// Either enqueued or freed above — don't touch m again
			m = NULL;
		}

		if (result != 0) {
			// Unable to prepend the error to the request - just send the error
			err.hdr.length = sizeof(err);
			result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
			    CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0) {
				nstat_stats.nstat_msgerrorfailures += 1;
			}
		}
		nstat_stats.nstat_handle_msg_failures += 1;
	}

	if (m) {
		// NOTE(review): assumes mbuf_prepend leaves m usable (or NULL) on
		// failure — confirm against the mbuf KPI before relying on it
		mbuf_freem(m);
	}

	return result;
}
6872
6873
6874 /* Performs interface matching based on NSTAT_IFNET_IS… filter flags provided by an external caller */
6875 static bool
nstat_interface_matches_filter_flag(uint32_t filter_flags,struct ifnet * ifp)6876 nstat_interface_matches_filter_flag(uint32_t filter_flags, struct ifnet *ifp)
6877 {
6878 bool result = false;
6879
6880 if (ifp) {
6881 uint32_t flag_mask = (NSTAT_FILTER_IFNET_FLAGS & ~(NSTAT_IFNET_IS_NON_LOCAL | NSTAT_IFNET_IS_LOCAL));
6882 filter_flags &= flag_mask;
6883
6884 uint32_t flags = nstat_ifnet_to_flags_extended(ifp);
6885 if (filter_flags & flags) {
6886 result = true;
6887 }
6888 }
6889 return result;
6890 }
6891
6892
6893 static int
tcp_progress_indicators_for_interface(unsigned int ifindex,uint64_t recentflow_maxduration,uint32_t filter_flags,struct xtcpprogress_indicators * indicators)6894 tcp_progress_indicators_for_interface(unsigned int ifindex, uint64_t recentflow_maxduration, uint32_t filter_flags, struct xtcpprogress_indicators *indicators)
6895 {
6896 int error = 0;
6897 struct inpcb *inp;
6898 uint64_t min_recent_start_time;
6899 #if SKYWALK
6900 struct nstat_tu_shadow *shad;
6901 #endif /* SKYWALK */
6902
6903 min_recent_start_time = mach_continuous_time() - recentflow_maxduration;
6904 bzero(indicators, sizeof(*indicators));
6905
6906 #if NSTAT_DEBUG
6907 /* interface index -1 may be passed in to only match against the filters specified in the flags */
6908 if (ifindex < UINT_MAX) {
6909 printf("%s - for interface index %u with flags %x\n", __func__, ifindex, filter_flags);
6910 } else {
6911 printf("%s - for matching interface with flags %x\n", __func__, filter_flags);
6912 }
6913 #endif
6914
6915 lck_rw_lock_shared(&tcbinfo.ipi_lock);
6916 /*
6917 * For progress indicators we don't need to special case TCP to collect time wait connections
6918 */
6919 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
6920 {
6921 struct tcpcb *tp = intotcpcb(inp);
6922 /* radar://57100452
6923 * The conditional logic implemented below performs an *inclusive* match based on the desired interface index in addition to any filter values.
6924 * While the general expectation is that only one criteria normally is used for queries, the capability exists satisfy any eccentric future needs.
6925 */
6926 if (tp &&
6927 inp->inp_state != INPCB_STATE_DEAD &&
6928 inp->inp_last_outifp &&
6929 /* matches the given interface index, or against any provided filter flags */
6930 (((inp->inp_last_outifp->if_index == ifindex) ||
6931 nstat_interface_matches_filter_flag(filter_flags, inp->inp_last_outifp)) &&
6932 /* perform flow state matching based any provided filter flags */
6933 (((filter_flags & (NSTAT_IFNET_IS_NON_LOCAL | NSTAT_IFNET_IS_LOCAL)) == 0) ||
6934 ((filter_flags & NSTAT_IFNET_IS_NON_LOCAL) && !(tp->t_flags & TF_LOCAL)) ||
6935 ((filter_flags & NSTAT_IFNET_IS_LOCAL) && (tp->t_flags & TF_LOCAL))))) {
6936 struct tcp_conn_status connstatus;
6937 #if NSTAT_DEBUG
6938 printf("%s - *matched non-Skywalk* [filter match: %d]\n", __func__, nstat_interface_matches_filter_flag(filter_flags, inp->inp_last_outifp));
6939 #endif
6940 indicators->xp_numflows++;
6941 tcp_get_connectivity_status(tp, &connstatus);
6942 if (connstatus.write_probe_failed) {
6943 indicators->xp_write_probe_fails++;
6944 }
6945 if (connstatus.read_probe_failed) {
6946 indicators->xp_read_probe_fails++;
6947 }
6948 if (connstatus.conn_probe_failed) {
6949 indicators->xp_conn_probe_fails++;
6950 }
6951 if (inp->inp_start_timestamp > min_recent_start_time) {
6952 uint64_t flow_count;
6953
6954 indicators->xp_recentflows++;
6955 flow_count = os_atomic_load(&inp->inp_stat->rxbytes, relaxed);
6956 indicators->xp_recentflows_rxbytes += flow_count;
6957 flow_count = os_atomic_load(&inp->inp_stat->txbytes, relaxed);
6958 indicators->xp_recentflows_txbytes += flow_count;
6959
6960 indicators->xp_recentflows_rxooo += tp->t_stat.rxoutoforderbytes;
6961 indicators->xp_recentflows_rxdup += tp->t_stat.rxduplicatebytes;
6962 indicators->xp_recentflows_retx += tp->t_stat.txretransmitbytes;
6963 if (tp->snd_max - tp->snd_una) {
6964 indicators->xp_recentflows_unacked++;
6965 }
6966 }
6967 }
6968 }
6969 lck_rw_done(&tcbinfo.ipi_lock);
6970
6971 #if SKYWALK
6972 lck_mtx_lock(&nstat_mtx);
6973
6974 TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
6975 assert(shad->shad_magic == TU_SHADOW_MAGIC);
6976
6977 if ((shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) && (shad->shad_live)) {
6978 u_int16_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6979 u_int32_t extended_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6980 if (filter_flags != 0) {
6981 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL);
6982 error = (result)? 0 : EIO;
6983 if (error) {
6984 printf("%s - nstat get ifflags %d\n", __func__, error);
6985 continue;
6986 }
6987 extended_ifflags = extend_ifnet_flags(ifflags);
6988
6989 if ((extended_ifflags & filter_flags) == 0) {
6990 continue;
6991 }
6992 // Skywalk locality flags are not yet in place, see <rdar://problem/35607563>
6993 // Instead of checking flags with a simple logical and, check the inverse.
6994 // This allows for default action of fallthrough if the flags are not set.
6995 if ((filter_flags & NSTAT_IFNET_IS_NON_LOCAL) && (ifflags & NSTAT_IFNET_IS_LOCAL)) {
6996 continue;
6997 }
6998 if ((filter_flags & NSTAT_IFNET_IS_LOCAL) && (ifflags & NSTAT_IFNET_IS_NON_LOCAL)) {
6999 continue;
7000 }
7001 }
7002
7003 nstat_progress_digest digest;
7004 bzero(&digest, sizeof(digest));
7005 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &digest, NULL, NULL);
7006
7007 error = (result)? 0 : EIO;
7008 if (error) {
7009 printf("%s - nstat get progressdigest returned %d\n", __func__, error);
7010 continue;
7011 }
7012 if ((digest.ifindex == (u_int32_t)ifindex) ||
7013 (filter_flags & extended_ifflags)) {
7014 #if NSTAT_DEBUG
7015 printf("%s - *matched Skywalk* [filter match: %x %x]\n", __func__, filter_flags, extended_flags);
7016 #endif
7017 indicators->xp_numflows++;
7018 if (digest.connstatus.write_probe_failed) {
7019 indicators->xp_write_probe_fails++;
7020 }
7021 if (digest.connstatus.read_probe_failed) {
7022 indicators->xp_read_probe_fails++;
7023 }
7024 if (digest.connstatus.conn_probe_failed) {
7025 indicators->xp_conn_probe_fails++;
7026 }
7027 if (shad->shad_start_timestamp > min_recent_start_time) {
7028 indicators->xp_recentflows++;
7029 indicators->xp_recentflows_rxbytes += digest.rxbytes;
7030 indicators->xp_recentflows_txbytes += digest.txbytes;
7031 indicators->xp_recentflows_rxooo += digest.rxoutoforderbytes;
7032 indicators->xp_recentflows_rxdup += digest.rxduplicatebytes;
7033 indicators->xp_recentflows_retx += digest.txretransmit;
7034 if (digest.txunacked) {
7035 indicators->xp_recentflows_unacked++;
7036 }
7037 }
7038 }
7039 }
7040 }
7041
7042 lck_mtx_unlock(&nstat_mtx);
7043
7044 #endif /* SKYWALK */
7045 return error;
7046 }
7047
7048
7049 static int
tcp_progress_probe_enable_for_interface(unsigned int ifindex,uint32_t filter_flags,uint32_t enable_flags)7050 tcp_progress_probe_enable_for_interface(unsigned int ifindex, uint32_t filter_flags, uint32_t enable_flags)
7051 {
7052 int error = 0;
7053 struct ifnet *ifp;
7054
7055 #if NSTAT_DEBUG
7056 printf("%s - for interface index %u with flags %d\n", __func__, ifindex, filter_flags);
7057 #endif
7058
7059 ifnet_head_lock_shared();
7060 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
7061 {
7062 if ((ifp->if_index == ifindex) ||
7063 nstat_interface_matches_filter_flag(filter_flags, ifp)) {
7064 #if NSTAT_DEBUG
7065 printf("%s - *matched* interface index %d, enable: %d\n", __func__, ifp->if_index, enable_flags);
7066 #endif
7067 error = if_probe_connectivity(ifp, enable_flags);
7068 if (error) {
7069 printf("%s (%d) - nstat set tcp probe %d for interface index %d\n", __func__, error, enable_flags, ifp->if_index);
7070 }
7071 }
7072 }
7073 ifnet_head_done();
7074
7075 return error;
7076 }
7077
7078
7079 __private_extern__ int
ntstat_tcp_progress_indicators(struct sysctl_req * req)7080 ntstat_tcp_progress_indicators(struct sysctl_req *req)
7081 {
7082 struct xtcpprogress_indicators indicators = {};
7083 int error = 0;
7084 struct tcpprogressreq requested;
7085
7086 if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
7087 return EACCES;
7088 }
7089 if (req->newptr == USER_ADDR_NULL) {
7090 return EINVAL;
7091 }
7092 if (req->newlen < sizeof(req)) {
7093 return EINVAL;
7094 }
7095 error = SYSCTL_IN(req, &requested, sizeof(requested));
7096 if (error != 0) {
7097 return error;
7098 }
7099 error = tcp_progress_indicators_for_interface((unsigned int)requested.ifindex, requested.recentflow_maxduration, (uint32_t)requested.filter_flags, &indicators);
7100 if (error != 0) {
7101 return error;
7102 }
7103 error = SYSCTL_OUT(req, &indicators, sizeof(indicators));
7104
7105 return error;
7106 }
7107
7108
7109 __private_extern__ int
ntstat_tcp_progress_enable(struct sysctl_req * req)7110 ntstat_tcp_progress_enable(struct sysctl_req *req)
7111 {
7112 int error = 0;
7113 struct tcpprobereq requested;
7114
7115 if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
7116 return EACCES;
7117 }
7118 if (req->newptr == USER_ADDR_NULL) {
7119 return EINVAL;
7120 }
7121 if (req->newlen < sizeof(req)) {
7122 return EINVAL;
7123 }
7124 error = SYSCTL_IN(req, &requested, sizeof(requested));
7125 if (error != 0) {
7126 return error;
7127 }
7128 error = tcp_progress_probe_enable_for_interface((unsigned int)requested.ifindex, (uint32_t)requested.filter_flags, (uint32_t)requested.enable);
7129
7130 return error;
7131 }
7132
7133
7134 #if SKYWALK
7135
7136 #pragma mark -- netstat support for user level providers --
7137
/*
 * Per-flow snapshot record for userland (Skywalk) TCP/UDP providers:
 * the traffic counters plus one provider-specific descriptor. The union
 * is discriminated externally by the nstat_provider_id_t that accompanies
 * the record.
 */
typedef struct nstat_flow_data {
	nstat_counts counts;            /* rx/tx packet and byte counters */
	union {
		nstat_udp_descriptor udp_descriptor;
		nstat_tcp_descriptor tcp_descriptor;
	} flow_descriptor;
} nstat_flow_data;
7145
7146 static int
nstat_gather_flow_data(nstat_provider_id_t provider,nstat_flow_data * flow_data,int n)7147 nstat_gather_flow_data(nstat_provider_id_t provider, nstat_flow_data *flow_data, int n)
7148 {
7149 struct nstat_tu_shadow *shad;
7150 int prepared = 0;
7151 errno_t err;
7152
7153 TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
7154 assert(shad->shad_magic == TU_SHADOW_MAGIC);
7155
7156 if ((shad->shad_provider == provider) && (shad->shad_live)) {
7157 if (prepared >= n) {
7158 break;
7159 }
7160 err = nstat_userland_tu_copy_descriptor((nstat_provider_cookie_t) shad,
7161 &flow_data->flow_descriptor, sizeof(flow_data->flow_descriptor));
7162
7163 if (err != 0) {
7164 printf("%s - nstat_userland_tu_copy_descriptor returned %d\n", __func__, err);
7165 }
7166 err = nstat_userland_tu_counts((nstat_provider_cookie_t) shad,
7167 &flow_data->counts, NULL);
7168 if (err != 0) {
7169 printf("%s - nstat_userland_tu_counts returned %d\n", __func__, err);
7170 }
7171 flow_data++;
7172 prepared++;
7173 }
7174 }
7175 return prepared;
7176 }
7177
7178 static void
nstat_userland_to_xinpcb_n(nstat_provider_id_t provider,nstat_flow_data * flow_data,struct xinpcb_n * xinp)7179 nstat_userland_to_xinpcb_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xinpcb_n *xinp)
7180 {
7181 xinp->xi_len = sizeof(struct xinpcb_n);
7182 xinp->xi_kind = XSO_INPCB;
7183
7184 if (provider == NSTAT_PROVIDER_TCP_USERLAND) {
7185 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7186 struct sockaddr_in *sa = &desc->local.v4;
7187 if (sa->sin_family == AF_INET) {
7188 xinp->inp_vflag = INP_IPV4;
7189 xinp->inp_laddr = desc->local.v4.sin_addr;
7190 xinp->inp_lport = desc->local.v4.sin_port;
7191 xinp->inp_faddr = desc->remote.v4.sin_addr;
7192 xinp->inp_fport = desc->remote.v4.sin_port;
7193 } else if (sa->sin_family == AF_INET6) {
7194 xinp->inp_vflag = INP_IPV6;
7195 xinp->in6p_laddr = desc->local.v6.sin6_addr;
7196 xinp->in6p_lport = desc->local.v6.sin6_port;
7197 xinp->in6p_faddr = desc->remote.v6.sin6_addr;
7198 xinp->in6p_fport = desc->remote.v6.sin6_port;
7199 }
7200 } else if (provider == NSTAT_PROVIDER_UDP_USERLAND) {
7201 nstat_udp_descriptor *desc = &flow_data->flow_descriptor.udp_descriptor;
7202 struct sockaddr_in *sa = &desc->local.v4;
7203 if (sa->sin_family == AF_INET) {
7204 xinp->inp_vflag = INP_IPV4;
7205 xinp->inp_laddr = desc->local.v4.sin_addr;
7206 xinp->inp_lport = desc->local.v4.sin_port;
7207 xinp->inp_faddr = desc->remote.v4.sin_addr;
7208 xinp->inp_fport = desc->remote.v4.sin_port;
7209 } else if (sa->sin_family == AF_INET6) {
7210 xinp->inp_vflag = INP_IPV6;
7211 xinp->in6p_laddr = desc->local.v6.sin6_addr;
7212 xinp->in6p_lport = desc->local.v6.sin6_port;
7213 xinp->in6p_faddr = desc->remote.v6.sin6_addr;
7214 xinp->in6p_fport = desc->remote.v6.sin6_port;
7215 }
7216 }
7217 }
7218
7219 static void
nstat_userland_to_xsocket_n(nstat_provider_id_t provider,nstat_flow_data * flow_data,struct xsocket_n * xso)7220 nstat_userland_to_xsocket_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xsocket_n *xso)
7221 {
7222 xso->xso_len = sizeof(struct xsocket_n);
7223 xso->xso_kind = XSO_SOCKET;
7224
7225 if (provider == NSTAT_PROVIDER_TCP_USERLAND) {
7226 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7227 xso->xso_protocol = IPPROTO_TCP;
7228 xso->so_e_pid = desc->epid;
7229 xso->so_last_pid = desc->pid;
7230 } else {
7231 nstat_udp_descriptor *desc = &flow_data->flow_descriptor.udp_descriptor;
7232 xso->xso_protocol = IPPROTO_UDP;
7233 xso->so_e_pid = desc->epid;
7234 xso->so_last_pid = desc->pid;
7235 }
7236 }
7237
7238 static void
nstat_userland_to_rcv_xsockbuf_n(nstat_provider_id_t provider,nstat_flow_data * flow_data,struct xsockbuf_n * xsbrcv)7239 nstat_userland_to_rcv_xsockbuf_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xsockbuf_n *xsbrcv)
7240 {
7241 xsbrcv->xsb_len = sizeof(struct xsockbuf_n);
7242 xsbrcv->xsb_kind = XSO_RCVBUF;
7243
7244 if (provider == NSTAT_PROVIDER_TCP_USERLAND) {
7245 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7246 xsbrcv->sb_hiwat = desc->rcvbufsize;
7247 xsbrcv->sb_cc = desc->rcvbufused;
7248 } else {
7249 nstat_udp_descriptor *desc = &flow_data->flow_descriptor.udp_descriptor;
7250 xsbrcv->sb_hiwat = desc->rcvbufsize;
7251 xsbrcv->sb_cc = desc->rcvbufused;
7252 }
7253 }
7254
7255 static void
nstat_userland_to_snd_xsockbuf_n(nstat_provider_id_t provider,nstat_flow_data * flow_data,struct xsockbuf_n * xsbsnd)7256 nstat_userland_to_snd_xsockbuf_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xsockbuf_n *xsbsnd)
7257 {
7258 xsbsnd->xsb_len = sizeof(struct xsockbuf_n);
7259 xsbsnd->xsb_kind = XSO_SNDBUF;
7260
7261 if (provider == NSTAT_PROVIDER_TCP_USERLAND) {
7262 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7263 xsbsnd->sb_hiwat = desc->sndbufsize;
7264 xsbsnd->sb_cc = desc->sndbufused;
7265 } else {
7266 }
7267 }
7268
7269 static void
nstat_userland_to_xsockstat_n(nstat_flow_data * flow_data,struct xsockstat_n * xst)7270 nstat_userland_to_xsockstat_n(nstat_flow_data *flow_data, struct xsockstat_n *xst)
7271 {
7272 xst->xst_len = sizeof(struct xsockstat_n);
7273 xst->xst_kind = XSO_STATS;
7274
7275 // The kernel version supports an array of counts, here we only support one and map to first entry
7276 xst->xst_tc_stats[0].rxpackets = flow_data->counts.nstat_rxpackets;
7277 xst->xst_tc_stats[0].rxbytes = flow_data->counts.nstat_rxbytes;
7278 xst->xst_tc_stats[0].txpackets = flow_data->counts.nstat_txpackets;
7279 xst->xst_tc_stats[0].txbytes = flow_data->counts.nstat_txbytes;
7280 }
7281
7282 static void
nstat_userland_to_xtcpcb_n(nstat_flow_data * flow_data,struct xtcpcb_n * xt)7283 nstat_userland_to_xtcpcb_n(nstat_flow_data *flow_data, struct xtcpcb_n *xt)
7284 {
7285 nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
7286 xt->xt_len = sizeof(struct xtcpcb_n);
7287 xt->xt_kind = XSO_TCPCB;
7288 xt->t_state = desc->state;
7289 xt->snd_wnd = desc->txwindow;
7290 xt->snd_cwnd = desc->txcwindow;
7291 }
7292
7293
7294 __private_extern__ int
ntstat_userland_count(short proto)7295 ntstat_userland_count(short proto)
7296 {
7297 int n = 0;
7298 if (proto == IPPROTO_TCP) {
7299 n = nstat_userland_tcp_shadows;
7300 } else if (proto == IPPROTO_UDP) {
7301 n = nstat_userland_udp_shadows;
7302 }
7303 return n;
7304 }
7305
7306 __private_extern__ int
nstat_userland_get_snapshot(short proto,void ** snapshotp,int * countp)7307 nstat_userland_get_snapshot(short proto, void **snapshotp, int *countp)
7308 {
7309 int error = 0;
7310 int n = 0;
7311 nstat_provider_id_t provider;
7312 nstat_flow_data *flow_data = NULL;
7313
7314 lck_mtx_lock(&nstat_mtx);
7315 if (proto == IPPROTO_TCP) {
7316 n = nstat_userland_tcp_shadows;
7317 provider = NSTAT_PROVIDER_TCP_USERLAND;
7318 } else if (proto == IPPROTO_UDP) {
7319 n = nstat_userland_udp_shadows;
7320 provider = NSTAT_PROVIDER_UDP_USERLAND;
7321 }
7322 if (n == 0) {
7323 goto done;
7324 }
7325
7326 flow_data = (nstat_flow_data *) kalloc_data(n * sizeof(*flow_data),
7327 Z_WAITOK | Z_ZERO);
7328 if (flow_data) {
7329 n = nstat_gather_flow_data(provider, flow_data, n);
7330 } else {
7331 error = ENOMEM;
7332 }
7333 done:
7334 lck_mtx_unlock(&nstat_mtx);
7335 *snapshotp = flow_data;
7336 *countp = n;
7337 return error;
7338 }
7339
7340 // nstat_userland_list_snapshot() does most of the work for a sysctl that uses a return format
7341 // as per get_pcblist_n() even though the vast majority of fields are unused.
7342 // Additional items are required in the sysctl output before and after the data added
7343 // by this function.
__private_extern__ int
nstat_userland_list_snapshot(short proto, struct sysctl_req *req, void *userlandsnapshot, int n)
{
	int error = 0;
	int i;
	nstat_provider_id_t provider;
	void *buf = NULL;
	nstat_flow_data *flow_data, *flow_data_array = NULL;
	/*
	 * Each emitted item packs five (TCP: six) fixed-size records
	 * back-to-back, each padded to an 8-byte boundary so offsets match
	 * what get_pcblist_n() consumers expect.
	 */
	size_t item_size = ROUNDUP64(sizeof(struct xinpcb_n)) +
	    ROUNDUP64(sizeof(struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
	    ROUNDUP64(sizeof(struct xsockstat_n));

	/* Nothing to report: success with no output. */
	if ((n == 0) || (userlandsnapshot == NULL)) {
		goto done;
	}

	if (proto == IPPROTO_TCP) {
		/* TCP items carry an extra xtcpcb_n record at the end. */
		item_size += ROUNDUP64(sizeof(struct xtcpcb_n));
		provider = NSTAT_PROVIDER_TCP_USERLAND;
	} else if (proto == IPPROTO_UDP) {
		provider = NSTAT_PROVIDER_UDP_USERLAND;
	} else {
		error = EINVAL;
		goto done;
	}

	buf = (void *) kalloc_data(item_size, Z_WAITOK);
	if (buf) {
		/*
		 * Carve the single item buffer into sub-record pointers at
		 * the same 8-byte-aligned offsets used for item_size above.
		 * xt points one record past the UDP layout; it is only
		 * written when proto == IPPROTO_TCP, where item_size
		 * includes room for it.
		 */
		struct xinpcb_n *xi = (struct xinpcb_n *)buf;
		struct xsocket_n *xso = (struct xsocket_n *) ADVANCE64(xi, sizeof(*xi));
		struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *) ADVANCE64(xso, sizeof(*xso));
		struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *) ADVANCE64(xsbrcv, sizeof(*xsbrcv));
		struct xsockstat_n *xsostats = (struct xsockstat_n *) ADVANCE64(xsbsnd, sizeof(*xsbsnd));
		struct xtcpcb_n *xt = (struct xtcpcb_n *) ADVANCE64(xsostats, sizeof(*xsostats));

		flow_data_array = (nstat_flow_data *)userlandsnapshot;

		for (i = 0; i < n; i++) {
			flow_data = &flow_data_array[i];
			/* Reset the item so unset fields read as zero. */
			bzero(buf, item_size);

			nstat_userland_to_xinpcb_n(provider, flow_data, xi);
			nstat_userland_to_xsocket_n(provider, flow_data, xso);
			nstat_userland_to_rcv_xsockbuf_n(provider, flow_data, xsbrcv);
			nstat_userland_to_snd_xsockbuf_n(provider, flow_data, xsbsnd);
			nstat_userland_to_xsockstat_n(flow_data, xsostats);
			if (proto == IPPROTO_TCP) {
				nstat_userland_to_xtcpcb_n(flow_data, xt);
			}
			error = SYSCTL_OUT(req, buf, item_size);
			if (error) {
				break;
			}
		}
		kfree_data(buf, item_size);
	} else {
		error = ENOMEM;
	}
done:
	return error;
}
7406
7407 __private_extern__ void
nstat_userland_release_snapshot(void * snapshot,int nuserland)7408 nstat_userland_release_snapshot(void *snapshot, int nuserland)
7409 {
7410 if (snapshot != NULL) {
7411 kfree_data(snapshot, nuserland * sizeof(nstat_flow_data));
7412 }
7413 }
7414
7415 #if NTSTAT_SUPPORTS_STANDALONE_SYSCTL
7416
7417 __private_extern__ int
ntstat_userland_list_n(short proto,struct sysctl_req * req)7418 ntstat_userland_list_n(short proto, struct sysctl_req *req)
7419 {
7420 int error = 0;
7421 int n;
7422 struct xinpgen xig;
7423 void *snapshot = NULL;
7424 size_t item_size = ROUNDUP64(sizeof(struct xinpcb_n)) +
7425 ROUNDUP64(sizeof(struct xsocket_n)) +
7426 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
7427 ROUNDUP64(sizeof(struct xsockstat_n));
7428
7429 if (proto == IPPROTO_TCP) {
7430 item_size += ROUNDUP64(sizeof(struct xtcpcb_n));
7431 }
7432
7433 if (req->oldptr == USER_ADDR_NULL) {
7434 n = ntstat_userland_count(proto);
7435 req->oldidx = 2 * (sizeof(xig)) + (n + 1 + n / 8) * item_size;
7436 goto done;
7437 }
7438
7439 if (req->newptr != USER_ADDR_NULL) {
7440 error = EPERM;
7441 goto done;
7442 }
7443
7444 error = nstat_userland_get_snapshot(proto, &snapshot, &n);
7445
7446 if (error) {
7447 goto done;
7448 }
7449
7450 bzero(&xig, sizeof(xig));
7451 xig.xig_len = sizeof(xig);
7452 xig.xig_gen = 0;
7453 xig.xig_sogen = 0;
7454 xig.xig_count = n;
7455 error = SYSCTL_OUT(req, &xig, sizeof(xig));
7456 if (error) {
7457 goto done;
7458 }
7459 /*
7460 * We are done if there are no flows
7461 */
7462 if (n == 0) {
7463 goto done;
7464 }
7465
7466 error = nstat_userland_list_snapshot(proto, req, snapshot, n);
7467
7468 if (!error) {
7469 /*
7470 * Give the user an updated idea of our state,
7471 * which is unchanged
7472 */
7473 error = SYSCTL_OUT(req, &xig, sizeof(xig));
7474 }
7475 done:
7476 nstat_userland_release_snapshot(snapshot, n);
7477 return error;
7478 }
7479
7480 #endif /* NTSTAT_SUPPORTS_STANDALONE_SYSCTL */
7481 #endif /* SKYWALK */
7482