1 /*
2 * Copyright (c) 2015-2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30
31 #include <kern/thread_call.h>
32 #include <kern/uipc_domain.h>
33 #include <kern/zalloc.h>
34
35 #include <net/if.h>
36 #include <net/if_types.h>
37 #include <net/if_var.h>
38 #include <net/net_api_stats.h>
39 #include <net/necp.h>
40 #include <net/network_agent.h>
41 #include <net/ntstat.h>
42 #include <net/aop/kpi_aop.h>
43 #include <net/aop/aop_stats.h>
44
45 #include <netinet/in_pcb.h>
46 #include <netinet/in_var.h>
47 #include <netinet/ip.h>
48 #include <netinet/ip6.h>
49 #include <netinet/mp_pcb.h>
50 #include <netinet/tcp_cc.h>
51 #include <netinet/tcp_fsm.h>
52 #include <netinet/tcp_cache.h>
53 #include <netinet6/in6_var.h>
54
55 #include <sys/domain.h>
56 #include <sys/file_internal.h>
57 #include <sys/kauth.h>
58 #include <sys/kernel.h>
59 #include <sys/malloc.h>
60 #include <sys/poll.h>
61 #include <sys/priv.h>
62 #include <sys/protosw.h>
63 #include <sys/queue.h>
64 #include <sys/socket.h>
65 #include <sys/socketvar.h>
66 #include <sys/sysproto.h>
67 #include <sys/systm.h>
68 #include <sys/types.h>
69 #include <sys/codesign.h>
70 #include <libkern/section_keywords.h>
71 #include <IOKit/IOBSD.h>
72
73 #include <os/refcnt.h>
74
75 #include <CodeSignature/Entitlements.h>
76
77 #if SKYWALK
78 #include <skywalk/os_skywalk_private.h>
79 #include <skywalk/nexus/flowswitch/flow/flow_var.h>
80 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
81 #endif /* SKYWALK */
82
83 #if CONFIG_MACF
84 #include <security/mac_framework.h>
85 #endif
86
87 #include <net/sockaddr_utils.h>
88
89 /*
90 * NECP Client Architecture
91 * ------------------------------------------------
92 * See <net/necp.c> for a discussion on NECP database architecture.
93 *
94 * Each client of NECP provides a set of parameters for a connection or network state
95 * evaluation, on which NECP policy evaluation is run. This produces a policy result
 * which can be accessed by the originating process, along with events for when policy
 * results have changed.
98 *
99 * ------------------------------------------------
100 * NECP Client FD
101 * ------------------------------------------------
102 * A process opens an NECP file descriptor using necp_open(). This is a very simple
103 * file descriptor, upon which the process may do the following operations:
104 * - necp_client_action(...), to add/remove/query clients
105 * - kqueue, to watch for readable events
106 * - close(), to close the client session and release all clients
107 *
108 * Client objects are allocated structures that hang off of the file descriptor. Each
109 * client contains:
110 * - Client ID, a UUID that references the client across the system
111 * - Parameters, a buffer of TLVs that describe the client's connection parameters,
112 * such as the remote and local endpoints, interface requirements, etc.
113 * - Result, a buffer of TLVs containing the current policy evaluation for the client.
114 * This result will be updated whenever a network change occurs that impacts the
115 * policy result for that client.
116 *
117 * +--------------+
118 * | NECP fd |
119 * +--------------+
120 * ||
121 * ==================================
122 * || || ||
123 * +--------------+ +--------------+ +--------------+
124 * | Client ID | | Client ID | | Client ID |
125 * | ---- | | ---- | | ---- |
126 * | Parameters | | Parameters | | Parameters |
127 * | ---- | | ---- | | ---- |
128 * | Result | | Result | | Result |
129 * +--------------+ +--------------+ +--------------+
130 *
131 * ------------------------------------------------
132 * Client Actions
133 * ------------------------------------------------
134 * - Add. Input parameters as a buffer of TLVs, and output a client ID. Allocates a
135 * new client structure on the file descriptor.
136 * - Remove. Input a client ID. Removes a client structure from the file descriptor.
137 * - Copy Parameters. Input a client ID, and output parameter TLVs.
138 * - Copy Result. Input a client ID, and output result TLVs. Alternatively, input empty
139 * client ID and get next unread client result.
140 * - Copy List. List all client IDs.
141 *
142 * ------------------------------------------------
143 * Client Policy Evaluation
144 * ------------------------------------------------
145 * Policies are evaluated for clients upon client creation, and upon update events,
146 * which are network/agent/policy changes coalesced by a timer.
147 *
148 * The policy evaluation goes through the following steps:
149 * 1. Parse client parameters.
150 * 2. Select a scoped interface if applicable. This involves using require/prohibit
151 * parameters, along with the local address, to select the most appropriate interface
152 * if not explicitly set by the client parameters.
 * 3. Run NECP application-level policy evaluation.
154 * 4. Set policy result into client result buffer.
155 *
156 * ------------------------------------------------
157 * Client Observers
158 * ------------------------------------------------
159 * If necp_open() is called with the NECP_OPEN_FLAG_OBSERVER flag, and the process
160 * passes the necessary privilege check, the fd is allowed to use necp_client_action()
161 * to copy client state attached to the file descriptors of other processes, and to
162 * list all client IDs on the system.
163 */
164
165 extern u_int32_t necp_debug;
166
167 static int necpop_select(struct fileproc *, int, void *, vfs_context_t);
168 static int necpop_close(struct fileglob *, vfs_context_t);
169 static int necpop_kqfilter(struct fileproc *, struct knote *, struct kevent_qos_s *);
170
171 // Timer functions
172 static int necp_timeout_microseconds = 1000 * 100; // 100ms
173 static int necp_timeout_leeway_microseconds = 1000 * 50; // 50ms
174 #if SKYWALK
175 static int necp_collect_stats_timeout_microseconds = 1000 * 1000 * 1; // 1s
176 static int necp_collect_stats_timeout_leeway_microseconds = 1000 * 500; // 500ms
177 static int necp_close_arenas_timeout_microseconds = 1000 * 1000 * 10; // 10s
178 static int necp_close_arenas_timeout_leeway_microseconds = 1000 * 1000 * 1; // 1s
179 #endif /* SKYWALK */
180
181 static int necp_client_fd_count = 0;
182 static int necp_observer_fd_count = 0;
183 static int necp_client_count = 0;
184 static int necp_socket_flow_count = 0;
185 static int necp_if_flow_count = 0;
186 static int necp_observer_message_limit = 256;
187
188 /*
189 * NECP client tracing control -
190 *
191 * necp_client_tracing_level : 1 for client trace, 2 for flow trace, 3 for parameter details
192 * necp_client_tracing_pid : match client with pid
193 */
194 static int necp_client_tracing_level = 0;
195 static int necp_client_tracing_pid = 0;
196
197 #define NECP_CLIENT_TRACE_LEVEL_CLIENT 1
198 #define NECP_CLIENT_TRACE_LEVEL_FLOW 2
199 #define NECP_CLIENT_TRACE_LEVEL_PARAMS 3
200
// True when `pid` matches the pid selected via the necp_client_tracing_pid
// sysctl. Argument parenthesized so caller expressions expand safely.
#define NECP_CLIENT_TRACE_PID_MATCHED(pid) \
	((pid) == necp_client_tracing_pid)

// Evaluates to necp_client_tracing_level when tracing is enabled at `level`,
// else 0. NOTE: expands an implicit local named `client` (reads
// client->proc_pid) — callers must have one in scope.
#define NECP_ENABLE_CLIENT_TRACE(level) \
	((necp_client_tracing_level >= (level) && \
	(!necp_client_tracing_pid || NECP_CLIENT_TRACE_PID_MATCHED(client->proc_pid))) ? necp_client_tracing_level : 0)

// Log a client-level trace event. Wrapped in do { } while (0) so each macro
// is a single statement and is safe in unbraced if/else bodies.
#define NECP_CLIENT_LOG(client, fmt, ...) \
	do { \
		if (client && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_CLIENT)) { \
			uuid_string_t client_uuid_str = { }; \
			uuid_unparse_lower(client->client_id, client_uuid_str); \
			NECPLOG(LOG_NOTICE, "NECP_CLIENT_LOG <pid %d %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, ##__VA_ARGS__); \
		} \
	} while (0)

// Log a flow-level trace event for a (client, flow registration) pair.
#define NECP_CLIENT_FLOW_LOG(client, flow, fmt, ...) \
	do { \
		if (client && flow && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) { \
			uuid_string_t client_uuid_str = { }; \
			uuid_unparse_lower(client->client_id, client_uuid_str); \
			uuid_string_t flow_uuid_str = { }; \
			uuid_unparse_lower(flow->registration_id, flow_uuid_str); \
			NECPLOG(LOG_NOTICE, "NECP CLIENT FLOW TRACE <pid %d %s> <flow %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, flow_uuid_str, ##__VA_ARGS__); \
		} \
	} while (0)

// Log client parameter details (most verbose client trace level).
#define NECP_CLIENT_PARAMS_LOG(client, fmt, ...) \
	do { \
		if (client && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) { \
			uuid_string_t client_uuid_str = { }; \
			uuid_unparse_lower(client->client_id, client_uuid_str); \
			NECPLOG(LOG_NOTICE, "NECP_CLIENT_PARAMS_LOG <pid %d %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, ##__VA_ARGS__); \
		} \
	} while (0)

// Effective pid for a socket: the delegated pid when the socket is
// delegated, otherwise the last pid to use the socket.
#define NECP_SOCKET_PID(so) \
	(((so)->so_flags & SOF_DELEGATED) ? (so)->e_pid : (so)->last_pid)

// Socket analogue of NECP_ENABLE_CLIENT_TRACE. NOTE: expands an implicit
// local named `so` — callers must have one in scope.
#define NECP_ENABLE_SOCKET_TRACE(level) \
	((necp_client_tracing_level >= (level) && \
	(!necp_client_tracing_pid || NECP_CLIENT_TRACE_PID_MATCHED(NECP_SOCKET_PID(so)))) ? necp_client_tracing_level : 0)

#define NECP_SOCKET_PARAMS_LOG(so, fmt, ...) \
	do { \
		if (so && NECP_ENABLE_SOCKET_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) { \
			NECPLOG(LOG_NOTICE, "NECP_SOCKET_PARAMS_LOG <pid %d>: " fmt "\n", NECP_SOCKET_PID(so), ##__VA_ARGS__); \
		} \
	} while (0)

#define NECP_SOCKET_ATTRIBUTE_LOG(fmt, ...) \
	do { \
		if (necp_client_tracing_level >= NECP_CLIENT_TRACE_LEVEL_PARAMS) { \
			NECPLOG(LOG_NOTICE, "NECP_SOCKET_ATTRIBUTE_LOG: " fmt "\n", ##__VA_ARGS__); \
		} \
	} while (0)

#define NECP_CLIENT_TRACKER_LOG(pid, fmt, ...) \
	do { \
		if (pid) { \
			NECPLOG(LOG_NOTICE, "NECP_CLIENT_TRACKER_LOG <pid %d>: " fmt "\n", (pid), ##__VA_ARGS__); \
		} \
	} while (0)
252
#if SKYWALK
static int necp_arena_count = 0;
static int necp_sysctl_arena_count = 0;
static int necp_nexus_flow_count = 0;

/* userspace stats sanity check range, same unit as TCP (see TCP_RTT_SCALE) */
static uint32_t necp_client_stats_rtt_floor = 1; // 32us
static uint32_t necp_client_stats_rtt_ceiling = 1920000; // 60s
// Zero-valued template used when resetting/comparing flow stats. Storage-class
// specifier placed first: `const static` is obsolescent per C11 6.11.5.
static const struct sk_stats_flow ntstat_sk_stats_zero;
#endif /* SKYWALK */
263
264 static int necp_client_stats_use_route_metrics = 0;
265
266 /*
267 * Global lock to protect socket inp_necp_attributes across updates.
268 * NECP updating these attributes and clients accessing these attributes
269 * must take this lock.
270 */
271 static LCK_GRP_DECLARE(necp_socket_attr_lock_grp, "necpSocketAttrGroup");
272 LCK_MTX_DECLARE(necp_socket_attr_lock, &necp_socket_attr_lock_grp);
273
274 os_refgrp_decl(static, necp_client_refgrp, "NECPClientRefGroup", NULL);
275
276 SYSCTL_INT(_net_necp, NECPCTL_CLIENT_FD_COUNT, client_fd_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_client_fd_count, 0, "");
277 SYSCTL_INT(_net_necp, NECPCTL_OBSERVER_FD_COUNT, observer_fd_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_observer_fd_count, 0, "");
278 SYSCTL_INT(_net_necp, NECPCTL_CLIENT_COUNT, client_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_client_count, 0, "");
279 SYSCTL_INT(_net_necp, NECPCTL_SOCKET_FLOW_COUNT, socket_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_socket_flow_count, 0, "");
280 SYSCTL_INT(_net_necp, NECPCTL_IF_FLOW_COUNT, if_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_if_flow_count, 0, "");
281 SYSCTL_INT(_net_necp, NECPCTL_OBSERVER_MESSAGE_LIMIT, observer_message_limit, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_observer_message_limit, 256, "");
282 SYSCTL_INT(_net_necp, NECPCTL_CLIENT_TRACING_LEVEL, necp_client_tracing_level, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_client_tracing_level, 0, "");
283 SYSCTL_INT(_net_necp, NECPCTL_CLIENT_TRACING_PID, necp_client_tracing_pid, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_client_tracing_pid, 0, "");
284
285 #if SKYWALK
286 SYSCTL_INT(_net_necp, NECPCTL_ARENA_COUNT, arena_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_arena_count, 0, "");
287 SYSCTL_INT(_net_necp, NECPCTL_SYSCTL_ARENA_COUNT, sysctl_arena_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_sysctl_arena_count, 0, "");
288 SYSCTL_INT(_net_necp, NECPCTL_NEXUS_FLOW_COUNT, nexus_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_nexus_flow_count, 0, "");
289 #if (DEVELOPMENT || DEBUG)
290 SYSCTL_UINT(_net_necp, OID_AUTO, collect_stats_interval_us, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_collect_stats_timeout_microseconds, 0, "");
291 SYSCTL_UINT(_net_necp, OID_AUTO, necp_client_stats_rtt_floor, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_rtt_floor, 0, "");
292 SYSCTL_UINT(_net_necp, OID_AUTO, necp_client_stats_rtt_ceiling, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_rtt_ceiling, 0, "");
293 SYSCTL_INT(_net_necp, OID_AUTO, necp_client_stats_use_route_metrics, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_use_route_metrics, 0, "");
294 #endif /* (DEVELOPMENT || DEBUG) */
295 #endif /* SKYWALK */
296
// Parenthesized so the constants expand safely inside larger expressions
// (e.g. `x % NECP_MAX_AGENT_ACTION_SIZE` would previously expand to
// `x % 10 * 1024` and bind as `(x % 10) * 1024`).
#define NECP_MAX_CLIENT_LIST_SIZE               (1024 * 1024) // 1MB
#define NECP_MAX_AGENT_ACTION_SIZE              (10 * 1024) // 10K
299
300 extern int tvtohz(struct timeval *);
301 extern unsigned int get_maxmtu(struct rtentry *);
302
303 // Parsed parameters
304 #define NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR 0x00001
305 #define NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR 0x00002
306 #define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF 0x00004
307 #define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF 0x00008
308 #define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE 0x00010
309 #define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE 0x00020
310 #define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT 0x00040
311 #define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT 0x00080
312 #define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT 0x00100
313 #define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT 0x00200
314 #define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE 0x00400
315 #define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE 0x00800
316 #define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE 0x01000
317 #define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE 0x02000
318 #define NECP_PARSED_PARAMETERS_FIELD_FLAGS 0x04000
319 #define NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL 0x08000
320 #define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID 0x10000
321 #define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID 0x20000
322 #define NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS 0x40000
323 #define NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT 0x80000
324 #define NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID 0x100000
325 #define NECP_PARSED_PARAMETERS_FIELD_ETHERTYPE 0x200000
326 #define NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL 0x400000
327 #define NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE 0x800000
328 #define NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER 0x1000000
329 #define NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID 0x2000000
330 #define NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN 0x4000000
331 #define NECP_PARSED_PARAMETERS_FIELD_UID 0x8000000
332 #define NECP_PARSED_PARAMETERS_FIELD_PERSONA_ID 0x10000000
333 #define NECP_PARSED_PARAMETERS_FIELD_EXTENDED_FLAGS 0x20000000
334
335
336 #define NECP_MAX_INTERFACE_PARAMETERS 16
337 #define NECP_MAX_AGENT_PARAMETERS 4
// Client parameters parsed out of the TLV buffer supplied when a client is
// added. `valid_fields` is a bitmask of NECP_PARSED_PARAMETERS_FIELD_*
// indicating which of the members below were present in the TLVs.
struct necp_client_parsed_parameters {
	u_int32_t valid_fields; // NECP_PARSED_PARAMETERS_FIELD_* bitmask
	u_int32_t flags;
	u_int64_t delegated_upid;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	u_int32_t required_interface_index;
	char prohibited_interfaces[NECP_MAX_INTERFACE_PARAMETERS][IFXNAMSIZ];
	u_int8_t required_interface_type;
	u_int8_t local_address_preference;
	u_int8_t prohibited_interface_types[NECP_MAX_INTERFACE_PARAMETERS];
	struct necp_client_parameter_netagent_type required_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	struct necp_client_parameter_netagent_type prohibited_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	struct necp_client_parameter_netagent_type preferred_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	struct necp_client_parameter_netagent_type avoided_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	uuid_t required_netagents[NECP_MAX_AGENT_PARAMETERS];
	uuid_t prohibited_netagents[NECP_MAX_AGENT_PARAMETERS];
	uuid_t preferred_netagents[NECP_MAX_AGENT_PARAMETERS];
	uuid_t avoided_netagents[NECP_MAX_AGENT_PARAMETERS];
	u_int8_t ip_protocol;
	u_int8_t transport_protocol;
	u_int16_t ethertype;
	pid_t effective_pid;
	uuid_t effective_uuid;
	uuid_t parent_uuid;
	u_int32_t traffic_class;
	struct necp_demux_pattern demux_patterns[NECP_MAX_DEMUX_PATTERNS];
	u_int8_t demux_pattern_count; // number of valid entries in demux_patterns
	uid_t uid;
	uid_t persona_id;
	u_int64_t extended_flags;
};
370
371 static bool
372 necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_parameters,
373 u_int *return_ifindex, bool *validate_agents);
374
375 static bool
376 necp_ifnet_matches_local_address(struct ifnet *ifp, struct sockaddr *sa);
377
378 static bool
379 necp_ifnet_matches_parameters(struct ifnet *ifp,
380 struct necp_client_parsed_parameters *parsed_parameters,
381 u_int32_t override_flags,
382 u_int32_t *preferred_count,
383 bool secondary_interface,
384 bool require_scoped_field);
385
// File operations vector for NECP file descriptors (DTYPE_NETPOLICY).
// Only select, close, and kqfilter are supported; read/write/ioctl/drain
// are stubbed out with the fo_no_* handlers.
static const struct fileops necp_fd_ops = {
	.fo_type = DTYPE_NETPOLICY,
	.fo_read = fo_no_read,
	.fo_write = fo_no_write,
	.fo_ioctl = fo_no_ioctl,
	.fo_select = necpop_select,
	.fo_close = necpop_close,
	.fo_drain = fo_no_drain,
	.fo_kqfilter = necpop_kqfilter,
};
396
// Records a netagent asserted on behalf of a client, linked on the client's
// assertion_list so the assertion can be found and released later.
struct necp_client_assertion {
	LIST_ENTRY(necp_client_assertion) assertion_chain;
	uuid_t asserted_netagent; // UUID of the asserted netagent
};
401
// Packed wire-format layouts for the TLVs written into a client's result
// buffer for flow events. These mirror the TLV encoding byte-for-byte, so
// they must remain __packed__ and their field order must not change.
struct necp_client_flow_header {
	struct necp_tlv_header outer_header; // encloses the TLVs below
	struct necp_tlv_header flow_id_tlv_header;
	uuid_t flow_id;
	struct necp_tlv_header flags_tlv_header;
	u_int32_t flags_value;
	struct necp_tlv_header interface_tlv_header;
	struct necp_client_result_interface interface_value;
} __attribute__((__packed__));

// Optional TLV appended when a flow has a protocol-control event pending.
struct necp_client_flow_protoctl_event_header {
	struct necp_tlv_header protoctl_tlv_header;
	struct necp_client_flow_protoctl_event protoctl_event;
} __attribute__((__packed__));

// Optional TLV carrying a flow's stats index.
struct necp_client_flow_stats_index_header {
	struct necp_tlv_header stats_index_tlv_header;
	uint32_t stats_index;
} __attribute__((__packed__));

// Extended flow header for nexus flows: the base flow header followed by
// the netagent TLV and the TFO cookie TLV.
struct necp_client_nexus_flow_header {
	struct necp_client_flow_header flow_header;
	struct necp_tlv_header agent_tlv_header;
	struct necp_client_result_netagent agent_value;
	struct necp_tlv_header tfo_cookie_tlv_header;
	u_int8_t tfo_cookie_value[NECP_TFO_COOKIE_LEN_MAX];
} __attribute__((__packed__));
429
430 #if SKYWALK
431 struct necp_arena_info;
432 #endif
433
// A single instantiated flow for a client. Flows are kept on the owning
// flow registration's flow_list (flow_chain).
struct necp_client_flow {
	LIST_ENTRY(necp_client_flow) flow_chain;
	unsigned invalid : 1;
	unsigned nexus : 1; // If true, flow is a nexus; if false, flow is attached to socket
	unsigned socket : 1;
	unsigned viable : 1;
	unsigned assigned : 1;
	unsigned has_protoctl_event : 1;
	unsigned check_tcp_heuristics : 1;
	unsigned aop_offload : 1;
	unsigned aop_stat_index_valid : 1; // stats_index below holds a valid value
	union {
		uuid_t nexus_agent; // nexus case: agent UUID
		struct {            // socket case: handle plus callback
			void *socket_handle;
			necp_client_flow_cb cb;
		};
	} u;
	uint32_t interface_index;
	u_short delegated_interface_index;
	uint32_t interface_flags;
	uint32_t necp_flow_flags;
	struct necp_client_flow_protoctl_event protoctl_event;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	uint32_t flow_tag;
	uint32_t stats_index; // Index associated with AOP flows

	// Result TLVs assigned to this flow; buffer is owned by the flow.
	size_t assigned_results_length;
	u_int8_t *__counted_by(assigned_results_length) assigned_results;
};
465
// A registered flow on a client. Each registration is linked into three
// red-black trees — the owning fd's tree (fd_link), the global flow tree
// (global_link), and the owning client's tree (client_link) — plus the
// stats-collection list (collect_stats_chain).
struct necp_client_flow_registration {
	RB_ENTRY(necp_client_flow_registration) fd_link;
	RB_ENTRY(necp_client_flow_registration) global_link;
	RB_ENTRY(necp_client_flow_registration) client_link;
	LIST_ENTRY(necp_client_flow_registration) collect_stats_chain;
	uuid_t registration_id; // key used by necp_client_flow_id_cmp
	u_int32_t flags;
	unsigned flow_result_read : 1;
	unsigned defunct : 1;
	unsigned aop_offload : 1;
	void *interface_handle;
	necp_client_flow_cb interface_cb;
	struct necp_client *client; // back pointer to the owning client
	LIST_HEAD(_necp_registration_flow_list, necp_client_flow) flow_list;
#if SKYWALK
	struct necp_arena_info *stats_arena; /* arena where the stats objects came from */
	void * kstats_kaddr; /* kernel snapshot of untrusted userspace stats, for calculating delta */
	mach_vm_address_t ustats_uaddr; /* userspace stats (untrusted) */
	nstat_userland_context stats_handler_context;
	struct flow_stats *nexus_stats; /* shared stats objects between necp_client and skywalk */
#endif /* SKYWALK */
	u_int64_t last_interface_details __attribute__((aligned(sizeof(u_int64_t))));
};
489
490 static int necp_client_flow_id_cmp(struct necp_client_flow_registration *flow0, struct necp_client_flow_registration *flow1);
491
492 RB_HEAD(_necp_client_flow_tree, necp_client_flow_registration);
493 RB_PROTOTYPE_PREV(_necp_client_flow_tree, necp_client_flow_registration, client_link, necp_client_flow_id_cmp);
494 RB_GENERATE_PREV(_necp_client_flow_tree, necp_client_flow_registration, client_link, necp_client_flow_id_cmp);
495
496 #define NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT 4
497 #define NECP_CLIENT_MAX_INTERFACE_OPTIONS 32
498
499 #define NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT (NECP_CLIENT_MAX_INTERFACE_OPTIONS - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT)
500
// A single NECP client: the parameters supplied by the owning process and
// the current policy-evaluation result for them (see the "NECP Client FD"
// discussion above). Clients are kept in a per-fd tree (link) and the
// global tree (global_link); refer to the "Locking Notes" below for lock
// ordering between lock and route_lock.
struct necp_client {
	RB_ENTRY(necp_client) link;
	RB_ENTRY(necp_client) global_link;

	decl_lck_mtx_data(, lock);
	decl_lck_mtx_data(, route_lock); // protects current_route
	os_refcnt_t reference_count;

	uuid_t client_id; // key used by necp_client_id_cmp
	unsigned result_read : 1;
	unsigned group_members_read : 1;
	unsigned allow_multiple_flows : 1;
	unsigned legacy_client_is_flow : 1;

	unsigned platform_binary : 1;
	unsigned validated_parent : 1;

	// Current policy result TLVs, refreshed on network/policy updates.
	size_t result_length;
	u_int8_t result[NECP_BASE_CLIENT_RESULT_SIZE];

	necp_policy_id policy_id;
	necp_policy_id skip_policy_id;

	u_int8_t ip_protocol;
	int proc_pid; // pid of the owning process

	u_int64_t delegated_upid;

	struct _necp_client_flow_tree flow_registrations;
	LIST_HEAD(_necp_client_assertion_list, necp_client_assertion) assertion_list;

	size_t assigned_group_members_length;
	u_int8_t *__counted_by(assigned_group_members_length) assigned_group_members;

	struct rtentry *current_route; // guarded by route_lock

	struct necp_client_interface_option interface_options[NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
	struct necp_client_interface_option * __indexable extra_interface_options;
	u_int8_t interface_option_count; // Number in interface_options + extra_interface_options

	struct necp_client_result_netagent failed_trigger_agent;

	void *agent_handle;

	uuid_t override_euuid;

#if SKYWALK
	netns_token port_reservation;
	nstat_context nstat_context;
	uuid_t latest_flow_registration_id;
	uuid_t parent_client_id;
	struct necp_client *original_parameters_source;
#endif /* SKYWALK */

	// Original parameter TLVs supplied by the process at client-add time.
	size_t parameters_length;
	u_int8_t * __sized_by(parameters_length) parameters;
};
558
// Locking helpers for a single necp_client. Arguments are parenthesized so
// the macros expand safely for any caller expression. Per the "Locking
// Notes" below, route_lock guards a client's route and is ordered after the
// client lock.
#define NECP_CLIENT_LOCK(_c) lck_mtx_lock(&(_c)->lock)
#define NECP_CLIENT_UNLOCK(_c) lck_mtx_unlock(&(_c)->lock)
#define NECP_CLIENT_ASSERT_LOCKED(_c) LCK_MTX_ASSERT(&(_c)->lock, LCK_MTX_ASSERT_OWNED)
#define NECP_CLIENT_ASSERT_UNLOCKED(_c) LCK_MTX_ASSERT(&(_c)->lock, LCK_MTX_ASSERT_NOTOWNED)

#define NECP_CLIENT_ROUTE_LOCK(_c) lck_mtx_lock(&(_c)->route_lock)
#define NECP_CLIENT_ROUTE_UNLOCK(_c) lck_mtx_unlock(&(_c)->route_lock)
566
567 static void necp_client_retain_locked(struct necp_client *client);
568 static void necp_client_retain(struct necp_client *client);
569
570 static bool necp_client_release_locked(struct necp_client *client);
571 static bool necp_client_release(struct necp_client *client);
572
573 static void
574 necp_client_add_assertion(struct necp_client *client, uuid_t netagent_uuid);
575
576 static bool
577 necp_client_remove_assertion(struct necp_client *client, uuid_t netagent_uuid);
578
579 static int
580 necp_client_copy_parameters_locked(struct necp_client *client,
581 struct necp_client_nexus_parameters *parameters);
582
583 LIST_HEAD(_necp_flow_registration_list, necp_client_flow_registration);
584 static struct _necp_flow_registration_list necp_collect_stats_flow_list;
585
// Snapshot of a flow being defuncted, queued on a _necp_flow_defunct_list
// (declared below) via `chain` for processing.
struct necp_flow_defunct {
	LIST_ENTRY(necp_flow_defunct) chain;

	uuid_t flow_id;
	uuid_t nexus_agent;
	void *agent_handle;
	int proc_pid;
	u_int32_t flags;
	struct necp_client_agent_parameters close_parameters;
	bool has_close_parameters; // true when close_parameters is populated
};
597
598 LIST_HEAD(_necp_flow_defunct_list, necp_flow_defunct);
599
600 static int necp_client_id_cmp(struct necp_client *client0, struct necp_client *client1);
601
602 RB_HEAD(_necp_client_tree, necp_client);
603 RB_PROTOTYPE_PREV(_necp_client_tree, necp_client, link, necp_client_id_cmp);
604 RB_GENERATE_PREV(_necp_client_tree, necp_client, link, necp_client_id_cmp);
605
606 RB_HEAD(_necp_client_global_tree, necp_client);
607 RB_PROTOTYPE_PREV(_necp_client_global_tree, necp_client, global_link, necp_client_id_cmp);
608 RB_GENERATE_PREV(_necp_client_global_tree, necp_client, global_link, necp_client_id_cmp);
609
610 RB_HEAD(_necp_fd_flow_tree, necp_client_flow_registration);
611 RB_PROTOTYPE_PREV(_necp_fd_flow_tree, necp_client_flow_registration, fd_link, necp_client_flow_id_cmp);
612 RB_GENERATE_PREV(_necp_fd_flow_tree, necp_client_flow_registration, fd_link, necp_client_flow_id_cmp);
613
614 RB_HEAD(_necp_client_flow_global_tree, necp_client_flow_registration);
615 RB_PROTOTYPE_PREV(_necp_client_flow_global_tree, necp_client_flow_registration, global_link, necp_client_flow_id_cmp);
616 RB_GENERATE_PREV(_necp_client_flow_global_tree, necp_client_flow_registration, global_link, necp_client_flow_id_cmp);
617
618 static struct _necp_client_global_tree necp_client_global_tree;
619 static struct _necp_client_flow_global_tree necp_client_flow_global_tree;
620
// A queued observer update message, kept on an fd's update_list (see
// struct necp_fd_data) until the observer reads it.
struct necp_client_update {
	TAILQ_ENTRY(necp_client_update) chain;

	uuid_t client_id; // client the update refers to

	// Update payload; buffer is sized by update_length.
	size_t update_length;
	struct necp_client_observer_update *__sized_by(update_length) update;
};
629
#if SKYWALK
// Bookkeeping for one per-process stats arena (see the discussion above
// stats_arena_active in struct necp_fd_data below).
struct necp_arena_info {
	LIST_ENTRY(necp_arena_info) nai_chain;
	u_int32_t nai_flags; // NAIF_* flags (defined below)
	pid_t nai_proc_pid;
	struct skmem_arena *nai_arena;
	struct skmem_arena_mmap_info nai_mmap;
	mach_vm_offset_t nai_roff;
	u_int32_t nai_use_count;
};
#endif /* SKYWALK */
641
642 #define NAIF_ATTACHED 0x1 // arena is attached to list
643 #define NAIF_REDIRECT 0x2 // arena mmap has been redirected
644 #define NAIF_DEFUNCT 0x4 // arena is now defunct
645
#define NECP_FD_REPORTED_AGENT_COUNT 2

// Fixed-size record of agent UUIDs reported on an fd (up to
// NECP_FD_REPORTED_AGENT_COUNT entries).
struct necp_fd_reported_agents {
	uuid_t agent_uuid[NECP_FD_REPORTED_AGENT_COUNT];
};
651
// Per-file-descriptor NECP state (see "NECP Client FD" above): the set of
// clients and flow registrations owned by this fd, plus queued observer
// updates. Fields are protected by fd_lock (NECP_FD_LOCK).
struct necp_fd_data {
	u_int8_t necp_fd_type;
	LIST_ENTRY(necp_fd_data) chain; // linkage on necp_fd_list / observer list
	struct _necp_client_tree clients;
	struct _necp_fd_flow_tree flows;
	TAILQ_HEAD(_necp_client_update_list, necp_client_update) update_list;
	int update_count;
	int flags;

	unsigned background : 1;
	unsigned request_in_process_flow_divert : 1;

	int proc_pid; // pid of the process that opened this fd
	decl_lck_mtx_data(, fd_lock);
	struct selinfo si; // select/kqueue wait state

	struct necp_fd_reported_agents reported_agents;
#if SKYWALK
	// Arenas and their mmap info for per-process stats. Stats objects are allocated from an active arena
	// that is not redirected/defunct. The stats_arena_active keeps track of such an arena, and it also
	// holds a reference count on the object. Each flow allocating a stats object also holds a reference
	// the necp_arena_info (where the object got allocated from). During defunct, we redirect the mapping
	// of the arena such that any attempt to access (read/write) will result in getting zero-filled pages.
	// We then go thru all of the flows for the process and free the stats objects associated with them,
	// followed by destroying the skmem region(s) associated with the arena. The stats_arena_list keeps
	// track of all current and defunct stats arenas; there could be more than one arena created for the
	// process as the arena destruction happens when its reference count drops to 0.
	struct necp_arena_info *stats_arena_active;
	LIST_HEAD(_necp_arena_info_list, necp_arena_info) stats_arena_list;
	u_int32_t stats_arena_gencnt;

	struct skmem_arena *sysctl_arena;
	struct skmem_arena_mmap_info sysctl_mmap;
	mach_vm_offset_t system_sysctls_roff;
#endif /* SKYWALK */
};
688
// Locking helpers for an necp_fd_data entry (see "Locking Notes" below).
// Arguments are parenthesized so the macros expand safely for any caller
// expression.
#define NECP_FD_LOCK(_f) lck_mtx_lock(&(_f)->fd_lock)
#define NECP_FD_UNLOCK(_f) lck_mtx_unlock(&(_f)->fd_lock)
#define NECP_FD_ASSERT_LOCKED(_f) LCK_MTX_ASSERT(&(_f)->fd_lock, LCK_MTX_ASSERT_OWNED)
#define NECP_FD_ASSERT_UNLOCKED(_f) LCK_MTX_ASSERT(&(_f)->fd_lock, LCK_MTX_ASSERT_NOTOWNED)
693
694 static LIST_HEAD(_necp_fd_list, necp_fd_data) necp_fd_list;
695 static LIST_HEAD(_necp_fd_observer_list, necp_fd_data) necp_fd_observer_list;
696
#if SKYWALK
// Typed kalloc zone for struct necp_arena_info allocations.
static KALLOC_TYPE_DEFINE(necp_arena_info_zone, struct necp_arena_info, NET_KT_DEFAULT);
#endif /* SKYWALK */
700
701 static LCK_ATTR_DECLARE(necp_fd_mtx_attr, 0, 0);
702 static LCK_GRP_DECLARE(necp_fd_mtx_grp, "necp_fd");
703
704 static LCK_RW_DECLARE_ATTR(necp_fd_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
705 static LCK_RW_DECLARE_ATTR(necp_observer_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
706 static LCK_RW_DECLARE_ATTR(necp_client_tree_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
707 static LCK_RW_DECLARE_ATTR(necp_flow_tree_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
708 static LCK_RW_DECLARE_ATTR(necp_collect_stats_list_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
709
710
711 #define NECP_STATS_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_collect_stats_list_lock)
712 #define NECP_STATS_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_collect_stats_list_lock)
713 #define NECP_STATS_LIST_UNLOCK() lck_rw_done(&necp_collect_stats_list_lock)
714
715 #define NECP_CLIENT_TREE_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_client_tree_lock)
716 #define NECP_CLIENT_TREE_LOCK_SHARED() lck_rw_lock_shared(&necp_client_tree_lock)
717 #define NECP_CLIENT_TREE_UNLOCK() lck_rw_done(&necp_client_tree_lock)
718 #define NECP_CLIENT_TREE_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_client_tree_lock, LCK_RW_ASSERT_HELD)
719
720 #define NECP_FLOW_TREE_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_flow_tree_lock)
721 #define NECP_FLOW_TREE_LOCK_SHARED() lck_rw_lock_shared(&necp_flow_tree_lock)
722 #define NECP_FLOW_TREE_UNLOCK() lck_rw_done(&necp_flow_tree_lock)
723 #define NECP_FLOW_TREE_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_flow_tree_lock, LCK_RW_ASSERT_HELD)
724
725 #define NECP_FD_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_fd_lock)
726 #define NECP_FD_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_fd_lock)
727 #define NECP_FD_LIST_UNLOCK() lck_rw_done(&necp_fd_lock)
728 #define NECP_FD_LIST_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_fd_lock, LCK_RW_ASSERT_HELD)
729
730 #define NECP_OBSERVER_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_observer_lock)
731 #define NECP_OBSERVER_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_observer_lock)
732 #define NECP_OBSERVER_LIST_UNLOCK() lck_rw_done(&necp_observer_lock)
733
734 // Locking Notes
735
736 // Take NECP_FD_LIST_LOCK when accessing or modifying the necp_fd_list
737 // Take NECP_CLIENT_TREE_LOCK when accessing or modifying the necp_client_global_tree
738 // Take NECP_FLOW_TREE_LOCK when accessing or modifying the necp_client_flow_global_tree
739 // Take NECP_STATS_LIST_LOCK when accessing or modifying the necp_collect_stats_flow_list
740 // Take NECP_FD_LOCK when accessing or modifying an necp_fd_data entry
741 // Take NECP_CLIENT_LOCK when accessing or modifying a single necp_client
742 // Take NECP_CLIENT_ROUTE_LOCK when accessing or modifying a client's route
743
744 // Precedence, where 1 is the first lock that must be taken
745 // 1. NECP_FD_LIST_LOCK
746 // 2. NECP_FD_LOCK (any)
747 // 3. NECP_CLIENT_TREE_LOCK
748 // 4. NECP_CLIENT_LOCK (any)
749 // 5. NECP_FLOW_TREE_LOCK
750 // 6. NECP_STATS_LIST_LOCK
751 // 7. NECP_CLIENT_ROUTE_LOCK (any)
752
753 static thread_call_t necp_client_update_tcall;
754 static uint32_t necp_update_all_clients_sched_cnt = 0;
755 static uint64_t necp_update_all_clients_sched_abstime = 0;
756 static LCK_RW_DECLARE_ATTR(necp_update_all_clients_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
757 #define NECP_UPDATE_ALL_CLIENTS_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_update_all_clients_lock)
758 #define NECP_UPDATE_ALL_CLIENTS_SHARED_TO_EXCLUSIVE() lck_rw_lock_shared_to_exclusive(&necp_update_all_clients_lock)
759 #define NECP_UPDATE_ALL_CLIENTS_SHARED() lck_rw_lock_shared(&necp_update_all_clients_lock)
760 #define NECP_UPDATE_ALL_CLIENTS_UNLOCK() lck_rw_done(&necp_update_all_clients_lock)
761
762 // Array of PIDs that will trigger in-process flow divert, protected by NECP_FD_LIST_LOCK
763 #define NECP_MAX_FLOW_DIVERT_NEEDED_PIDS 4
764 static pid_t necp_flow_divert_needed_pids[NECP_MAX_FLOW_DIVERT_NEEDED_PIDS];
765
766 #if SKYWALK
767 static thread_call_t necp_client_collect_stats_tcall;
768 static thread_call_t necp_close_empty_arenas_tcall;
769
770 static void necp_fd_insert_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai);
771 static void necp_fd_remove_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai);
772 static struct necp_arena_info *necp_fd_mredirect_stats_arena(struct necp_fd_data *fd_data, struct proc *proc);
773
774 static void necp_arena_info_retain(struct necp_arena_info *nai);
775 static void necp_arena_info_release(struct necp_arena_info *nai);
776 static struct necp_arena_info *necp_arena_info_alloc(void);
777 static void necp_arena_info_free(struct necp_arena_info *nai);
778
779 static int necp_arena_initialize(struct necp_fd_data *fd_data, bool locked);
780 static int necp_stats_initialize(struct necp_fd_data *fd_data, struct necp_client *client,
781 struct necp_client_flow_registration *flow_registration, struct necp_stats_bufreq *bufreq);
782 static int necp_arena_create(struct necp_fd_data *fd_data, size_t obj_size, size_t obj_cnt, struct proc *p);
783 static int necp_arena_stats_obj_alloc(struct necp_fd_data *fd_data, mach_vm_offset_t *off, struct necp_arena_info **stats_arena, void **kstats_kaddr, boolean_t cansleep);
784 static void necp_arena_stats_obj_free(struct necp_fd_data *fd_data, struct necp_arena_info *stats_arena, void **kstats_kaddr, mach_vm_address_t *ustats_uaddr);
785 static void necp_stats_arenas_destroy(struct necp_fd_data *fd_data, boolean_t closing);
786
787 static int necp_sysctl_arena_initialize(struct necp_fd_data *fd_data, bool locked);
788 static void necp_sysctl_arena_destroy(struct necp_fd_data *fd_data);
789 static void *necp_arena_sysctls_obj(struct necp_fd_data *fd_data, mach_vm_offset_t *off, size_t *size);
#endif /* SKYWALK */
791
792 static int necp_aop_offload_stats_initialize(struct necp_client_flow_registration *flow_registration, uuid_t netagent_uuid);
793 static void necp_aop_offload_stats_destroy(struct necp_client_flow *flow);
794
795 void necp_copy_inp_domain_info(struct inpcb *, struct socket *, nstat_domain_info *);
796 void necp_with_inp_domain_name(struct socket *so, void *ctx, void (*with_func)(char *domain_name __null_terminated, void *ctx));
797
#if __has_ptrcheck
static inline
__attribute__((always_inline)) __pure
struct necp_client_flow_stats * __indexable
necp_client_get_flow_stats(const struct necp_client_add_flow *req)
{
	// Bounds-safety shim: expose the trailing stats_requests array with an
	// explicit extent derived from stats_request_count.
	if (req != NULL) {
		return __unsafe_forge_bidi_indexable(struct necp_client_flow_stats *,
		    req->stats_requests,
		    sizeof(struct necp_client_flow_stats) * req->stats_request_count);
	}
	return NULL;
}
#else
#define necp_client_get_flow_stats(req) ((struct necp_client_flow_stats *)&(req)->stats_requests[0])
#endif
813
#if __has_ptrcheck
static inline
__attribute__((always_inline)) __pure
uint8_t * __bidi_indexable
signable_get_data(const struct necp_client_signable *signable, size_t data_length)
{
	// Bounds-safety shim: view signable_data as a byte buffer of
	// data_length bytes (caller supplies the trusted length).
	if (signable != NULL) {
		return __unsafe_forge_bidi_indexable(uint8_t *,
		    signable->signable_data, data_length);
	}
	return NULL;
}
#else
#define signable_get_data(signable, data_length) ((signable)->signable_data)
#endif
829
#if __has_ptrcheck
static inline
__attribute__((always_inline)) __pure
struct sockaddr * __single
flow_req_get_address(const struct necp_client_add_flow *req, size_t offset_of_address)
{
	// Bounds-safety shim: compute a sockaddr pointer at the given byte
	// offset inside the add-flow request.
	if (req == NULL) {
		return NULL;
	}
	uint8_t * __indexable base =
	    __unsafe_forge_bidi_indexable(uint8_t *, req, sizeof(struct necp_client_add_flow));
	return __unsafe_forge_single(struct sockaddr *, base + offset_of_address);
}
#else
#define flow_req_get_address(req, offset_of_address) ((struct sockaddr *)(((uint8_t *)req) + offset_of_address))
#endif
846
#if __has_ptrcheck
static inline
__attribute__((always_inline)) __pure
uint8_t * __single
flow_req_get_proto(const struct necp_client_add_flow *req, size_t offset_of_proto)
{
	// Bounds-safety shim: compute a pointer to the protocol byte at the
	// given offset inside the add-flow request.
	if (req == NULL) {
		return NULL;
	}
	uint8_t * __indexable base =
	    __unsafe_forge_bidi_indexable(uint8_t *, req, sizeof(struct necp_client_add_flow));
	return __unsafe_forge_single(uint8_t *, base + offset_of_proto);
}
#else
#define flow_req_get_proto(req, offset_of_proto) ((uint8_t *)(((uint8_t *)req) + offset_of_proto))
#endif
863
#if __has_ptrcheck
static inline
__attribute__((always_inline)) __pure
uint8_t * __bidi_indexable
necp_update_get_tlv_buffer(const struct necp_client_observer_update *update, size_t buffer_size)
{
	// Bounds-safety shim: view the observer update's trailing TLV bytes as
	// a buffer of buffer_size bytes.
	if (update != NULL) {
		return __unsafe_forge_bidi_indexable(uint8_t *,
		    update->tlv_buffer, buffer_size);
	}
	return NULL;
}
#else
#define necp_update_get_tlv_buffer(update, buffer_size) ((update)->tlv_buffer)
#endif
879
#if __has_ptrcheck
static inline
__attribute__((always_inline)) __pure
char * __bidi_indexable
necp_answer_get_hostname(const struct necp_client_host_resolver_answer *answer, size_t hostname_length)
{
	// Bounds-safety shim: view the resolver answer's hostname as a char
	// buffer of hostname_length bytes.
	if (answer != NULL) {
		return __unsafe_forge_bidi_indexable(char *,
		    answer->hostname, hostname_length);
	}
	return NULL;
}
#else
#define necp_answer_get_hostname(answer, hostname_length) ((answer)->hostname)
#endif
895
static void
necp_lock_socket_attributes(void)
{
	// Serialize access to per-socket NECP attribute state guarded by
	// necp_socket_attr_lock (declared elsewhere in this file).
	lck_mtx_lock(&necp_socket_attr_lock);
}
901
static void
necp_unlock_socket_attributes(void)
{
	// Counterpart of necp_lock_socket_attributes().
	lck_mtx_unlock(&necp_socket_attr_lock);
}
907
908 /// NECP file descriptor functions
909
static void
necp_fd_notify(struct necp_fd_data *fd_data, bool locked)
{
	// Wake select()/poll() waiters and kqueue knotes attached to this NECP
	// fd. 'locked' tells us whether the caller already holds the fd lock,
	// so this can be called from either side of the lock.
	struct selinfo *si = &fd_data->si;

	if (!locked) {
		NECP_FD_LOCK(fd_data);
	}

	selwakeup(si);

	// use a non-zero hint to tell the notification from the
	// call done in kqueue_scan() which uses 0
	KNOTE(&si->si_note, 1); // notification

	if (!locked) {
		NECP_FD_UNLOCK(fd_data);
	}
}
929
930 static inline bool
necp_client_has_unread_flows(struct necp_client * client)931 necp_client_has_unread_flows(struct necp_client *client)
932 {
933 NECP_CLIENT_ASSERT_LOCKED(client);
934 struct necp_client_flow_registration *flow_registration = NULL;
935 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
936 if (!flow_registration->flow_result_read) {
937 return true;
938 }
939 }
940 return false;
941 }
942
943 static int
necp_fd_poll(struct necp_fd_data * fd_data,int events,void * wql,struct proc * p,int is_kevent)944 necp_fd_poll(struct necp_fd_data *fd_data, int events, void *wql, struct proc *p, int is_kevent)
945 {
946 #pragma unused(wql, p, is_kevent)
947 u_int revents = 0;
948
949 u_int want_rx = events & (POLLIN | POLLRDNORM);
950 if (want_rx) {
951 if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
952 // Push-mode observers are readable when they have a new update
953 if (!TAILQ_EMPTY(&fd_data->update_list)) {
954 revents |= want_rx;
955 }
956 } else {
957 // Standard fds are readable when some client is unread
958 struct necp_client *client = NULL;
959 bool has_unread_clients = FALSE;
960 RB_FOREACH(client, _necp_client_tree, &fd_data->clients) {
961 NECP_CLIENT_LOCK(client);
962 if (!client->result_read || !client->group_members_read || necp_client_has_unread_flows(client)) {
963 has_unread_clients = TRUE;
964 }
965 NECP_CLIENT_UNLOCK(client);
966 if (has_unread_clients) {
967 break;
968 }
969 }
970
971 if (has_unread_clients || fd_data->request_in_process_flow_divert) {
972 revents |= want_rx;
973 }
974 }
975 }
976
977 return revents;
978 }
979
980 static inline void
necp_generate_client_id(uuid_t client_id,bool is_flow)981 necp_generate_client_id(uuid_t client_id, bool is_flow)
982 {
983 uuid_generate_random(client_id);
984
985 if (is_flow) {
986 client_id[9] |= 0x01;
987 } else {
988 client_id[9] &= ~0x01;
989 }
990 }
991
992 static inline bool
necp_client_id_is_flow(uuid_t client_id)993 necp_client_id_is_flow(uuid_t client_id)
994 {
995 return client_id[9] & 0x01;
996 }
997
static struct necp_client *
necp_find_client_and_lock(uuid_t client_id)
{
	// Resolve client_id against the global trees and return the owning
	// client locked, or NULL if not found. client_id may name either a
	// client directly or a flow registration (distinguished by the tag
	// bit, see necp_client_id_is_flow()). Caller must hold the client
	// tree lock (shared or exclusive) per the file's lock-ordering notes.
	NECP_CLIENT_TREE_ASSERT_LOCKED();

	struct necp_client *client = NULL;

	if (necp_client_id_is_flow(client_id)) {
		// Flow IDs live in the global flow tree; hold it shared only
		// long enough to map the registration back to its client.
		NECP_FLOW_TREE_LOCK_SHARED();
		struct necp_client_flow_registration find;
		uuid_copy(find.registration_id, client_id);
		struct necp_client_flow_registration *flow = RB_FIND(_necp_client_flow_global_tree, &necp_client_flow_global_tree, &find);
		if (flow != NULL) {
			client = flow->client;
		}
		NECP_FLOW_TREE_UNLOCK();
	} else {
		struct necp_client find;
		uuid_copy(find.client_id, client_id);
		client = RB_FIND(_necp_client_global_tree, &necp_client_global_tree, &find);
	}

	if (client != NULL) {
		NECP_CLIENT_LOCK(client);
	}

	return client;
}
1026
1027 static struct necp_client_flow_registration *
necp_client_find_flow(struct necp_client * client,uuid_t flow_id)1028 necp_client_find_flow(struct necp_client *client, uuid_t flow_id)
1029 {
1030 NECP_CLIENT_ASSERT_LOCKED(client);
1031 struct necp_client_flow_registration *flow = NULL;
1032
1033 if (necp_client_id_is_flow(flow_id)) {
1034 struct necp_client_flow_registration find;
1035 uuid_copy(find.registration_id, flow_id);
1036 flow = RB_FIND(_necp_client_flow_tree, &client->flow_registrations, &find);
1037 } else {
1038 flow = RB_ROOT(&client->flow_registrations);
1039 }
1040
1041 return flow;
1042 }
1043
1044 static struct necp_client *
necp_client_fd_find_client_unlocked(struct necp_fd_data * client_fd,uuid_t client_id)1045 necp_client_fd_find_client_unlocked(struct necp_fd_data *client_fd, uuid_t client_id)
1046 {
1047 NECP_FD_ASSERT_LOCKED(client_fd);
1048 struct necp_client *client = NULL;
1049
1050 if (necp_client_id_is_flow(client_id)) {
1051 struct necp_client_flow_registration find;
1052 uuid_copy(find.registration_id, client_id);
1053 struct necp_client_flow_registration *flow = RB_FIND(_necp_fd_flow_tree, &client_fd->flows, &find);
1054 if (flow != NULL) {
1055 client = flow->client;
1056 }
1057 } else {
1058 struct necp_client find;
1059 uuid_copy(find.client_id, client_id);
1060 client = RB_FIND(_necp_client_tree, &client_fd->clients, &find);
1061 }
1062
1063 return client;
1064 }
1065
1066 static struct necp_client *
necp_client_fd_find_client_and_lock(struct necp_fd_data * client_fd,uuid_t client_id)1067 necp_client_fd_find_client_and_lock(struct necp_fd_data *client_fd, uuid_t client_id)
1068 {
1069 struct necp_client *client = necp_client_fd_find_client_unlocked(client_fd, client_id);
1070 if (client != NULL) {
1071 NECP_CLIENT_LOCK(client);
1072 }
1073
1074 return client;
1075 }
1076
// RB-tree comparator: total order over clients by client_id UUID bytes.
static inline int
necp_client_id_cmp(struct necp_client *client0, struct necp_client *client1)
{
	return uuid_compare(client0->client_id, client1->client_id);
}
1082
// RB-tree comparator: total order over flow registrations by registration_id.
static inline int
necp_client_flow_id_cmp(struct necp_client_flow_registration *flow0, struct necp_client_flow_registration *flow1)
{
	return uuid_compare(flow0->registration_id, flow1->registration_id);
}
1088
static int
necpop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
{
#pragma unused(fp, which, wql, ctx)
	// select()/poll() is effectively disabled for NECP fds: this returns 0
	// ("not ready") unconditionally. Everything below the early return is
	// unreachable, retained from the previous implementation.
	// NOTE(review): consider deleting the dead code or gating it behind a
	// feature flag so the intent is explicit.
	return 0;
	struct necp_fd_data *fd_data = NULL;
	int revents = 0;
	int events = 0;
	proc_t procp;

	fd_data = (struct necp_fd_data *)fp_get_data(fp);
	if (fd_data == NULL) {
		return 0;
	}

	procp = vfs_context_proc(ctx);

	switch (which) {
	case FREAD: {
		events = POLLIN;
		break;
	}

	default: {
		return 1;
	}
	}

	NECP_FD_LOCK(fd_data);
	revents = necp_fd_poll(fd_data, events, wql, procp, 0);
	NECP_FD_UNLOCK(fd_data);

	return (events & revents) ? 1 : 0;
}
1123
1124 static void
necp_fd_knrdetach(struct knote * kn)1125 necp_fd_knrdetach(struct knote *kn)
1126 {
1127 struct necp_fd_data *fd_data = (struct necp_fd_data *)knote_kn_hook_get_raw(kn);
1128 struct selinfo *si = &fd_data->si;
1129
1130 NECP_FD_LOCK(fd_data);
1131 KNOTE_DETACH(&si->si_note, kn);
1132 NECP_FD_UNLOCK(fd_data);
1133 }
1134
static int
necp_fd_knread(struct knote *kn, long hint)
{
#pragma unused(kn, hint)
	// f_event stub: readiness is decided in f_process/f_touch via
	// necp_fd_poll(), so always report "ready" here.
	return 1; /* assume we are ready */
}
1141
1142 static int
necp_fd_knrprocess(struct knote * kn,struct kevent_qos_s * kev)1143 necp_fd_knrprocess(struct knote *kn, struct kevent_qos_s *kev)
1144 {
1145 struct necp_fd_data *fd_data;
1146 int revents;
1147 int res;
1148
1149 fd_data = (struct necp_fd_data *)knote_kn_hook_get_raw(kn);
1150
1151 NECP_FD_LOCK(fd_data);
1152 revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
1153 res = ((revents & POLLIN) != 0);
1154 if (res) {
1155 knote_fill_kevent(kn, kev, 0);
1156 }
1157 NECP_FD_UNLOCK(fd_data);
1158 return res;
1159 }
1160
1161 static int
necp_fd_knrtouch(struct knote * kn,struct kevent_qos_s * kev)1162 necp_fd_knrtouch(struct knote *kn, struct kevent_qos_s *kev)
1163 {
1164 #pragma unused(kev)
1165 struct necp_fd_data *fd_data;
1166 int revents;
1167
1168 fd_data = (struct necp_fd_data *)knote_kn_hook_get_raw(kn);
1169
1170 NECP_FD_LOCK(fd_data);
1171 revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
1172 NECP_FD_UNLOCK(fd_data);
1173
1174 return (revents & POLLIN) != 0;
1175 }
1176
// Filter operations backing EVFILT_READ kqueue registrations on NECP fds
// (attached in necpop_kqfilter()).
SECURITY_READ_ONLY_EARLY(struct filterops) necp_fd_rfiltops = {
	.f_isfd = 1, // operates on file descriptors
	.f_detach = necp_fd_knrdetach,
	.f_event = necp_fd_knread,
	.f_touch = necp_fd_knrtouch,
	.f_process = necp_fd_knrprocess,
};
1184
1185 static int
necpop_kqfilter(struct fileproc * fp,struct knote * kn,__unused struct kevent_qos_s * kev)1186 necpop_kqfilter(struct fileproc *fp, struct knote *kn,
1187 __unused struct kevent_qos_s *kev)
1188 {
1189 struct necp_fd_data *fd_data = NULL;
1190 int revents;
1191
1192 if (kn->kn_filter != EVFILT_READ) {
1193 NECPLOG(LOG_ERR, "bad filter request %d", kn->kn_filter);
1194 knote_set_error(kn, EINVAL);
1195 return 0;
1196 }
1197
1198 fd_data = (struct necp_fd_data *)fp_get_data(fp);
1199 if (fd_data == NULL) {
1200 NECPLOG0(LOG_ERR, "No channel for kqfilter");
1201 knote_set_error(kn, ENOENT);
1202 return 0;
1203 }
1204
1205 NECP_FD_LOCK(fd_data);
1206 kn->kn_filtid = EVFILTID_NECP_FD;
1207 knote_kn_hook_set_raw(kn, fd_data);
1208 KNOTE_ATTACH(&fd_data->si.si_note, kn);
1209
1210 revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
1211
1212 NECP_FD_UNLOCK(fd_data);
1213
1214 return (revents & POLLIN) != 0;
1215 }
1216
#define INTERFACE_FLAGS_SHIFT 32
#define INTERFACE_FLAGS_MASK 0xffffffff
#define INTERFACE_INDEX_SHIFT 0
#define INTERFACE_INDEX_MASK 0xffffffff

// Pack an interface index (low 32 bits) and its flags (high 32 bits) into one
// 64-bit word so both can be read/written atomically together.
static uint64_t
combine_interface_details(uint32_t interface_index, uint32_t interface_flags)
{
	uint64_t combined = ((uint64_t)interface_flags & INTERFACE_FLAGS_MASK) << INTERFACE_FLAGS_SHIFT;
	combined |= ((uint64_t)interface_index & INTERFACE_INDEX_MASK) << INTERFACE_INDEX_SHIFT;
	return combined;
}
1228
1229 #if SKYWALK
1230
1231 static void
split_interface_details(uint64_t combined_details,uint32_t * interface_index,uint32_t * interface_flags)1232 split_interface_details(uint64_t combined_details, uint32_t *interface_index, uint32_t *interface_flags)
1233 {
1234 *interface_index = (combined_details >> INTERFACE_INDEX_SHIFT) & INTERFACE_INDEX_MASK;
1235 *interface_flags = (combined_details >> INTERFACE_FLAGS_SHIFT) & INTERFACE_FLAGS_MASK;
1236 }
1237
1238 static void
necp_flow_save_current_interface_details(struct necp_client_flow_registration * flow_registration)1239 necp_flow_save_current_interface_details(struct necp_client_flow_registration *flow_registration)
1240 {
1241 struct necp_client_flow *flow = NULL;
1242 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
1243 if (flow->nexus) {
1244 uint64_t combined_details = combine_interface_details(flow->interface_index, flow->interface_flags);
1245 os_atomic_store(&flow_registration->last_interface_details, combined_details, release);
1246 break;
1247 }
1248 }
1249 }
1250
static void
necp_client_collect_interface_stats(struct necp_client_flow_registration *flow_registration, struct ifnet_stats_per_flow *ifs)
{
	// Fold the userspace-published per-flow interface stats (untrusted)
	// into the trusted per-ifnet kernel stats.
	struct necp_client_flow *flow = NULL;

	// NOTE(review): '||' skips folding when EITHER direction shows zero
	// packets -- confirm this is the intended "nothing published" check
	// rather than '&&'.
	if (ifs == NULL || ifs->txpackets == 0 || ifs->rxpackets == 0) {
		return; // App might have crashed without publishing ifs
	}

	// Do malicious stats detection here

	// Fold userspace stats into (trusted) kernel stats (stored in ifp).
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		uint32_t if_idx = flow->interface_index;
		ifnet_t ifp = NULL;
		// Resolve the interface index under the ifnet head lock so the
		// ifindex2ifnet lookup stays valid for the update call.
		ifnet_head_lock_shared();
		if (if_idx != IFSCOPE_NONE && if_idx <= (uint32_t)if_index) {
			ifp = ifindex2ifnet[if_idx];
			ifnet_update_stats_per_flow(ifs, ifp);
		}
		ifnet_head_done();

		// Currently there is only one flow that uses the shared necp
		// stats region, so this loop should exit after updating an ifp
		break;
	}
}
1278
1279 static void
necp_client_collect_aop_flow_stats(struct necp_client_flow_registration * flow_registration)1280 necp_client_collect_aop_flow_stats(struct necp_client_flow_registration *flow_registration)
1281 {
1282 struct aop_flow_stats flow_stats = {};
1283 struct tcp_info *tcpi = &flow_stats.transport.tcp_stats.tcp_info;
1284 uint32_t aop_flow_count = 0;
1285 int err = 0;
1286
1287 ASSERT(flow_registration->aop_offload);
1288 struct necp_all_kstats *kstats = (struct necp_all_kstats *)flow_registration->kstats_kaddr;
1289 if (kstats == NULL) {
1290 return;
1291 }
1292
1293 struct necp_stat_counts *prev_tcpstats = &(((struct necp_tcp_stats *)&kstats->necp_stats_comm)->necp_tcp_counts);
1294 struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
1295
1296 struct necp_client_flow *flow = NULL;
1297 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
1298 aop_flow_count++;
1299 ASSERT(flow->aop_offload && aop_flow_count == 1);
1300 if (flow->flow_tag > 0 && flow->aop_stat_index_valid) {
1301 err = net_aop_get_flow_stats(flow->stats_index, &flow_stats);
1302 if (err != 0) {
1303 NECPLOG(LOG_ERR, "failed to get aop flow stats "
1304 "for flow id %u with error %d", flow->flow_tag, err);
1305 continue;
1306 }
1307
1308 if (__improbable(flow->flow_tag != flow_stats.flow_id)) {
1309 NECPLOG(LOG_NOTICE, "aop flow stats, flow tag 0x%x != 0x%x",
1310 flow->flow_tag, flow_stats.flow_id);
1311 continue;
1312 }
1313
1314 if ((prev_tcpstats->necp_stat_rxpackets == tcpi->tcpi_rxpackets) &&
1315 prev_tcpstats->necp_stat_txpackets == tcpi->tcpi_txpackets) {
1316 continue;
1317 }
1318
1319 uint32_t d_rxpackets = tcpi->tcpi_rxpackets - prev_tcpstats->necp_stat_rxpackets;
1320 prev_tcpstats->necp_stat_rxpackets += d_rxpackets;
1321
1322 uint32_t d_txpackets = tcpi->tcpi_txpackets - prev_tcpstats->necp_stat_txpackets;
1323 prev_tcpstats->necp_stat_txpackets += d_txpackets;
1324
1325 uint32_t d_rxbytes = tcpi->tcpi_rxbytes - prev_tcpstats->necp_stat_rxbytes;
1326 prev_tcpstats->necp_stat_rxbytes += d_rxbytes;
1327
1328 uint32_t d_txbytes = tcpi->tcpi_txbytes - prev_tcpstats->necp_stat_txbytes;
1329 prev_tcpstats->necp_stat_txbytes += d_txbytes;
1330
1331 uint32_t d_rxduplicatebytes = tcpi->tcpi_rxduplicatebytes - prev_tcpstats->necp_stat_rxduplicatebytes;
1332 prev_tcpstats->necp_stat_rxduplicatebytes += d_rxduplicatebytes;
1333
1334 uint32_t d_rxoutoforderbytes = tcpi->tcpi_rxoutoforderbytes - prev_tcpstats->necp_stat_rxoutoforderbytes;
1335 prev_tcpstats->necp_stat_rxoutoforderbytes += d_rxoutoforderbytes;
1336
1337 uint32_t d_txretransmit = tcpi->tcpi_txretransmitbytes - prev_tcpstats->necp_stat_txretransmit;
1338 prev_tcpstats->necp_stat_txretransmit += d_txretransmit;
1339
1340 uint32_t d_connectattempts = prev_tcpstats->necp_stat_connectattempts - (tcpi->tcpi_state >= TCPS_SYN_SENT ? 1 : 0);
1341 prev_tcpstats->necp_stat_connectattempts += d_connectattempts;
1342
1343 uint32_t d_connectsuccesses = prev_tcpstats->necp_stat_connectsuccesses - (tcpi->tcpi_state >= TCPS_ESTABLISHED ? 1 : 0);
1344 prev_tcpstats->necp_stat_connectsuccesses += d_connectsuccesses;
1345
1346 prev_tcpstats->necp_stat_avg_rtt = tcpi->tcpi_srtt;
1347 prev_tcpstats->necp_stat_var_rtt = tcpi->tcpi_rttvar;
1348
1349 /* Update route stats */
1350 NECP_CLIENT_ROUTE_LOCK(flow_registration->client);
1351 struct rtentry *route = flow_registration->client->current_route;
1352 if (route != NULL) {
1353 nstat_route_update(route, d_connectattempts,
1354 d_connectsuccesses, d_rxpackets, d_rxbytes,
1355 d_rxduplicatebytes, d_rxoutoforderbytes,
1356 d_txpackets, d_txbytes, d_txretransmit,
1357 prev_tcpstats->necp_stat_avg_rtt, prev_tcpstats->necp_stat_var_rtt);
1358 }
1359 NECP_CLIENT_ROUTE_UNLOCK(flow_registration->client);
1360
1361 /* Update nexus flow stats */
1362 if (sf != NULL) {
1363 sf->sf_ibytes = flow_stats.rxbytes;
1364 sf->sf_obytes = flow_stats.txbytes;
1365 sf->sf_ipackets = flow_stats.rxpkts;
1366 sf->sf_opackets = flow_stats.txpkts;
1367 sf->sf_lseq = tcpi->tcpi_snd_nxt - 1;
1368 sf->sf_rseq = tcpi->tcpi_rcv_nxt - 1;
1369 sf->sf_lrtt = tcpi->tcpi_srtt;
1370 sf->sf_rrtt = tcpi->tcpi_rcv_srtt;
1371 sf->sf_ltrack.sft_state = tcpi->tcpi_state;
1372 sf->sf_lwscale = tcpi->tcpi_snd_wscale;
1373 sf->sf_rwscale = tcpi->tcpi_rcv_wscale;
1374
1375 memcpy(&sf->sf_activity, &flow_stats.activity_bitmap,
1376 sizeof(sf->sf_activity));
1377 }
1378 }
1379 }
1380 }
1381
static void
necp_client_collect_nexus_flow_stats(struct necp_client_flow_registration *flow_registration)
{
	// Fold the userspace-updated (untrusted) TCP stats snapshot into the
	// kernel-held copy, then push the deltas into the route stats.
	ASSERT(!flow_registration->aop_offload);

	struct necp_all_kstats *kstats = (struct necp_all_kstats *)flow_registration->kstats_kaddr;
	if (kstats == NULL) {
		return;
	}

	// Grab userspace stats delta (untrusted).
	struct necp_tcp_stats *curr_tcpstats = (struct necp_tcp_stats *)kstats->necp_stats_ustats;
	struct necp_tcp_stats *prev_tcpstats = (struct necp_tcp_stats *)&kstats->necp_stats_comm;
	// diff_n_update(field): delta = current - previous, then advance the
	// previous counter so the next collection only sees new activity.
#define diff_n_update(field) \
	u_int32_t d_##field = (curr_tcpstats->necp_tcp_counts.necp_stat_##field - prev_tcpstats->necp_tcp_counts.necp_stat_##field); \
	prev_tcpstats->necp_tcp_counts.necp_stat_##field += d_##field;
	diff_n_update(rxpackets);
	diff_n_update(txpackets);
	if (d_rxpackets == 0 && d_txpackets == 0) {
		return; // no activity since last collection, stop here
	}
	diff_n_update(rxbytes);
	diff_n_update(txbytes);
	diff_n_update(rxduplicatebytes);
	diff_n_update(rxoutoforderbytes);
	diff_n_update(txretransmit);
	diff_n_update(connectattempts);
	diff_n_update(connectsuccesses);
	uint32_t rtt = prev_tcpstats->necp_tcp_counts.necp_stat_avg_rtt = curr_tcpstats->necp_tcp_counts.necp_stat_avg_rtt;
	uint32_t rtt_var = prev_tcpstats->necp_tcp_counts.necp_stat_var_rtt = curr_tcpstats->necp_tcp_counts.necp_stat_var_rtt;
#undef diff_n_update

	// Do malicious stats detection with the deltas here.
	// RTT check (not necessarily attacks, might just be not measured since we report stats async periodically).
	if (rtt < necp_client_stats_rtt_floor || rtt > necp_client_stats_rtt_ceiling) {
		rtt = rtt_var = 0; // nstat_route_update to skip 0 rtt
	}

	// Fold userspace stats into (trusted) kernel stats (stored in route).
	NECP_CLIENT_ROUTE_LOCK(flow_registration->client);
	struct rtentry *route = flow_registration->client->current_route;
	if (route != NULL) {
		nstat_route_update(route, d_connectattempts, d_connectsuccesses, d_rxpackets, d_rxbytes, d_rxduplicatebytes,
		    d_rxoutoforderbytes, d_txpackets, d_txbytes, d_txretransmit, rtt, rtt_var);
	}
	NECP_CLIENT_ROUTE_UNLOCK(flow_registration->client);
}
1429
1430 static void
necp_client_collect_stats(struct necp_client_flow_registration * flow_registration)1431 necp_client_collect_stats(struct necp_client_flow_registration *flow_registration)
1432 {
1433 if (__probable(!flow_registration->aop_offload)) {
1434 necp_client_collect_nexus_flow_stats(flow_registration);
1435 } else {
1436 necp_client_collect_aop_flow_stats(flow_registration);
1437 }
1438 }
1439
1440 // This is called from various places; "closing" here implies the client being closed/removed if true, otherwise being
1441 // defunct. In the former, we expect the caller to not hold the lock; for the latter it must have acquired it.
static void
necp_destroy_flow_stats(struct necp_fd_data *fd_data,
    struct necp_client_flow_registration *flow_registration,
    struct ifnet_stats_per_flow *flow_ifnet_stats,
    boolean_t closing)
{
	// Tear down a flow registration's stats state. 'closing' means the
	// client is being closed/removed and the caller does NOT hold the
	// client lock; otherwise (defunct path) the caller must hold it.
	NECP_FD_ASSERT_LOCKED(fd_data);

	struct necp_client *client = flow_registration->client;

	if (closing) {
		NECP_CLIENT_ASSERT_UNLOCKED(client);
		NECP_CLIENT_LOCK(client);
	} else {
		NECP_CLIENT_ASSERT_LOCKED(client);
	}

	// the interface stats are independent of the flow stats, hence we check here
	if (flow_ifnet_stats != NULL) {
		necp_client_collect_interface_stats(flow_registration, flow_ifnet_stats);
	}

	if (flow_registration->kstats_kaddr != NULL) {
		// Final stats collection, then: remove from the global stats
		// list (dropping the reference that list held on the client),
		// close the ntstat handler, and free the shared stats object.
		NECP_STATS_LIST_LOCK_EXCLUSIVE();
		necp_client_collect_stats(flow_registration);
		const bool destroyed = necp_client_release_locked(client); // Drop the reference held by the stats list
		// The caller's reference must keep the client alive here.
		ASSERT(!destroyed);
		(void)destroyed;
		LIST_REMOVE(flow_registration, collect_stats_chain);
		NECP_STATS_LIST_UNLOCK();
		if (flow_registration->stats_handler_context != NULL) {
			ntstat_userland_stats_close(flow_registration->stats_handler_context);
			flow_registration->stats_handler_context = NULL;
		}
		necp_arena_stats_obj_free(fd_data, flow_registration->stats_arena, &flow_registration->kstats_kaddr, &flow_registration->ustats_uaddr);
		ASSERT(flow_registration->kstats_kaddr == NULL);
		ASSERT(flow_registration->ustats_uaddr == 0);
	}

	if (flow_registration->nexus_stats != NULL) {
		flow_stats_release(flow_registration->nexus_stats);
		flow_registration->nexus_stats = NULL;
	}

	if (closing) {
		NECP_CLIENT_UNLOCK(client);
	}
}
1490
1491 static void
necp_schedule_collect_stats_clients(bool recur)1492 necp_schedule_collect_stats_clients(bool recur)
1493 {
1494 if (necp_client_collect_stats_tcall == NULL ||
1495 (!recur && thread_call_isactive(necp_client_collect_stats_tcall))) {
1496 return;
1497 }
1498
1499 uint64_t deadline = 0;
1500 uint64_t leeway = 0;
1501 clock_interval_to_deadline(necp_collect_stats_timeout_microseconds, NSEC_PER_USEC, &deadline);
1502 clock_interval_to_absolutetime_interval(necp_collect_stats_timeout_leeway_microseconds, NSEC_PER_USEC, &leeway);
1503
1504 thread_call_enter_delayed_with_leeway(necp_client_collect_stats_tcall, NULL,
1505 deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
1506 }
1507
1508 static void
necp_collect_stats_client_callout(__unused thread_call_param_t dummy,__unused thread_call_param_t arg)1509 necp_collect_stats_client_callout(__unused thread_call_param_t dummy,
1510 __unused thread_call_param_t arg)
1511 {
1512 struct necp_client_flow_registration *flow_registration;
1513
1514 net_update_uptime();
1515 NECP_STATS_LIST_LOCK_SHARED();
1516 if (LIST_EMPTY(&necp_collect_stats_flow_list)) {
1517 NECP_STATS_LIST_UNLOCK();
1518 return;
1519 }
1520 LIST_FOREACH(flow_registration, &necp_collect_stats_flow_list, collect_stats_chain) {
1521 // Collecting stats should be cheap (atomic increments)
1522 // Values like flow_registration->kstats_kaddr are guaranteed to be valid
1523 // as long as the flow_registration is in the stats list
1524 necp_client_collect_stats(flow_registration);
1525 }
1526 NECP_STATS_LIST_UNLOCK();
1527
1528 necp_schedule_collect_stats_clients(TRUE); // recurring collection
1529 }
1530
#endif /* SKYWALK */
1532
/*
 * Mark a flow registration defunct. For each nexus-backed flow with a
 * non-null agent UUID, optionally append a necp_flow_defunct record to
 * defunct_list so the caller can message the nexus agents after locks
 * are dropped. Caller holds the client lock.
 */
static void
necp_defunct_flow_registration(struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    struct _necp_flow_defunct_list *defunct_list)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	if (!flow_registration->defunct) {
		bool needs_defunct = false;
		struct necp_client_flow *search_flow = NULL;
		LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
			if (search_flow->nexus &&
			    !uuid_is_null(search_flow->u.nexus_agent)) {
				// Save defunct values for the nexus
				if (defunct_list != NULL) {
					// Sleeping alloc won't fail; copy only what's necessary
					struct necp_flow_defunct *flow_defunct = kalloc_type(struct necp_flow_defunct,
					    Z_WAITOK | Z_ZERO);
					uuid_copy(flow_defunct->nexus_agent, search_flow->u.nexus_agent);
					// Flow id depends on whether this registration messages
					// agents with the client id or its own registration id.
					uuid_copy(flow_defunct->flow_id, ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
					    client->client_id :
					    flow_registration->registration_id));
					flow_defunct->proc_pid = client->proc_pid;
					flow_defunct->agent_handle = client->agent_handle;
					flow_defunct->flags = flow_registration->flags;
#if SKYWALK
					// Capture the QUIC session-resumption token (if stats
					// were shared) so the close message can carry it.
					if (flow_registration->kstats_kaddr != NULL) {
						struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
						struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
						if (quicstats != NULL) {
							memcpy(flow_defunct->close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(flow_defunct->close_parameters.u.close_token));
							flow_defunct->has_close_parameters = true;
						}
					}
#endif /* SKYWALK */
					// Add to the list provided by caller
					LIST_INSERT_HEAD(defunct_list, flow_defunct, chain);
				}

				needs_defunct = true;
			}
		}

		if (needs_defunct) {
#if SKYWALK
			// Close the stats early
			if (flow_registration->stats_handler_context != NULL) {
				ntstat_userland_stats_event(flow_registration->stats_handler_context,
				    NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT);
			}
#endif /* SKYWALK */

			// Only set defunct if there was some assigned flow
			flow_registration->defunct = true;
		}
	}
}
1590
1591 static void
necp_defunct_client_for_policy(struct necp_client * client,struct _necp_flow_defunct_list * defunct_list)1592 necp_defunct_client_for_policy(struct necp_client *client,
1593 struct _necp_flow_defunct_list *defunct_list)
1594 {
1595 NECP_CLIENT_ASSERT_LOCKED(client);
1596
1597 struct necp_client_flow_registration *flow_registration = NULL;
1598 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
1599 necp_defunct_flow_registration(client, flow_registration, defunct_list);
1600 }
1601 }
1602
/*
 * Final teardown of a client after its last reference is dropped: free
 * all remaining client-owned buffers, destroy the locks, and free the
 * client structure itself. The client must be unlocked (its locks are
 * destroyed here).
 */
static void
necp_client_free(struct necp_client *client)
{
	NECP_CLIENT_ASSERT_UNLOCKED(client);

	// extra_interface_options is lazily allocated with this fixed size
	// (see necp_client_add_interface_option_if_needed)
	kfree_data(client->extra_interface_options,
	    sizeof(struct necp_client_interface_option) * NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT);
	client->extra_interface_options = NULL;

	kfree_data_sized_by(client->parameters, client->parameters_length);
	kfree_data_counted_by(client->assigned_group_members, client->assigned_group_members_length);

	lck_mtx_destroy(&client->route_lock, &necp_fd_mtx_grp);
	lck_mtx_destroy(&client->lock, &necp_fd_mtx_grp);

	kfree_type(struct necp_client, client);
}
1620
1621 static void
necp_client_retain_locked(struct necp_client * client)1622 necp_client_retain_locked(struct necp_client *client)
1623 {
1624 NECP_CLIENT_ASSERT_LOCKED(client);
1625
1626 os_ref_retain_locked(&client->reference_count);
1627 }
1628
/*
 * Take an additional reference on a client, acquiring its lock around
 * the refcount bump.
 */
static void
necp_client_retain(struct necp_client *client)
{
	NECP_CLIENT_LOCK(client);
	necp_client_retain_locked(client);
	NECP_CLIENT_UNLOCK(client);
}
1636
1637 static bool
necp_client_release_locked(struct necp_client * client)1638 necp_client_release_locked(struct necp_client *client)
1639 {
1640 NECP_CLIENT_ASSERT_LOCKED(client);
1641
1642 os_ref_count_t count = os_ref_release_locked(&client->reference_count);
1643 if (count == 0) {
1644 NECP_CLIENT_UNLOCK(client);
1645 necp_client_free(client);
1646 }
1647
1648 return count == 0;
1649 }
1650
1651 static bool
necp_client_release(struct necp_client * client)1652 necp_client_release(struct necp_client *client)
1653 {
1654 bool last_ref;
1655
1656 NECP_CLIENT_LOCK(client);
1657 if (!(last_ref = necp_client_release_locked(client))) {
1658 NECP_CLIENT_UNLOCK(client);
1659 }
1660
1661 return last_ref;
1662 }
1663
1664 static struct necp_client_update *
necp_client_update_alloc(const void * __sized_by (length)data,size_t length)1665 necp_client_update_alloc(const void * __sized_by(length)data, size_t length)
1666 {
1667 struct necp_client_update *client_update;
1668 struct necp_client_observer_update *buffer;
1669 size_t alloc_size;
1670
1671 if (os_add_overflow(length, sizeof(*buffer), &alloc_size)) {
1672 return NULL;
1673 }
1674 buffer = kalloc_data(alloc_size, Z_WAITOK);
1675 if (buffer == NULL) {
1676 return NULL;
1677 }
1678
1679 client_update = kalloc_type(struct necp_client_update,
1680 Z_WAITOK | Z_ZERO | Z_NOFAIL);
1681 client_update->update_length = alloc_size;
1682 client_update->update = buffer;
1683 memcpy(necp_update_get_tlv_buffer(buffer, alloc_size), data, length);
1684 return client_update;
1685 }
1686
/*
 * Free a queued observer update message and its TLV payload buffer
 * (counterpart of necp_client_update_alloc).
 */
static void
necp_client_update_free(struct necp_client_update *client_update)
{
	kfree_data_sized_by(client_update->update, client_update->update_length);
	kfree_type(struct necp_client_update, client_update);
}
1693
1694 static void
necp_client_update_observer_add_internal(struct necp_fd_data * observer_fd,struct necp_client * client)1695 necp_client_update_observer_add_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1696 {
1697 struct necp_client_update *client_update;
1698
1699 NECP_FD_LOCK(observer_fd);
1700
1701 if (observer_fd->update_count >= necp_observer_message_limit) {
1702 NECP_FD_UNLOCK(observer_fd);
1703 return;
1704 }
1705
1706 client_update = necp_client_update_alloc(client->parameters, client->parameters_length);
1707 if (client_update != NULL) {
1708 uuid_copy(client_update->client_id, client->client_id);
1709 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_PARAMETERS;
1710 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1711 observer_fd->update_count++;
1712
1713 necp_fd_notify(observer_fd, true);
1714 }
1715
1716 NECP_FD_UNLOCK(observer_fd);
1717 }
1718
1719 static void
necp_client_update_observer_update_internal(struct necp_fd_data * observer_fd,struct necp_client * client)1720 necp_client_update_observer_update_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1721 {
1722 NECP_FD_LOCK(observer_fd);
1723
1724 if (observer_fd->update_count >= necp_observer_message_limit) {
1725 NECP_FD_UNLOCK(observer_fd);
1726 return;
1727 }
1728
1729 struct necp_client_update *client_update = necp_client_update_alloc(client->result, client->result_length);
1730 if (client_update != NULL) {
1731 uuid_copy(client_update->client_id, client->client_id);
1732 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_RESULT;
1733 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1734 observer_fd->update_count++;
1735
1736 necp_fd_notify(observer_fd, true);
1737 }
1738
1739 NECP_FD_UNLOCK(observer_fd);
1740 }
1741
1742 static void
necp_client_update_observer_remove_internal(struct necp_fd_data * observer_fd,struct necp_client * client)1743 necp_client_update_observer_remove_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1744 {
1745 NECP_FD_LOCK(observer_fd);
1746
1747 if (observer_fd->update_count >= necp_observer_message_limit) {
1748 NECP_FD_UNLOCK(observer_fd);
1749 return;
1750 }
1751
1752 struct necp_client_update *client_update = necp_client_update_alloc(NULL, 0);
1753 if (client_update != NULL) {
1754 uuid_copy(client_update->client_id, client->client_id);
1755 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_REMOVE;
1756 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1757 observer_fd->update_count++;
1758
1759 necp_fd_notify(observer_fd, true);
1760 }
1761
1762 NECP_FD_UNLOCK(observer_fd);
1763 }
1764
1765 static void
necp_client_update_observer_add(struct necp_client * client)1766 necp_client_update_observer_add(struct necp_client *client)
1767 {
1768 NECP_OBSERVER_LIST_LOCK_SHARED();
1769
1770 if (LIST_EMPTY(&necp_fd_observer_list)) {
1771 // No observers, bail
1772 NECP_OBSERVER_LIST_UNLOCK();
1773 return;
1774 }
1775
1776 struct necp_fd_data *observer_fd = NULL;
1777 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1778 necp_client_update_observer_add_internal(observer_fd, client);
1779 }
1780
1781 NECP_OBSERVER_LIST_UNLOCK();
1782 }
1783
1784 static void
necp_client_update_observer_update(struct necp_client * client)1785 necp_client_update_observer_update(struct necp_client *client)
1786 {
1787 NECP_OBSERVER_LIST_LOCK_SHARED();
1788
1789 if (LIST_EMPTY(&necp_fd_observer_list)) {
1790 // No observers, bail
1791 NECP_OBSERVER_LIST_UNLOCK();
1792 return;
1793 }
1794
1795 struct necp_fd_data *observer_fd = NULL;
1796 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1797 necp_client_update_observer_update_internal(observer_fd, client);
1798 }
1799
1800 NECP_OBSERVER_LIST_UNLOCK();
1801 }
1802
1803 static void
necp_client_update_observer_remove(struct necp_client * client)1804 necp_client_update_observer_remove(struct necp_client *client)
1805 {
1806 NECP_OBSERVER_LIST_LOCK_SHARED();
1807
1808 if (LIST_EMPTY(&necp_fd_observer_list)) {
1809 // No observers, bail
1810 NECP_OBSERVER_LIST_UNLOCK();
1811 return;
1812 }
1813
1814 struct necp_fd_data *observer_fd = NULL;
1815 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1816 necp_client_update_observer_remove_internal(observer_fd, client);
1817 }
1818
1819 NECP_OBSERVER_LIST_UNLOCK();
1820 }
1821
/*
 * Tear down one flow registration on a client: message each nexus agent
 * that its flow is closing (unless the registration is already defunct),
 * free every flow, then unlink and free the registration itself.
 * 'abort' selects ABORT_NEXUS over CLOSE_NEXUS messaging.
 * Caller holds the client lock.
 */
static void
necp_destroy_client_flow_registration(struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    pid_t pid, bool abort)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	bool has_close_parameters = false;
	struct necp_client_agent_parameters close_parameters = {};
	memset(close_parameters.u.close_token, 0, sizeof(close_parameters.u.close_token));
#if SKYWALK
	// If QUIC stats were shared with userspace, capture the session
	// resumption token so the close message can carry it.
	if (flow_registration->kstats_kaddr != NULL) {
		struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
		struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
		if (quicstats != NULL &&
		    quicstats->necp_quic_udp_stats.necp_udp_hdr.necp_stats_type == NECP_CLIENT_STATISTICS_TYPE_QUIC) {
			memcpy(close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(close_parameters.u.close_token));
			has_close_parameters = true;
		}
	}

	// Release reference held on the stats arena
	if (flow_registration->stats_arena != NULL) {
		necp_arena_info_release(flow_registration->stats_arena);
		flow_registration->stats_arena = NULL;
	}
#endif /* SKYWALK */

	struct necp_client_flow * __single search_flow = NULL;
	struct necp_client_flow *temp_flow = NULL;
	LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
		if (search_flow->nexus &&
		    !uuid_is_null(search_flow->u.nexus_agent)) {
			// Don't unregister for defunct flows
			if (!flow_registration->defunct) {
				u_int8_t message_type = (abort ? NETAGENT_MESSAGE_TYPE_ABORT_NEXUS :
				    NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS);
				// Browse/resolve-only registrations asserted the agent
				// instead of opening a nexus; unassert rather than close.
				if (((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) ||
				    (flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) &&
				    !(flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) {
					message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
				}
				size_t dummy_length = 0;
				void * __sized_by(dummy_length) dummy_results = NULL;
				int netagent_error = netagent_client_message_with_params(search_flow->u.nexus_agent,
				    ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
				    client->client_id :
				    flow_registration->registration_id),
				    pid, client->agent_handle,
				    message_type,
				    has_close_parameters ? &close_parameters : NULL,
				    &dummy_results, &dummy_length);
				// ENOENT just means the agent is already gone
				if (netagent_error != 0 && netagent_error != ENOENT) {
					NECPLOG(LOG_ERR, "necp_client_remove close nexus error (%d) MESSAGE TYPE %u", netagent_error, message_type);
				}
			}
			uuid_clear(search_flow->u.nexus_agent);
		}
		if (search_flow->assigned_results != NULL) {
			kfree_data_counted_by(search_flow->assigned_results, search_flow->assigned_results_length);
		}
		LIST_REMOVE(search_flow, flow_chain);
#if SKYWALK
		// Note: this if/else chain spans the #if; exactly one global
		// flow counter is decremented per flow.
		if (search_flow->nexus) {
			OSDecrementAtomic(&necp_nexus_flow_count);
		} else
#endif /* SKYWALK */
		if (search_flow->socket) {
			OSDecrementAtomic(&necp_socket_flow_count);
		} else {
			OSDecrementAtomic(&necp_if_flow_count);
		}

		necp_aop_offload_stats_destroy(search_flow);

		kfree_type(struct necp_client_flow, search_flow);
	}

	RB_REMOVE(_necp_client_flow_tree, &client->flow_registrations, flow_registration);
	flow_registration->client = NULL;

	kfree_type(struct necp_client_flow_registration, flow_registration);
}
1905
/*
 * Destroy a client that has already been unlinked from the global
 * trees: close stats, notify observers, free the cached route, destroy
 * all flow registrations, release the port reservation, unassert
 * agents, and drop the caller's reference. Caller must not hold the
 * client lock.
 */
static void
necp_destroy_client(struct necp_client *client, pid_t pid, bool abort)
{
	NECP_CLIENT_ASSERT_UNLOCKED(client);

#if SKYWALK
	if (client->nstat_context != NULL) {
		// This is a catch-all that should be rarely used.
		nstat_provider_stats_close(client->nstat_context);
		client->nstat_context = NULL;
	}
	if (client->original_parameters_source != NULL) {
		necp_client_release(client->original_parameters_source);
		client->original_parameters_source = NULL;
	}
#endif /* SKYWALK */
	// Tell observers this client is going away
	necp_client_update_observer_remove(client);

	NECP_CLIENT_LOCK(client);

	// Free route
	NECP_CLIENT_ROUTE_LOCK(client);
	if (client->current_route != NULL) {
		rtfree(client->current_route);
		client->current_route = NULL;
	}
	NECP_CLIENT_ROUTE_UNLOCK(client);

	// Remove flow assignments
	struct necp_client_flow_registration *flow_registration = NULL;
	struct necp_client_flow_registration *temp_flow_registration = NULL;
	RB_FOREACH_SAFE(flow_registration, _necp_client_flow_tree, &client->flow_registrations, temp_flow_registration) {
		necp_destroy_client_flow_registration(client, flow_registration, pid, abort);
	}

#if SKYWALK
	// Remove port reservation
	if (NETNS_TOKEN_VALID(&client->port_reservation)) {
		netns_release(&client->port_reservation);
	}
#endif /* SKYWALK */

	// Remove agent assertions
	struct necp_client_assertion * __single search_assertion = NULL;
	struct necp_client_assertion *temp_assertion = NULL;
	LIST_FOREACH_SAFE(search_assertion, &client->assertion_list, assertion_chain, temp_assertion) {
		int netagent_error = netagent_client_message(search_assertion->asserted_netagent, client->client_id, pid,
		    client->agent_handle, NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT);
		if (netagent_error != 0) {
			// ENOENT just means the agent is already gone
			NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR),
			    "necp_client_remove unassert agent error (%d)", netagent_error);
		}
		LIST_REMOVE(search_assertion, assertion_chain);
		kfree_type(struct necp_client_assertion, search_assertion);
	}

	// On the final release the client is freed and already unlocked,
	// so only unlock here when other references remain.
	if (!necp_client_release_locked(client)) {
		NECP_CLIENT_UNLOCK(client);
	}

	OSDecrementAtomic(&necp_client_count);
}
1968
1969 static bool
1970 necp_defunct_client_fd_locked_inner(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, bool destroy_stats);
1971
1972 static void
necp_process_defunct_list(struct _necp_flow_defunct_list * defunct_list)1973 necp_process_defunct_list(struct _necp_flow_defunct_list *defunct_list)
1974 {
1975 if (!LIST_EMPTY(defunct_list)) {
1976 struct necp_flow_defunct * __single flow_defunct = NULL;
1977 struct necp_flow_defunct *temp_flow_defunct = NULL;
1978
1979 // For each newly defunct client, send a message to the nexus to remove the flow
1980 LIST_FOREACH_SAFE(flow_defunct, defunct_list, chain, temp_flow_defunct) {
1981 if (!uuid_is_null(flow_defunct->nexus_agent)) {
1982 u_int8_t message_type = NETAGENT_MESSAGE_TYPE_ABORT_NEXUS;
1983 if (((flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) ||
1984 (flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) &&
1985 !(flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) {
1986 message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
1987 }
1988 size_t dummy_length = 0;
1989 void * __sized_by(dummy_length) dummy_results = NULL;
1990 int netagent_error = netagent_client_message_with_params(flow_defunct->nexus_agent,
1991 flow_defunct->flow_id,
1992 flow_defunct->proc_pid,
1993 flow_defunct->agent_handle,
1994 message_type,
1995 flow_defunct->has_close_parameters ? &flow_defunct->close_parameters : NULL,
1996 &dummy_results, &dummy_length);
1997 if (netagent_error != 0) {
1998 char namebuf[MAXCOMLEN + 1];
1999 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
2000 proc_name(flow_defunct->proc_pid, namebuf, sizeof(namebuf));
2001 NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR), "necp_update_client abort nexus error (%d) for pid %d %s", netagent_error, flow_defunct->proc_pid, namebuf);
2002 }
2003 }
2004 LIST_REMOVE(flow_defunct, chain);
2005 kfree_type(struct necp_flow_defunct, flow_defunct);
2006 }
2007 }
2008 ASSERT(LIST_EMPTY(defunct_list));
2009 }
2010
/*
 * fileops close handler for an NECP fd. Unlinks the fd from the global
 * lists, defuncts its flows, tears down per-fd state (flow tree,
 * pending updates, stats/sysctl arenas), frees the fd structure, and
 * only then — with no locks held — destroys the collected clients and
 * sends the nexus abort messages gathered in defunct_list.
 */
static int
necpop_close(struct fileglob *fg, vfs_context_t ctx)
{
#pragma unused(ctx)
	struct necp_fd_data * __single fd_data = NULL;
	int error = 0;

	fd_data = (struct necp_fd_data *)fg_get_data(fg);
	fg_set_data(fg, NULL);

	if (fd_data != NULL) {
		// Clients are parked here and destroyed after all locks drop
		struct _necp_client_tree clients_to_close;
		RB_INIT(&clients_to_close);

		// Remove from list quickly
		if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
			NECP_OBSERVER_LIST_LOCK_EXCLUSIVE();
			LIST_REMOVE(fd_data, chain);
			NECP_OBSERVER_LIST_UNLOCK();
		} else {
			NECP_FD_LIST_LOCK_EXCLUSIVE();
			LIST_REMOVE(fd_data, chain);
			NECP_FD_LIST_UNLOCK();
		}

		NECP_FD_LOCK(fd_data);
		pid_t pid = fd_data->proc_pid;

		struct _necp_flow_defunct_list defunct_list;
		LIST_INIT(&defunct_list);

		// Collect nexus close-out records; processed after unlock below
		(void)necp_defunct_client_fd_locked_inner(fd_data, &defunct_list, false);

		// Unlink every flow registration from the global and per-fd trees
		struct necp_client_flow_registration *flow_registration = NULL;
		struct necp_client_flow_registration *temp_flow_registration = NULL;
		RB_FOREACH_SAFE(flow_registration, _necp_fd_flow_tree, &fd_data->flows, temp_flow_registration) {
#if SKYWALK
			necp_destroy_flow_stats(fd_data, flow_registration, NULL, TRUE);
#endif /* SKYWALK */
			NECP_FLOW_TREE_LOCK_EXCLUSIVE();
			RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
			NECP_FLOW_TREE_UNLOCK();
			RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
		}

		// Move each client out of the global tree into clients_to_close
		struct necp_client *client = NULL;
		struct necp_client *temp_client = NULL;
		RB_FOREACH_SAFE(client, _necp_client_tree, &fd_data->clients, temp_client) {
			// Clear out the agent_handle to avoid dangling pointers back to fd_data
			NECP_CLIENT_LOCK(client);
			client->agent_handle = NULL;
			NECP_CLIENT_UNLOCK(client);

			NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
			RB_REMOVE(_necp_client_global_tree, &necp_client_global_tree, client);
			NECP_CLIENT_TREE_UNLOCK();
			RB_REMOVE(_necp_client_tree, &fd_data->clients, client);
			RB_INSERT(_necp_client_tree, &clients_to_close, client);
		}

		struct necp_client_update *client_update = NULL;
		struct necp_client_update *temp_update = NULL;
		TAILQ_FOREACH_SAFE(client_update, &fd_data->update_list, chain, temp_update) {
			// Flush pending updates
			TAILQ_REMOVE(&fd_data->update_list, client_update, chain);
			necp_client_update_free(client_update);
		}
		fd_data->update_count = 0;

#if SKYWALK
		// Cleanup stats arena(s); indicate that we're closing
		necp_stats_arenas_destroy(fd_data, TRUE);
		ASSERT(fd_data->stats_arena_active == NULL);
		ASSERT(LIST_EMPTY(&fd_data->stats_arena_list));

		// Cleanup systctl arena
		necp_sysctl_arena_destroy(fd_data);
		ASSERT(fd_data->sysctl_arena == NULL);
#endif /* SKYWALK */

		NECP_FD_UNLOCK(fd_data);

		selthreadclear(&fd_data->si);

		lck_mtx_destroy(&fd_data->fd_lock, &necp_fd_mtx_grp);

		if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
			OSDecrementAtomic(&necp_observer_fd_count);
		} else {
			OSDecrementAtomic(&necp_client_fd_count);
		}

		kfree_type(struct necp_fd_data, fd_data);

		// Locks are dropped: safe to destroy clients (may message agents)
		RB_FOREACH_SAFE(client, _necp_client_tree, &clients_to_close, temp_client) {
			RB_REMOVE(_necp_client_tree, &clients_to_close, client);
			necp_destroy_client(client, pid, true);
		}

		necp_process_defunct_list(&defunct_list);
	}

	return error;
}
2115
2116 /// NECP client utilities
2117
2118 static inline bool
necp_address_is_wildcard(const union necp_sockaddr_union * const addr)2119 necp_address_is_wildcard(const union necp_sockaddr_union * const addr)
2120 {
2121 return (addr->sa.sa_family == AF_INET && addr->sin.sin_addr.s_addr == INADDR_ANY) ||
2122 (addr->sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&addr->sin6.sin6_addr));
2123 }
2124
2125 static int
necp_find_fd_data(struct proc * p,int fd,struct fileproc ** fpp,struct necp_fd_data ** fd_data)2126 necp_find_fd_data(struct proc *p, int fd,
2127 struct fileproc **fpp, struct necp_fd_data **fd_data)
2128 {
2129 struct fileproc * __single fp;
2130 int error = fp_get_ftype(p, fd, DTYPE_NETPOLICY, ENODEV, &fp);
2131
2132 if (error == 0) {
2133 *fd_data = (struct necp_fd_data *)fp_get_data(fp);
2134 *fpp = fp;
2135
2136 if ((*fd_data)->necp_fd_type != necp_fd_type_client) {
2137 // Not a client fd, ignore
2138 fp_drop(p, fd, fp, 0);
2139 error = EINVAL;
2140 }
2141 }
2142 return error;
2143 }
2144
2145 static void
necp_client_add_nexus_flow(struct necp_client_flow_registration * flow_registration,uuid_t nexus_agent,uint32_t interface_index,uint32_t interface_flags,bool aop_offload)2146 necp_client_add_nexus_flow(struct necp_client_flow_registration *flow_registration,
2147 uuid_t nexus_agent,
2148 uint32_t interface_index,
2149 uint32_t interface_flags,
2150 bool aop_offload)
2151 {
2152 struct necp_client_flow *new_flow = kalloc_type(struct necp_client_flow, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2153
2154 new_flow->nexus = TRUE;
2155 uuid_copy(new_flow->u.nexus_agent, nexus_agent);
2156 new_flow->interface_index = interface_index;
2157 new_flow->interface_flags = interface_flags;
2158 new_flow->check_tcp_heuristics = TRUE;
2159 new_flow->aop_offload = aop_offload ? TRUE : FALSE;
2160 #if SKYWALK
2161 OSIncrementAtomic(&necp_nexus_flow_count);
2162 #endif /* SKYWALK */
2163
2164 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
2165
2166 #if SKYWALK
2167 necp_flow_save_current_interface_details(flow_registration);
2168 #endif /* SKYWALK */
2169 }
2170
2171 static void
necp_client_add_nexus_flow_if_needed(struct necp_client_flow_registration * flow_registration,uuid_t nexus_agent,uint32_t interface_index,bool aop_offload)2172 necp_client_add_nexus_flow_if_needed(struct necp_client_flow_registration *flow_registration,
2173 uuid_t nexus_agent, uint32_t interface_index, bool aop_offload)
2174 {
2175 struct necp_client_flow *flow = NULL;
2176 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
2177 if (flow->nexus &&
2178 uuid_compare(flow->u.nexus_agent, nexus_agent) == 0) {
2179 return;
2180 }
2181 }
2182
2183 uint32_t interface_flags = 0;
2184 ifnet_t ifp = NULL;
2185 ifnet_head_lock_shared();
2186 if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
2187 ifp = ifindex2ifnet[interface_index];
2188 if (ifp != NULL) {
2189 ifnet_lock_shared(ifp);
2190 interface_flags = nstat_ifnet_to_flags(ifp);
2191 ifnet_lock_done(ifp);
2192 }
2193 }
2194 ifnet_head_done();
2195 necp_client_add_nexus_flow(flow_registration, nexus_agent, interface_index, interface_flags, aop_offload);
2196 }
2197
2198 static struct necp_client_flow *
necp_client_add_interface_flow(struct necp_client_flow_registration * flow_registration,uint32_t interface_index)2199 necp_client_add_interface_flow(struct necp_client_flow_registration *flow_registration,
2200 uint32_t interface_index)
2201 {
2202 struct necp_client_flow *new_flow = kalloc_type(struct necp_client_flow, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2203
2204 // Neither nexus nor socket
2205 new_flow->interface_index = interface_index;
2206 new_flow->u.socket_handle = flow_registration->interface_handle;
2207 new_flow->u.cb = flow_registration->interface_cb;
2208
2209 OSIncrementAtomic(&necp_if_flow_count);
2210
2211 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
2212
2213 return new_flow;
2214 }
2215
2216 static struct necp_client_flow *
necp_client_add_interface_flow_if_needed(struct necp_client * client,struct necp_client_flow_registration * flow_registration,uint32_t interface_index)2217 necp_client_add_interface_flow_if_needed(struct necp_client *client,
2218 struct necp_client_flow_registration *flow_registration,
2219 uint32_t interface_index)
2220 {
2221 if (!client->allow_multiple_flows ||
2222 interface_index == IFSCOPE_NONE) {
2223 // Interface not set, or client not allowed to use this mode
2224 return NULL;
2225 }
2226
2227 struct necp_client_flow *flow = NULL;
2228 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
2229 if (!flow->nexus && !flow->socket && flow->interface_index == interface_index) {
2230 // Already have the flow
2231 flow->invalid = FALSE;
2232 flow->u.socket_handle = flow_registration->interface_handle;
2233 flow->u.cb = flow_registration->interface_cb;
2234 return NULL;
2235 }
2236 }
2237 return necp_client_add_interface_flow(flow_registration, interface_index);
2238 }
2239
2240 static void
necp_client_add_interface_option_if_needed(struct necp_client * client,uint32_t interface_index,uint32_t interface_generation,uuid_t * nexus_agent,bool network_provider)2241 necp_client_add_interface_option_if_needed(struct necp_client *client,
2242 uint32_t interface_index,
2243 uint32_t interface_generation,
2244 uuid_t *nexus_agent,
2245 bool network_provider)
2246 {
2247 if ((interface_index == IFSCOPE_NONE && !network_provider) ||
2248 (client->interface_option_count != 0 && !client->allow_multiple_flows)) {
2249 // Interface not set, or client not allowed to use this mode
2250 return;
2251 }
2252
2253 if (client->interface_option_count >= NECP_CLIENT_MAX_INTERFACE_OPTIONS) {
2254 // Cannot take any more interface options
2255 return;
2256 }
2257
2258 // Check if already present
2259 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
2260 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2261 struct necp_client_interface_option *option = &client->interface_options[option_i];
2262 if (option->interface_index == interface_index) {
2263 if (nexus_agent == NULL) {
2264 return;
2265 }
2266 if (uuid_compare(option->nexus_agent, *nexus_agent) == 0) {
2267 return;
2268 }
2269 if (uuid_is_null(option->nexus_agent)) {
2270 uuid_copy(option->nexus_agent, *nexus_agent);
2271 return;
2272 }
2273 // If we get to this point, this is a new nexus flow
2274 }
2275 } else {
2276 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2277 if (option->interface_index == interface_index) {
2278 if (nexus_agent == NULL) {
2279 return;
2280 }
2281 if (uuid_compare(option->nexus_agent, *nexus_agent) == 0) {
2282 return;
2283 }
2284 if (uuid_is_null(option->nexus_agent)) {
2285 uuid_copy(option->nexus_agent, *nexus_agent);
2286 return;
2287 }
2288 // If we get to this point, this is a new nexus flow
2289 }
2290 }
2291 }
2292
2293 // Add a new entry
2294 if (client->interface_option_count < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2295 // Add to static
2296 struct necp_client_interface_option *option = &client->interface_options[client->interface_option_count];
2297 option->interface_index = interface_index;
2298 option->interface_generation = interface_generation;
2299 if (nexus_agent != NULL) {
2300 uuid_copy(option->nexus_agent, *nexus_agent);
2301 } else {
2302 uuid_clear(option->nexus_agent);
2303 }
2304 client->interface_option_count++;
2305 } else {
2306 // Add to extra
2307 if (client->extra_interface_options == NULL) {
2308 client->extra_interface_options = (struct necp_client_interface_option *)kalloc_data(
2309 sizeof(struct necp_client_interface_option) * NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT, Z_WAITOK | Z_ZERO);
2310 }
2311 if (client->extra_interface_options != NULL) {
2312 struct necp_client_interface_option *option = &client->extra_interface_options[client->interface_option_count - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2313 option->interface_index = interface_index;
2314 option->interface_generation = interface_generation;
2315 if (nexus_agent != NULL) {
2316 uuid_copy(option->nexus_agent, *nexus_agent);
2317 } else {
2318 uuid_clear(option->nexus_agent);
2319 }
2320 client->interface_option_count++;
2321 }
2322 }
2323 }
2324
2325 static bool
necp_client_flow_is_viable(proc_t proc,struct necp_client * client,struct necp_client_flow * flow)2326 necp_client_flow_is_viable(proc_t proc, struct necp_client *client,
2327 struct necp_client_flow *flow)
2328 {
2329 struct necp_aggregate_result result;
2330 bool ignore_address = (client->allow_multiple_flows && !flow->nexus && !flow->socket);
2331
2332 flow->necp_flow_flags = 0;
2333 int error = necp_application_find_policy_match_internal(proc, client->parameters,
2334 (u_int32_t)client->parameters_length,
2335 &result, &flow->necp_flow_flags, NULL,
2336 flow->interface_index,
2337 &flow->local_addr, &flow->remote_addr, NULL, NULL,
2338 NULL, ignore_address, true, NULL);
2339
2340 // Check for blocking agents
2341 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
2342 if (uuid_is_null(result.netagents[i])) {
2343 // Passed end of valid agents
2344 break;
2345 }
2346 if (result.netagent_use_flags[i] & NECP_AGENT_USE_FLAG_REMOVE) {
2347 // A removed agent, ignore
2348 continue;
2349 }
2350 u_int32_t flags = netagent_get_flags(result.netagents[i]);
2351 if ((flags & NETAGENT_FLAG_REGISTERED) &&
2352 !(flags & NETAGENT_FLAG_VOLUNTARY) &&
2353 !(flags & NETAGENT_FLAG_ACTIVE) &&
2354 !(flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY)) {
2355 // A required agent is not active, cause the flow to be marked non-viable
2356 return false;
2357 }
2358 }
2359
2360 if (flow->interface_index != IFSCOPE_NONE) {
2361 ifnet_head_lock_shared();
2362
2363 struct ifnet *ifp = ifindex2ifnet[flow->interface_index];
2364 if (ifp && ifp->if_delegated.ifp != IFSCOPE_NONE) {
2365 flow->delegated_interface_index = ifp->if_delegated.ifp->if_index;
2366 }
2367
2368 ifnet_head_done();
2369 }
2370
2371 return error == 0 &&
2372 result.routed_interface_index != IFSCOPE_NONE &&
2373 result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP;
2374 }
2375
2376 static void
necp_flow_add_interface_flows(proc_t proc,struct necp_client * client,struct necp_client_flow_registration * flow_registration,bool send_initial)2377 necp_flow_add_interface_flows(proc_t proc,
2378 struct necp_client *client,
2379 struct necp_client_flow_registration *flow_registration,
2380 bool send_initial)
2381 {
2382 // Traverse all interfaces and add a tracking flow if needed
2383 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
2384 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2385 struct necp_client_interface_option *option = &client->interface_options[option_i];
2386 struct necp_client_flow *flow = necp_client_add_interface_flow_if_needed(client, flow_registration, option->interface_index);
2387 if (flow != NULL && send_initial) {
2388 flow->viable = necp_client_flow_is_viable(proc, client, flow);
2389 if (flow->viable && flow->u.cb) {
2390 bool viable = flow->viable;
2391 flow->u.cb(flow_registration->interface_handle, NECP_CLIENT_CBACTION_INITIAL, flow->interface_index, flow->necp_flow_flags, &viable);
2392 flow->viable = viable;
2393 }
2394 }
2395 } else {
2396 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2397 struct necp_client_flow *flow = necp_client_add_interface_flow_if_needed(client, flow_registration, option->interface_index);
2398 if (flow != NULL && send_initial) {
2399 flow->viable = necp_client_flow_is_viable(proc, client, flow);
2400 if (flow->viable && flow->u.cb) {
2401 bool viable = flow->viable;
2402 flow->u.cb(flow_registration->interface_handle, NECP_CLIENT_CBACTION_INITIAL, flow->interface_index, flow->necp_flow_flags, &viable);
2403 flow->viable = viable;
2404 }
2405 }
2406 }
2407 }
2408 }
2409
/*
 * Re-evaluate every flow on every flow registration of the client:
 * refresh viability, fire VIABLE/NONVIABLE callbacks, defunct dead nexus
 * flows (SKYWALK), and free interface flows that no longer match.
 *
 * proc:         process context used for policy matching.
 * client:       the client whose flows are updated; must be locked
 *               (asserted below).
 * defunct_list: accumulator for flow registrations to defunct; may be NULL.
 *
 * Returns true if any flow changed state (viability, flags, delegated
 * interface, or was freed/defuncted).
 */
static bool
necp_client_update_flows(proc_t proc,
    struct necp_client *client,
    struct _necp_flow_defunct_list *defunct_list)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	bool any_client_updated = FALSE;
	struct necp_client_flow * __single flow = NULL;
	struct necp_client_flow *temp_flow = NULL;
	struct necp_client_flow_registration *flow_registration = NULL;
	RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
		if (flow_registration->interface_cb != NULL) {
			// Add any interface flows that are not already tracked
			necp_flow_add_interface_flows(proc, client, flow_registration, false);
		}

		// SAFE variant: flows may be removed and freed inside the loop
		LIST_FOREACH_SAFE(flow, &flow_registration->flow_list, flow_chain, temp_flow) {
			bool client_updated = FALSE;

			// Check policy result for flow
			// Snapshot state that necp_client_flow_is_viable() may rewrite,
			// so changes can be detected afterwards.
			u_short old_delegated_ifindex = flow->delegated_interface_index;

			int old_flags = flow->necp_flow_flags;
			bool viable = necp_client_flow_is_viable(proc, client, flow);

			// TODO: Defunct nexus flows that are blocked by policy

			if (flow->viable != viable) {
				flow->viable = viable;
				client_updated = TRUE;
			}

			// A transition of the FORCE_UPDATE result flag counts as a change
			if ((old_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE) !=
			    (flow->necp_flow_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE)) {
				client_updated = TRUE;
			}

			if (flow->delegated_interface_index != old_delegated_ifindex) {
				client_updated = TRUE;
			}

			// Notify socket flows and plain interface flows (not nexus flows)
			// that became/stayed viable with a change
			if (flow->viable && client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) {
				bool flow_viable = flow->viable;
				flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_VIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable);
				flow->viable = flow_viable;
			}

			if (!flow->viable || flow->invalid) {
				if (client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) {
					bool flow_viable = flow->viable;
					flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_NONVIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable);
					flow->viable = flow_viable;
				}
				// The callback might change the viable-flag of the
				// flow depending on its policy. Thus, we need to
				// check the flags again after the callback.
			}

#if SKYWALK
			if (defunct_list != NULL) {
				if (flow->invalid && flow->nexus && flow->assigned && !uuid_is_null(flow->u.nexus_agent)) {
					// This is a nexus flow that was assigned, but not found on path
					u_int32_t flags = netagent_get_flags(flow->u.nexus_agent);
					if (!(flags & NETAGENT_FLAG_REGISTERED)) {
						// The agent is no longer registered! Mark defunct.
						necp_defunct_flow_registration(client, flow_registration, defunct_list);
						client_updated = TRUE;
					}
				}
			}
#else /* !SKYWALK */
			(void)defunct_list;
#endif /* !SKYWALK */

			// Handle flows that no longer match
			if (!flow->viable || flow->invalid) {
				// Drop them as long as they aren't assigned data
				if (!flow->nexus && !flow->assigned) {
					if (flow->assigned_results != NULL) {
						kfree_data_counted_by(flow->assigned_results, flow->assigned_results_length);
						client_updated = TRUE;
					}
					LIST_REMOVE(flow, flow_chain);
#if SKYWALK
					// NOTE(review): this branch is guarded by !flow->nexus above,
					// so the nexus counter decrement appears unreachable here —
					// confirm whether nexus flows are freed on another path.
					if (flow->nexus) {
						OSDecrementAtomic(&necp_nexus_flow_count);
					} else
#endif /* SKYWALK */
					if (flow->socket) {
						OSDecrementAtomic(&necp_socket_flow_count);
					} else {
						OSDecrementAtomic(&necp_if_flow_count);
					}

					// Tear down offload stats before freeing the flow itself
					necp_aop_offload_stats_destroy(flow);

					kfree_type(struct necp_client_flow, flow);
				}
			}

			any_client_updated |= client_updated;
		}
#if SKYWALK
		necp_flow_save_current_interface_details(flow_registration);
#endif /* SKYWALK */
	}

	return any_client_updated;
}
2520
2521 static void
necp_client_mark_all_nonsocket_flows_as_invalid(struct necp_client * client)2522 necp_client_mark_all_nonsocket_flows_as_invalid(struct necp_client *client)
2523 {
2524 struct necp_client_flow_registration *flow_registration = NULL;
2525 struct necp_client_flow *flow = NULL;
2526 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
2527 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
2528 if (!flow->socket) { // Socket flows are not marked as invalid
2529 flow->invalid = TRUE;
2530 }
2531 }
2532 }
2533
2534 // Reset option count every update
2535 client->interface_option_count = 0;
2536 }
2537
/*
 * Determine whether the client's parsed parameters request the given
 * network agent, either directly by UUID or by matching its domain/type
 * against the required/preferred agent-type lists.
 *
 * parameters:    parsed client parameters; NULL means nothing is requested.
 * netagent_uuid: UUID of the agent being tested.
 *
 * Returns true if the agent appears in the required or preferred lists.
 * Used to decide whether SPECIFIC_USE_ONLY agents apply to a client.
 */
static inline bool
necp_netagent_is_requested(const struct necp_client_parsed_parameters *parameters,
    uuid_t *netagent_uuid)
{
	// Specific use agents only apply when requested
	bool requested = false;
	if (parameters != NULL) {
		// Check required agent UUIDs
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parameters->required_netagents[i])) {
				// Lists are packed; first null UUID terminates the list
				break;
			}
			if (uuid_compare(parameters->required_netagents[i], *netagent_uuid) == 0) {
				requested = true;
				break;
			}
		}

		if (!requested) {
			// Check required agent types
			// The agent's own domain/type is fetched lazily, at most once
			bool fetched_type = false;
			char netagent_domain[NETAGENT_DOMAINSIZE];
			char netagent_type[NETAGENT_TYPESIZE];
			memset(&netagent_domain, 0, NETAGENT_DOMAINSIZE);
			memset(&netagent_type, 0, NETAGENT_TYPESIZE);

			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strbuflen(parameters->required_netagent_types[i].netagent_domain, sizeof(parameters->required_netagent_types[i].netagent_domain)) == 0 ||
				    strbuflen(parameters->required_netagent_types[i].netagent_type, sizeof(parameters->required_netagent_types[i].netagent_type)) == 0) {
					// An entry with an empty domain or type terminates the list
					break;
				}

				if (!fetched_type) {
					if (netagent_get_agent_domain_and_type(*netagent_uuid, netagent_domain, netagent_type)) {
						fetched_type = TRUE;
					} else {
						// Agent lookup failed; no type match is possible
						break;
					}
				}

				// NOTE(review): the "length == 0" sub-conditions below look
				// unreachable given the break above — presumably kept as a
				// wildcard pattern; confirm before simplifying.
				if ((strbuflen(parameters->required_netagent_types[i].netagent_domain, sizeof(parameters->required_netagent_types[i].netagent_domain)) == 0 ||
				    strbufcmp(netagent_domain, NETAGENT_DOMAINSIZE, parameters->required_netagent_types[i].netagent_domain, sizeof(parameters->required_netagent_types[i].netagent_domain)) == 0) &&
				    (strbuflen(parameters->required_netagent_types[i].netagent_type, sizeof(parameters->required_netagent_types[i].netagent_type)) == 0 ||
				    strbufcmp(netagent_type, NETAGENT_TYPESIZE, parameters->required_netagent_types[i].netagent_type, sizeof(parameters->required_netagent_types[i].netagent_type)) == 0)) {
					requested = true;
					break;
				}
			}
		}

		// Check preferred agent UUIDs
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parameters->preferred_netagents[i])) {
				break;
			}
			if (uuid_compare(parameters->preferred_netagents[i], *netagent_uuid) == 0) {
				requested = true;
				break;
			}
		}

		if (!requested) {
			// Check preferred agent types (same structure as the
			// required-type matching above)
			bool fetched_type = false;
			char netagent_domain[NETAGENT_DOMAINSIZE];
			char netagent_type[NETAGENT_TYPESIZE];
			memset(&netagent_domain, 0, NETAGENT_DOMAINSIZE);
			memset(&netagent_type, 0, NETAGENT_TYPESIZE);

			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strbuflen(parameters->preferred_netagent_types[i].netagent_domain, sizeof(parameters->preferred_netagent_types[i].netagent_domain)) == 0 ||
				    strbuflen(parameters->preferred_netagent_types[i].netagent_type, sizeof(parameters->preferred_netagent_types[i].netagent_type)) == 0) {
					break;
				}

				if (!fetched_type) {
					if (netagent_get_agent_domain_and_type(*netagent_uuid, netagent_domain, netagent_type)) {
						fetched_type = TRUE;
					} else {
						break;
					}
				}

				if ((strbuflen(parameters->preferred_netagent_types[i].netagent_domain, sizeof(parameters->preferred_netagent_types[i].netagent_domain)) == 0 ||
				    strbufcmp(netagent_domain, NETAGENT_DOMAINSIZE, parameters->preferred_netagent_types[i].netagent_domain, sizeof(parameters->preferred_netagent_types[i].netagent_domain)) == 0) &&
				    (strbuflen(parameters->preferred_netagent_types[i].netagent_type, sizeof(parameters->preferred_netagent_types[i].netagent_type)) == 0 ||
				    strbufcmp(netagent_type, NETAGENT_TYPESIZE, parameters->preferred_netagent_types[i].netagent_type, sizeof(parameters->preferred_netagent_types[i].netagent_type)) == 0)) {
					requested = true;
					break;
				}
			}
		}
	}

	return requested;
}
2634
/*
 * Decide whether a network agent applies to a client given its parsed
 * parameters.
 *
 * client:      client being matched; failed_trigger_agent state may be
 *              cleared here when the agent's generation has changed.
 * parameters:  parsed client parameters (flags consulted for nexus modes).
 * netagent_uuid: agent under consideration.
 * allow_nexus: whether nexus-provider agents may apply at all.
 * interface_index / interface_generation: identify the interface the agent
 *              was found on; used (SKYWALK only) when recording an
 *              interface option.
 *
 * Side effect (SKYWALK): applicable nexus agents are recorded as interface
 * options on the client.
 *
 * Returns true when the agent applies.
 */
static bool
necp_netagent_applies_to_client(struct necp_client *client,
    const struct necp_client_parsed_parameters *parameters,
    uuid_t *netagent_uuid, bool allow_nexus,
    uint32_t interface_index, uint32_t interface_generation)
{
#pragma unused(interface_index, interface_generation)
	bool applies = FALSE;
	u_int32_t flags = netagent_get_flags(*netagent_uuid);
	if (!(flags & NETAGENT_FLAG_REGISTERED)) {
		// Unregistered agents never apply
		return applies;
	}

	// Any of these flag bits marks the agent as some kind of nexus
	const bool is_nexus_agent = ((flags & NETAGENT_FLAG_NEXUS_PROVIDER) ||
	    (flags & NETAGENT_FLAG_NEXUS_LISTENER) ||
	    (flags & NETAGENT_FLAG_CUSTOM_ETHER_NEXUS) ||
	    (flags & NETAGENT_FLAG_CUSTOM_IP_NEXUS) ||
	    (flags & NETAGENT_FLAG_INTERPOSE_NEXUS));
	if (is_nexus_agent) {
		if (!allow_nexus) {
			// Hide nexus providers unless allowed
			// Direct interfaces and direct policies are allowed to use a nexus
			// Delegate interfaces or re-scoped interfaces are not allowed
			return applies;
		}

		if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) &&
		    !(flags & NETAGENT_FLAG_CUSTOM_ETHER_NEXUS)) {
			// Client requested a custom ether nexus, but this nexus isn't one
			return applies;
		}

		if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP) &&
		    !(flags & NETAGENT_FLAG_CUSTOM_IP_NEXUS)) {
			// Client requested a custom IP nexus, but this nexus isn't one
			return applies;
		}

		if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) &&
		    !(flags & NETAGENT_FLAG_INTERPOSE_NEXUS)) {
			// Client requested an interpose nexus, but this nexus isn't one
			return applies;
		}

		if (!(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) &&
		    !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP) &&
		    !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) &&
		    !(flags & NETAGENT_FLAG_NEXUS_PROVIDER)) {
			// Client requested default parameters, but this nexus isn't generic
			return applies;
		}
	}

	if (uuid_compare(client->failed_trigger_agent.netagent_uuid, *netagent_uuid) == 0) {
		if (client->failed_trigger_agent.generation == netagent_get_generation(*netagent_uuid)) {
			// If this agent was triggered, and failed, and hasn't changed, keep hiding it
			return applies;
		} else {
			// Mismatch generation, clear out old trigger
			uuid_clear(client->failed_trigger_agent.netagent_uuid);
			client->failed_trigger_agent.generation = 0;
		}
	}

	if (flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY) {
		// Specific use agents only apply when requested
		applies = necp_netagent_is_requested(parameters, netagent_uuid);
	} else {
		applies = TRUE;
	}

#if SKYWALK
	// Add nexus agent if it is a nexus, and either is not a listener, or the nexus supports listeners
	if (applies && is_nexus_agent &&
	    !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) && // Don't add for browse paths
	    ((flags & NETAGENT_FLAG_NEXUS_LISTENER) || !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER))) {
		necp_client_add_interface_option_if_needed(client, interface_index,
		    interface_generation, netagent_uuid,
		    (flags & NETAGENT_FLAG_NETWORK_PROVIDER));
	}
#endif /* SKYWALK */

	return applies;
}
2720
2721 static void
necp_client_add_agent_interface_options(struct necp_client * client,const struct necp_client_parsed_parameters * parsed_parameters,ifnet_t ifp)2722 necp_client_add_agent_interface_options(struct necp_client *client,
2723 const struct necp_client_parsed_parameters *parsed_parameters,
2724 ifnet_t ifp)
2725 {
2726 if (ifp == NULL) {
2727 return;
2728 }
2729
2730 ifnet_lock_shared(ifp);
2731 if (ifp->if_agentids != NULL) {
2732 for (u_int32_t i = 0; i < ifp->if_agentcount; i++) {
2733 if (uuid_is_null(ifp->if_agentids[i])) {
2734 continue;
2735 }
2736 // Relies on the side effect that nexus agents that apply will create flows
2737 (void)necp_netagent_applies_to_client(client, parsed_parameters, &ifp->if_agentids[i], TRUE,
2738 ifp->if_index, ifnet_get_generation(ifp));
2739 }
2740 }
2741 ifnet_lock_done(ifp);
2742 }
2743
2744 static void
necp_client_add_browse_interface_options(struct necp_client * client,const struct necp_client_parsed_parameters * parsed_parameters,ifnet_t ifp)2745 necp_client_add_browse_interface_options(struct necp_client *client,
2746 const struct necp_client_parsed_parameters *parsed_parameters,
2747 ifnet_t ifp)
2748 {
2749 if (ifp == NULL) {
2750 return;
2751 }
2752
2753 ifnet_lock_shared(ifp);
2754 if (ifp->if_agentids != NULL) {
2755 for (u_int32_t i = 0; i < ifp->if_agentcount; i++) {
2756 if (uuid_is_null(ifp->if_agentids[i])) {
2757 continue;
2758 }
2759
2760 u_int32_t flags = netagent_get_flags(ifp->if_agentids[i]);
2761 if ((flags & NETAGENT_FLAG_REGISTERED) &&
2762 (flags & NETAGENT_FLAG_ACTIVE) &&
2763 (flags & NETAGENT_FLAG_SUPPORTS_BROWSE) &&
2764 (!(flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY) ||
2765 necp_netagent_is_requested(parsed_parameters, &ifp->if_agentids[i]))) {
2766 necp_client_add_interface_option_if_needed(client, ifp->if_index, ifnet_get_generation(ifp), &ifp->if_agentids[i], (flags & NETAGENT_FLAG_NETWORK_PROVIDER));
2767
2768 // Finding one is enough
2769 break;
2770 }
2771 }
2772 }
2773 ifnet_lock_done(ifp);
2774 }
2775
2776 static inline bool
_necp_client_address_is_valid(struct sockaddr * address)2777 _necp_client_address_is_valid(struct sockaddr *address)
2778 {
2779 if (address->sa_family == AF_INET) {
2780 return address->sa_len == sizeof(struct sockaddr_in);
2781 } else if (address->sa_family == AF_INET6) {
2782 return address->sa_len == sizeof(struct sockaddr_in6);
2783 } else {
2784 return FALSE;
2785 }
2786 }
2787
2788 #define necp_client_address_is_valid(S) _necp_client_address_is_valid(SA(S))
2789
2790 static inline bool
necp_client_endpoint_is_unspecified(struct necp_client_endpoint * endpoint)2791 necp_client_endpoint_is_unspecified(struct necp_client_endpoint *endpoint)
2792 {
2793 if (necp_client_address_is_valid(&endpoint->u.sa)) {
2794 if (endpoint->u.sa.sa_family == AF_INET) {
2795 return endpoint->u.sin.sin_addr.s_addr == INADDR_ANY;
2796 } else if (endpoint->u.sa.sa_family == AF_INET6) {
2797 return IN6_IS_ADDR_UNSPECIFIED(&endpoint->u.sin6.sin6_addr);
2798 } else {
2799 return TRUE;
2800 }
2801 } else {
2802 return TRUE;
2803 }
2804 }
2805
#if SKYWALK
/*
 * Rewrite the local port inside a client's serialized parameter TLV
 * buffer in place: every LOCAL_ADDRESS and LOCAL_ENDPOINT TLV with a
 * valid IPv4/IPv6 address gets its port replaced by local_port.
 *
 * parameters / parameters_size: the TLV buffer and its length.
 * local_port: new port, in the byte order expected by sin_port/sin6_port.
 */
static void
necp_client_update_local_port_parameters(u_int8_t * __sized_by(parameters_size)parameters,
    u_int32_t parameters_size,
    uint16_t local_port)
{
	size_t offset = 0;
	// Walk TLVs while a full header still fits in the buffer
	while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);

		if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
			// If the length is larger than what can fit in the remaining parameters size, bail
			NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
			break;
		}

		if (length > 0) {
			u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
			if (value != NULL) {
				switch (type) {
				case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						if (necp_client_address_is_valid(&address_struct->address.sa)) {
							if (address_struct->address.sa.sa_family == AF_INET) {
								address_struct->address.sin.sin_port = local_port;
							} else if (address_struct->address.sa.sa_family == AF_INET6) {
								address_struct->address.sin6.sin6_port = local_port;
							}
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: {
					if (length >= sizeof(struct necp_client_endpoint)) {
						struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
						if (necp_client_address_is_valid(&endpoint->u.sa)) {
							if (endpoint->u.sa.sa_family == AF_INET) {
								endpoint->u.sin.sin_port = local_port;
							} else if (endpoint->u.sa.sa_family == AF_INET6) {
								endpoint->u.sin6.sin6_port = local_port;
							}
						}
					}
					break;
				}
				default: {
					// Other parameter types carry no local port
					break;
				}
				}
			}
		}

		offset += sizeof(struct necp_tlv_header) + length;
	}
}
#endif /* SKYWALK */
2864
2865 #define NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH 253
2866
2867 static void
necp_client_trace_parameter_parsing(struct necp_client * client,u_int8_t type,u_int8_t * __sized_by (length)value,u_int32_t length)2868 necp_client_trace_parameter_parsing(struct necp_client *client, u_int8_t type, u_int8_t * __sized_by(length)value, u_int32_t length)
2869 {
2870 uint64_t num = 0;
2871 uint16_t shortBuf;
2872 uint32_t intBuf;
2873 char buffer[NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH + 1];
2874
2875 if (value != NULL && length > 0) {
2876 switch (length) {
2877 case 1:
2878 num = *value;
2879 break;
2880 case 2:
2881 memcpy(&shortBuf, value, sizeof(shortBuf));
2882 num = shortBuf;
2883 break;
2884 case 4:
2885 memcpy(&intBuf, value, sizeof(intBuf));
2886 num = intBuf;
2887 break;
2888 case 8:
2889 memcpy(&num, value, sizeof(num));
2890 break;
2891 default:
2892 num = 0;
2893 break;
2894 }
2895 int len = NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH < length ? NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH : length;
2896 memcpy(buffer, value, len);
2897 buffer[len] = 0;
2898 NECP_CLIENT_PARAMS_LOG(client, "Parsing param - type %d length %d value <%llu (%llX)> %s", type, length, num, num, buffer);
2899 } else {
2900 NECP_CLIENT_PARAMS_LOG(client, "Parsing param - type %d length %d", type, length);
2901 }
2902 }
2903
/*
 * Dump a fully parsed parameter set for a client to the params log:
 * scalar fields, formatted local/remote addresses, tracker flags, and
 * every populated interface/agent list entry.
 */
static void
necp_client_trace_parsed_parameters(struct necp_client *client, struct necp_client_parsed_parameters *parsed_parameters)
{
	int i;
	char local_buffer[64] = { };
	char remote_buffer[64] = { };
	uuid_string_t uuid_str = { };
	uuid_unparse_lower(parsed_parameters->effective_uuid, uuid_str);

	// Format the local address when its length matches its family
	switch (parsed_parameters->local_addr.sa.sa_family) {
	case AF_INET:
		if (parsed_parameters->local_addr.sa.sa_len == sizeof(struct sockaddr_in)) {
			struct sockaddr_in *addr = &parsed_parameters->local_addr.sin;
			inet_ntop(AF_INET, &(addr->sin_addr), local_buffer, sizeof(local_buffer));
		}
		break;
	case AF_INET6:
		if (parsed_parameters->local_addr.sa.sa_len == sizeof(struct sockaddr_in6)) {
			struct sockaddr_in6 *addr6 = &parsed_parameters->local_addr.sin6;
			inet_ntop(AF_INET6, &(addr6->sin6_addr), local_buffer, sizeof(local_buffer));
		}
		break;
	default:
		break;
	}

	// Format the remote address the same way
	switch (parsed_parameters->remote_addr.sa.sa_family) {
	case AF_INET:
		if (parsed_parameters->remote_addr.sa.sa_len == sizeof(struct sockaddr_in)) {
			struct sockaddr_in *addr = &parsed_parameters->remote_addr.sin;
			inet_ntop(AF_INET, &(addr->sin_addr), remote_buffer, sizeof(remote_buffer));
		}
		break;
	case AF_INET6:
		if (parsed_parameters->remote_addr.sa.sa_len == sizeof(struct sockaddr_in6)) {
			struct sockaddr_in6 *addr6 = &parsed_parameters->remote_addr.sin6;
			inet_ntop(AF_INET6, &(addr6->sin6_addr), remote_buffer, sizeof(remote_buffer));
		}
		break;
	default:
		break;
	}

	NECP_CLIENT_PARAMS_LOG(client, "Parsed params - valid_fields %X flags %X "
	    "extended flags %llX delegated_upid %llu local_addr %s remote_addr %s "
	    "required_interface_index %u required_interface_type %d local_address_preference %d "
	    "ip_protocol %d transport_protocol %d ethertype %d effective_pid %d "
	    "effective_uuid %s uid %d persona_id %d traffic_class %d",
	    parsed_parameters->valid_fields,
	    parsed_parameters->flags,
	    parsed_parameters->extended_flags,
	    parsed_parameters->delegated_upid,
	    local_buffer, remote_buffer,
	    parsed_parameters->required_interface_index,
	    parsed_parameters->required_interface_type,
	    parsed_parameters->local_address_preference,
	    parsed_parameters->ip_protocol,
	    parsed_parameters->transport_protocol,
	    parsed_parameters->ethertype,
	    parsed_parameters->effective_pid,
	    uuid_str,
	    parsed_parameters->uid,
	    parsed_parameters->persona_id,
	    parsed_parameters->traffic_class);

	NECP_CLIENT_PARAMS_LOG(client, "Parsed params - tracker flags <known-tracker %X> <non-app-initiated %X> <silent %X> <app-approved %X>",
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_KNOWN_TRACKER,
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_NON_APP_INITIATED,
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_SILENT,
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_APPROVED_APP_DOMAIN);

	// Each list below is packed: iteration stops at the first empty entry
	for (i = 0; i < NECP_MAX_INTERFACE_PARAMETERS && parsed_parameters->prohibited_interfaces[i][0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_interfaces[%d] <%s>", i, parsed_parameters->prohibited_interfaces[i]);
	}

	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->required_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed required_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->required_netagent_types[i].netagent_domain,
		    parsed_parameters->required_netagent_types[i].netagent_type);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->prohibited_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->prohibited_netagent_types[i].netagent_domain,
		    parsed_parameters->prohibited_netagent_types[i].netagent_type);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->preferred_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed preferred_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->preferred_netagent_types[i].netagent_domain,
		    parsed_parameters->preferred_netagent_types[i].netagent_type);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->avoided_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed avoided_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->avoided_netagent_types[i].netagent_domain,
		    parsed_parameters->avoided_netagent_types[i].netagent_type);
	}

	// Agent UUID lists: packed, terminated by the first null UUID
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->required_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->required_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed required_netagents[%d] <%s>", i, uuid_str);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->prohibited_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->prohibited_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_netagents[%d] <%s>", i, uuid_str);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->preferred_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->preferred_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed preferred_netagents[%d] <%s>", i, uuid_str);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->avoided_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->avoided_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed avoided_netagents[%d] <%s>", i, uuid_str);
	}
}
3017
3018 static bool
necp_client_strings_are_equal(const char * __sized_by (string1_length)string1,size_t string1_length,const char * __sized_by (string2_length)string2,size_t string2_length)3019 necp_client_strings_are_equal(const char * __sized_by(string1_length)string1, size_t string1_length,
3020 const char * __sized_by(string2_length)string2, size_t string2_length)
3021 {
3022 if (string1 == NULL || string2 == NULL) {
3023 return false;
3024 }
3025 const size_t string1_actual_length = strnlen(string1, string1_length);
3026 const size_t string2_actual_length = strnlen(string2, string2_length);
3027 if (string1_actual_length != string2_actual_length) {
3028 return false;
3029 }
3030 return strbufcmp(string1, string1_actual_length, string2, string2_actual_length) == 0;
3031 }
3032
3033 static int
necp_client_parse_parameters(struct necp_client * client,u_int8_t * __sized_by (parameters_size)parameters,u_int32_t parameters_size,struct necp_client_parsed_parameters * parsed_parameters)3034 necp_client_parse_parameters(struct necp_client *client, u_int8_t * __sized_by(parameters_size)parameters,
3035 u_int32_t parameters_size,
3036 struct necp_client_parsed_parameters *parsed_parameters)
3037 {
3038 int error = 0;
3039 size_t offset = 0;
3040
3041 u_int32_t num_prohibited_interfaces = 0;
3042 u_int32_t num_prohibited_interface_types = 0;
3043 u_int32_t num_required_agents = 0;
3044 u_int32_t num_prohibited_agents = 0;
3045 u_int32_t num_preferred_agents = 0;
3046 u_int32_t num_avoided_agents = 0;
3047 u_int32_t num_required_agent_types = 0;
3048 u_int32_t num_prohibited_agent_types = 0;
3049 u_int32_t num_preferred_agent_types = 0;
3050 u_int32_t num_avoided_agent_types = 0;
3051 u_int32_t resolver_tag_length = 0;
3052 u_int8_t * __sized_by(resolver_tag_length) resolver_tag = NULL;
3053 u_int32_t hostname_length = 0;
3054 u_int8_t * __sized_by(hostname_length) client_hostname = NULL;
3055 uuid_t parent_id = {};
3056
3057 if (parsed_parameters == NULL) {
3058 return EINVAL;
3059 }
3060
3061 memset(parsed_parameters, 0, sizeof(struct necp_client_parsed_parameters));
3062
3063 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
3064 u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
3065 u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);
3066
3067 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
3068 // If the length is larger than what can fit in the remaining parameters size, bail
3069 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
3070 break;
3071 }
3072
3073 if (length > 0) {
3074 u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
3075 if (value != NULL) {
3076 switch (type) {
3077 case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
3078 if (length <= IFXNAMSIZ && length > 0) {
3079 ifnet_t __single bound_interface = NULL;
3080 char interface_name[IFXNAMSIZ];
3081 memcpy(interface_name, value, length);
3082 interface_name[length - 1] = 0; // Make sure the string is NULL terminated
3083 if (ifnet_find_by_name(__unsafe_null_terminated_from_indexable(interface_name, &interface_name[length - 1]), &bound_interface) == 0) {
3084 parsed_parameters->required_interface_index = bound_interface->if_index;
3085 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF;
3086 ifnet_release(bound_interface);
3087 }
3088 }
3089 break;
3090 }
3091 case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
3092 if (length >= sizeof(struct necp_policy_condition_addr)) {
3093 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
3094 if (necp_client_address_is_valid(&address_struct->address.sa)) {
3095 parsed_parameters->local_addr.sin6 = address_struct->address.sin6;
3096 if (!necp_address_is_wildcard(&parsed_parameters->local_addr)) {
3097 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR;
3098 }
3099 if ((parsed_parameters->local_addr.sa.sa_family == AF_INET && parsed_parameters->local_addr.sin.sin_port) ||
3100 (parsed_parameters->local_addr.sa.sa_family == AF_INET6 && parsed_parameters->local_addr.sin6.sin6_port)) {
3101 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT;
3102 }
3103 }
3104 }
3105 break;
3106 }
3107 case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: {
3108 if (length >= sizeof(struct necp_client_endpoint)) {
3109 struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
3110 if (necp_client_address_is_valid(&endpoint->u.sa)) {
3111 parsed_parameters->local_addr.sin6 = endpoint->u.sin6;
3112 if (!necp_address_is_wildcard(&parsed_parameters->local_addr)) {
3113 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR;
3114 }
3115 if ((parsed_parameters->local_addr.sa.sa_family == AF_INET && parsed_parameters->local_addr.sin.sin_port) ||
3116 (parsed_parameters->local_addr.sa.sa_family == AF_INET6 && parsed_parameters->local_addr.sin6.sin6_port)) {
3117 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT;
3118 }
3119 }
3120 }
3121 break;
3122 }
3123 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
3124 if (length >= sizeof(struct necp_policy_condition_addr)) {
3125 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
3126 if (necp_client_address_is_valid(&address_struct->address.sa)) {
3127 parsed_parameters->remote_addr.sin6 = address_struct->address.sin6;
3128 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR;
3129 }
3130 }
3131 break;
3132 }
3133 case NECP_CLIENT_PARAMETER_REMOTE_ENDPOINT: {
3134 if (length >= sizeof(struct necp_client_endpoint)) {
3135 struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
3136 if (necp_client_address_is_valid(&endpoint->u.sa)) {
3137 parsed_parameters->remote_addr.sin6 = endpoint->u.sin6;
3138 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR;
3139 }
3140 }
3141 break;
3142 }
3143 case NECP_CLIENT_PARAMETER_PROHIBIT_INTERFACE: {
3144 if (num_prohibited_interfaces >= NECP_MAX_INTERFACE_PARAMETERS) {
3145 break;
3146 }
3147 if (length <= IFXNAMSIZ && length > 0) {
3148 memcpy(parsed_parameters->prohibited_interfaces[num_prohibited_interfaces], value, length);
3149 parsed_parameters->prohibited_interfaces[num_prohibited_interfaces][length - 1] = 0; // Make sure the string is NULL terminated
3150 num_prohibited_interfaces++;
3151 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF;
3152 }
3153 break;
3154 }
3155 case NECP_CLIENT_PARAMETER_REQUIRE_IF_TYPE: {
3156 if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) {
3157 break;
3158 }
3159 if (length >= sizeof(u_int8_t)) {
3160 memcpy(&parsed_parameters->required_interface_type, value, sizeof(u_int8_t));
3161 if (parsed_parameters->required_interface_type) {
3162 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE;
3163 }
3164 }
3165 break;
3166 }
3167 case NECP_CLIENT_PARAMETER_PROHIBIT_IF_TYPE: {
3168 if (num_prohibited_interface_types >= NECP_MAX_INTERFACE_PARAMETERS) {
3169 break;
3170 }
3171 if (length >= sizeof(u_int8_t)) {
3172 memcpy(&parsed_parameters->prohibited_interface_types[num_prohibited_interface_types], value, sizeof(u_int8_t));
3173 num_prohibited_interface_types++;
3174 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE;
3175 }
3176 break;
3177 }
3178 case NECP_CLIENT_PARAMETER_REQUIRE_AGENT: {
3179 if (num_required_agents >= NECP_MAX_AGENT_PARAMETERS) {
3180 break;
3181 }
3182 if (length >= sizeof(uuid_t)) {
3183 memcpy(&parsed_parameters->required_netagents[num_required_agents], value, sizeof(uuid_t));
3184 num_required_agents++;
3185 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT;
3186 }
3187 break;
3188 }
3189 case NECP_CLIENT_PARAMETER_PROHIBIT_AGENT: {
3190 if (num_prohibited_agents >= NECP_MAX_AGENT_PARAMETERS) {
3191 break;
3192 }
3193 if (length >= sizeof(uuid_t)) {
3194 memcpy(&parsed_parameters->prohibited_netagents[num_prohibited_agents], value, sizeof(uuid_t));
3195 num_prohibited_agents++;
3196 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT;
3197 }
3198 break;
3199 }
3200 case NECP_CLIENT_PARAMETER_PREFER_AGENT: {
3201 if (num_preferred_agents >= NECP_MAX_AGENT_PARAMETERS) {
3202 break;
3203 }
3204 if (length >= sizeof(uuid_t)) {
3205 memcpy(&parsed_parameters->preferred_netagents[num_preferred_agents], value, sizeof(uuid_t));
3206 num_preferred_agents++;
3207 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT;
3208 }
3209 break;
3210 }
3211 case NECP_CLIENT_PARAMETER_AVOID_AGENT: {
3212 if (num_avoided_agents >= NECP_MAX_AGENT_PARAMETERS) {
3213 break;
3214 }
3215 if (length >= sizeof(uuid_t)) {
3216 memcpy(&parsed_parameters->avoided_netagents[num_avoided_agents], value, sizeof(uuid_t));
3217 num_avoided_agents++;
3218 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT;
3219 }
3220 break;
3221 }
3222 case NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE: {
3223 if (num_required_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
3224 break;
3225 }
3226 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
3227 memcpy(&parsed_parameters->required_netagent_types[num_required_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
3228 num_required_agent_types++;
3229 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE;
3230 }
3231 break;
3232 }
3233 case NECP_CLIENT_PARAMETER_PROHIBIT_AGENT_TYPE: {
3234 if (num_prohibited_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
3235 break;
3236 }
3237 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
3238 memcpy(&parsed_parameters->prohibited_netagent_types[num_prohibited_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
3239 num_prohibited_agent_types++;
3240 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE;
3241 }
3242 break;
3243 }
3244 case NECP_CLIENT_PARAMETER_PREFER_AGENT_TYPE: {
3245 if (num_preferred_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
3246 break;
3247 }
3248 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
3249 memcpy(&parsed_parameters->preferred_netagent_types[num_preferred_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
3250 num_preferred_agent_types++;
3251 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE;
3252 }
3253 break;
3254 }
3255 case NECP_CLIENT_PARAMETER_AVOID_AGENT_TYPE: {
3256 if (num_avoided_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
3257 break;
3258 }
3259 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
3260 memcpy(&parsed_parameters->avoided_netagent_types[num_avoided_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
3261 num_avoided_agent_types++;
3262 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE;
3263 }
3264 break;
3265 }
3266 case NECP_CLIENT_PARAMETER_FLAGS: {
3267 if (length >= sizeof(u_int32_t)) {
3268 memcpy(&parsed_parameters->flags, value, sizeof(parsed_parameters->flags));
3269 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_FLAGS;
3270 }
3271 break;
3272 }
3273 case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
3274 if (length == sizeof(u_int16_t)) {
3275 u_int16_t large_ip_protocol = 0;
3276 memcpy(&large_ip_protocol, value, sizeof(large_ip_protocol));
3277 parsed_parameters->ip_protocol = (u_int8_t)large_ip_protocol;
3278 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL;
3279 } else if (length >= sizeof(parsed_parameters->ip_protocol)) {
3280 memcpy(&parsed_parameters->ip_protocol, value, sizeof(parsed_parameters->ip_protocol));
3281 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL;
3282 }
3283 break;
3284 }
3285 case NECP_CLIENT_PARAMETER_TRANSPORT_PROTOCOL: {
3286 if (length >= sizeof(parsed_parameters->transport_protocol)) {
3287 memcpy(&parsed_parameters->transport_protocol, value, sizeof(parsed_parameters->transport_protocol));
3288 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL;
3289 }
3290 break;
3291 }
3292 case NECP_CLIENT_PARAMETER_PID: {
3293 if (length >= sizeof(parsed_parameters->effective_pid)) {
3294 memcpy(&parsed_parameters->effective_pid, value, sizeof(parsed_parameters->effective_pid));
3295 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID;
3296 }
3297 break;
3298 }
3299 case NECP_CLIENT_PARAMETER_DELEGATED_UPID: {
3300 if (length >= sizeof(parsed_parameters->delegated_upid)) {
3301 memcpy(&parsed_parameters->delegated_upid, value, sizeof(parsed_parameters->delegated_upid));
3302 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID;
3303 }
3304 break;
3305 }
3306 case NECP_CLIENT_PARAMETER_ETHERTYPE: {
3307 if (length >= sizeof(parsed_parameters->ethertype)) {
3308 memcpy(&parsed_parameters->ethertype, value, sizeof(parsed_parameters->ethertype));
3309 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_ETHERTYPE;
3310 }
3311 break;
3312 }
3313 case NECP_CLIENT_PARAMETER_APPLICATION: {
3314 if (length >= sizeof(parsed_parameters->effective_uuid)) {
3315 memcpy(&parsed_parameters->effective_uuid, value, sizeof(parsed_parameters->effective_uuid));
3316 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID;
3317 }
3318 break;
3319 }
3320 case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
3321 if (length >= sizeof(parsed_parameters->traffic_class)) {
3322 memcpy(&parsed_parameters->traffic_class, value, sizeof(parsed_parameters->traffic_class));
3323 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS;
3324 }
3325 break;
3326 }
3327 case NECP_CLIENT_PARAMETER_RESOLVER_TAG: {
3328 if (length > 0) {
3329 if (resolver_tag != NULL) {
3330 // Multiple resolver tags is invalid
3331 NECPLOG0(LOG_ERR, "Multiple resolver tags are not supported");
3332 error = EINVAL;
3333 } else {
3334 resolver_tag = (u_int8_t *)value;
3335 resolver_tag_length = length;
3336 }
3337 }
3338 break;
3339 }
3340 case NECP_CLIENT_PARAMETER_DOMAIN: {
3341 if (length > 0) {
3342 client_hostname = (u_int8_t *)value;
3343 hostname_length = length;
3344 }
3345 break;
3346 }
3347 case NECP_CLIENT_PARAMETER_PARENT_ID: {
3348 if (length == sizeof(parent_id)) {
3349 uuid_copy(parent_id, value);
3350 memcpy(&parsed_parameters->parent_uuid, value, sizeof(parsed_parameters->parent_uuid));
3351 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID;
3352 }
3353 break;
3354 }
3355 case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE: {
3356 if (length >= sizeof(parsed_parameters->local_address_preference)) {
3357 memcpy(&parsed_parameters->local_address_preference, value, sizeof(parsed_parameters->local_address_preference));
3358 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE;
3359 }
3360 break;
3361 }
3362 case NECP_CLIENT_PARAMETER_ATTRIBUTED_BUNDLE_IDENTIFIER: {
3363 if (length > 0) {
3364 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER;
3365 }
3366 break;
3367 }
3368 case NECP_CLIENT_PARAMETER_FLOW_DEMUX_PATTERN: {
3369 if (parsed_parameters->demux_pattern_count >= NECP_MAX_DEMUX_PATTERNS) {
3370 break;
3371 }
3372 if (length >= sizeof(struct necp_demux_pattern)) {
3373 memcpy(&parsed_parameters->demux_patterns[parsed_parameters->demux_pattern_count], value, sizeof(struct necp_demux_pattern));
3374 parsed_parameters->demux_pattern_count++;
3375 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN;
3376 }
3377 break;
3378 }
3379 case NECP_CLIENT_PARAMETER_APPLICATION_ID: {
3380 if (length >= sizeof(necp_application_id_t)) {
3381 necp_application_id_t *application_id = (necp_application_id_t *)(void *)value;
3382 // UID
3383 parsed_parameters->uid = application_id->uid;
3384 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_UID;
3385 // EUUID
3386 uuid_copy(parsed_parameters->effective_uuid, application_id->effective_uuid);
3387 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID;
3388 // PERSONA
3389 parsed_parameters->persona_id = application_id->persona_id;
3390 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PERSONA_ID;
3391 }
3392 break;
3393 }
3394 case NECP_CLIENT_PARAMETER_EXTENDED_FLAGS: {
3395 if (length >= sizeof(u_int64_t)) {
3396 memcpy(&parsed_parameters->extended_flags, value, sizeof(parsed_parameters->extended_flags));
3397 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EXTENDED_FLAGS;
3398 }
3399 break;
3400 }
3401 default: {
3402 break;
3403 }
3404 }
3405 }
3406
3407 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) {
3408 necp_client_trace_parameter_parsing(client, type, value, length);
3409 }
3410 }
3411
3412 offset += sizeof(struct necp_tlv_header) + length;
3413 }
3414
3415 if (resolver_tag != NULL) {
3416 struct necp_client_validatable *validatable = (struct necp_client_validatable *)resolver_tag;
3417 if (resolver_tag_length <= sizeof(struct necp_client_validatable)) {
3418 error = EINVAL;
3419 NECPLOG(LOG_ERR, "Resolver tag length too short: %u", resolver_tag_length);
3420 } else {
3421 bool matches = true;
3422
3423 // Check the client UUID for client-specific results
3424 if (validatable->signable.sign_type == NECP_CLIENT_SIGN_TYPE_RESOLVER_ANSWER ||
3425 validatable->signable.sign_type == NECP_CLIENT_SIGN_TYPE_BROWSE_RESULT ||
3426 validatable->signable.sign_type == NECP_CLIENT_SIGN_TYPE_SERVICE_RESOLVER_ANSWER) {
3427 if (uuid_compare(parent_id, validatable->signable.client_id) != 0 &&
3428 uuid_compare(client->client_id, validatable->signable.client_id) != 0) {
3429 NECPLOG0(LOG_ERR, "Resolver tag invalid client ID");
3430 matches = false;
3431 }
3432 }
3433
3434 size_t data_length = resolver_tag_length - sizeof(struct necp_client_validatable);
3435 switch (validatable->signable.sign_type) {
3436 case NECP_CLIENT_SIGN_TYPE_RESOLVER_ANSWER:
3437 case NECP_CLIENT_SIGN_TYPE_SYSTEM_RESOLVER_ANSWER: {
3438 if (data_length < (sizeof(struct necp_client_host_resolver_answer) - sizeof(struct necp_client_signable))) {
3439 NECPLOG0(LOG_ERR, "Resolver tag invalid length for resolver answer");
3440 matches = false;
3441 } else {
3442 struct necp_client_host_resolver_answer * __single answer_struct = (struct necp_client_host_resolver_answer *)&validatable->signable;
3443 if (data_length != (sizeof(struct necp_client_host_resolver_answer) + answer_struct->hostname_length - sizeof(struct necp_client_signable))) {
3444 NECPLOG0(LOG_ERR, "Resolver tag invalid length for resolver answer");
3445 matches = false;
3446 } else {
3447 struct sockaddr_in6 sin6 = answer_struct->address_answer.sin6;
3448 if (answer_struct->hostname_length != 0 && // If the hostname on the signed answer is empty, ignore
3449 !necp_client_strings_are_equal((const char *)client_hostname, hostname_length,
3450 necp_answer_get_hostname(answer_struct, answer_struct->hostname_length), answer_struct->hostname_length)) {
3451 NECPLOG0(LOG_ERR, "Resolver tag hostname does not match");
3452 matches = false;
3453 } else if (answer_struct->address_answer.sa.sa_family != parsed_parameters->remote_addr.sa.sa_family ||
3454 answer_struct->address_answer.sa.sa_len != parsed_parameters->remote_addr.sa.sa_len) {
3455 NECPLOG0(LOG_ERR, "Resolver tag address type does not match");
3456 matches = false;
3457 } else if (answer_struct->address_answer.sin.sin_port != 0 && // If the port on the signed answer is empty, ignore
3458 answer_struct->address_answer.sin.sin_port != parsed_parameters->remote_addr.sin.sin_port) {
3459 NECPLOG0(LOG_ERR, "Resolver tag port does not match");
3460 matches = false;
3461 } else if ((answer_struct->address_answer.sa.sa_family == AF_INET &&
3462 answer_struct->address_answer.sin.sin_addr.s_addr != parsed_parameters->remote_addr.sin.sin_addr.s_addr) ||
3463 (answer_struct->address_answer.sa.sa_family == AF_INET6 &&
3464 memcmp(&sin6.sin6_addr, &parsed_parameters->remote_addr.sin6.sin6_addr, sizeof(struct in6_addr)) != 0)) {
3465 NECPLOG0(LOG_ERR, "Resolver tag address does not match");
3466 matches = false;
3467 }
3468 }
3469 }
3470 break;
3471 }
3472 case NECP_CLIENT_SIGN_TYPE_BROWSE_RESULT:
3473 case NECP_CLIENT_SIGN_TYPE_SYSTEM_BROWSE_RESULT: {
3474 if (data_length < (sizeof(struct necp_client_browse_result) - sizeof(struct necp_client_signable))) {
3475 NECPLOG0(LOG_ERR, "Resolver tag invalid length for browse result");
3476 matches = false;
3477 } else {
3478 struct necp_client_browse_result * __single answer_struct = (struct necp_client_browse_result *)&validatable->signable;
3479 if (data_length != (sizeof(struct necp_client_browse_result) + answer_struct->service_length - sizeof(struct necp_client_signable))) {
3480 NECPLOG0(LOG_ERR, "Resolver tag invalid length for browse result");
3481 matches = false;
3482 }
3483 }
3484 break;
3485 }
3486 case NECP_CLIENT_SIGN_TYPE_SERVICE_RESOLVER_ANSWER:
3487 case NECP_CLIENT_SIGN_TYPE_SYSTEM_SERVICE_RESOLVER_ANSWER: {
3488 if (data_length < (sizeof(struct necp_client_service_resolver_answer) - sizeof(struct necp_client_signable))) {
3489 NECPLOG0(LOG_ERR, "Resolver tag invalid length for service resolver answer");
3490 matches = false;
3491 } else {
3492 struct necp_client_service_resolver_answer * __single answer_struct = (struct necp_client_service_resolver_answer *)&validatable->signable;
3493 if (data_length != (sizeof(struct necp_client_service_resolver_answer) + answer_struct->service_length + answer_struct->hostname_length - sizeof(struct necp_client_signable))) {
3494 NECPLOG0(LOG_ERR, "Resolver tag invalid length for service resolver answer");
3495 matches = false;
3496 }
3497 }
3498 break;
3499 }
3500 default: {
3501 NECPLOG(LOG_ERR, "Resolver tag unknown sign type: %u", validatable->signable.sign_type);
3502 matches = false;
3503 break;
3504 }
3505 }
3506 if (!matches) {
3507 error = EAUTH;
3508 } else {
3509 const bool validated = necp_validate_resolver_answer(validatable->signable.client_id,
3510 validatable->signable.sign_type,
3511 signable_get_data(&validatable->signable, data_length), data_length,
3512 validatable->signature.signed_tag, sizeof(validatable->signature.signed_tag));
3513 if (!validated) {
3514 error = EAUTH;
3515 NECPLOG0(LOG_ERR, "Failed to validate resolve answer");
3516 }
3517 }
3518 }
3519 }
3520
3521 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) {
3522 necp_client_trace_parsed_parameters(client, parsed_parameters);
3523 }
3524
3525 return error;
3526 }
3527
3528 static int
necp_client_parse_result(u_int8_t * __indexable result,u_int32_t result_size,struct necp_client_flow * flow,void ** flow_stats)3529 necp_client_parse_result(u_int8_t * __indexable result,
3530 u_int32_t result_size,
3531 struct necp_client_flow *flow,
3532 void **flow_stats)
3533 {
3534 #pragma unused(flow_stats)
3535 int error = 0;
3536 size_t offset = 0;
3537
3538 while ((offset + sizeof(struct necp_tlv_header)) <= result_size) {
3539 u_int8_t type = necp_buffer_get_tlv_type(result, result_size, offset);
3540 u_int32_t length = necp_buffer_get_tlv_length(result, result_size, offset);
3541
3542 if (length > 0 && (offset + sizeof(struct necp_tlv_header) + length) <= result_size) {
3543 u_int8_t * __indexable value = necp_buffer_get_tlv_value(result, result_size, offset, NULL);
3544 if (value != NULL) {
3545 switch (type) {
3546 case NECP_CLIENT_RESULT_LOCAL_ENDPOINT: {
3547 if (length >= sizeof(struct necp_client_endpoint)) {
3548 struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
3549 if (necp_client_address_is_valid(&endpoint->u.sa)) {
3550 flow->local_addr.sin6 = endpoint->u.sin6;
3551 }
3552 }
3553 break;
3554 }
3555 case NECP_CLIENT_RESULT_REMOTE_ENDPOINT: {
3556 if (length >= sizeof(struct necp_client_endpoint)) {
3557 struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
3558 if (necp_client_address_is_valid(&endpoint->u.sa)) {
3559 flow->remote_addr.sin6 = endpoint->u.sin6;
3560 }
3561 }
3562 break;
3563 }
3564 #if SKYWALK
3565 case NECP_CLIENT_RESULT_NEXUS_FLOW_STATS: {
3566 // this TLV contains flow_stats pointer which is refcnt'ed.
3567 if (flow_stats != NULL && length >= sizeof(struct sk_stats_flow *)) {
3568 struct flow_stats * __single fs = *(void **)(void *)value;
3569 // transfer the refcnt to flow_stats pointer
3570 *flow_stats = fs;
3571 }
3572 memset(value, 0, length); // nullify TLV always
3573 break;
3574 }
3575 case NECP_CLIENT_RESULT_UNIQUE_FLOW_TAG: {
3576 if (length >= sizeof(uint32_t)) {
3577 flow->flow_tag = *(uint32_t *)(void *)value;
3578 break;
3579 }
3580 }
3581 #endif /* SKYWALK */
3582 default: {
3583 break;
3584 }
3585 }
3586 }
3587 }
3588
3589 offset += sizeof(struct necp_tlv_header) + length;
3590 }
3591
3592 return error;
3593 }
3594
/*
 * Allocate a new flow registration for `client`, give it a fresh flow
 * registration ID, and insert it into the client's registration tree,
 * the owning fd's tree, and the global lookup tree.
 * Caller must hold both the fd lock and the client lock.
 * Never returns NULL (allocation uses Z_NOFAIL).
 */
static struct necp_client_flow_registration *
necp_client_create_flow_registration(struct necp_fd_data *fd_data, struct necp_client *client)
{
	NECP_FD_ASSERT_LOCKED(fd_data);
	NECP_CLIENT_ASSERT_LOCKED(client);

	// Z_WAITOK | Z_NOFAIL: may block, but cannot fail
	struct necp_client_flow_registration *new_registration = kalloc_type(struct necp_client_flow_registration, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	// Interface not known yet; start from the "unknown" placeholder
	new_registration->last_interface_details = combine_interface_details(IFSCOPE_NONE, NSTAT_IFNET_IS_UNKNOWN_TYPE);

	necp_generate_client_id(new_registration->registration_id, true);
	LIST_INIT(&new_registration->flow_list);

	// Add registration to client list
	RB_INSERT(_necp_client_flow_tree, &client->flow_registrations, new_registration);

	// Add registration to fd list
	RB_INSERT(_necp_fd_flow_tree, &fd_data->flows, new_registration);

	// Add registration to global tree for lookup (needs the exclusive flow-tree lock)
	NECP_FLOW_TREE_LOCK_EXCLUSIVE();
	RB_INSERT(_necp_client_flow_global_tree, &necp_client_flow_global_tree, new_registration);
	NECP_FLOW_TREE_UNLOCK();

	new_registration->client = client;

#if SKYWALK
	{
		// The uuid caching here is something of a hack, but saves a dynamic lookup with attendant lock hierarchy issues
		uint64_t stats_event_type = (uuid_is_null(client->latest_flow_registration_id)) ? NSTAT_EVENT_SRC_FLOW_UUID_ASSIGNED : NSTAT_EVENT_SRC_FLOW_UUID_CHANGED;
		uuid_copy(client->latest_flow_registration_id, new_registration->registration_id);

		// With the flow uuid known, push a new statistics update to ensure the uuid gets known by any clients before the flow can close
		if (client->nstat_context != NULL) {
			nstat_provider_stats_event(client->nstat_context, stats_event_type);
		}
	}
#endif /* SKYWALK */

	// Start out assuming there is nothing to read from the flow
	new_registration->flow_result_read = true;

	return new_registration;
}
3639
3640 static void
necp_client_add_socket_flow(struct necp_client_flow_registration * flow_registration,struct inpcb * inp)3641 necp_client_add_socket_flow(struct necp_client_flow_registration *flow_registration,
3642 struct inpcb *inp)
3643 {
3644 struct necp_client_flow *new_flow = kalloc_type(struct necp_client_flow, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3645
3646 new_flow->socket = TRUE;
3647 new_flow->u.socket_handle = inp;
3648 new_flow->u.cb = inp->necp_cb;
3649
3650 OSIncrementAtomic(&necp_socket_flow_count);
3651
3652 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
3653 }
3654
3655 static int
necp_client_register_socket_inner(pid_t pid,uuid_t client_id,struct inpcb * inp,bool is_listener)3656 necp_client_register_socket_inner(pid_t pid, uuid_t client_id, struct inpcb *inp, bool is_listener)
3657 {
3658 int error = 0;
3659 struct necp_fd_data *client_fd = NULL;
3660 bool found_client = FALSE;
3661
3662 NECP_FD_LIST_LOCK_SHARED();
3663 LIST_FOREACH(client_fd, &necp_fd_list, chain) {
3664 NECP_FD_LOCK(client_fd);
3665 struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
3666 if (client != NULL) {
3667 if (!pid || client->proc_pid == pid) {
3668 if (is_listener) {
3669 found_client = TRUE;
3670 #if SKYWALK
3671 // Check netns token for registration
3672 if (!NETNS_TOKEN_VALID(&client->port_reservation)) {
3673 error = EINVAL;
3674 }
3675 #endif /* !SKYWALK */
3676 } else {
3677 // Find client flow and assign from socket
3678 struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
3679 if (flow_registration != NULL) {
3680 // Found the right client and flow registration, add a new flow
3681 found_client = TRUE;
3682 necp_client_add_socket_flow(flow_registration, inp);
3683 } else if (RB_EMPTY(&client->flow_registrations) && !necp_client_id_is_flow(client_id)) {
3684 // No flows yet on this client, add a new registration
3685 flow_registration = necp_client_create_flow_registration(client_fd, client);
3686 if (flow_registration == NULL) {
3687 error = ENOMEM;
3688 } else {
3689 // Add a new flow
3690 found_client = TRUE;
3691 necp_client_add_socket_flow(flow_registration, inp);
3692 }
3693 }
3694 }
3695 }
3696
3697 NECP_CLIENT_UNLOCK(client);
3698 }
3699 NECP_FD_UNLOCK(client_fd);
3700
3701 if (found_client) {
3702 break;
3703 }
3704 }
3705 NECP_FD_LIST_UNLOCK();
3706
3707 if (!found_client) {
3708 error = ENOENT;
3709 } else {
3710 // Count the sockets that have the NECP client UUID set
3711 struct socket *so = inp->inp_socket;
3712 if (!(so->so_flags1 & SOF1_HAS_NECP_CLIENT_UUID)) {
3713 so->so_flags1 |= SOF1_HAS_NECP_CLIENT_UUID;
3714 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_necp_clientuuid_total);
3715 }
3716 }
3717
3718 return error;
3719 }
3720
/*
 * Register a socket as a data flow for the NECP client `client_id`.
 * Thin wrapper over necp_client_register_socket_inner(is_listener=false).
 */
int
necp_client_register_socket_flow(pid_t pid, uuid_t client_id, struct inpcb *inp)
{
	return necp_client_register_socket_inner(pid, client_id, inp, false);
}
3726
/*
 * Register a socket as a listener for the NECP client `client_id`.
 * Thin wrapper over necp_client_register_socket_inner(is_listener=true).
 */
int
necp_client_register_socket_listener(pid_t pid, uuid_t client_id, struct inpcb *inp)
{
	return necp_client_register_socket_inner(pid, client_id, inp, true);
}
3732
#if SKYWALK
/*
 * Look up the NECP client for `client_id` and copy the flow info from
 * its netns port reservation into `flow_info`.
 * Returns 0 on success, EINVAL if the client has no valid port
 * reservation, or ENOENT if no client matches.
 */
int
necp_client_get_netns_flow_info(uuid_t client_id, struct ns_flow_info *flow_info)
{
	struct necp_fd_data *fd_entry = NULL;
	bool matched = FALSE;
	int error = 0;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(fd_entry, &necp_fd_list, chain) {
		NECP_FD_LOCK(fd_entry);
		struct necp_client *client = necp_client_fd_find_client_and_lock(fd_entry, client_id);
		if (client != NULL) {
			matched = TRUE;
			if (NETNS_TOKEN_VALID(&client->port_reservation)) {
				error = netns_get_flow_info(&client->port_reservation, flow_info);
			} else {
				error = EINVAL;
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(fd_entry);

		if (matched) {
			// Client IDs are unique; stop at the first match.
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!matched) {
		error = ENOENT;
	}

	return error;
}
#endif /* SKYWALK */
3770
/*
 * Attach the multipath PCB `mpp` (and its callback) to the flow
 * registration, then create per-interface tracking flows for the
 * client.  If the owning process is already gone, interface flow
 * creation is skipped.
 */
static void
necp_client_add_multipath_interface_flows(struct necp_client_flow_registration *flow_registration,
    struct necp_client *client,
    struct mppcb *mpp)
{
	flow_registration->interface_handle = mpp;
	flow_registration->interface_cb = mpp->necp_cb;

	proc_t proc = proc_find(client->proc_pid);
	if (proc == PROC_NULL) {
		// Process exited; nothing to traverse on its behalf
		return;
	}

	// Traverse all interfaces and add a tracking flow if needed
	necp_flow_add_interface_flows(proc, client, flow_registration, true);

	proc_rele(proc);
	proc = PROC_NULL;
}
3790
/*
 * Register a multipath PCB `mpp` as the interface handler for the NECP
 * client `client_id` (restricted to process `pid` unless pid is 0).
 * Adds multipath interface flows to the client's flow registration,
 * creating a registration first if the client has none.
 * Returns 0 on success, ENOENT if no matching client was found, or an
 * error recorded during registration.
 */
int
necp_client_register_multipath_cb(pid_t pid, uuid_t client_id, struct mppcb *mpp)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			// pid == 0 means "any owning process"
			if (!pid || client->proc_pid == pid) {
				struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
				if (flow_registration != NULL) {
					// Found the right client and flow registration, add a new flow
					found_client = TRUE;
					necp_client_add_multipath_interface_flows(flow_registration, client, mpp);
				} else if (RB_EMPTY(&client->flow_registrations) && !necp_client_id_is_flow(client_id)) {
					// No flows yet on this client, add a new registration
					flow_registration = necp_client_create_flow_registration(client_fd, client);
					if (flow_registration == NULL) {
						error = ENOMEM;
					} else {
						// Add a new flow
						found_client = TRUE;
						necp_client_add_multipath_interface_flows(flow_registration, client, mpp);
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	// Preserve any error recorded above; otherwise report "not found"
	if (!found_client && error == 0) {
		error = ENOENT;
	}

	return error;
}
3838
// Netagent domain/type strings identifying the wireless baseband radio
// manager agent, matched by necp_client_lookup_bb_radio_manager().
#define NETAGENT_DOMAIN_RADIO_MANAGER "WirelessRadioManager"
#define NETAGENT_TYPE_RADIO_MANAGER "WirelessRadioManager:BB Manager"
3841
3842 static int
necp_client_lookup_bb_radio_manager(struct necp_client * client,uuid_t netagent_uuid)3843 necp_client_lookup_bb_radio_manager(struct necp_client *client,
3844 uuid_t netagent_uuid)
3845 {
3846 char netagent_domain[NETAGENT_DOMAINSIZE];
3847 char netagent_type[NETAGENT_TYPESIZE];
3848 struct necp_aggregate_result result;
3849 proc_t proc;
3850 int error;
3851
3852 proc = proc_find(client->proc_pid);
3853 if (proc == PROC_NULL) {
3854 return ESRCH;
3855 }
3856
3857 error = necp_application_find_policy_match_internal(proc, client->parameters, (u_int32_t)client->parameters_length,
3858 &result, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, true, true, NULL);
3859
3860 proc_rele(proc);
3861 proc = PROC_NULL;
3862
3863 if (error) {
3864 return error;
3865 }
3866
3867 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
3868 if (uuid_is_null(result.netagents[i])) {
3869 // Passed end of valid agents
3870 break;
3871 }
3872
3873 memset(&netagent_domain, 0, NETAGENT_DOMAINSIZE);
3874 memset(&netagent_type, 0, NETAGENT_TYPESIZE);
3875 if (netagent_get_agent_domain_and_type(result.netagents[i], netagent_domain, netagent_type) == FALSE) {
3876 continue;
3877 }
3878
3879 if (strlcmp(netagent_domain, NETAGENT_DOMAIN_RADIO_MANAGER, NETAGENT_DOMAINSIZE) != 0) {
3880 continue;
3881 }
3882
3883 if (strlcmp(netagent_type, NETAGENT_TYPE_RADIO_MANAGER, NETAGENT_TYPESIZE) != 0) {
3884 continue;
3885 }
3886
3887 uuid_copy(netagent_uuid, result.netagents[i]);
3888
3889 break;
3890 }
3891
3892 return 0;
3893 }
3894
3895 static int
necp_client_assert_bb_radio_manager_common(struct necp_client * client,bool assert)3896 necp_client_assert_bb_radio_manager_common(struct necp_client *client, bool assert)
3897 {
3898 uuid_t netagent_uuid;
3899 uint8_t assert_type;
3900 int error;
3901
3902 error = necp_client_lookup_bb_radio_manager(client, netagent_uuid);
3903 if (error) {
3904 NECPLOG0(LOG_ERR, "BB radio manager agent not found");
3905 return error;
3906 }
3907
3908 // Before unasserting, verify that the assertion was already taken
3909 if (assert == FALSE) {
3910 assert_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
3911
3912 if (!necp_client_remove_assertion(client, netagent_uuid)) {
3913 return EINVAL;
3914 }
3915 } else {
3916 assert_type = NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT;
3917 }
3918
3919 error = netagent_client_message(netagent_uuid, client->client_id, client->proc_pid, client->agent_handle, assert_type);
3920 if (error) {
3921 NECPLOG0(LOG_ERR, "netagent_client_message failed");
3922 return error;
3923 }
3924
3925 // Only save the assertion if the action succeeded
3926 if (assert == TRUE) {
3927 necp_client_add_assertion(client, netagent_uuid);
3928 }
3929
3930 return 0;
3931 }
3932
3933 int
necp_client_assert_bb_radio_manager(uuid_t client_id,bool assert)3934 necp_client_assert_bb_radio_manager(uuid_t client_id, bool assert)
3935 {
3936 struct necp_client *client;
3937 int error = 0;
3938
3939 NECP_CLIENT_TREE_LOCK_SHARED();
3940
3941 client = necp_find_client_and_lock(client_id);
3942
3943 if (client) {
3944 // Found the right client!
3945 error = necp_client_assert_bb_radio_manager_common(client, assert);
3946
3947 NECP_CLIENT_UNLOCK(client);
3948 } else {
3949 NECPLOG0(LOG_ERR, "Couldn't find client");
3950 error = ENOENT;
3951 }
3952
3953 NECP_CLIENT_TREE_UNLOCK();
3954
3955 return error;
3956 }
3957
/*
 * Remove all socket-backed flows that reference the given socket handle
 * from the flow registration matching client_id.
 *
 * Walks every NECP fd looking for the owning client; once found, frees
 * the matching flows (and any results assigned to them) and notifies
 * the fd if anything changed.
 *
 * Returns 0 on success, ENOENT if no matching client/flow was found.
 */
static int
necp_client_unregister_socket_flow(uuid_t client_id, void *handle)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);

		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;

				// Remove flow assignment
				// SAFE iteration required: entries are unlinked and freed mid-walk
				struct necp_client_flow * __single search_flow = NULL;
				struct necp_client_flow *temp_flow = NULL;
				LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
					if (search_flow->socket && search_flow->u.socket_handle == handle) {
						// Release any results previously assigned to this flow
						if (search_flow->assigned_results != NULL) {
							kfree_data_counted_by(search_flow->assigned_results, search_flow->assigned_results_length);
						}
						client_updated = TRUE;
						// Force the client to re-read flow results after this change
						flow_registration->flow_result_read = FALSE;
						LIST_REMOVE(search_flow, flow_chain);
						OSDecrementAtomic(&necp_socket_flow_count);
						kfree_type(struct necp_client_flow, search_flow);
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}

		// Notify while the fd lock is still held, before moving on
		if (client_updated) {
			necp_fd_notify(client_fd, true);
		}
		NECP_FD_UNLOCK(client_fd);

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	}

	return error;
}
4014
4015 static int
necp_client_unregister_multipath_cb(uuid_t client_id,void * handle)4016 necp_client_unregister_multipath_cb(uuid_t client_id, void *handle)
4017 {
4018 int error = 0;
4019 bool found_client = FALSE;
4020
4021 NECP_CLIENT_TREE_LOCK_SHARED();
4022
4023 struct necp_client *client = necp_find_client_and_lock(client_id);
4024 if (client != NULL) {
4025 struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
4026 if (flow_registration != NULL) {
4027 // Found the right client and flow!
4028 found_client = TRUE;
4029
4030 // Remove flow assignment
4031 struct necp_client_flow *search_flow = NULL;
4032 struct necp_client_flow *temp_flow = NULL;
4033 LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
4034 if (!search_flow->socket && !search_flow->nexus &&
4035 search_flow->u.socket_handle == handle) {
4036 search_flow->u.socket_handle = NULL;
4037 search_flow->u.cb = NULL;
4038 }
4039 }
4040
4041 flow_registration->interface_handle = NULL;
4042 flow_registration->interface_cb = NULL;
4043 }
4044
4045 NECP_CLIENT_UNLOCK(client);
4046 }
4047
4048 NECP_CLIENT_TREE_UNLOCK();
4049
4050 if (!found_client) {
4051 error = ENOENT;
4052 }
4053
4054 return error;
4055 }
4056
/*
 * Assign connection results to a socket-backed flow directly from the
 * socket's inpcb state (interface, addresses, ports) instead of from a
 * userspace agent.
 *
 * pid may be 0 to search every NECP fd; otherwise only fds owned by
 * that pid are considered. If the client has no flow registration yet
 * (and client_id is not itself a flow id), a new registration is
 * created on the fly.
 *
 * Returns 0 on success, ENOENT if no matching client was found, EINVAL
 * if the client was found but no matching socket flow was updated, or
 * ENOMEM if the new flow registration could not be allocated.
 */
int
necp_client_assign_from_socket(pid_t pid, uuid_t client_id, struct inpcb *inp)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		if (pid && client_fd->proc_pid != pid) {
			continue;
		}

		// Hold a proc reference across the flow-viability evaluation below
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);

		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration == NULL && RB_EMPTY(&client->flow_registrations) && !necp_client_id_is_flow(client_id)) {
				// No flows yet on this client, add a new registration
				flow_registration = necp_client_create_flow_registration(client_fd, client);
				if (flow_registration == NULL) {
					error = ENOMEM;
				}
			}
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;

				struct necp_client_flow *flow = NULL;
				LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
					if (flow->socket && flow->u.socket_handle == inp) {
						// Release prior results and route
						if (flow->assigned_results != NULL) {
							kfree_data_counted_by(flow->assigned_results, flow->assigned_results_length);
						}

						// Prefer an explicitly bound interface over the last output interface
						ifnet_t ifp = NULL;
						if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
							ifp = inp->inp_boundifp;
						} else {
							ifp = inp->inp_last_outifp;
						}

						if (ifp != NULL) {
							flow->interface_index = ifp->if_index;
						} else {
							flow->interface_index = IFSCOPE_NONE;
						}

						// Copy local/remote endpoints out of the inpcb
						if (inp->inp_vflag & INP_IPV4) {
							flow->local_addr.sin.sin_family = AF_INET;
							flow->local_addr.sin.sin_len = sizeof(struct sockaddr_in);
							flow->local_addr.sin.sin_port = inp->inp_lport;
							memcpy(&flow->local_addr.sin.sin_addr, &inp->inp_laddr, sizeof(struct in_addr));

							flow->remote_addr.sin.sin_family = AF_INET;
							flow->remote_addr.sin.sin_len = sizeof(struct sockaddr_in);
							flow->remote_addr.sin.sin_port = inp->inp_fport;
							memcpy(&flow->remote_addr.sin.sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
						} else if (inp->inp_vflag & INP_IPV6) {
							in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope, &flow->local_addr.sin6, sizeof(flow->local_addr));
							in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope, &flow->remote_addr.sin6, sizeof(flow->remote_addr));
						}

						flow->viable = necp_client_flow_is_viable(proc, client, flow);

						// Assign an "empty agent" result carrying only the endpoints
						uuid_t empty_uuid;
						uuid_clear(empty_uuid);
						flow->assigned = TRUE;

						size_t message_length;
						void *message = necp_create_nexus_assign_message(empty_uuid, 0, NULL, 0,
						    (struct necp_client_endpoint *)&flow->local_addr,
						    (struct necp_client_endpoint *)&flow->remote_addr,
						    NULL, 0, NULL, 0, &message_length);
						flow->assigned_results = message;
						flow->assigned_results_length = message_length;
						// Force the client to re-read flow results
						flow_registration->flow_result_read = FALSE;
						client_updated = TRUE;
						break;
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		if (client_updated) {
			necp_fd_notify(client_fd, true);
		}
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	// Preserve an ENOMEM from registration creation over the generic errors
	if (error == 0) {
		if (!found_client) {
			error = ENOENT;
		} else if (!client_updated) {
			error = EINVAL;
		}
	}

	return error;
}
4174
/*
 * Policy check for inbound traffic on listener sockets: decide whether
 * the socket's NECP client parameters allow receiving on the given
 * interface.
 *
 * Sockets without an interface/client to check, non-listeners, and
 * connected sockets (remote address set) pass immediately — outbound
 * connections were already validated for policy. Otherwise the client's
 * parameters are parsed and matched against the interface.
 */
bool
necp_socket_is_allowed_to_recv_on_interface(struct inpcb *inp, ifnet_t interface)
{
	if (interface == NULL ||
	    inp == NULL ||
	    !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
	    uuid_is_null(inp->necp_client_uuid)) {
		// If there's no interface or client ID to check,
		// or if this is not a listener, pass.
		// Outbound connections will have already been
		// validated for policy.
		return TRUE;
	}

	// Only filter out listener sockets (no remote address specified)
	if ((inp->inp_vflag & INP_IPV4) &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		return TRUE;
	}
	if ((inp->inp_vflag & INP_IPV6) &&
	    !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
		return TRUE;
	}

	// Default allow: deny only if the interface fails the client's parameters
	bool allowed = TRUE;

	NECP_CLIENT_TREE_LOCK_SHARED();

	struct necp_client *client = necp_find_client_and_lock(inp->necp_client_uuid);
	if (client != NULL) {
		struct necp_client_parsed_parameters * __single parsed_parameters = NULL;

		// Z_NOFAIL: allocation cannot fail, no NULL check needed
		parsed_parameters = kalloc_type(struct necp_client_parsed_parameters,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		int error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, parsed_parameters);
		if (error == 0) {
			if (!necp_ifnet_matches_parameters(interface, parsed_parameters, 0, NULL, true, false)) {
				allowed = FALSE;
			}
		}
		kfree_type(struct necp_client_parsed_parameters, parsed_parameters);

		NECP_CLIENT_UNLOCK(client);
	}

	NECP_CLIENT_TREE_UNLOCK();

	return allowed;
}
4224
/*
 * Record a protocol control event (code/value and, for TCP, a sequence
 * number) on the flow identified by client_id so the owning client can
 * read it.
 *
 * The event is attached to the first flow that is either a nexus flow
 * whose agent matches netagent_uuid, or any socket flow; the fd is then
 * notified.
 *
 * Returns 0 on success, ENOENT if no client/flow matched, EINVAL if a
 * flow registration was found but no flow was updated.
 */
int
necp_update_flow_protoctl_event(uuid_t netagent_uuid, uuid_t client_id,
    uint32_t protoctl_event_code, uint32_t protoctl_event_val,
    uint32_t protoctl_event_tcp_seq_number)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		// Hold a proc reference while this fd is examined
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);

		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;

				struct necp_client_flow *flow = NULL;
				LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
					// Verify that the client nexus agent matches
					if ((flow->nexus && uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) ||
					    flow->socket) {
						flow->has_protoctl_event = TRUE;
						flow->protoctl_event.protoctl_event_code = protoctl_event_code;
						flow->protoctl_event.protoctl_event_val = protoctl_event_val;
						flow->protoctl_event.protoctl_event_tcp_seq_num = protoctl_event_tcp_seq_number;
						// Mark results dirty so the client re-reads them
						flow_registration->flow_result_read = FALSE;
						client_updated = TRUE;
						break;
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}

		if (client_updated) {
			necp_fd_notify(client_fd, true);
		}

		NECP_FD_UNLOCK(client_fd);
		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	} else if (!client_updated) {
		error = EINVAL;
	}
	return error;
}
4291
/*
 * Assign agent-provided results to the nexus flow in flow_registration
 * whose agent matches netagent_uuid.
 *
 * On success the flow takes ownership of assigned_results (any prior
 * results are freed first). Nexus stats embedded in the results are
 * only honored when the assignment came from a kernel agent
 * (assigned_from_userspace_agent == false).
 *
 * Returns TRUE if a flow was updated; on FALSE the caller retains
 * ownership of assigned_results and must free it.
 *
 * Caller must hold both the fd lock and the client lock (asserted).
 */
static bool
necp_assign_client_result_locked(struct proc *proc,
    struct necp_fd_data *client_fd,
    struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    uuid_t netagent_uuid,
    u_int8_t * __indexable assigned_results,
    size_t assigned_results_length,
    bool notify_fd,
    bool assigned_from_userspace_agent)
{
	bool client_updated = FALSE;

	NECP_FD_ASSERT_LOCKED(client_fd);
	NECP_CLIENT_ASSERT_LOCKED(client);

	struct necp_client_flow *flow = NULL;
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		// Verify that the client nexus agent matches
		if (flow->nexus &&
		    uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) {
			// Release prior results and route
			if (flow->assigned_results != NULL) {
				kfree_data_counted_by(flow->assigned_results, flow->assigned_results_length);
			}

			void * __single nexus_stats = NULL;
			if (assigned_results != NULL && assigned_results_length > 0) {
				int error = necp_client_parse_result(assigned_results, (u_int32_t)assigned_results_length,
				    flow, assigned_from_userspace_agent ? NULL : &nexus_stats); // Only assign stats from kernel agents
				VERIFY(error == 0);
			}

			flow->viable = necp_client_flow_is_viable(proc, client, flow);

			// The flow now owns assigned_results
			flow->assigned = TRUE;
			flow->assigned_results = assigned_results;
			flow->assigned_results_length = assigned_results_length;
			flow_registration->flow_result_read = FALSE;
#if SKYWALK
			// Swap in the new stats region, releasing any prior one
			if (nexus_stats != NULL) {
				if (flow_registration->nexus_stats != NULL) {
					flow_stats_release(flow_registration->nexus_stats);
				}
				flow_registration->nexus_stats = nexus_stats;
			}
#endif /* SKYWALK */
			client_updated = TRUE;
			break;
		}
	}

	if (client_updated && notify_fd) {
		necp_fd_notify(client_fd, true);
	}

	// if not updated, client must free assigned_results
	return client_updated;
}
4351
/*
 * Agent-facing entry point: assign results to the client flow identified
 * by client_id. Searches every NECP fd for the owning client, then
 * delegates to necp_assign_client_result_locked.
 *
 * Ownership of assigned_results transfers to the flow on success (0);
 * on error (ENOENT/EINVAL) the caller must free assigned_results.
 */
int
necp_assign_client_result(uuid_t netagent_uuid, uuid_t client_id,
    u_int8_t * __sized_by(assigned_results_length)assigned_results, size_t assigned_results_length)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		// Hold a proc reference for the viability check in the locked helper
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;
				if (necp_assign_client_result_locked(proc, client_fd, client, flow_registration, netagent_uuid,
				    assigned_results, assigned_results_length, true, true)) {
					client_updated = TRUE;
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();

	// upon error, client must free assigned_results
	if (!found_client) {
		error = ENOENT;
	} else if (!client_updated) {
		error = EINVAL;
	}

	return error;
}
4405
/*
 * Assign a new group-member list to the client identified by client_id.
 *
 * On success (0) the client takes ownership of assigned_group_members
 * (any previous list is freed), the read flag is cleared, and the fd is
 * notified. On error (ENOENT/EINVAL) the caller retains ownership of
 * assigned_group_members and must free it.
 */
int
necp_assign_client_group_members(uuid_t netagent_uuid, uuid_t client_id,
    u_int8_t *__counted_by(assigned_group_members_length) assigned_group_members,
    size_t assigned_group_members_length)
{
#pragma unused(netagent_uuid)
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = false;
	bool client_updated = false;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		// Hold a proc reference while this fd is examined
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			found_client = true;
			// Release prior results
			if (client->assigned_group_members != NULL) {
				kfree_data_counted_by(client->assigned_group_members, client->assigned_group_members_length);
			}

			// Save new results; the client now owns the buffer
			client->assigned_group_members = assigned_group_members;
			client->assigned_group_members_length = assigned_group_members_length;
			client->group_members_read = false;

			client_updated = true;
			necp_fd_notify(client_fd, true);

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();

	// upon error, caller must free assigned_group_members (not consumed)
	if (!found_client) {
		error = ENOENT;
	} else if (!client_updated) {
		error = EINVAL;
	}

	return error;
}
4465
4466 /// Client updating
4467
4468 static bool
necp_update_parsed_parameters(struct necp_client_parsed_parameters * parsed_parameters,struct necp_aggregate_result * result)4469 necp_update_parsed_parameters(struct necp_client_parsed_parameters *parsed_parameters,
4470 struct necp_aggregate_result *result)
4471 {
4472 if (parsed_parameters == NULL ||
4473 result == NULL) {
4474 return false;
4475 }
4476
4477 bool updated = false;
4478 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
4479 if (uuid_is_null(result->netagents[i])) {
4480 // Passed end of valid agents
4481 break;
4482 }
4483
4484 if (!(result->netagent_use_flags[i] & NECP_AGENT_USE_FLAG_SCOPE)) {
4485 // Not a scoped agent, ignore
4486 continue;
4487 }
4488
4489 // This is a scoped agent. Add it to the required agents.
4490 if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT) {
4491 // Already some required agents, add this at the end
4492 for (int j = 0; j < NECP_MAX_AGENT_PARAMETERS; j++) {
4493 if (uuid_compare(parsed_parameters->required_netagents[j], result->netagents[i]) == 0) {
4494 // Already required, break
4495 break;
4496 }
4497 if (uuid_is_null(parsed_parameters->required_netagents[j])) {
4498 // Add here
4499 memcpy(&parsed_parameters->required_netagents[j], result->netagents[i], sizeof(uuid_t));
4500 updated = true;
4501 break;
4502 }
4503 }
4504 } else {
4505 // No required agents yet, add this one
4506 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT;
4507 memcpy(&parsed_parameters->required_netagents[0], result->netagents[i], sizeof(uuid_t));
4508 updated = true;
4509 }
4510
4511 // Remove requirements for agents of the same type
4512 if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
4513 char remove_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
4514 char remove_agent_type[NETAGENT_TYPESIZE] = { 0 };
4515 if (netagent_get_agent_domain_and_type(result->netagents[i], remove_agent_domain, remove_agent_type)) {
4516 for (int j = 0; j < NECP_MAX_AGENT_PARAMETERS; j++) {
4517 if (strbuflen(parsed_parameters->required_netagent_types[j].netagent_domain, sizeof(parsed_parameters->required_netagent_types[j].netagent_domain)) == 0 &&
4518 strbuflen(parsed_parameters->required_netagent_types[j].netagent_type, sizeof(parsed_parameters->required_netagent_types[j].netagent_type)) == 0) {
4519 break;
4520 }
4521
4522 if (strbufcmp(parsed_parameters->required_netagent_types[j].netagent_domain, sizeof(parsed_parameters->required_netagent_types[j].netagent_domain), remove_agent_domain, NETAGENT_DOMAINSIZE) == 0 &&
4523 strbufcmp(parsed_parameters->required_netagent_types[j].netagent_type, sizeof(parsed_parameters->required_netagent_types[j].netagent_type), remove_agent_type, NETAGENT_TYPESIZE) == 0) {
4524 updated = true;
4525
4526 if (j == NECP_MAX_AGENT_PARAMETERS - 1) {
4527 // Last field, just clear and break
4528 memset(&parsed_parameters->required_netagent_types[NECP_MAX_AGENT_PARAMETERS - 1], 0, sizeof(struct necp_client_parameter_netagent_type));
4529 break;
4530 } else {
4531 // Move the parameters down, clear the last entry
4532 memmove(&parsed_parameters->required_netagent_types[j],
4533 &parsed_parameters->required_netagent_types[j + 1],
4534 sizeof(struct necp_client_parameter_netagent_type) * (NECP_MAX_AGENT_PARAMETERS - (j + 1)));
4535 memset(&parsed_parameters->required_netagent_types[NECP_MAX_AGENT_PARAMETERS - 1], 0, sizeof(struct necp_client_parameter_netagent_type));
4536 // Continue, don't increment but look at the new shifted item instead
4537 continue;
4538 }
4539 }
4540
4541 // Increment j to look at the next agent type parameter
4542 j++;
4543 }
4544 }
4545 }
4546 }
4547
4548 if (updated &&
4549 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4550 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) == 0) {
4551 // A required interface index was added after the fact. Clear it.
4552 parsed_parameters->required_interface_index = IFSCOPE_NONE;
4553 }
4554
4555
4556 return updated;
4557 }
4558
4559 static inline bool
necp_agent_types_match(const char * __sized_by (NETAGENT_DOMAINSIZE)agent_domain1,const char * __sized_by (NETAGENT_TYPESIZE)agent_type1,const char * __sized_by (NETAGENT_DOMAINSIZE)agent_domain2,const char * __sized_by (NETAGENT_TYPESIZE)agent_type2)4560 necp_agent_types_match(const char * __sized_by(NETAGENT_DOMAINSIZE)agent_domain1, const char * __sized_by(NETAGENT_TYPESIZE)agent_type1,
4561 const char * __sized_by(NETAGENT_DOMAINSIZE)agent_domain2, const char * __sized_by(NETAGENT_TYPESIZE)agent_type2)
4562 {
4563 return (strbuflen(agent_domain1, NETAGENT_DOMAINSIZE) == 0 ||
4564 strbufcmp(agent_domain2, NETAGENT_DOMAINSIZE, agent_domain1, NETAGENT_DOMAINSIZE) == 0) &&
4565 (strbuflen(agent_type1, NETAGENT_TYPESIZE) == 0 ||
4566 strbufcmp(agent_type2, NETAGENT_TYPESIZE, agent_type1, NETAGENT_TYPESIZE) == 0);
4567 }
4568
/*
 * Compute the aggregate policy result for a client: find the best
 * matching interface for the parsed parameters, run the policy match,
 * and (when requested) validate the client's required agents / agent
 * types against the agents returned in the result.
 *
 * On success the client's cached route is replaced by the newly
 * computed one. If no interface matches, or an agent requirement
 * fails, the result is zeroed so that everything fails downstream.
 *
 * Returns TRUE when a result (possibly an all-zero "fail" result) was
 * produced; FALSE only when the policy match itself returned an error.
 */
static inline bool
necp_calculate_client_result(proc_t proc,
    struct necp_client *client,
    struct necp_client_parsed_parameters *parsed_parameters,
    struct necp_aggregate_result *result,
    u_int32_t *flags,
    u_int32_t *reason,
    struct necp_client_endpoint *v4_gateway,
    struct necp_client_endpoint *v6_gateway,
    uuid_t *override_euuid)
{
	struct rtentry * __single route = NULL;

	// Check parameters to find best interface
	bool validate_agents = false;
	u_int matching_if_index = 0;
	if (necp_find_matching_interface_index(parsed_parameters, &matching_if_index, &validate_agents)) {
		if (matching_if_index != 0) {
			parsed_parameters->required_interface_index = matching_if_index;
		}
		// Interface found or not needed, match policy.
		memset(result, 0, sizeof(*result));
		int error = necp_application_find_policy_match_internal(proc, client->parameters,
		    (u_int32_t)client->parameters_length,
		    result, flags, reason, matching_if_index,
		    NULL, NULL,
		    v4_gateway, v6_gateway,
		    &route, false, true,
		    override_euuid);
		if (error != 0) {
			// Match failed: release any route reference that was returned
			if (route != NULL) {
				rtfree(route);
			}
			return FALSE;
		}

		if (validate_agents) {
			bool requirement_failed = FALSE;
			if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT) {
				// Every required agent UUID must appear (non-removed) in the result
				for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
					if (uuid_is_null(parsed_parameters->required_netagents[i])) {
						break;
					}

					bool requirement_found = FALSE;
					for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
						if (uuid_is_null(result->netagents[j])) {
							break;
						}

						if (result->netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) {
							// A removed agent, ignore
							continue;
						}

						if (uuid_compare(parsed_parameters->required_netagents[i], result->netagents[j]) == 0) {
							requirement_found = TRUE;
							break;
						}
					}

					if (!requirement_found) {
						requirement_failed = TRUE;
						break;
					}
				}
			}

			if (!requirement_failed && parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
				// Every required agent domain/type must match some agent in the result
				for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
					if (strbuflen(parsed_parameters->required_netagent_types[i].netagent_domain, sizeof(parsed_parameters->required_netagent_types[i].netagent_domain)) == 0 &&
					    strbuflen(parsed_parameters->required_netagent_types[i].netagent_type, sizeof(parsed_parameters->required_netagent_types[i].netagent_type)) == 0) {
						break;
					}

					bool requirement_found = FALSE;
					for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
						if (uuid_is_null(result->netagents[j])) {
							break;
						}

						if (result->netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) {
							// A removed agent, ignore
							continue;
						}

						char policy_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
						char policy_agent_type[NETAGENT_TYPESIZE] = { 0 };

						if (netagent_get_agent_domain_and_type(result->netagents[j], policy_agent_domain, policy_agent_type)) {
							if (necp_agent_types_match(parsed_parameters->required_netagent_types[i].netagent_domain,
							    parsed_parameters->required_netagent_types[i].netagent_type,
							    policy_agent_domain, policy_agent_type)) {
								requirement_found = TRUE;
								break;
							}
						}
					}

					if (!requirement_found) {
						requirement_failed = TRUE;
						break;
					}
				}
			}

			if (requirement_failed) {
				// Agent requirement failed. Clear out the whole result, make everything fail.
				memset(result, 0, sizeof(*result));
				if (route != NULL) {
					rtfree(route);
				}
				return TRUE;
			}
		}

		// Reset current route: drop the old reference, keep the new one
		NECP_CLIENT_ROUTE_LOCK(client);
		if (client->current_route != NULL) {
			rtfree(client->current_route);
		}
		client->current_route = route;
		NECP_CLIENT_ROUTE_UNLOCK(client);
	} else {
		// Interface not found. Clear out the whole result, make everything fail.
		memset(result, 0, sizeof(*result));
	}

	return TRUE;
}
4699
4700 static bool
necp_agent_is_removed_by_type(struct necp_aggregate_result * result,uuid_t agent_uuid)4701 necp_agent_is_removed_by_type(struct necp_aggregate_result *result,
4702 uuid_t agent_uuid)
4703 {
4704 for (int i = 0; i < NECP_MAX_REMOVE_NETAGENT_TYPES; i++) {
4705 if (result->remove_netagent_types[i].agent_domain[0] == 0 &&
4706 result->remove_netagent_types[i].agent_type[0] == 0) {
4707 // Empty type, hit the end of the list
4708 return false;
4709 }
4710
4711 char compare_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
4712 char compare_agent_type[NETAGENT_TYPESIZE] = { 0 };
4713 if (netagent_get_agent_domain_and_type(agent_uuid, compare_agent_domain, compare_agent_type)) {
4714 if (necp_agent_types_match(result->remove_netagent_types[i].agent_domain,
4715 result->remove_netagent_types[i].agent_type,
4716 compare_agent_domain, compare_agent_type)) {
4717 return true;
4718 }
4719 }
4720 }
4721 return false;
4722 }
4723
4724 #define NECP_PARSED_PARAMETERS_REQUIRED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF | \
4725 NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
4726 NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
4727 NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE)
4728
4729 static bool
necp_update_client_result(proc_t proc,struct necp_fd_data * client_fd,struct necp_client * client,struct _necp_flow_defunct_list * defunct_list)4730 necp_update_client_result(proc_t proc,
4731 struct necp_fd_data *client_fd,
4732 struct necp_client *client,
4733 struct _necp_flow_defunct_list *defunct_list)
4734 {
4735 struct necp_client_result_netagent netagent;
4736 struct necp_aggregate_result result;
4737 struct necp_client_parsed_parameters * __single parsed_parameters = NULL;
4738 u_int32_t flags = 0;
4739 u_int32_t reason = 0;
4740
4741 NECP_CLIENT_ASSERT_LOCKED(client);
4742
4743 parsed_parameters = kalloc_type(struct necp_client_parsed_parameters,
4744 Z_WAITOK | Z_ZERO | Z_NOFAIL);
4745
4746 // Nexus flows will be brought back if they are still valid
4747 necp_client_mark_all_nonsocket_flows_as_invalid(client);
4748
4749 int error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, parsed_parameters);
4750 if (error != 0) {
4751 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4752 return FALSE;
4753 }
4754 bool originally_scoped = (parsed_parameters->required_interface_index != IFSCOPE_NONE);
4755
4756 // Update saved IP protocol
4757 client->ip_protocol = parsed_parameters->ip_protocol;
4758
4759 // Calculate the policy result
4760 struct necp_client_endpoint v4_gateway = {};
4761 struct necp_client_endpoint v6_gateway = {};
4762 uuid_t override_euuid;
4763 uuid_clear(override_euuid);
4764 if (!necp_calculate_client_result(proc, client, parsed_parameters, &result, &flags, &reason, &v4_gateway, &v6_gateway, &override_euuid)) {
4765 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4766 return FALSE;
4767 }
4768
4769 if (necp_update_parsed_parameters(parsed_parameters, &result)) {
4770 // Changed the parameters based on result, try again (only once)
4771 if (!necp_calculate_client_result(proc, client, parsed_parameters, &result, &flags, &reason, &v4_gateway, &v6_gateway, &override_euuid)) {
4772 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4773 return FALSE;
4774 }
4775 }
4776
4777 if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) &&
4778 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4779 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) == 0) {
4780 // Listener should not apply required interface index if
4781 parsed_parameters->required_interface_index = IFSCOPE_NONE;
4782 }
4783
4784 // Save the last policy id on the client
4785 client->policy_id = result.policy_id;
4786 client->skip_policy_id = result.skip_policy_id;
4787 uuid_copy(client->override_euuid, override_euuid);
4788
4789 if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_MULTIPATH) ||
4790 (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) ||
4791 ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) &&
4792 result.routing_result != NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED)) {
4793 client->allow_multiple_flows = TRUE;
4794 } else {
4795 client->allow_multiple_flows = FALSE;
4796 }
4797
4798 // If the original request was scoped, and the policy result matches, make sure the result is scoped
4799 if ((result.routing_result == NECP_KERNEL_POLICY_RESULT_NONE ||
4800 result.routing_result == NECP_KERNEL_POLICY_RESULT_PASS) &&
4801 result.routed_interface_index != IFSCOPE_NONE &&
4802 parsed_parameters->required_interface_index == result.routed_interface_index) {
4803 result.routing_result = NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED;
4804 result.routing_result_parameter.scoped_interface_index = result.routed_interface_index;
4805 }
4806
4807 if (defunct_list != NULL &&
4808 result.routing_result == NECP_KERNEL_POLICY_RESULT_DROP) {
4809 // If we are forced to drop the client, defunct it if it has flows
4810 necp_defunct_client_for_policy(client, defunct_list);
4811 }
4812
4813 // Recalculate flags
4814 if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) {
4815 // Listeners are valid as long as they aren't dropped
4816 if (result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP) {
4817 flags |= NECP_CLIENT_RESULT_FLAG_SATISFIED;
4818 }
4819 } else if (result.routed_interface_index != 0) {
4820 // Clients without flows determine viability based on having some routable interface
4821 flags |= NECP_CLIENT_RESULT_FLAG_SATISFIED;
4822 }
4823
4824 bool updated = FALSE;
4825 u_int8_t * __indexable cursor = client->result;
4826 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FLAGS, sizeof(flags), &flags, &updated, client->result, sizeof(client->result));
4827 if (reason != 0) {
4828 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_REASON, sizeof(reason), &reason, &updated, client->result, sizeof(client->result));
4829 }
4830 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_CLIENT_ID, sizeof(uuid_t), client->client_id, &updated,
4831 client->result, sizeof(client->result));
4832 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_POLICY_RESULT, sizeof(result.routing_result), &result.routing_result, &updated,
4833 client->result, sizeof(client->result));
4834 if (result.routing_result_parameter.tunnel_interface_index != 0) {
4835 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_POLICY_RESULT_PARAMETER,
4836 sizeof(result.routing_result_parameter), &result.routing_result_parameter, &updated,
4837 client->result, sizeof(client->result));
4838 }
4839 if (result.filter_control_unit != 0) {
4840 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FILTER_CONTROL_UNIT,
4841 sizeof(result.filter_control_unit), &result.filter_control_unit, &updated,
4842 client->result, sizeof(client->result));
4843 }
4844 if (result.flow_divert_aggregate_unit != 0) {
4845 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FLOW_DIVERT_AGGREGATE_UNIT,
4846 sizeof(result.flow_divert_aggregate_unit), &result.flow_divert_aggregate_unit, &updated,
4847 client->result, sizeof(client->result));
4848 }
4849 if (result.routed_interface_index != 0) {
4850 u_int routed_interface_index = result.routed_interface_index;
4851 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
4852 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_REQUIRED_FIELDS) &&
4853 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4854 parsed_parameters->required_interface_index != result.routed_interface_index) {
4855 routed_interface_index = parsed_parameters->required_interface_index;
4856 }
4857
4858 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_INDEX,
4859 sizeof(routed_interface_index), &routed_interface_index, &updated,
4860 client->result, sizeof(client->result));
4861 }
4862 if (client_fd && client_fd->flags & NECP_OPEN_FLAG_BACKGROUND) {
4863 u_int32_t effective_traffic_class = SO_TC_BK_SYS;
4864 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_EFFECTIVE_TRAFFIC_CLASS,
4865 sizeof(effective_traffic_class), &effective_traffic_class, &updated,
4866 client->result, sizeof(client->result));
4867 }
4868
4869 if (client_fd->background) {
4870 bool has_assigned_flow = FALSE;
4871 struct necp_client_flow_registration *flow_registration = NULL;
4872 struct necp_client_flow *search_flow = NULL;
4873 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
4874 LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
4875 if (search_flow->assigned) {
4876 has_assigned_flow = TRUE;
4877 break;
4878 }
4879 }
4880 }
4881
4882 if (has_assigned_flow) {
4883 u_int32_t background = client_fd->background;
4884 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_TRAFFIC_MGMT_BG,
4885 sizeof(background), &background, &updated,
4886 client->result, sizeof(client->result));
4887 }
4888 }
4889
4890 bool write_v4_gateway = !necp_client_endpoint_is_unspecified(&v4_gateway);
4891 bool write_v6_gateway = !necp_client_endpoint_is_unspecified(&v6_gateway);
4892
4893 NECP_CLIENT_ROUTE_LOCK(client);
4894 if (client->current_route != NULL) {
4895 const u_int32_t route_mtu = get_maxmtu(client->current_route);
4896 if (route_mtu != 0) {
4897 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_EFFECTIVE_MTU,
4898 sizeof(route_mtu), &route_mtu, &updated,
4899 client->result, sizeof(client->result));
4900 }
4901 bool has_remote_addr = parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR;
4902 if (has_remote_addr && client->current_route->rt_gateway != NULL) {
4903 if (client->current_route->rt_gateway->sa_family == AF_INET) {
4904 write_v6_gateway = false;
4905 } else if (client->current_route->rt_gateway->sa_family == AF_INET6) {
4906 write_v4_gateway = false;
4907 }
4908 }
4909
4910 if (client->current_route->rt_ifp != NULL) {
4911 int8_t if_lqm = client->current_route->rt_ifp->if_interface_state.lqm_state;
4912
4913 // Upgrade to enhancedLQM for cellular interfaces that support it
4914 if (client->current_route->rt_ifp->if_type == IFT_CELLULAR && client->current_route->rt_ifp->if_link_status != NULL) {
4915 struct if_cellular_status_v1 *cell_link_status = &client->current_route->rt_ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
4916
4917 if (cell_link_status->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
4918 if_lqm = ifnet_lqm_normalize(cell_link_status->link_quality_metric);
4919 }
4920 }
4921
4922 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_LINK_QUALITY,
4923 sizeof(if_lqm), &if_lqm, &updated,
4924 client->result, sizeof(client->result));
4925 }
4926 }
4927 NECP_CLIENT_ROUTE_UNLOCK(client);
4928
4929 if (write_v4_gateway) {
4930 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_GATEWAY,
4931 sizeof(struct necp_client_endpoint), (uint8_t *)(struct necp_client_endpoint * __bidi_indexable)&v4_gateway, &updated,
4932 client->result, sizeof(client->result));
4933 }
4934
4935 if (write_v6_gateway) {
4936 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_GATEWAY,
4937 sizeof(struct necp_client_endpoint), (uint8_t *)(struct necp_client_endpoint * __bidi_indexable)&v6_gateway, &updated,
4938 client->result, sizeof(client->result));
4939 }
4940
4941 for (int i = 0; i < NAT64_MAX_NUM_PREFIXES; i++) {
4942 if (result.nat64_prefixes[i].prefix_len != 0) {
4943 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NAT64,
4944 sizeof(result.nat64_prefixes), result.nat64_prefixes, &updated,
4945 client->result, sizeof(client->result));
4946 break;
4947 }
4948 }
4949
4950 if (result.mss_recommended != 0) {
4951 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_RECOMMENDED_MSS,
4952 sizeof(result.mss_recommended), &result.mss_recommended, &updated,
4953 client->result, sizeof(client->result));
4954 }
4955
4956 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
4957 if (uuid_is_null(result.netagents[i])) {
4958 break;
4959 }
4960 if (result.netagent_use_flags[i] & NECP_AGENT_USE_FLAG_REMOVE) {
4961 // A removed agent, ignore
4962 continue;
4963 }
4964
4965 if (necp_agent_is_removed_by_type(&result, result.netagents[i])) {
4966 // A removed agent, ignore
4967 continue;
4968 }
4969
4970 uuid_copy(netagent.netagent_uuid, result.netagents[i]);
4971 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
4972 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, TRUE, 0, 0)) {
4973 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
4974 client->result, sizeof(client->result));
4975 }
4976 }
4977
4978 ifnet_head_lock_shared();
4979 ifnet_t direct_interface = NULL;
4980 ifnet_t delegate_interface = NULL;
4981 ifnet_t original_scoped_interface = NULL;
4982
4983 if (result.routed_interface_index != IFSCOPE_NONE && result.routed_interface_index <= (u_int32_t)if_index) {
4984 direct_interface = ifindex2ifnet[result.routed_interface_index];
4985 } else if (parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4986 parsed_parameters->required_interface_index <= (u_int32_t)if_index) {
4987 // If the request was scoped, but the route didn't match, still grab the agents
4988 direct_interface = ifindex2ifnet[parsed_parameters->required_interface_index];
4989 } else if (result.routed_interface_index == IFSCOPE_NONE &&
4990 result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED &&
4991 result.routing_result_parameter.scoped_interface_index != IFSCOPE_NONE) {
4992 direct_interface = ifindex2ifnet[result.routing_result_parameter.scoped_interface_index];
4993 }
4994 if (direct_interface != NULL) {
4995 delegate_interface = direct_interface->if_delegated.ifp;
4996 }
4997 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
4998 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_REQUIRED_FIELDS) &&
4999 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
5000 parsed_parameters->required_interface_index != result.routing_result_parameter.tunnel_interface_index &&
5001 parsed_parameters->required_interface_index <= (u_int32_t)if_index) {
5002 original_scoped_interface = ifindex2ifnet[parsed_parameters->required_interface_index];
5003 }
5004 // Add interfaces
5005 if (original_scoped_interface != NULL) {
5006 struct necp_client_result_interface interface_struct;
5007 interface_struct.index = original_scoped_interface->if_index;
5008 interface_struct.generation = ifnet_get_generation(original_scoped_interface);
5009 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
5010 client->result, sizeof(client->result));
5011 }
5012 if (direct_interface != NULL) {
5013 struct necp_client_result_interface interface_struct;
5014 interface_struct.index = direct_interface->if_index;
5015 interface_struct.generation = ifnet_get_generation(direct_interface);
5016 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
5017 client->result, sizeof(client->result));
5018
5019 // Set the delta time since interface up/down
5020 struct timeval updown_delta = {};
5021 if (ifnet_updown_delta(direct_interface, &updown_delta) == 0) {
5022 u_int32_t delta = updown_delta.tv_sec;
5023 bool ignore_updated = FALSE;
5024 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_TIME_DELTA,
5025 sizeof(delta), &delta, &ignore_updated,
5026 client->result, sizeof(client->result));
5027 }
5028 }
5029 if (delegate_interface != NULL) {
5030 struct necp_client_result_interface interface_struct;
5031 interface_struct.index = delegate_interface->if_index;
5032 interface_struct.generation = ifnet_get_generation(delegate_interface);
5033 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
5034 client->result, sizeof(client->result));
5035 }
5036
5037 // Update multipath/listener interface flows
5038 if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_MULTIPATH && !(parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE)) {
5039 // Add the interface option for the routed interface first
5040 if (direct_interface != NULL) {
5041 // Add nexus agent
5042 necp_client_add_agent_interface_options(client, parsed_parameters, direct_interface);
5043
5044 // Add interface option in case it is not a nexus
5045 necp_client_add_interface_option_if_needed(client, direct_interface->if_index,
5046 ifnet_get_generation(direct_interface), NULL, false);
5047 }
5048 if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INBOUND) {
5049 // For inbound multipath, add from the global list (like a listener)
5050 struct ifnet *multi_interface = NULL;
5051 TAILQ_FOREACH(multi_interface, &ifnet_head, if_link) {
5052 if ((multi_interface->if_flags & (IFF_UP | IFF_RUNNING)) &&
5053 necp_ifnet_matches_parameters(multi_interface, parsed_parameters, 0, NULL, true, false)) {
5054 // Add nexus agents for inbound multipath
5055 necp_client_add_agent_interface_options(client, parsed_parameters, multi_interface);
5056 }
5057 }
5058 } else {
5059 // Get other multipath interface options from ordered list
5060 struct ifnet *multi_interface = NULL;
5061 TAILQ_FOREACH(multi_interface, &ifnet_ordered_head, if_ordered_link) {
5062 if (multi_interface != direct_interface &&
5063 necp_ifnet_matches_parameters(multi_interface, parsed_parameters, 0, NULL, true, false)) {
5064 // Add nexus agents for multipath
5065 necp_client_add_agent_interface_options(client, parsed_parameters, multi_interface);
5066
5067 // Add multipath interface flows for kernel MPTCP
5068 necp_client_add_interface_option_if_needed(client, multi_interface->if_index,
5069 ifnet_get_generation(multi_interface), NULL, false);
5070 }
5071 }
5072 }
5073 } else if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) {
5074 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
5075 if (direct_interface != NULL) {
5076 // If scoped, only listen on that interface
5077 // Add nexus agents for listeners
5078 necp_client_add_agent_interface_options(client, parsed_parameters, direct_interface);
5079
5080 // Add interface option in case it is not a nexus
5081 necp_client_add_interface_option_if_needed(client, direct_interface->if_index,
5082 ifnet_get_generation(direct_interface), NULL, false);
5083 }
5084 } else {
5085 // Get listener interface options from global list
5086 struct ifnet *listen_interface = NULL;
5087 TAILQ_FOREACH(listen_interface, &ifnet_head, if_link) {
5088 if ((listen_interface->if_flags & (IFF_UP | IFF_RUNNING)) &&
5089 necp_ifnet_matches_parameters(listen_interface, parsed_parameters, 0, NULL, true, false)) {
5090 // Add nexus agents for listeners
5091 necp_client_add_agent_interface_options(client, parsed_parameters, listen_interface);
5092 }
5093 }
5094 }
5095 } else if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) {
5096 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED && originally_scoped) {
5097 if (direct_interface != NULL) {
5098 // Add browse option if it has an agent
5099 necp_client_add_browse_interface_options(client, parsed_parameters, direct_interface);
5100 }
5101 } else {
5102 // Get browse interface options from global list
5103 struct ifnet *browse_interface = NULL;
5104 TAILQ_FOREACH(browse_interface, &ifnet_head, if_link) {
5105 if (necp_ifnet_matches_parameters(browse_interface, parsed_parameters, 0, NULL, true, false)) {
5106 necp_client_add_browse_interface_options(client, parsed_parameters, browse_interface);
5107 }
5108 }
5109 }
5110 }
5111
5112 struct necp_client_result_estimated_throughput throughput = {
5113 .up = 0,
5114 .down = 0,
5115 };
5116
5117 // Add agents
5118 if (original_scoped_interface != NULL) {
5119 ifnet_lock_shared(original_scoped_interface);
5120 if (original_scoped_interface->if_agentids != NULL) {
5121 for (u_int32_t i = 0; i < original_scoped_interface->if_agentcount; i++) {
5122 if (uuid_is_null(original_scoped_interface->if_agentids[i])) {
5123 continue;
5124 }
5125 bool skip_agent = false;
5126 for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
5127 if (uuid_is_null(result.netagents[j])) {
5128 break;
5129 }
5130 if ((result.netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) &&
5131 uuid_compare(original_scoped_interface->if_agentids[i], result.netagents[j]) == 0) {
5132 skip_agent = true;
5133 break;
5134 }
5135 }
5136
5137 if (!skip_agent && necp_agent_is_removed_by_type(&result, original_scoped_interface->if_agentids[i])) {
5138 skip_agent = true;
5139 }
5140
5141 if (skip_agent) {
5142 continue;
5143 }
5144
5145 uuid_copy(netagent.netagent_uuid, original_scoped_interface->if_agentids[i]);
5146 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
5147 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, FALSE,
5148 original_scoped_interface->if_index, ifnet_get_generation(original_scoped_interface))) {
5149 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
5150 client->result, sizeof(client->result));
5151 }
5152 }
5153 }
5154 ifnet_lock_done(original_scoped_interface);
5155 }
5156 if (direct_interface != NULL) {
5157 ifnet_lock_shared(direct_interface);
5158 throughput.up = direct_interface->if_estimated_up_bucket;
5159 throughput.down = direct_interface->if_estimated_down_bucket;
5160 if (direct_interface->if_agentids != NULL) {
5161 for (u_int32_t i = 0; i < direct_interface->if_agentcount; i++) {
5162 if (uuid_is_null(direct_interface->if_agentids[i])) {
5163 continue;
5164 }
5165 bool skip_agent = false;
5166 for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
5167 if (uuid_is_null(result.netagents[j])) {
5168 break;
5169 }
5170 if ((result.netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) &&
5171 uuid_compare(direct_interface->if_agentids[i], result.netagents[j]) == 0) {
5172 skip_agent = true;
5173 break;
5174 }
5175 }
5176
5177 if (!skip_agent && necp_agent_is_removed_by_type(&result, direct_interface->if_agentids[i])) {
5178 skip_agent = true;
5179 }
5180
5181 if (skip_agent) {
5182 continue;
5183 }
5184 uuid_copy(netagent.netagent_uuid, direct_interface->if_agentids[i]);
5185 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
5186 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, TRUE,
5187 direct_interface->if_index, ifnet_get_generation(direct_interface))) {
5188 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
5189 client->result, sizeof(client->result));
5190 }
5191 }
5192 }
5193 ifnet_lock_done(direct_interface);
5194 }
5195 if (delegate_interface != NULL) {
5196 ifnet_lock_shared(delegate_interface);
5197 if (throughput.up == 0 && throughput.down == 0) {
5198 throughput.up = delegate_interface->if_estimated_up_bucket;
5199 throughput.down = delegate_interface->if_estimated_down_bucket;
5200 }
5201 if (delegate_interface->if_agentids != NULL) {
5202 for (u_int32_t i = 0; i < delegate_interface->if_agentcount; i++) {
5203 if (uuid_is_null(delegate_interface->if_agentids[i])) {
5204 continue;
5205 }
5206 bool skip_agent = false;
5207 for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
5208 if (uuid_is_null(result.netagents[j])) {
5209 break;
5210 }
5211 if ((result.netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) &&
5212 uuid_compare(delegate_interface->if_agentids[i], result.netagents[j]) == 0) {
5213 skip_agent = true;
5214 break;
5215 }
5216 }
5217
5218 if (!skip_agent && necp_agent_is_removed_by_type(&result, delegate_interface->if_agentids[i])) {
5219 skip_agent = true;
5220 }
5221
5222 if (skip_agent) {
5223 continue;
5224 }
5225 uuid_copy(netagent.netagent_uuid, delegate_interface->if_agentids[i]);
5226 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
5227 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, FALSE,
5228 delegate_interface->if_index, ifnet_get_generation(delegate_interface))) {
5229 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
5230 client->result, sizeof(client->result));
5231 }
5232 }
5233 }
5234 ifnet_lock_done(delegate_interface);
5235 }
5236 ifnet_head_done();
5237
5238 if (throughput.up != 0 || throughput.down != 0) {
5239 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_ESTIMATED_THROUGHPUT,
5240 sizeof(throughput), &throughput, &updated, client->result, sizeof(client->result));
5241 }
5242
5243 // Add interface options
5244 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
5245 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
5246 struct necp_client_interface_option *option = &client->interface_options[option_i];
5247 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_OPTION, sizeof(*option), option, &updated,
5248 client->result, sizeof(client->result));
5249 } else {
5250 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
5251 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_OPTION, sizeof(*option), option, &updated,
5252 client->result, sizeof(client->result));
5253 }
5254 }
5255
5256 size_t new_result_length = (cursor - client->result);
5257 if (new_result_length != client->result_length) {
5258 client->result_length = new_result_length;
5259 updated = TRUE;
5260 }
5261
5262 // Update flow viability/flags
5263 if (necp_client_update_flows(proc, client, defunct_list)) {
5264 updated = TRUE;
5265 }
5266
5267 if (updated) {
5268 client->result_read = FALSE;
5269 necp_client_update_observer_update(client);
5270 }
5271
5272 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
5273 return updated;
5274 }
5275
/*
 * Walk every client on an fd being defuncted and collect, into
 * defunct_list, one necp_flow_defunct entry per nexus-backed flow so the
 * caller can later deliver close events to the nexus agents (after the
 * fd lock is dropped).
 *
 * client_fd     - fd whose clients are being defuncted; the NECP fd lock
 *                 must be held by the caller (asserted below).
 * defunct_list  - caller-provided list receiving the allocated
 *                 necp_flow_defunct entries; ownership transfers to the
 *                 caller, which is responsible for freeing them.
 * destroy_stats - if true, also release per-flow stats objects back to
 *                 their arena (SKYWALK only).
 *
 * Returns TRUE if any flow registration was newly marked defunct, so the
 * caller knows the fd needs to be notified.
 */
static bool
necp_defunct_client_fd_locked_inner(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, bool destroy_stats)
{
	bool updated_result = FALSE;
	struct necp_client *client = NULL;

	NECP_FD_ASSERT_LOCKED(client_fd);

	RB_FOREACH(client, _necp_client_tree, &client_fd->clients) {
		struct necp_client_flow_registration *flow_registration = NULL;

		NECP_CLIENT_LOCK(client);

		// Prepare close events to be sent to the nexus to effectively remove the flows
		struct necp_client_flow *search_flow = NULL;
		RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
			LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
				// Only nexus flows with a valid agent UUID need a close event
				if (search_flow->nexus &&
				    !uuid_is_null(search_flow->u.nexus_agent)) {
					// Sleeping alloc won't fail; copy only what's necessary
					struct necp_flow_defunct *flow_defunct = kalloc_type(struct necp_flow_defunct, Z_WAITOK | Z_ZERO);
					uuid_copy(flow_defunct->nexus_agent, search_flow->u.nexus_agent);
					// Flow id: some registrations are addressed by the client id
					// rather than their own registration id
					uuid_copy(flow_defunct->flow_id, ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
					    client->client_id :
					    flow_registration->registration_id));
					flow_defunct->proc_pid = client->proc_pid;
					flow_defunct->agent_handle = client->agent_handle;
					flow_defunct->flags = flow_registration->flags;
#if SKYWALK
					// For QUIC flows, snapshot the stateless-reset token into the
					// close parameters so it can accompany the close event
					if (flow_registration->kstats_kaddr != NULL) {
						struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
						struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
						if (quicstats != NULL &&
						    quicstats->necp_quic_udp_stats.necp_udp_hdr.necp_stats_type == NECP_CLIENT_STATISTICS_TYPE_QUIC) {
							memcpy(flow_defunct->close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(flow_defunct->close_parameters.u.close_token));
							flow_defunct->has_close_parameters = true;
						}
					}
#endif /* SKYWALK */
					// Add to the list provided by caller
					LIST_INSERT_HEAD(defunct_list, flow_defunct, chain);

					flow_registration->defunct = true;
					flow_registration->flow_result_read = false;
					updated_result = true;
				}
			}
		}
		if (destroy_stats) {
#if SKYWALK
			// Free any remaining stats objects back to the arena where they came from;
			// do this independent of the above defunct check, as the client may have
			// been marked as defunct separately via necp_defunct_client_for_policy().
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
				necp_destroy_flow_stats(client_fd, flow_registration, NULL, FALSE);
			}
#endif /* SKYWALK */
		}
		NECP_CLIENT_UNLOCK(client);
	}

	return updated_result;
}
5339
/*
 * Defunct all clients on an fd: collect nexus close events into
 * defunct_list via the _inner helper and, under SKYWALK, redirect and
 * then tear down the fd's currently-active stats arena. Notifies the fd
 * if any flow was newly defuncted. Caller must hold the fd lock.
 */
static inline void
necp_defunct_client_fd_locked(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, struct proc *proc)
{
#pragma unused(proc)
	bool updated_result = FALSE;

	NECP_FD_ASSERT_LOCKED(client_fd);
#if SKYWALK
	// redirect regions of currently-active stats arena to zero-filled pages
	struct necp_arena_info *nai = necp_fd_mredirect_stats_arena(client_fd, proc);
#endif /* SKYWALK */

	// destroy_stats == true: also frees per-flow stats back to the arena
	updated_result = necp_defunct_client_fd_locked_inner(client_fd, defunct_list, true);

#if SKYWALK
	// and tear down the currently-active arena's regions now that the redirection and freeing are done
	if (nai != NULL) {
		ASSERT((nai->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT)) == NAIF_REDIRECT);
		ASSERT(nai->nai_arena != NULL);
		ASSERT(nai->nai_mmap.ami_mapref != NULL);

		int err = skmem_arena_defunct(nai->nai_arena);
		VERIFY(err == 0);

		nai->nai_flags |= NAIF_DEFUNCT;
	}
#endif /* SKYWALK */

	if (updated_result) {
		necp_fd_notify(client_fd, true);
	}
}
5372
5373 static inline void
necp_update_client_fd_locked(struct necp_fd_data * client_fd,proc_t proc,struct _necp_flow_defunct_list * defunct_list)5374 necp_update_client_fd_locked(struct necp_fd_data *client_fd,
5375 proc_t proc,
5376 struct _necp_flow_defunct_list *defunct_list)
5377 {
5378 struct necp_client *client = NULL;
5379 bool updated_result = FALSE;
5380 NECP_FD_ASSERT_LOCKED(client_fd);
5381 RB_FOREACH(client, _necp_client_tree, &client_fd->clients) {
5382 NECP_CLIENT_LOCK(client);
5383 if (necp_update_client_result(proc, client_fd, client, defunct_list)) {
5384 updated_result = TRUE;
5385 }
5386 NECP_CLIENT_UNLOCK(client);
5387 }
5388
5389 // Check if this PID needs to request in-process flow divert
5390 NECP_FD_LIST_ASSERT_LOCKED();
5391 for (int i = 0; i < NECP_MAX_FLOW_DIVERT_NEEDED_PIDS; i++) {
5392 if (necp_flow_divert_needed_pids[i] == 0) {
5393 break;
5394 }
5395 if (necp_flow_divert_needed_pids[i] == client_fd->proc_pid) {
5396 client_fd->request_in_process_flow_divert = true;
5397 break;
5398 }
5399 }
5400
5401 if (updated_result || client_fd->request_in_process_flow_divert) {
5402 necp_fd_notify(client_fd, true);
5403 }
5404 }
5405
5406 #if SKYWALK
5407 static void
necp_close_empty_arenas_callout(__unused thread_call_param_t dummy,__unused thread_call_param_t arg)5408 necp_close_empty_arenas_callout(__unused thread_call_param_t dummy,
5409 __unused thread_call_param_t arg)
5410 {
5411 struct necp_fd_data *client_fd = NULL;
5412
5413 NECP_FD_LIST_LOCK_SHARED();
5414
5415 LIST_FOREACH(client_fd, &necp_fd_list, chain) {
5416 NECP_FD_LOCK(client_fd);
5417 necp_stats_arenas_destroy(client_fd, FALSE);
5418 NECP_FD_UNLOCK(client_fd);
5419 }
5420
5421 NECP_FD_LIST_UNLOCK();
5422 }
5423 #endif /* SKYWALK */
5424
5425 static void
necp_update_all_clients_callout(__unused thread_call_param_t dummy,__unused thread_call_param_t arg)5426 necp_update_all_clients_callout(__unused thread_call_param_t dummy,
5427 __unused thread_call_param_t arg)
5428 {
5429 struct necp_fd_data *client_fd = NULL;
5430
5431 NECP_UPDATE_ALL_CLIENTS_LOCK_EXCLUSIVE();
5432 uint32_t count = necp_update_all_clients_sched_cnt;
5433 necp_update_all_clients_sched_cnt = 0;
5434 necp_update_all_clients_sched_abstime = 0;
5435 NECP_UPDATE_ALL_CLIENTS_UNLOCK();
5436
5437 if (necp_debug > 0) {
5438 NECPLOG(LOG_DEBUG,
5439 "necp_update_all_clients_callout running for coalesced %u updates",
5440 count);
5441 }
5442
5443 struct _necp_flow_defunct_list defunct_list;
5444 LIST_INIT(&defunct_list);
5445
5446 NECP_FD_LIST_LOCK_SHARED();
5447
5448 LIST_FOREACH(client_fd, &necp_fd_list, chain) {
5449 proc_t proc = proc_find(client_fd->proc_pid);
5450 if (proc == PROC_NULL) {
5451 continue;
5452 }
5453
5454 // Update all clients on one fd
5455 NECP_FD_LOCK(client_fd);
5456 necp_update_client_fd_locked(client_fd, proc, &defunct_list);
5457 NECP_FD_UNLOCK(client_fd);
5458
5459 proc_rele(proc);
5460 proc = PROC_NULL;
5461 }
5462
5463 // Reset the necp_flow_divert_needed_pids list
5464 for (int i = 0; i < NECP_MAX_FLOW_DIVERT_NEEDED_PIDS; i++) {
5465 necp_flow_divert_needed_pids[i] = 0;
5466 }
5467
5468 NECP_FD_LIST_UNLOCK();
5469
5470 // Handle the case in which some clients became newly defunct
5471 necp_process_defunct_list(&defunct_list);
5472 }
5473
/*
 * Schedule a coalesced, delayed re-evaluation of all NECP clients.
 * Equivalent to requesting a non-immediate update.
 */
void
necp_update_all_clients(void)
{
	necp_update_all_clients_immediately_if_needed(false);
}
5479
/*
 * Request that the all-clients update thread call run. Requests are
 * coalesced: if a call is already scheduled, normally no new one is
 * pushed. When should_update_immediately is set, a much shorter timeout
 * is used, and an already-scheduled call that would fire too far in the
 * future is cancelled and rescheduled sooner.
 */
void
necp_update_all_clients_immediately_if_needed(bool should_update_immediately)
{
	if (necp_client_update_tcall == NULL) {
		// Don't try to update clients if the module is not initialized
		return;
	}

	uint64_t deadline = 0;
	uint64_t leeway = 0;

	// Intervals below are in microseconds (converted with NSEC_PER_USEC)
	uint32_t timeout_to_use = necp_timeout_microseconds;
	uint32_t leeway_to_use = necp_timeout_leeway_microseconds;
	if (should_update_immediately) {
		timeout_to_use = 1000 * 10; // 10ms
		leeway_to_use = 1000 * 10; // 10ms
	}

	// deadline becomes an absolute fire time; leeway stays an interval
	clock_interval_to_deadline(timeout_to_use, NSEC_PER_USEC, &deadline);
	clock_interval_to_absolutetime_interval(leeway_to_use, NSEC_PER_USEC, &leeway);

	NECP_UPDATE_ALL_CLIENTS_LOCK_EXCLUSIVE();
	bool need_cancel = false;
	bool need_schedule = true;
	uint64_t sched_abstime;

	// NOTE(review): 'deadline' is already an absolute time here, yet it is
	// passed as an *interval* below, making sched_abstime roughly
	// "now + deadline + leeway" — presumably a deliberate conservative
	// upper bound for the comparison that follows; confirm intent.
	clock_absolutetime_interval_to_deadline(deadline + leeway, &sched_abstime);

	/*
	 * Do not push the timer if it is already scheduled
	 */
	if (necp_update_all_clients_sched_abstime != 0) {
		need_schedule = false;

		if (should_update_immediately) {
			/*
			 * To update immediately we may have to cancel the current timer
			 * if it's scheduled too far out.
			 */
			if (necp_update_all_clients_sched_abstime > sched_abstime) {
				need_cancel = true;
				need_schedule = true;
			}
		}
	}

	/*
	 * Record the time of the deadline with leeway
	 */
	if (need_schedule) {
		necp_update_all_clients_sched_abstime = sched_abstime;
	}

	// Count coalesced requests for the debug log in the callout
	necp_update_all_clients_sched_cnt += 1;
	uint32_t count = necp_update_all_clients_sched_cnt;
	NECP_UPDATE_ALL_CLIENTS_UNLOCK();

	if (need_schedule) {
		/*
		 * Wait if the thread call is currently executing to make sure the
		 * next update will be delivered to all clients
		 */
		if (need_cancel) {
			(void) thread_call_cancel_wait(necp_client_update_tcall);
		}

		(void) thread_call_enter_delayed_with_leeway(necp_client_update_tcall, NULL,
		    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
	}
	if (necp_debug > 0) {
		NECPLOG(LOG_DEBUG,
		    "necp_update_all_clients immediate %s update %u",
		    should_update_immediately ? "true" : "false", count);
	}
}
5555
5556 bool
necp_set_client_as_background(proc_t proc,struct fileproc * fp,bool background)5557 necp_set_client_as_background(proc_t proc,
5558 struct fileproc *fp,
5559 bool background)
5560 {
5561 if (proc == PROC_NULL) {
5562 NECPLOG0(LOG_ERR, "NULL proc");
5563 return FALSE;
5564 }
5565
5566 if (fp == NULL) {
5567 NECPLOG0(LOG_ERR, "NULL fp");
5568 return FALSE;
5569 }
5570
5571 struct necp_fd_data *client_fd = (struct necp_fd_data *)fp_get_data(fp);
5572 if (client_fd == NULL) {
5573 NECPLOG0(LOG_ERR, "Could not find client structure for backgrounded client");
5574 return FALSE;
5575 }
5576
5577 if (client_fd->necp_fd_type != necp_fd_type_client) {
5578 // Not a client fd, ignore
5579 NECPLOG0(LOG_ERR, "Not a client fd, ignore");
5580 return FALSE;
5581 }
5582
5583 client_fd->background = background;
5584
5585 return TRUE;
5586 }
5587
/*
 * Memorystatus hook for an NECP client fd. Currently a documented no-op
 * placeholder: nothing is reaped for the process or client, but this is
 * the call site where memory-pressure-driven cleanup would be added.
 */
void
necp_fd_memstatus(proc_t proc, uint32_t status,
    struct necp_fd_data *client_fd)
{
#pragma unused(proc, status, client_fd)
	ASSERT(proc != PROC_NULL);
	ASSERT(client_fd != NULL);

	// Nothing to reap for the process or client for now,
	// but this is where we would trigger that in future.
}
5599
/*
 * Defunct all flows on a NECP client fd, typically as part of process
 * teardown or jetsam. Collects the flows to defunct under the fd lock,
 * then processes the collected list after dropping it.
 */
void
necp_fd_defunct(proc_t proc, struct necp_fd_data *client_fd)
{
	struct _necp_flow_defunct_list defunct_list;

	ASSERT(proc != PROC_NULL);
	ASSERT(client_fd != NULL);

	if (client_fd->necp_fd_type != necp_fd_type_client) {
		// Not a client fd, ignore
		return;
	}

	// Our local temporary list of flows to defunct
	LIST_INIT(&defunct_list);

	// Need to hold lock so ntstats defunct the same set of clients
	NECP_FD_LOCK(client_fd);
#if SKYWALK
	// Shut down statistics for this process first, under the same lock
	nstats_userland_stats_defunct_for_process(proc_getpid(proc));
#endif /* SKYWALK */
	necp_defunct_client_fd_locked(client_fd, &defunct_list, proc);
	NECP_FD_UNLOCK(client_fd);

	// Walk the collected flows outside the fd lock
	necp_process_defunct_list(&defunct_list);
}
5627
/*
 * Queue a pid whose clients should get in-process flow divert applied.
 * The pid is stored in the first free slot of a fixed-size array and is
 * consumed by the next client-update thread call. If the array is full,
 * the request is silently dropped.
 *
 * NOTE(review): the array is written under the shared (read) fd-list lock,
 * and a pid already present is not deduplicated — confirm both are intended.
 */
void
necp_client_request_in_process_flow_divert(pid_t pid)
{
	if (pid == 0) {
		return;
	}

	// Add to the list of pids that should get an update. These will
	// get picked up on the next thread call to update client paths.
	NECP_FD_LIST_LOCK_SHARED();
	for (int i = 0; i < NECP_MAX_FLOW_DIVERT_NEEDED_PIDS; i++) {
		if (necp_flow_divert_needed_pids[i] == 0) {
			necp_flow_divert_needed_pids[i] = pid;
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();
}
5646
/*
 * Remove every NETAGENT result TLV whose UUID matches netagent_uuid from
 * the client's result buffer, compacting the buffer in place and zeroing
 * the freed tail. Caller must hold the client lock (the result buffer is
 * mutated directly).
 */
static void
necp_client_remove_agent_from_result(struct necp_client *client, uuid_t netagent_uuid)
{
	size_t offset = 0;

	u_int8_t *result_buffer = client->result;
	// Walk TLVs while a full header still fits in the remaining bytes
	while ((offset + sizeof(struct necp_tlv_header)) <= client->result_length) {
		u_int8_t type = necp_buffer_get_tlv_type(result_buffer, client->result_length, offset);
		u_int32_t length = necp_buffer_get_tlv_length(result_buffer, client->result_length, offset);

		size_t tlv_total_length = (sizeof(struct necp_tlv_header) + length);
		// Only consider well-formed netagent TLVs that fit entirely in the buffer
		if (type == NECP_CLIENT_RESULT_NETAGENT &&
		    length == sizeof(struct necp_client_result_netagent) &&
		    (offset + tlv_total_length) <= client->result_length) {
			struct necp_client_result_netagent *value = ((struct necp_client_result_netagent *)(void *)
			    necp_buffer_get_tlv_value(result_buffer, client->result_length, offset, NULL));
			if (uuid_compare(value->netagent_uuid, netagent_uuid) == 0) {
				// Found a netagent to remove
				// Shift bytes down to remove the tlv, and adjust total length
				// Don't adjust the current offset: the next TLV now
				// starts at this same offset, so re-examine it
				memmove(result_buffer + offset,
				    result_buffer + offset + tlv_total_length,
				    client->result_length - (offset + tlv_total_length));
				client->result_length -= tlv_total_length;
				// Zero the now-unused tail of the fixed-size result array
				memset(result_buffer + client->result_length, 0, sizeof(client->result) - client->result_length);
				continue;
			}
		}

		offset += tlv_total_length;
	}
}
5679
/*
 * Force an update on the client identified by client_id: record the agent
 * that failed to trigger (with its generation), strip that agent from the
 * client's cached result if requested, mark the result unread, and notify
 * the owning fd. Stops at the first fd that owns the client.
 */
void
necp_force_update_client(uuid_t client_id, uuid_t remove_netagent_uuid, u_int32_t agent_generation)
{
	struct necp_fd_data *client_fd = NULL;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		bool updated_result = FALSE;
		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			// Remember which agent failed so a retrigger can be avoided
			client->failed_trigger_agent.generation = agent_generation;
			uuid_copy(client->failed_trigger_agent.netagent_uuid, remove_netagent_uuid);
			if (!uuid_is_null(remove_netagent_uuid)) {
				necp_client_remove_agent_from_result(client, remove_netagent_uuid);
			}
			// Force the client to re-read its (modified) result
			client->result_read = FALSE;
			updated_result = TRUE;
			NECP_CLIENT_UNLOCK(client);
		}
		if (updated_result) {
			// Notify while still holding the fd lock
			necp_fd_notify(client_fd, true);
		}
		NECP_FD_UNLOCK(client_fd);
		if (updated_result) {
			// Found the client, break
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();
}
5714
#if SKYWALK
/*
 * Signal the userland statistics layer that the flow registered under
 * client_id has entered TIME_WAIT, if the client and flow can be found.
 */
void
necp_client_early_close(uuid_t client_id)
{
	NECP_CLIENT_TREE_LOCK_SHARED();

	struct necp_client *client = necp_find_client_and_lock(client_id);
	if (client != NULL) {
		struct necp_client_flow_registration *flow = necp_client_find_flow(client, client_id);
		if (flow != NULL && flow->stats_handler_context != NULL) {
			// Found the right client and flow, mark the stats as over
			ntstat_userland_stats_event(flow->stats_handler_context,
			    NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT);
		}
		NECP_CLIENT_UNLOCK(client);
	}

	NECP_CLIENT_TREE_UNLOCK();
}
#endif /* SKYWALK */
5737
/// Interface matching

// Parameter fields that require inspecting individual interfaces to
// decide whether a matching interface exists.
#define NECP_PARSED_PARAMETERS_INTERESTING_IFNET_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
	NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
	NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE | \
	NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE | \
	NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE | \
	NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)

// Fields that can cause the result to be scoped to a specific interface.
#define NECP_PARSED_PARAMETERS_SCOPED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE | \
	NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE)

// Subset of scoped fields that name a concrete interface property
// (address or type) rather than an agent.
#define NECP_PARSED_PARAMETERS_SCOPED_IFNET_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE)

// Fields that express a preference (not a requirement); these influence
// the preferred-match count rather than hard pass/fail.
#define NECP_PARSED_PARAMETERS_PREFERRED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE | \
	NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)
5767
5768 static bool
necp_ifnet_matches_type(struct ifnet * ifp,u_int8_t interface_type,bool check_delegates)5769 necp_ifnet_matches_type(struct ifnet *ifp, u_int8_t interface_type, bool check_delegates)
5770 {
5771 struct ifnet *check_ifp = ifp;
5772 while (check_ifp) {
5773 if (if_functional_type(check_ifp, TRUE) == interface_type) {
5774 return TRUE;
5775 }
5776 if (!check_delegates) {
5777 break;
5778 }
5779 check_ifp = check_ifp->if_delegated.ifp;
5780 }
5781 return FALSE;
5782 }
5783
5784 static bool
necp_ifnet_matches_name(struct ifnet * ifp,const char * __sized_by (IFXNAMSIZ)interface_name,bool check_delegates)5785 necp_ifnet_matches_name(struct ifnet *ifp, const char * __sized_by(IFXNAMSIZ)interface_name, bool check_delegates)
5786 {
5787 struct ifnet *check_ifp = ifp;
5788 while (check_ifp) {
5789 if (strlcmp(interface_name, check_ifp->if_xname, IFXNAMSIZ) == 0) {
5790 return TRUE;
5791 }
5792 if (!check_delegates) {
5793 break;
5794 }
5795 check_ifp = check_ifp->if_delegated.ifp;
5796 }
5797 return FALSE;
5798 }
5799
5800 static bool
necp_ifnet_matches_agent(struct ifnet * ifp,uuid_t * agent_uuid,bool check_delegates)5801 necp_ifnet_matches_agent(struct ifnet *ifp, uuid_t *agent_uuid, bool check_delegates)
5802 {
5803 struct ifnet *check_ifp = ifp;
5804
5805 while (check_ifp != NULL) {
5806 ifnet_lock_shared(check_ifp);
5807 if (check_ifp->if_agentids != NULL) {
5808 for (u_int32_t index = 0; index < check_ifp->if_agentcount; index++) {
5809 if (uuid_compare(check_ifp->if_agentids[index], *agent_uuid) == 0) {
5810 ifnet_lock_done(check_ifp);
5811 return TRUE;
5812 }
5813 }
5814 }
5815 ifnet_lock_done(check_ifp);
5816
5817 if (!check_delegates) {
5818 break;
5819 }
5820 check_ifp = check_ifp->if_delegated.ifp;
5821 }
5822 return FALSE;
5823 }
5824
/*
 * Check whether ifp (and, when check_delegates is set, its delegate
 * chain) carries a netagent whose domain and type match the given
 * strings. Agent lists are read under each interface's shared lock,
 * which is dropped before returning or moving to the delegate.
 */
static bool
necp_ifnet_matches_agent_type(struct ifnet *ifp, const char * __sized_by(NETAGENT_DOMAINSIZE)agent_domain, const char * __sized_by(NETAGENT_TYPESIZE)agent_type, bool check_delegates)
{
	struct ifnet *check_ifp = ifp;

	while (check_ifp != NULL) {
		ifnet_lock_shared(check_ifp);
		if (check_ifp->if_agentids != NULL) {
			for (u_int32_t index = 0; index < check_ifp->if_agentcount; index++) {
				if (uuid_is_null(check_ifp->if_agentids[index])) {
					// Skip empty agent slots
					continue;
				}

				char if_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
				char if_agent_type[NETAGENT_TYPESIZE] = { 0 };

				// Look up this agent's domain/type and compare against the request
				if (netagent_get_agent_domain_and_type(check_ifp->if_agentids[index], if_agent_domain, if_agent_type)) {
					if (necp_agent_types_match(agent_domain, agent_type, if_agent_domain, if_agent_type)) {
						ifnet_lock_done(check_ifp);
						return TRUE;
					}
				}
			}
		}
		ifnet_lock_done(check_ifp);

		if (!check_delegates) {
			break;
		}
		check_ifp = check_ifp->if_delegated.ifp;
	}
	return FALSE;
}
5858
/*
 * Check whether sa is a local address assigned to ifp. The address is
 * first canonicalized into the form used by the ifaddr list (port
 * cleared, and the IPv6 scope ID handling matched to how addresses are
 * stored) before the scoped lookup.
 */
static bool
necp_ifnet_matches_local_address(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifaddr *ifa = NULL;
	bool matched_local_address = FALSE;

	// Transform sa into the ifaddr form
	// IPv6 Scope IDs are always embedded in the ifaddr list
	struct sockaddr_storage address;
	u_int ifscope = IFSCOPE_NONE;
	(void)sa_copy(sa, &address, &ifscope);
	// Ports are not part of interface addresses; clear before lookup
	SIN(&address)->sin_port = 0;
	if (address.ss_family == AF_INET6) {
		if (in6_embedded_scope ||
		    !IN6_IS_SCOPE_EMBED(&SIN6(&address)->sin6_addr)) {
			SIN6(&address)->sin6_scope_id = 0;
		}
	}

	ifa = ifa_ifwithaddr_scoped_locked(SA(&address), ifp->if_index);
	matched_local_address = (ifa != NULL);

	if (ifa) {
		// Lookup took a reference; drop it
		ifaddr_release(ifa);
	}

	return matched_local_address;
}
5887
5888 static bool
necp_interface_type_should_match_unranked_interfaces(u_int8_t interface_type)5889 necp_interface_type_should_match_unranked_interfaces(u_int8_t interface_type)
5890 {
5891 switch (interface_type) {
5892 // These are the interface types we allow a client to request even if the matching
5893 // interface isn't currently eligible to be primary (has default route, dns, etc)
5894 case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
5895 case IFRTYPE_FUNCTIONAL_INTCOPROC:
5896 case IFRTYPE_FUNCTIONAL_COMPANIONLINK:
5897 return true;
5898 default:
5899 break;
5900 }
5901 return false;
5902 }
5903
// True when the interface is linked into the ordered interface list
// (either TAILQ link pointer is non-NULL).
#define NECP_IFP_IS_ON_ORDERED_LIST(_ifp) ((_ifp)->if_ordered_link.tqe_next != NULL || (_ifp)->if_ordered_link.tqe_prev != NULL)
5905
// Secondary interface flag indicates that the interface is being
// used for multipath or a listener as an extra path
/*
 * Evaluate whether ifp satisfies the client's parsed parameters.
 *
 * override_flags, when non-zero, replaces the client's own flags for the
 * expensive/constrained/ultra-constrained checks.
 * preferred_count, when non-NULL, is set to the number of preferred/avoided
 * agent criteria this interface satisfies (used for ranking candidates).
 * secondary_interface relaxes the required-interface-type check when the
 * client opted in via NECP_CLIENT_PARAMETER_FLAG_ONLY_PRIMARY_REQUIRES_TYPE.
 * require_scoped_field, when set, changes the return value: the interface
 * must additionally match at least one scoped field (local address,
 * required type, or a required/preferred agent criterion).
 */
static bool
necp_ifnet_matches_parameters(struct ifnet *ifp,
    struct necp_client_parsed_parameters *parsed_parameters,
    u_int32_t override_flags,
    u_int32_t *preferred_count,
    bool secondary_interface,
    bool require_scoped_field)
{
	bool matched_some_scoped_field = FALSE;

	if (preferred_count) {
		*preferred_count = 0;
	}

	// Hard requirement: a specific interface index must match exactly
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) {
		if (parsed_parameters->required_interface_index != ifp->if_index) {
			return FALSE;
		}
	}
#if SKYWALK
	else {
		// Low-latency interfaces are only selectable by explicit index
		if (ifnet_is_low_latency(ifp)) {
			return FALSE;
		}
	}
#endif /* SKYWALK */

	// Hard requirement: the local address must be assigned to this interface
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR) {
		if (!necp_ifnet_matches_local_address(ifp, SA(&parsed_parameters->local_addr.sa))) {
			return FALSE;
		}
		if (require_scoped_field) {
			matched_some_scoped_field = TRUE;
		}
	}

	// Flag-based prohibitions (expensive/constrained/ultra-constrained);
	// override_flags takes precedence over the client's own flags when set
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) {
		if (override_flags != 0) {
			if ((override_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE) &&
			    IFNET_IS_EXPENSIVE(ifp)) {
				return FALSE;
			}
			if ((override_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED) &&
			    IFNET_IS_CONSTRAINED(ifp)) {
				return FALSE;
			}
			// Ultra-constrained requires explicit opt-in
			if (!(override_flags & NECP_CLIENT_PARAMETER_FLAG_ALLOW_ULTRA_CONSTRAINED) &&
			    IFNET_IS_ULTRA_CONSTRAINED(ifp)) {
				return FALSE;
			}
		} else {
			if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE) &&
			    IFNET_IS_EXPENSIVE(ifp)) {
				return FALSE;
			}
			if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED) &&
			    IFNET_IS_CONSTRAINED(ifp)) {
				return FALSE;
			}
			if (!(parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_ALLOW_ULTRA_CONSTRAINED) &&
			    IFNET_IS_ULTRA_CONSTRAINED(ifp)) {
				return FALSE;
			}
		}
	}

	if ((!secondary_interface || // Enforce interface type if this is the primary interface
	    !(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) || // or if there are no flags
	    !(parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_ONLY_PRIMARY_REQUIRES_TYPE)) && // or if the flags don't give an exception
	    (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) &&
	    !necp_ifnet_matches_type(ifp, parsed_parameters->required_interface_type, FALSE)) {
		return FALSE;
	}

	// Required interface type counts as a scoped field even when the type
	// check itself was skipped for a secondary interface
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) {
		if (require_scoped_field) {
			matched_some_scoped_field = TRUE;
		}
	}

	// Prohibited interface types (delegates included in the check)
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE) {
		for (int i = 0; i < NECP_MAX_INTERFACE_PARAMETERS; i++) {
			if (parsed_parameters->prohibited_interface_types[i] == 0) {
				// 0 terminates the list
				break;
			}

			if (necp_ifnet_matches_type(ifp, parsed_parameters->prohibited_interface_types[i], TRUE)) {
				return FALSE;
			}
		}
	}

	// Prohibited interface names (delegates included in the check)
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF) {
		for (int i = 0; i < NECP_MAX_INTERFACE_PARAMETERS; i++) {
			if (strbuflen(parsed_parameters->prohibited_interfaces[i], sizeof(parsed_parameters->prohibited_interfaces[i])) == 0) {
				// Empty string terminates the list
				break;
			}

			if (necp_ifnet_matches_name(ifp, parsed_parameters->prohibited_interfaces[i], TRUE)) {
				return FALSE;
			}
		}
	}

	// Required netagents (delegates NOT included)
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parsed_parameters->required_netagents[i])) {
				break;
			}

			if (!necp_ifnet_matches_agent(ifp, &parsed_parameters->required_netagents[i], FALSE)) {
				return FALSE;
			}

			if (require_scoped_field) {
				matched_some_scoped_field = TRUE;
			}
		}
	}

	// Prohibited netagents (delegates included)
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parsed_parameters->prohibited_netagents[i])) {
				break;
			}

			if (necp_ifnet_matches_agent(ifp, &parsed_parameters->prohibited_netagents[i], TRUE)) {
				return FALSE;
			}
		}
	}

	// Required netagent domain/type pairs (delegates NOT included)
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (strbuflen(parsed_parameters->required_netagent_types[i].netagent_domain, sizeof(parsed_parameters->required_netagent_types[i].netagent_domain)) == 0 &&
			    strbuflen(parsed_parameters->required_netagent_types[i].netagent_type, sizeof(parsed_parameters->required_netagent_types[i].netagent_type)) == 0) {
				break;
			}

			if (!necp_ifnet_matches_agent_type(ifp, parsed_parameters->required_netagent_types[i].netagent_domain, parsed_parameters->required_netagent_types[i].netagent_type, FALSE)) {
				return FALSE;
			}

			if (require_scoped_field) {
				matched_some_scoped_field = TRUE;
			}
		}
	}

	// Prohibited netagent domain/type pairs (delegates included)
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (strbuflen(parsed_parameters->prohibited_netagent_types[i].netagent_domain, sizeof(parsed_parameters->prohibited_netagent_types[i].netagent_domain)) == 0 &&
			    strbuflen(parsed_parameters->prohibited_netagent_types[i].netagent_type, sizeof(parsed_parameters->prohibited_netagent_types[i].netagent_type)) == 0) {
				break;
			}

			if (necp_ifnet_matches_agent_type(ifp, parsed_parameters->prohibited_netagent_types[i].netagent_domain, parsed_parameters->prohibited_netagent_types[i].netagent_type, TRUE)) {
				return FALSE;
			}
		}
	}

	// Checked preferred properties: these never fail the match, they only
	// increase the preferred count used to rank candidate interfaces
	if (preferred_count) {
		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (uuid_is_null(parsed_parameters->preferred_netagents[i])) {
					break;
				}

				if (necp_ifnet_matches_agent(ifp, &parsed_parameters->preferred_netagents[i], TRUE)) {
					(*preferred_count)++;
					if (require_scoped_field) {
						matched_some_scoped_field = TRUE;
					}
				}
			}
		}

		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strbuflen(parsed_parameters->preferred_netagent_types[i].netagent_domain, sizeof(parsed_parameters->preferred_netagent_types[i].netagent_domain)) == 0 &&
				    strbuflen(parsed_parameters->preferred_netagent_types[i].netagent_type, sizeof(parsed_parameters->preferred_netagent_types[i].netagent_type)) == 0) {
					break;
				}

				if (necp_ifnet_matches_agent_type(ifp, parsed_parameters->preferred_netagent_types[i].netagent_domain, parsed_parameters->preferred_netagent_types[i].netagent_type, TRUE)) {
					(*preferred_count)++;
					if (require_scoped_field) {
						matched_some_scoped_field = TRUE;
					}
				}
			}
		}

		// Avoided agents count toward preference when they are ABSENT
		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (uuid_is_null(parsed_parameters->avoided_netagents[i])) {
					break;
				}

				if (!necp_ifnet_matches_agent(ifp, &parsed_parameters->avoided_netagents[i], TRUE)) {
					(*preferred_count)++;
				}
			}
		}

		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strbuflen(parsed_parameters->avoided_netagent_types[i].netagent_domain, sizeof(parsed_parameters->avoided_netagent_types[i].netagent_domain)) == 0 &&
				    strbuflen(parsed_parameters->avoided_netagent_types[i].netagent_type, sizeof(parsed_parameters->avoided_netagent_types[i].netagent_type)) == 0) {
					break;
				}

				if (!necp_ifnet_matches_agent_type(ifp, parsed_parameters->avoided_netagent_types[i].netagent_domain,
				    parsed_parameters->avoided_netagent_types[i].netagent_type, TRUE)) {
					(*preferred_count)++;
				}
			}
		}
	}

	if (require_scoped_field) {
		return matched_some_scoped_field;
	}

	return TRUE;
}
6136
/*
 * Find the interface index that best matches the client's parsed
 * parameters.
 *
 * On return, *return_ifindex is 0 when the client does not need to be
 * scoped to a particular interface, or the chosen interface index.
 * *validate_agents is set when required (non-interface) fields could not
 * be satisfied here and should instead be validated against policy.
 * Returns TRUE when a usable result was determined, FALSE when required
 * interface fields could not be satisfied.
 *
 * Search order: the ordered (ranked) interface list first, then — for
 * scoped requests that allow it — the remaining unranked interfaces.
 * Among matches, the interface satisfying the most preferred/avoided
 * criteria wins.
 */
static bool
necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_parameters,
    u_int *return_ifindex, bool *validate_agents)
{
	struct ifnet *ifp = NULL;
	u_int32_t best_preferred_count = 0;
	bool has_preferred_fields = FALSE;
	*return_ifindex = 0;

	// An explicitly required interface index short-circuits everything
	if (parsed_parameters->required_interface_index != 0) {
		*return_ifindex = parsed_parameters->required_interface_index;
		return TRUE;
	}

	// Check and save off flags
	u_int32_t flags = 0;
	bool has_prohibit_flags = FALSE;
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) {
		flags = parsed_parameters->flags;
		has_prohibit_flags = (parsed_parameters->flags &
		    (NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE |
		    NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED));
	}

	// Nothing interface-related to match: no scoping needed
	if (!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_INTERESTING_IFNET_FIELDS) &&
	    !has_prohibit_flags) {
		return TRUE;
	}

	has_preferred_fields = (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_PREFERRED_FIELDS);

	// We have interesting parameters to parse and find a matching interface
	ifnet_head_lock_shared();

	if (!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_FIELDS) &&
	    !has_preferred_fields) {
		// We do have fields to match, but they are only prohibitory
		// If the first interface in the list matches, or there are no ordered interfaces, we don't need to scope
		ifp = TAILQ_FIRST(&ifnet_ordered_head);
		if (ifp == NULL || necp_ifnet_matches_parameters(ifp, parsed_parameters, 0, NULL, false, false)) {
			// Don't set return_ifindex, so the client doesn't need to scope
			ifnet_head_done();
			return TRUE;
		}

		// A v6 remote address with an embedded scope ID already pins the
		// interface; if that interface matches, no explicit scoping needed
		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR &&
		    parsed_parameters->remote_addr.sin6.sin6_family == AF_INET6 &&
		    parsed_parameters->remote_addr.sin6.sin6_scope_id != IFSCOPE_NONE &&
		    parsed_parameters->remote_addr.sin6.sin6_scope_id <= (u_int32_t)if_index) {
			ifp = ifindex2ifnet[parsed_parameters->remote_addr.sin6.sin6_scope_id];
			if (ifp != NULL && necp_ifnet_matches_parameters(ifp, parsed_parameters, 0, NULL, false, false)) {
				// Don't set return_ifindex, so the client doesn't need to scope since the v6 scope ID will
				// already route to the correct interface
				ifnet_head_done();
				return TRUE;
			}
		}
	}

	// First check the ordered interface list
	TAILQ_FOREACH(ifp, &ifnet_ordered_head, if_ordered_link) {
		u_int32_t preferred_count = 0;
		if (necp_ifnet_matches_parameters(ifp, parsed_parameters, flags, &preferred_count, false, false)) {
			if (preferred_count > best_preferred_count ||
			    *return_ifindex == 0) {
				// Everything matched, and is most preferred. Return this interface.
				*return_ifindex = ifp->if_index;
				best_preferred_count = preferred_count;

				if (!has_preferred_fields) {
					// No preferences to rank by; first match wins
					break;
				}
			}
		}

		if (has_prohibit_flags &&
		    ifp == TAILQ_FIRST(&ifnet_ordered_head)) {
			// This was the first interface. From here on, if the
			// client prohibited either expensive or constrained,
			// don't allow either as a secondary interface option.
			flags |= (NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE |
			    NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED);
		}
	}

	bool is_listener = ((parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
	    (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER));

	// Then check the remaining (unranked) interfaces; only scoped requests
	// that qualify may select an interface not on the ordered list
	if ((parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_FIELDS) &&
	    ((!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE)) ||
	    necp_interface_type_should_match_unranked_interfaces(parsed_parameters->required_interface_type) ||
	    (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR) ||
	    is_listener) &&
	    (*return_ifindex == 0 || has_preferred_fields)) {
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			u_int32_t preferred_count = 0;
			if (NECP_IFP_IS_ON_ORDERED_LIST(ifp)) {
				// This interface was in the ordered list, skip
				continue;
			}
			// require_scoped_field is true here: the interface must
			// positively match a scoped field, not just pass prohibitions
			if (necp_ifnet_matches_parameters(ifp, parsed_parameters, flags, &preferred_count, false, true)) {
				if (preferred_count > best_preferred_count ||
				    *return_ifindex == 0) {
					// Everything matched, and is most preferred. Return this interface.
					*return_ifindex = ifp->if_index;
					best_preferred_count = preferred_count;

					if (!has_preferred_fields) {
						break;
					}
				}
			}
		}
	}

	ifnet_head_done();

	if (has_preferred_fields && best_preferred_count == 0 &&
	    ((parsed_parameters->valid_fields & (NECP_PARSED_PARAMETERS_SCOPED_FIELDS | NECP_PARSED_PARAMETERS_PREFERRED_FIELDS)) ==
	    (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_PREFERRED_FIELDS))) {
		// If only has preferred ifnet fields, and nothing was found, clear the interface index and return TRUE
		*return_ifindex = 0;
		return TRUE;
	}

	if (*return_ifindex == 0 &&
	    !(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_IFNET_FIELDS)) {
		// Has required fields, but not including specific interface fields. Pass for now, and check
		// to see if agents are satisfied by policy.
		*validate_agents = TRUE;
		return TRUE;
	}

	return *return_ifindex != 0;
}
6273
/*
 * Copy domain-related NECP attributes from an inpcb/socket pair into
 * domain_info for the stats layer. When the socket is marked silent,
 * only the is_silent flag is set; otherwise tracker state and the
 * domain/owner/context strings are copied under the socket-attributes
 * lock. No-op when any argument is NULL.
 */
void
necp_copy_inp_domain_info(struct inpcb *inp, struct socket *so, nstat_domain_info *domain_info)
{
	if (inp == NULL || so == NULL || domain_info == NULL) {
		return;
	}

	// The attribute strings are protected by the socket-attributes lock
	necp_lock_socket_attributes();

	domain_info->is_silent = !!(so->so_flags1 & SOF1_DOMAIN_INFO_SILENT);
	if (!domain_info->is_silent) {
		domain_info->is_tracker = !!(so->so_flags1 & SOF1_KNOWN_TRACKER);
		domain_info->is_non_app_initiated = !!(so->so_flags1 & SOF1_TRACKER_NON_APP_INITIATED);
		// Prefer the tracker domain for known trackers, otherwise the plain domain
		if (domain_info->is_tracker &&
		    inp->inp_necp_attributes.inp_tracker_domain != NULL) {
			strlcpy(domain_info->domain_name, inp->inp_necp_attributes.inp_tracker_domain,
			    sizeof(domain_info->domain_name));
		} else if (inp->inp_necp_attributes.inp_domain != NULL) {
			strlcpy(domain_info->domain_name, inp->inp_necp_attributes.inp_domain,
			    sizeof(domain_info->domain_name));
		}
		if (inp->inp_necp_attributes.inp_domain_owner != NULL) {
			strlcpy(domain_info->domain_owner, inp->inp_necp_attributes.inp_domain_owner,
			    sizeof(domain_info->domain_owner));
		}
		if (inp->inp_necp_attributes.inp_domain_context != NULL) {
			strlcpy(domain_info->domain_tracker_ctxt, inp->inp_necp_attributes.inp_domain_context,
			    sizeof(domain_info->domain_tracker_ctxt));
		}
	}

	necp_unlock_socket_attributes();
}
6307
6308 void
necp_with_inp_domain_name(struct socket * so,void * ctx,void (* with_func)(char * domain_name __null_terminated,void * ctx))6309 necp_with_inp_domain_name(struct socket *so, void *ctx, void (*with_func)(char *domain_name __null_terminated, void *ctx))
6310 {
6311 struct inpcb *inp = NULL;
6312
6313 if (so == NULL || with_func == NULL) {
6314 return;
6315 }
6316
6317 inp = (struct inpcb *)so->so_pcb;
6318 if (inp == NULL) {
6319 return;
6320 }
6321
6322 necp_lock_socket_attributes();
6323 with_func(inp->inp_necp_attributes.inp_domain, ctx);
6324 necp_unlock_socket_attributes();
6325 }
6326
/*
 * Extract domain-related information from a client's TLV parameter blob
 * into domain_info for the stats layer.
 *
 * Returns 0 when client is NULL; when domain_info is NULL, returns the
 * buffer size required (sizeof(nstat_domain_info)) without parsing.
 * Otherwise parses the TLVs, fills domain_info, and returns the size
 * written. When the client is marked silent, all other collected fields
 * are wiped and only is_silent is reported.
 */
static size_t
necp_find_domain_info_common(struct necp_client *client,
    u_int8_t * __sized_by(parameters_size)parameters,
    size_t parameters_size,
    struct necp_client_flow_registration *flow_registration, /* For logging purposes only */
    nstat_domain_info *domain_info)
{
	if (client == NULL) {
		return 0;
	}
	if (domain_info == NULL) {
		// Size-probe mode: report how much space the caller must provide
		return sizeof(nstat_domain_info);
	}

	size_t offset = 0;
	u_int32_t flags = 0;
	// Domain strings are remembered and resolved after the full pass,
	// since the choice between them depends on the flags TLV
	u_int8_t *tracker_domain = NULL;
	u_int8_t *domain = NULL;
	size_t tracker_domain_length = 0;
	size_t domain_length = 0;

	NECP_CLIENT_FLOW_LOG(client, flow_registration, "Collecting stats");

	// Walk TLVs while a full header still fits in the remaining bytes
	while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);

		if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
			// If the length is larger than what can fit in the remaining parameters size, bail
			NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
			break;
		}

		if (length > 0) {
			u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
			if (value != NULL) {
				switch (type) {
				case NECP_CLIENT_PARAMETER_FLAGS: {
					if (length >= sizeof(u_int32_t)) {
						memcpy(&flags, value, sizeof(u_int32_t));
					}

					domain_info->is_tracker =
					    !!(flags & NECP_CLIENT_PARAMETER_FLAG_KNOWN_TRACKER);
					domain_info->is_non_app_initiated =
					    !!(flags & NECP_CLIENT_PARAMETER_FLAG_NON_APP_INITIATED);
					domain_info->is_silent =
					    !!(flags & NECP_CLIENT_PARAMETER_FLAG_SILENT);
					break;
				}
				case NECP_CLIENT_PARAMETER_TRACKER_DOMAIN: {
					// Defer copy: used only if the client is a tracker
					tracker_domain_length = length;
					tracker_domain = value;
					break;
				}
				case NECP_CLIENT_PARAMETER_DOMAIN: {
					// Defer copy: fallback when no tracker domain applies
					domain_length = length;
					domain = value;
					break;
				}
				case NECP_CLIENT_PARAMETER_DOMAIN_OWNER: {
					size_t length_to_copy = MIN(length, sizeof(domain_info->domain_owner));
					strbufcpy(domain_info->domain_owner, sizeof(domain_info->domain_owner), (const char *)value, length_to_copy);
					break;
				}
				case NECP_CLIENT_PARAMETER_DOMAIN_CONTEXT: {
					size_t length_to_copy = MIN(length, sizeof(domain_info->domain_tracker_ctxt));
					strbufcpy(domain_info->domain_tracker_ctxt, sizeof(domain_info->domain_tracker_ctxt), (const char *)value, length_to_copy);
					break;
				}
				case NECP_CLIENT_PARAMETER_ATTRIBUTED_BUNDLE_IDENTIFIER: {
					size_t length_to_copy = MIN(length, sizeof(domain_info->domain_attributed_bundle_id));
					strbufcpy(domain_info->domain_attributed_bundle_id, sizeof(domain_info->domain_attributed_bundle_id), (const char *)value, length_to_copy);
					break;
				}
				case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						if (necp_client_address_is_valid(&address_struct->address.sa)) {
							domain_info->remote.v6 = address_struct->address.sin6;
						}
					}
					break;
				}
				default: {
					break;
				}
				}
			}
		}
		offset += sizeof(struct necp_tlv_header) + length;
	}

	if (domain_info->is_silent) {
		// Silent clients report nothing except the silent flag itself
		memset(domain_info, 0, sizeof(*domain_info));
		domain_info->is_silent = true;
	} else if (domain_info->is_tracker && tracker_domain != NULL && tracker_domain_length > 0) {
		size_t length_to_copy = MIN(tracker_domain_length, sizeof(domain_info->domain_name));
		strbufcpy(domain_info->domain_name, sizeof(domain_info->domain_name), (const char *)tracker_domain, length_to_copy);
	} else if (domain != NULL && domain_length > 0) {
		size_t length_to_copy = MIN(domain_length, sizeof(domain_info->domain_name));
		strbufcpy(domain_info->domain_name, sizeof(domain_info->domain_name), (const char *)domain, length_to_copy);
	}

	NECP_CLIENT_FLOW_LOG(client, flow_registration,
	    "Collected stats - domain <%s> owner <%s> ctxt <%s> bundle id <%s> "
	    "is_tracker %d is_non_app_initiated %d is_silent %d",
	    domain_info->domain_name,
	    domain_info->domain_owner,
	    domain_info->domain_tracker_ctxt,
	    domain_info->domain_attributed_bundle_id,
	    domain_info->is_tracker,
	    domain_info->is_non_app_initiated,
	    domain_info->is_silent);

	return sizeof(nstat_domain_info);
}
6444
6445 static size_t
necp_find_conn_extension_info(nstat_provider_context ctx,int requested_extension,void * __sized_by (buf_size)buf,size_t buf_size)6446 necp_find_conn_extension_info(nstat_provider_context ctx,
6447 int requested_extension, /* The extension to be returned */
6448 void * __sized_by(buf_size)buf, /* If not NULL, the address for extensions to be returned in */
6449 size_t buf_size) /* The size of the buffer space, typically matching the return from a previous call with a NULL buf pointer */
6450 {
6451 // Note, the caller has guaranteed that any buffer has been zeroed, there is no need to clear it again
6452
6453 if (ctx == NULL) {
6454 return 0;
6455 }
6456 struct necp_client *client = (struct necp_client *)ctx;
6457 switch (requested_extension) {
6458 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
6459 // This is for completeness. The intent is that domain information can be extracted at user level from the TLV parameters
6460 if (buf == NULL) {
6461 return sizeof(nstat_domain_info);
6462 }
6463 if (buf_size < sizeof(nstat_domain_info)) {
6464 return 0;
6465 }
6466 return necp_find_domain_info_common(client, client->parameters, client->parameters_length, NULL, (nstat_domain_info *)buf);
6467
6468 case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV: {
6469 size_t parameters_length = client->parameters_length;
6470 if (buf == NULL) {
6471 return parameters_length;
6472 }
6473 if (buf_size < parameters_length) {
6474 return 0;
6475 }
6476 memcpy(buf, client->parameters, parameters_length);
6477 return parameters_length;
6478 }
6479 case NSTAT_EXTENDED_UPDATE_TYPE_ORIGINAL_NECP_TLV:
6480 if (buf == NULL) {
6481 return (client->original_parameters_source != NULL) ? client->original_parameters_source->parameters_length : 0;
6482 }
6483 if ((client->original_parameters_source == NULL) || (buf_size < client->original_parameters_source->parameters_length)) {
6484 return 0;
6485 }
6486 memcpy(buf, client->original_parameters_source->parameters, client->original_parameters_source->parameters_length);
6487 return client->original_parameters_source->parameters_length;
6488
6489 case NSTAT_EXTENDED_UPDATE_TYPE_ORIGINAL_DOMAIN:
6490 if (buf == NULL) {
6491 return (client->original_parameters_source != NULL) ? sizeof(nstat_domain_info) : 0;
6492 }
6493 if ((buf_size < sizeof(nstat_domain_info)) || (client->original_parameters_source == NULL)) {
6494 return 0;
6495 }
6496 return necp_find_domain_info_common(client, client->original_parameters_source->parameters, client->original_parameters_source->parameters_length,
6497 NULL, (nstat_domain_info *)buf);
6498
6499 default:
6500 return 0;
6501 }
6502 }
6503
6504 #if SKYWALK
6505
6506 static struct traffic_stats*
media_stats_embedded_ts(struct media_stats * media_stats,uint32_t ifflags)6507 media_stats_embedded_ts(struct media_stats *media_stats, uint32_t ifflags)
6508 {
6509 struct traffic_stats *ts = NULL;
6510 if (media_stats) {
6511 if (ifflags & NSTAT_IFNET_IS_WIFI) {
6512 if (ifflags & NSTAT_IFNET_IS_WIFI_INFRA) {
6513 ts = &media_stats->ms_wifi_infra;
6514 } else {
6515 ts = &media_stats->ms_wifi_non_infra;
6516 }
6517 } else if (ifflags & NSTAT_IFNET_IS_CELLULAR) {
6518 ts = &media_stats->ms_cellular;
6519 } else if (ifflags & NSTAT_IFNET_IS_WIRED) {
6520 ts = &media_stats->ms_wired;
6521 } else if (ifflags & NSTAT_IFNET_IS_COMPANIONLINK_BT) {
6522 ts = &media_stats->ms_bluetooth;
6523 } else if (!(ifflags & NSTAT_IFNET_IS_LOOPBACK)) {
6524 ts = &media_stats->ms_alternate;
6525 }
6526 }
6527 return ts;
6528 }
6529
6530 static size_t
necp_find_extension_info(userland_stats_provider_context * ctx,int requested_extension,void * __sized_by (buf_size)buf,size_t buf_size)6531 necp_find_extension_info(userland_stats_provider_context *ctx,
6532 int requested_extension, /* The extension to be returned */
6533 void * __sized_by(buf_size)buf, /* If not NULL, the address for extensions to be returned in */
6534 size_t buf_size) /* The size of the buffer space, typically matching the return from a previous call with a NULL buf pointer */
6535 {
6536 if (ctx == NULL) {
6537 return 0;
6538 }
6539 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
6540 struct necp_client *client = flow_registration->client;
6541
6542 switch (requested_extension) {
6543 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
6544 if (buf == NULL) {
6545 return sizeof(nstat_domain_info);
6546 }
6547 if (buf_size < sizeof(nstat_domain_info)) {
6548 return 0;
6549 }
6550 return necp_find_domain_info_common(client, client->parameters, client->parameters_length, flow_registration, (nstat_domain_info *)buf);
6551
6552 case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV:
6553 if (buf == NULL) {
6554 return client->parameters_length;
6555 }
6556 if (buf_size < client->parameters_length) {
6557 return 0;
6558 }
6559 memcpy(buf, client->parameters, client->parameters_length);
6560 return client->parameters_length;
6561
6562 case NSTAT_EXTENDED_UPDATE_TYPE_FUUID:
6563 if (buf == NULL) {
6564 return sizeof(uuid_t);
6565 }
6566 if (buf_size < sizeof(uuid_t)) {
6567 return 0;
6568 }
6569 uuid_copy(buf, flow_registration->registration_id);
6570 return sizeof(uuid_t);
6571
6572 case NSTAT_EXTENDED_UPDATE_TYPE_BLUETOOTH_COUNTS: {
6573 // Retrieve details from the last time the assigned flows were updated
6574 u_int32_t route_ifindex = IFSCOPE_NONE;
6575 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6576 u_int64_t combined_interface_details = 0;
6577
6578 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
6579 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
6580 bool is_companionlink_bluetooth = (route_ifflags & NSTAT_IFNET_IS_COMPANIONLINK_BT);
6581
6582 if (buf == NULL) {
6583 return (is_companionlink_bluetooth ||
6584 (route_ifflags & NSTAT_IFNET_PEEREGRESSINTERFACE_IS_CELLULAR)) ? sizeof(nstat_interface_counts):0;
6585 }
6586 if (buf_size < sizeof(nstat_interface_counts)) {
6587 return 0;
6588 }
6589
6590 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
6591 if ((sf != NULL) &&
6592 (is_companionlink_bluetooth || (route_ifflags & NSTAT_IFNET_PEEREGRESSINTERFACE_IS_CELLULAR))) {
6593 nstat_interface_counts *bt_counts = (nstat_interface_counts *)buf;
6594 bt_counts->nstat_rxbytes = sf->sf_ibytes;
6595 bt_counts->nstat_txbytes = sf->sf_obytes;
6596 return sizeof(nstat_interface_counts);
6597 } else {
6598 return 0;
6599 }
6600 }
6601
6602 default:
6603 return 0;
6604 }
6605 }
6606
// Walk the client's NECP parameter TLVs and extract the metadata that the
// netstat descriptors need: remote address, effective pid, uid, effective
// UUID, persona id, traffic class, and fallback mode. Output parameters are
// written only when a matching TLV of sufficient length is found; callers are
// expected to pre-initialize them with suitable defaults.
static void
necp_find_netstat_data(struct necp_client *client,
    union necp_sockaddr_union *remote,
    pid_t *effective_pid,
    uid_t *uid,
    uuid_t euuid,
    uid_t *persona_id,
    u_int32_t *traffic_class,
    u_int8_t *fallback_mode)
{
	// Tracks whether euuid was filled in from an APPLICATION_ID TLV.
	// NOTE(review): the APPLICATION case below also copies into euuid but does
	// NOT set this flag, so that value can be overwritten by the
	// responsible-UUID fallback at the end — presumably intentional, confirm.
	bool have_set_euuid = false;
	size_t offset = 0;
	u_int8_t *parameters;
	u_int32_t parameters_size;

	parameters = client->parameters;
	parameters_size = (u_int32_t)client->parameters_length;

	// Iterate while a full TLV header still fits in the remaining buffer
	while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);

		if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
			// If the length is larger than what can fit in the remaining parameters size, bail
			NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
			break;
		}

		if (length > 0) {
			u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
			if (value != NULL) {
				switch (type) {
				case NECP_CLIENT_PARAMETER_APPLICATION: {
					// Application UUID; may later be superseded by APPLICATION_ID
					if (length >= sizeof(uuid_t)) {
						uuid_copy(euuid, value);
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_PID: {
					if (length >= sizeof(pid_t)) {
						memcpy(effective_pid, value, sizeof(pid_t));
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
					if (length >= sizeof(u_int32_t)) {
						memcpy(traffic_class, value, sizeof(u_int32_t));
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_FALLBACK_MODE: {
					if (length >= sizeof(u_int8_t)) {
						memcpy(fallback_mode, value, sizeof(u_int8_t));
					}
					break;
				}
				// It is an implementation quirk that the remote address can be found in the necp parameters
				// while the local address must be retrieved from the flowswitch
				case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						// Only accept a well-formed sockaddr
						if (necp_client_address_is_valid(&address_struct->address.sa)) {
							remote->sin6 = address_struct->address.sin6;
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_APPLICATION_ID: {
					// Composite id carrying uid, effective UUID, and persona id;
					// only processed when the caller asked for uid and persona_id
					if (length >= sizeof(necp_application_id_t) && uid && persona_id) {
						necp_application_id_t *application_id = (necp_application_id_t *)(void *)value;
						memcpy(uid, &application_id->uid, sizeof(uid_t));
						uuid_copy(euuid, application_id->effective_uuid);
						memcpy(persona_id, &application_id->persona_id, sizeof(uid_t));
						have_set_euuid = true;
					}
					break;
				}
				default: {
					break;
				}
				}
			}
		}
		offset += sizeof(struct necp_tlv_header) + length;
	}

	// Fallback: if no APPLICATION_ID TLV provided the euuid, use the owning
	// process's responsible UUID when one is available
	if (!have_set_euuid) {
		proc_t proc = proc_find(client->proc_pid);
		if (proc != PROC_NULL) {
			uuid_t responsible_uuid = { 0 };
			proc_getresponsibleuuid(proc, responsible_uuid, sizeof(responsible_uuid));
			proc_rele(proc);
			if (!uuid_is_null(responsible_uuid)) {
				uuid_copy(euuid, responsible_uuid);
			}
		}
	}
}
6705
6706 static u_int64_t
necp_find_netstat_initial_properties(struct necp_client * client)6707 necp_find_netstat_initial_properties(struct necp_client *client)
6708 {
6709 size_t offset = 0;
6710 u_int64_t retval = 0;
6711 u_int8_t *parameters;
6712 u_int32_t parameters_size;
6713
6714 parameters = client->parameters;
6715 parameters_size = (u_int32_t)client->parameters_length;
6716
6717 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
6718 u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
6719 u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);
6720
6721 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
6722 // If the length is larger than what can fit in the remaining parameters size, bail
6723 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
6724 break;
6725 }
6726
6727 if (type == NECP_CLIENT_PARAMETER_FLAGS) {
6728 u_int32_t policy_condition_client_flags;
6729 u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
6730 if ((value != NULL) && (length >= sizeof(policy_condition_client_flags))) {
6731 memcpy(&policy_condition_client_flags, value, sizeof(policy_condition_client_flags));
6732 if (policy_condition_client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) {
6733 retval |= NSTAT_SOURCE_IS_LISTENER;
6734 }
6735 if (policy_condition_client_flags & NECP_CLIENT_PARAMETER_FLAG_INBOUND) {
6736 retval |= NSTAT_SOURCE_IS_INBOUND;
6737 }
6738 }
6739 break;
6740 }
6741 offset += sizeof(struct necp_tlv_header) + length;
6742 }
6743 if (retval == 0) {
6744 retval = NSTAT_SOURCE_IS_OUTBOUND;
6745 }
6746 return retval;
6747 }
6748
6749 static bool
necp_request_nexus_tcp_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)6750 necp_request_nexus_tcp_netstats(userland_stats_provider_context *ctx,
6751 u_int32_t *ifflagsp,
6752 nstat_progress_digest *digestp,
6753 nstat_counts *countsp,
6754 nstat_detailed_counts *detailed_countsp,
6755 void *metadatap)
6756 {
6757 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
6758 struct necp_client *client = flow_registration->client;
6759 struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
6760 struct necp_tcp_stats *tcpstats = (struct necp_tcp_stats *)ustats_kaddr;
6761 ASSERT(tcpstats != NULL);
6762 ASSERT(!flow_registration->aop_offload);
6763
6764 u_int32_t nstat_diagnostic_flags = 0;
6765
6766 // Retrieve details from the last time the assigned flows were updated
6767 u_int32_t route_ifindex = IFSCOPE_NONE;
6768 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6769 u_int64_t combined_interface_details = 0;
6770
6771 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
6772 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
6773
6774 if (route_ifindex == IFSCOPE_NONE) {
6775 // Mark no interface
6776 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
6777 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6778 NECPLOG(LOG_INFO, "req tcp stats, failed to get route details for pid %d curproc %d %s\n",
6779 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
6780 }
6781
6782 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
6783 if (sf == NULL) {
6784 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
6785 char namebuf[MAXCOMLEN + 1];
6786 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
6787 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
6788 NECPLOG(LOG_ERR, "req tcp stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
6789 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
6790 sf = &ntstat_sk_stats_zero;
6791 }
6792
6793 if (ifflagsp) {
6794 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
6795 *ifflagsp |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
6796 if (tcpstats->necp_tcp_extra.flags1 & SOF1_CELLFALLBACK) {
6797 *ifflagsp |= NSTAT_IFNET_VIA_CELLFALLBACK;
6798 }
6799 if ((digestp == NULL) && (countsp == NULL) && (detailed_countsp == NULL) && (metadatap == NULL)) {
6800 return true;
6801 }
6802 }
6803
6804 if (digestp) {
6805 // The digest is intended to give information that may help give insight into the state of the link
6806 digestp->rxbytes = tcpstats->necp_tcp_counts.necp_stat_rxbytes;
6807 digestp->txbytes = tcpstats->necp_tcp_counts.necp_stat_txbytes;
6808 digestp->rxduplicatebytes = tcpstats->necp_tcp_counts.necp_stat_rxduplicatebytes;
6809 digestp->rxoutoforderbytes = tcpstats->necp_tcp_counts.necp_stat_rxoutoforderbytes;
6810 digestp->txretransmit = tcpstats->necp_tcp_counts.necp_stat_txretransmit;
6811 digestp->ifindex = route_ifindex;
6812 digestp->state = tcpstats->necp_tcp_extra.state;
6813 digestp->txunacked = tcpstats->necp_tcp_extra.txunacked;
6814 digestp->txwindow = tcpstats->necp_tcp_extra.txwindow;
6815 digestp->connstatus.probe_activated = tcpstats->necp_tcp_extra.probestatus.probe_activated;
6816 digestp->connstatus.write_probe_failed = tcpstats->necp_tcp_extra.probestatus.write_probe_failed;
6817 digestp->connstatus.read_probe_failed = tcpstats->necp_tcp_extra.probestatus.read_probe_failed;
6818 digestp->connstatus.conn_probe_failed = tcpstats->necp_tcp_extra.probestatus.conn_probe_failed;
6819
6820 if ((countsp == NULL) && (metadatap == NULL)) {
6821 return true;
6822 }
6823 }
6824
6825 if (countsp) {
6826 countsp->nstat_rxbytes = tcpstats->necp_tcp_counts.necp_stat_rxbytes;
6827 countsp->nstat_txbytes = tcpstats->necp_tcp_counts.necp_stat_txbytes;
6828
6829 countsp->nstat_rxduplicatebytes = tcpstats->necp_tcp_counts.necp_stat_rxduplicatebytes;
6830 countsp->nstat_rxoutoforderbytes = tcpstats->necp_tcp_counts.necp_stat_rxoutoforderbytes;
6831 countsp->nstat_txretransmit = tcpstats->necp_tcp_counts.necp_stat_txretransmit;
6832
6833 countsp->nstat_min_rtt = tcpstats->necp_tcp_counts.necp_stat_min_rtt;
6834 countsp->nstat_avg_rtt = tcpstats->necp_tcp_counts.necp_stat_avg_rtt;
6835 countsp->nstat_var_rtt = tcpstats->necp_tcp_counts.necp_stat_var_rtt;
6836
6837 countsp->nstat_connectattempts = tcpstats->necp_tcp_extra.state >= TCPS_SYN_SENT ? 1 : 0;
6838 countsp->nstat_connectsuccesses = tcpstats->necp_tcp_extra.state >= TCPS_ESTABLISHED ? 1 : 0;
6839
6840 // Supplement what the user level has told us with what we know from the flowswitch
6841 // The nstat_counts structure has only one set of packet counts so set them from the
6842 // trusted flowswitch as clients may use them to calculate header overhead for cell/wifi/wired counts
6843 countsp->nstat_rxpackets = sf->sf_ipackets;
6844 countsp->nstat_txpackets = sf->sf_opackets;
6845 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
6846 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
6847 countsp->nstat_cell_txbytes = sf->sf_obytes;
6848 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
6849 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
6850 countsp->nstat_wifi_txbytes = sf->sf_obytes;
6851 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
6852 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
6853 countsp->nstat_wired_txbytes = sf->sf_obytes;
6854 }
6855 }
6856
6857 if (detailed_countsp) {
6858 detailed_countsp->nstat_media_stats.ms_total.ts_rxbytes = tcpstats->necp_tcp_counts.necp_stat_rxbytes;
6859 detailed_countsp->nstat_media_stats.ms_total.ts_txbytes = tcpstats->necp_tcp_counts.necp_stat_txbytes;
6860 detailed_countsp->nstat_media_stats.ms_total.ts_rxpackets = tcpstats->necp_tcp_counts.necp_stat_rxpackets;
6861 detailed_countsp->nstat_media_stats.ms_total.ts_txpackets = tcpstats->necp_tcp_counts.necp_stat_txpackets;
6862
6863 detailed_countsp->nstat_rxduplicatebytes = tcpstats->necp_tcp_counts.necp_stat_rxduplicatebytes;
6864 detailed_countsp->nstat_rxoutoforderbytes = tcpstats->necp_tcp_counts.necp_stat_rxoutoforderbytes;
6865 detailed_countsp->nstat_txretransmit = tcpstats->necp_tcp_counts.necp_stat_txretransmit;
6866
6867 detailed_countsp->nstat_min_rtt = tcpstats->necp_tcp_counts.necp_stat_min_rtt;
6868 detailed_countsp->nstat_avg_rtt = tcpstats->necp_tcp_counts.necp_stat_avg_rtt;
6869 detailed_countsp->nstat_var_rtt = tcpstats->necp_tcp_counts.necp_stat_var_rtt;
6870
6871 // Supplement what the user level has told us with what we know from the flowswitch
6872 // The user level statistics don't include a bitmap so use the one within the kernel,
6873 memcpy(&detailed_countsp->nstat_media_stats.ms_total.ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
6874
6875 struct traffic_stats *ts = media_stats_embedded_ts(&detailed_countsp->nstat_media_stats, route_ifflags);
6876 if (ts) {
6877 ts->ts_rxpackets = sf->sf_ipackets;
6878 ts->ts_txpackets = sf->sf_opackets;
6879 ts->ts_rxbytes = sf->sf_ibytes;
6880 ts->ts_txbytes = sf->sf_obytes;
6881 memcpy(&ts->ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
6882 }
6883 }
6884
6885 if (metadatap) {
6886 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)metadatap;
6887 memset(desc, 0, sizeof(*desc));
6888
6889 // Metadata from the flow registration
6890 uuid_copy(desc->fuuid, flow_registration->registration_id);
6891
6892 // Metadata that the necp client should have in TLV format.
6893 pid_t effective_pid = client->proc_pid;
6894 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, &desc->uid, desc->euuid, &desc->persona_id, &desc->traffic_class, &desc->fallback_mode);
6895 desc->epid = (u_int32_t)effective_pid;
6896
6897 // Metadata from the flow registration
6898 // This needs to revisited if multiple flows are created from one flow registration
6899 struct necp_client_flow *flow = NULL;
6900 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
6901 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
6902 break;
6903 }
6904
6905 // Metadata from the route
6906 desc->ifindex = route_ifindex;
6907 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
6908 desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
6909 if (tcpstats->necp_tcp_extra.flags1 & SOF1_CELLFALLBACK) {
6910 desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
6911 }
6912
6913 // Basic metadata from userland
6914 desc->rcvbufsize = tcpstats->necp_tcp_basic.rcvbufsize;
6915 desc->rcvbufused = tcpstats->necp_tcp_basic.rcvbufused;
6916
6917 // Additional TCP specific data
6918 desc->sndbufsize = tcpstats->necp_tcp_extra.sndbufsize;
6919 desc->sndbufused = tcpstats->necp_tcp_extra.sndbufused;
6920 desc->txunacked = tcpstats->necp_tcp_extra.txunacked;
6921 desc->txwindow = tcpstats->necp_tcp_extra.txwindow;
6922 desc->txcwindow = tcpstats->necp_tcp_extra.txcwindow;
6923 desc->traffic_mgt_flags = tcpstats->necp_tcp_extra.traffic_mgt_flags;
6924 desc->state = tcpstats->necp_tcp_extra.state;
6925
6926 u_int32_t cc_alg_index = tcpstats->necp_tcp_extra.cc_alg_index;
6927 if (cc_alg_index < TCP_CC_ALGO_COUNT) {
6928 strbufcpy(desc->cc_algo, sizeof(desc->cc_algo), tcp_cc_algo_list[cc_alg_index]->name, sizeof(tcp_cc_algo_list[cc_alg_index]->name));
6929 } else {
6930 strlcpy(desc->cc_algo, "unknown", sizeof(desc->cc_algo));
6931 }
6932
6933 desc->connstatus.probe_activated = tcpstats->necp_tcp_extra.probestatus.probe_activated;
6934 desc->connstatus.write_probe_failed = tcpstats->necp_tcp_extra.probestatus.write_probe_failed;
6935 desc->connstatus.read_probe_failed = tcpstats->necp_tcp_extra.probestatus.read_probe_failed;
6936 desc->connstatus.conn_probe_failed = tcpstats->necp_tcp_extra.probestatus.conn_probe_failed;
6937
6938 memcpy(&desc->activity_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
6939
6940 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) {
6941 uuid_string_t euuid_str = { 0 };
6942 uuid_unparse(desc->euuid, euuid_str);
6943 NECPLOG(LOG_NOTICE, "Collected stats - TCP - epid %d uid %d euuid %s persona id %d", desc->epid, desc->uid, euuid_str, desc->persona_id);
6944 }
6945 }
6946
6947 return true;
6948 }
6949
6950 static bool
necp_request_aop_tcp_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)6951 necp_request_aop_tcp_netstats(userland_stats_provider_context *ctx,
6952 u_int32_t *ifflagsp,
6953 nstat_progress_digest *digestp,
6954 nstat_counts *countsp,
6955 nstat_detailed_counts *detailed_countsp,
6956 void *metadatap)
6957 {
6958 struct aop_flow_stats flow_stats = {};
6959 struct tcp_info *tcpi = &flow_stats.transport.tcp_stats.tcp_info;
6960 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
6961 struct necp_client *client = flow_registration->client;
6962 int err = 0;
6963
6964 ASSERT(flow_registration->aop_offload);
6965
6966 u_int32_t nstat_diagnostic_flags = 0;
6967
6968 // Retrieve details from the last time the assigned flows were updated
6969 u_int32_t route_ifindex = IFSCOPE_NONE;
6970 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6971 u_int64_t combined_interface_details = 0;
6972
6973 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
6974 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
6975
6976 if (route_ifindex == IFSCOPE_NONE) {
6977 // Mark no interface
6978 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
6979 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6980 NECPLOG(LOG_INFO, "req tcp stats, failed to get route details for pid %d curproc %d %s\n",
6981 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
6982 }
6983
6984 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
6985 if (sf == NULL) {
6986 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
6987 char namebuf[MAXCOMLEN + 1];
6988 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
6989 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
6990 NECPLOG(LOG_ERR, "req tcp stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
6991 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
6992 sf = &ntstat_sk_stats_zero;
6993 }
6994
6995 if (ifflagsp) {
6996 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
6997 *ifflagsp |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
6998 if ((digestp == NULL) && (countsp == NULL) && (detailed_countsp == NULL) && (metadatap == NULL)) {
6999 return true;
7000 }
7001 }
7002
7003 // This needs to revisited if multiple flows are created from one flow registration
7004 struct necp_client_flow *flow = LIST_FIRST(&flow_registration->flow_list);
7005 if (flow == NULL) {
7006 return false;
7007 }
7008
7009 ASSERT(flow->aop_offload && flow->flow_tag > 0);
7010 if (!flow->aop_stat_index_valid) {
7011 return false;
7012 }
7013 err = net_aop_get_flow_stats(flow->stats_index, &flow_stats);
7014 if (err != 0) {
7015 NECPLOG(LOG_ERR, "failed to get aop flow stats "
7016 "for flow id %u with error %d", flow->flow_tag, err);
7017 return false;
7018 }
7019
7020 if (__improbable(flow->flow_tag != flow_stats.flow_id)) {
7021 NECPLOG(LOG_ERR, "aop flow stats, flow tag 0x%x != 0x%x",
7022 flow->flow_tag, flow_stats.flow_id);
7023 return false;
7024 }
7025
7026 if (digestp) {
7027 // The digest is intended to give information that may help give insight into the state of the link
7028 digestp->rxbytes = tcpi->tcpi_rxbytes;
7029 digestp->txbytes = tcpi->tcpi_txbytes;
7030 digestp->rxduplicatebytes = tcpi->tcpi_rxduplicatebytes;
7031 digestp->rxoutoforderbytes = tcpi->tcpi_rxoutoforderbytes;
7032 digestp->txretransmit = tcpi->tcpi_txretransmitbytes;
7033 digestp->ifindex = route_ifindex;
7034 digestp->state = tcpi->tcpi_state;
7035 digestp->txunacked = tcpi->tcpi_txunacked;
7036 digestp->txwindow = tcpi->tcpi_snd_wnd;
7037
7038 if ((countsp == NULL) && (metadatap == NULL)) {
7039 return true;
7040 }
7041 }
7042
7043 if (countsp) {
7044 countsp->nstat_rxbytes = tcpi->tcpi_rxbytes;
7045 countsp->nstat_txbytes = tcpi->tcpi_txbytes;
7046
7047 countsp->nstat_rxduplicatebytes = tcpi->tcpi_rxduplicatebytes;
7048 countsp->nstat_rxoutoforderbytes = tcpi->tcpi_rxoutoforderbytes;
7049 countsp->nstat_txretransmit = tcpi->tcpi_txretransmitbytes;
7050
7051 countsp->nstat_min_rtt = tcpi->tcpi_rttbest;
7052 countsp->nstat_avg_rtt = tcpi->tcpi_srtt;
7053 countsp->nstat_var_rtt = tcpi->tcpi_rttvar;
7054
7055 countsp->nstat_connectattempts = tcpi->tcpi_state >= TCPS_SYN_SENT ? 1 : 0;
7056 countsp->nstat_connectsuccesses = tcpi->tcpi_state >= TCPS_ESTABLISHED ? 1 : 0;
7057
7058 // Supplement what the user level has told us with what we know from the flowswitch
7059 // The nstat_counts structure has only one set of packet counts so set them from the
7060 // trusted flowswitch as clients may use them to calculate header overhead for cell/wifi/wired counts
7061 countsp->nstat_rxpackets = sf->sf_ipackets;
7062 countsp->nstat_txpackets = sf->sf_opackets;
7063 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
7064 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
7065 countsp->nstat_cell_txbytes = sf->sf_obytes;
7066 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
7067 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
7068 countsp->nstat_wifi_txbytes = sf->sf_obytes;
7069 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
7070 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
7071 countsp->nstat_wired_txbytes = sf->sf_obytes;
7072 }
7073 }
7074
7075 if (detailed_countsp) {
7076 detailed_countsp->nstat_media_stats.ms_total.ts_rxbytes = tcpi->tcpi_rxbytes;
7077 detailed_countsp->nstat_media_stats.ms_total.ts_txbytes = tcpi->tcpi_txbytes;
7078 detailed_countsp->nstat_media_stats.ms_total.ts_rxpackets = tcpi->tcpi_rxpackets;
7079 detailed_countsp->nstat_media_stats.ms_total.ts_txpackets = tcpi->tcpi_txpackets;
7080
7081 detailed_countsp->nstat_rxduplicatebytes = tcpi->tcpi_rxduplicatebytes;
7082 detailed_countsp->nstat_rxoutoforderbytes = tcpi->tcpi_rxoutoforderbytes;
7083 detailed_countsp->nstat_txretransmit = tcpi->tcpi_txretransmitbytes;
7084
7085 detailed_countsp->nstat_min_rtt = tcpi->tcpi_rttbest;
7086 detailed_countsp->nstat_avg_rtt = tcpi->tcpi_srtt;
7087 detailed_countsp->nstat_var_rtt = tcpi->tcpi_rttvar;
7088
7089 struct traffic_stats *ts = media_stats_embedded_ts(&detailed_countsp->nstat_media_stats, route_ifflags);
7090 if (ts) {
7091 ts->ts_rxpackets = sf->sf_ipackets;
7092 ts->ts_txpackets = sf->sf_opackets;
7093 ts->ts_rxbytes = sf->sf_ibytes;
7094 ts->ts_txbytes = sf->sf_obytes;
7095 }
7096 }
7097
7098 if (metadatap) {
7099 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)metadatap;
7100 memset(desc, 0, sizeof(*desc));
7101
7102 // Metadata from the flow registration
7103 uuid_copy(desc->fuuid, flow_registration->registration_id);
7104
7105 // Metadata that the necp client should have in TLV format.
7106 pid_t effective_pid = client->proc_pid;
7107 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, &desc->uid, desc->euuid, &desc->persona_id, &desc->traffic_class, &desc->fallback_mode);
7108 desc->epid = (u_int32_t)effective_pid;
7109
7110 // Metadata from the flow registration
7111 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
7112
7113 // Metadata from the route
7114 desc->ifindex = route_ifindex;
7115 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
7116 desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7117
7118 // Basic metadata from userland
7119 desc->rcvbufsize = flow_stats.rx_buffer_stats.bufsize;
7120 desc->rcvbufused = flow_stats.rx_buffer_stats.bufused;
7121
7122 // Additional TCP specific data
7123 desc->sndbufsize = flow_stats.tx_buffer_stats.bufsize;
7124 desc->sndbufused = flow_stats.tx_buffer_stats.bufused;
7125 desc->txunacked = tcpi->tcpi_txunacked;
7126 desc->txwindow = tcpi->tcpi_snd_wnd;
7127 desc->txcwindow = tcpi->tcpi_snd_cwnd;
7128 desc->traffic_mgt_flags = 0;
7129 desc->state = tcpi->tcpi_state;
7130
7131 u_int32_t cc_alg_index = flow_stats.transport.tcp_stats.tcp_cc_algo;
7132 if (cc_alg_index < TCP_CC_ALGO_COUNT) {
7133 strbufcpy(desc->cc_algo, sizeof(desc->cc_algo), tcp_cc_algo_list[cc_alg_index]->name, sizeof(tcp_cc_algo_list[cc_alg_index]->name));
7134 } else {
7135 strlcpy(desc->cc_algo, "unknown", sizeof(desc->cc_algo));
7136 }
7137
7138 desc->connstatus.probe_activated = 0;
7139 desc->connstatus.write_probe_failed = 0;
7140 desc->connstatus.read_probe_failed = 0;
7141 desc->connstatus.conn_probe_failed = 0;
7142
7143 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) {
7144 uuid_string_t euuid_str = { 0 };
7145 uuid_unparse(desc->euuid, euuid_str);
7146 NECPLOG(LOG_NOTICE, "Collected stats - TCP - epid %d uid %d euuid %s persona id %d", desc->epid, desc->uid, euuid_str, desc->persona_id);
7147 }
7148 }
7149
7150 return true;
7151 }
7152
7153 // Called from NetworkStatistics when it wishes to collect latest information for a TCP flow.
7154 // It is a responsibility of NetworkStatistics to have previously zeroed any supplied memory.
7155 static bool
necp_request_tcp_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)7156 necp_request_tcp_netstats(userland_stats_provider_context *ctx,
7157 u_int32_t *ifflagsp,
7158 nstat_progress_digest *digestp,
7159 nstat_counts *countsp,
7160 nstat_detailed_counts *detailed_countsp,
7161 void *metadatap)
7162 {
7163 if (ctx == NULL) {
7164 return false;
7165 }
7166
7167 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
7168 if (__probable(!flow_registration->aop_offload)) {
7169 return necp_request_nexus_tcp_netstats(ctx, ifflagsp, digestp, countsp, detailed_countsp, metadatap);
7170 } else {
7171 return necp_request_aop_tcp_netstats(ctx, ifflagsp, digestp, countsp, detailed_countsp, metadatap);
7172 }
7173 }
7174
7175 // Called from NetworkStatistics when it wishes to collect latest information for a UDP flow.
// Collects the latest statistics for a UDP flow on behalf of NetworkStatistics.
// Counters come from the userland-shared stats region (udpstats) and are
// supplemented with trusted packet/byte counts from the flowswitch (sf).
// Returns false only when no context is supplied; otherwise fills in whichever
// of ifflagsp / countsp / detailed_countsp / metadatap are non-NULL.
static bool
necp_request_udp_netstats(userland_stats_provider_context *ctx,
    u_int32_t *ifflagsp,
    nstat_progress_digest *digestp,
    nstat_counts *countsp,
    nstat_detailed_counts *detailed_countsp,
    void *metadatap)
{
#pragma unused(digestp)

	if (ctx == NULL) {
		return false;
	}

	struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
	struct necp_client *client = flow_registration->client;
	// The userland-maintained stats live in the shared region at kstats_kaddr.
	struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
	struct necp_udp_stats *udpstats = (struct necp_udp_stats *)ustats_kaddr;
	ASSERT(udpstats != NULL);

	u_int32_t nstat_diagnostic_flags = 0;

	// Retrieve details from the last time the assigned flows were updated
	u_int32_t route_ifindex = IFSCOPE_NONE;
	u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
	u_int64_t combined_interface_details = 0;

	combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
	split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);

	if (route_ifindex == IFSCOPE_NONE) {
		// Mark no interface
		nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
		route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
		NECPLOG(LOG_INFO, "req udp stats, failed to get route details for pid %d curproc %d %s\n",
		    client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
	}

	// NOTE(review): sf is the address of a member embedded in *nexus_stats, so
	// the NULL check below can only trip if nexus_stats itself is NULL and
	// fs_stats sits at offset zero — confirm, or consider testing
	// flow_registration->nexus_stats for NULL before taking the address.
	const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
	if (sf == NULL) {
		nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
		char namebuf[MAXCOMLEN + 1];
		(void) strlcpy(namebuf, "unknown", sizeof(namebuf));
		proc_name(client->proc_pid, namebuf, sizeof(namebuf));
		NECPLOG(LOG_ERR, "req udp stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
		    client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
		// Substitute all-zero flowswitch stats so the reads below stay safe.
		sf = &ntstat_sk_stats_zero;
	}

	if (ifflagsp) {
		*ifflagsp = route_ifflags | nstat_diagnostic_flags;
		*ifflagsp |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
		// Fast path: the caller only wanted the interface flags.
		if ((digestp == NULL) && (countsp == NULL) && (detailed_countsp == NULL) && (metadatap == NULL)) {
			return true;
		}
	}

	if (countsp) {
		countsp->nstat_rxbytes = udpstats->necp_udp_counts.necp_stat_rxbytes;
		countsp->nstat_txbytes = udpstats->necp_udp_counts.necp_stat_txbytes;

		countsp->nstat_rxduplicatebytes = udpstats->necp_udp_counts.necp_stat_rxduplicatebytes;
		countsp->nstat_rxoutoforderbytes = udpstats->necp_udp_counts.necp_stat_rxoutoforderbytes;
		countsp->nstat_txretransmit = udpstats->necp_udp_counts.necp_stat_txretransmit;

		countsp->nstat_min_rtt = udpstats->necp_udp_counts.necp_stat_min_rtt;
		countsp->nstat_avg_rtt = udpstats->necp_udp_counts.necp_stat_avg_rtt;
		countsp->nstat_var_rtt = udpstats->necp_udp_counts.necp_stat_var_rtt;

		// Supplement what the user level has told us with what we know from the flowswitch
		// The nstat_counts structure has only one set of packet counts so set them from the
		// trusted flowswitch as clients may use them to calculate header overhead for cell/wifi/wired counts
		countsp->nstat_rxpackets = sf->sf_ipackets;
		countsp->nstat_txpackets = sf->sf_opackets;
		if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
			countsp->nstat_cell_rxbytes = sf->sf_ibytes;
			countsp->nstat_cell_txbytes = sf->sf_obytes;
		} else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
			countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
			countsp->nstat_wifi_txbytes = sf->sf_obytes;
		} else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
			countsp->nstat_wired_rxbytes = sf->sf_ibytes;
			countsp->nstat_wired_txbytes = sf->sf_obytes;
		}
	}

	if (detailed_countsp) {
		detailed_countsp->nstat_media_stats.ms_total.ts_rxbytes = udpstats->necp_udp_counts.necp_stat_rxbytes;
		detailed_countsp->nstat_media_stats.ms_total.ts_txbytes = udpstats->necp_udp_counts.necp_stat_txbytes;
		detailed_countsp->nstat_media_stats.ms_total.ts_rxpackets = udpstats->necp_udp_counts.necp_stat_rxpackets;
		detailed_countsp->nstat_media_stats.ms_total.ts_txpackets = udpstats->necp_udp_counts.necp_stat_txpackets;

		detailed_countsp->nstat_rxduplicatebytes = udpstats->necp_udp_counts.necp_stat_rxduplicatebytes;
		detailed_countsp->nstat_rxoutoforderbytes = udpstats->necp_udp_counts.necp_stat_rxoutoforderbytes;
		detailed_countsp->nstat_txretransmit = udpstats->necp_udp_counts.necp_stat_txretransmit;

		detailed_countsp->nstat_min_rtt = udpstats->necp_udp_counts.necp_stat_min_rtt;
		detailed_countsp->nstat_avg_rtt = udpstats->necp_udp_counts.necp_stat_avg_rtt;
		detailed_countsp->nstat_var_rtt = udpstats->necp_udp_counts.necp_stat_var_rtt;

		// Supplement what the user level has told us with what we know from the flowswitch
		// The user level statistics don't include a bitmap so use the one within the kernel,
		memcpy(&detailed_countsp->nstat_media_stats.ms_total.ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));

		// Per-media-type (cell/wifi/wired) breakdown, keyed off the route flags.
		struct traffic_stats *ts = media_stats_embedded_ts(&detailed_countsp->nstat_media_stats, route_ifflags);
		if (ts) {
			ts->ts_rxpackets = sf->sf_ipackets;
			ts->ts_txpackets = sf->sf_opackets;
			ts->ts_rxbytes = sf->sf_ibytes;
			ts->ts_txbytes = sf->sf_obytes;
			memcpy(&ts->ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
		}
	}

	if (metadatap) {
		nstat_udp_descriptor *desc = (nstat_udp_descriptor *)metadatap;
		memset(desc, 0, sizeof(*desc));

		// Metadata from the flow registration
		uuid_copy(desc->fuuid, flow_registration->registration_id);

		// Metadata that the necp client should have in TLV format.
		pid_t effective_pid = client->proc_pid;
		necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, &desc->uid, desc->euuid, &desc->persona_id, &desc->traffic_class, &desc->fallback_mode);
		desc->epid = (u_int32_t)effective_pid;

		// Metadata from the flow registration
		// This needs to revisited if multiple flows are created from one flow registration
		struct necp_client_flow *flow = NULL;
		LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
			memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
			break;
		}

		// Metadata from the route
		desc->ifindex = route_ifindex;
		desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
		desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;

		// Basic metadata is all that is required for UDP
		desc->rcvbufsize = udpstats->necp_udp_basic.rcvbufsize;
		desc->rcvbufused = udpstats->necp_udp_basic.rcvbufused;

		memcpy(&desc->activity_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));

		if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) {
			uuid_string_t euuid_str = { 0 };
			uuid_unparse(desc->euuid, euuid_str);
			NECPLOG(LOG_NOTICE, "Collected stats - UDP - epid %d uid %d euuid %s persona id %d", desc->epid, desc->uid, euuid_str, desc->persona_id);
		}
	}

	return true;
}
7330
7331 // Called from NetworkStatistics when it wishes to collect latest information for a QUIC flow.
7332 //
7333 // TODO: For now it is an exact implementation as that of TCP.
7334 // Still to keep the logic separate for future divergence, keeping the routines separate.
7335 // It also seems there are lots of common code between existing implementations and
7336 // it would be good to refactor this logic at some point.
// Collects the latest statistics for a QUIC flow on behalf of NetworkStatistics.
// Mirrors the TCP collector: userland-shared counters (quicstats) are
// supplemented with trusted packet/byte counts from the flowswitch (sf).
// Returns false only when no context is supplied; otherwise fills in whichever
// of ifflagsp / digestp / countsp / detailed_countsp / metadatap are non-NULL.
static bool
necp_request_quic_netstats(userland_stats_provider_context *ctx,
    u_int32_t *ifflagsp,
    nstat_progress_digest *digestp,
    nstat_counts *countsp,
    nstat_detailed_counts *detailed_countsp,
    void *metadatap)
{
	if (ctx == NULL) {
		return false;
	}

	struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
	struct necp_client *client = flow_registration->client;
	// The userland-maintained stats live in the shared region at kstats_kaddr.
	struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
	struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
	ASSERT(quicstats != NULL);

	u_int32_t nstat_diagnostic_flags = 0;

	// Retrieve details from the last time the assigned flows were updated
	u_int32_t route_ifindex = IFSCOPE_NONE;
	u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
	u_int64_t combined_interface_details = 0;

	combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
	split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);

	if (route_ifindex == IFSCOPE_NONE) {
		// Mark no interface
		nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
		route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
		NECPLOG(LOG_INFO, "req quic stats, failed to get route details for pid %d curproc %d %s\n",
		    client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
	}

	// NOTE(review): sf is the address of a member embedded in *nexus_stats, so
	// the NULL check below can only trip if nexus_stats itself is NULL and
	// fs_stats sits at offset zero — confirm, or consider testing
	// flow_registration->nexus_stats for NULL before taking the address.
	const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
	if (sf == NULL) {
		nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
		char namebuf[MAXCOMLEN + 1];
		(void) strlcpy(namebuf, "unknown", sizeof(namebuf));
		proc_name(client->proc_pid, namebuf, sizeof(namebuf));
		NECPLOG(LOG_ERR, "req quic stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
		    client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
		// Substitute all-zero flowswitch stats so the reads below stay safe.
		sf = &ntstat_sk_stats_zero;
	}

	if (ifflagsp) {
		*ifflagsp = route_ifflags | nstat_diagnostic_flags;
		*ifflagsp |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
		if (quicstats->necp_quic_extra.fallback) {
			*ifflagsp |= NSTAT_IFNET_VIA_CELLFALLBACK;
		}
		// Fast path: the caller only wanted the interface flags.
		if ((digestp == NULL) && (countsp == NULL) && (detailed_countsp == NULL) && (metadatap == NULL)) {
			return true;
		}
	}

	if (digestp) {
		// The digest is intended to give information that may help give insight into the state of the link
		digestp->rxbytes = quicstats->necp_quic_counts.necp_stat_rxbytes;
		digestp->txbytes = quicstats->necp_quic_counts.necp_stat_txbytes;
		digestp->rxduplicatebytes = quicstats->necp_quic_counts.necp_stat_rxduplicatebytes;
		digestp->rxoutoforderbytes = quicstats->necp_quic_counts.necp_stat_rxoutoforderbytes;
		digestp->txretransmit = quicstats->necp_quic_counts.necp_stat_txretransmit;
		digestp->ifindex = route_ifindex;
		digestp->state = quicstats->necp_quic_extra.state;
		digestp->txunacked = quicstats->necp_quic_extra.txunacked;
		digestp->txwindow = quicstats->necp_quic_extra.txwindow;
		digestp->connstatus.probe_activated = quicstats->necp_quic_extra.probestatus.probe_activated;
		digestp->connstatus.write_probe_failed = quicstats->necp_quic_extra.probestatus.write_probe_failed;
		digestp->connstatus.read_probe_failed = quicstats->necp_quic_extra.probestatus.read_probe_failed;
		digestp->connstatus.conn_probe_failed = quicstats->necp_quic_extra.probestatus.conn_probe_failed;

		// Digest plus flags satisfied the request; skip the heavier copies.
		if ((countsp == NULL) && (metadatap == NULL)) {
			return true;
		}
	}

	if (countsp) {
		countsp->nstat_rxbytes = quicstats->necp_quic_counts.necp_stat_rxbytes;
		countsp->nstat_txbytes = quicstats->necp_quic_counts.necp_stat_txbytes;

		countsp->nstat_rxduplicatebytes = quicstats->necp_quic_counts.necp_stat_rxduplicatebytes;
		countsp->nstat_rxoutoforderbytes = quicstats->necp_quic_counts.necp_stat_rxoutoforderbytes;
		countsp->nstat_txretransmit = quicstats->necp_quic_counts.necp_stat_txretransmit;

		countsp->nstat_min_rtt = quicstats->necp_quic_counts.necp_stat_min_rtt;
		countsp->nstat_avg_rtt = quicstats->necp_quic_counts.necp_stat_avg_rtt;
		countsp->nstat_var_rtt = quicstats->necp_quic_counts.necp_stat_var_rtt;

		// TODO: It would be good to expose QUIC stats for CH/SH retransmission and connection state
		// Supplement what the user level has told us with what we know from the flowswitch
		// The nstat_counts structure has only one set of packet counts so set them from the
		// trusted flowswitch as clients may use them to calculate header overhead for cell/wifi/wired counts
		countsp->nstat_rxpackets = sf->sf_ipackets;
		countsp->nstat_txpackets = sf->sf_opackets;
		if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
			countsp->nstat_cell_rxbytes = sf->sf_ibytes;
			countsp->nstat_cell_txbytes = sf->sf_obytes;
		} else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
			countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
			countsp->nstat_wifi_txbytes = sf->sf_obytes;
		} else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
			countsp->nstat_wired_rxbytes = sf->sf_ibytes;
			countsp->nstat_wired_txbytes = sf->sf_obytes;
		}
	}

	if (detailed_countsp) {
		detailed_countsp->nstat_media_stats.ms_total.ts_rxbytes = quicstats->necp_quic_counts.necp_stat_rxbytes;
		detailed_countsp->nstat_media_stats.ms_total.ts_txbytes = quicstats->necp_quic_counts.necp_stat_txbytes;
		detailed_countsp->nstat_media_stats.ms_total.ts_rxpackets = quicstats->necp_quic_counts.necp_stat_rxpackets;
		detailed_countsp->nstat_media_stats.ms_total.ts_txpackets = quicstats->necp_quic_counts.necp_stat_txpackets;

		detailed_countsp->nstat_rxduplicatebytes = quicstats->necp_quic_counts.necp_stat_rxduplicatebytes;
		detailed_countsp->nstat_rxoutoforderbytes = quicstats->necp_quic_counts.necp_stat_rxoutoforderbytes;
		detailed_countsp->nstat_txretransmit = quicstats->necp_quic_counts.necp_stat_txretransmit;

		detailed_countsp->nstat_min_rtt = quicstats->necp_quic_counts.necp_stat_min_rtt;
		detailed_countsp->nstat_avg_rtt = quicstats->necp_quic_counts.necp_stat_avg_rtt;
		detailed_countsp->nstat_var_rtt = quicstats->necp_quic_counts.necp_stat_var_rtt;

		// Supplement what the user level has told us with what we know from the flowswitch
		// The user level statistics don't include a bitmap so use the one within the kernel,
		memcpy(&detailed_countsp->nstat_media_stats.ms_total.ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));

		// Per-media-type (cell/wifi/wired) breakdown, keyed off the route flags.
		struct traffic_stats *ts = media_stats_embedded_ts(&detailed_countsp->nstat_media_stats, route_ifflags);
		if (ts) {
			ts->ts_rxpackets = sf->sf_ipackets;
			ts->ts_txpackets = sf->sf_opackets;
			ts->ts_rxbytes = sf->sf_ibytes;
			ts->ts_txbytes = sf->sf_obytes;
			memcpy(&ts->ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
		}
	}

	if (metadatap) {
		nstat_quic_descriptor *desc = (nstat_quic_descriptor *)metadatap;
		memset(desc, 0, sizeof(*desc));

		// Metadata from the flow registration
		uuid_copy(desc->fuuid, flow_registration->registration_id);

		// Metadata, that the necp client should have, in TLV format.
		pid_t effective_pid = client->proc_pid;
		necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, &desc->uid, desc->euuid, &desc->persona_id, &desc->traffic_class, &desc->fallback_mode);
		desc->epid = (u_int32_t)effective_pid;

		// Metadata from the flow registration
		// This needs to revisited if multiple flows are created from one flow registration
		struct necp_client_flow *flow = NULL;
		LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
			memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
			break;
		}

		// Metadata from the route
		desc->ifindex = route_ifindex;
		desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
		desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
		if (quicstats->necp_quic_extra.fallback) {
			desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
			desc->fallback_mode = SO_FALLBACK_MODE_FAST;
		}

		// Basic metadata from userland
		desc->rcvbufsize = quicstats->necp_quic_basic.rcvbufsize;
		desc->rcvbufused = quicstats->necp_quic_basic.rcvbufused;

		// Additional QUIC specific data
		desc->sndbufsize = quicstats->necp_quic_extra.sndbufsize;
		desc->sndbufused = quicstats->necp_quic_extra.sndbufused;
		desc->txunacked = quicstats->necp_quic_extra.txunacked;
		desc->txwindow = quicstats->necp_quic_extra.txwindow;
		desc->txcwindow = quicstats->necp_quic_extra.txcwindow;
		desc->traffic_mgt_flags = quicstats->necp_quic_extra.traffic_mgt_flags;
		desc->state = quicstats->necp_quic_extra.state;

		// TODO: CC algo defines should be named agnostic of the protocol
		u_int32_t cc_alg_index = quicstats->necp_quic_extra.cc_alg_index;
		if (cc_alg_index < TCP_CC_ALGO_COUNT) {
			strbufcpy(desc->cc_algo, sizeof(desc->cc_algo), tcp_cc_algo_list[cc_alg_index]->name, sizeof(tcp_cc_algo_list[cc_alg_index]->name));
		} else {
			strlcpy(desc->cc_algo, "unknown", sizeof(desc->cc_algo));
		}

		memcpy(&desc->activity_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));

		desc->connstatus.probe_activated = quicstats->necp_quic_extra.probestatus.probe_activated;
		desc->connstatus.write_probe_failed = quicstats->necp_quic_extra.probestatus.write_probe_failed;
		desc->connstatus.read_probe_failed = quicstats->necp_quic_extra.probestatus.read_probe_failed;
		desc->connstatus.conn_probe_failed = quicstats->necp_quic_extra.probestatus.conn_probe_failed;

		if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) {
			uuid_string_t euuid_str = { 0 };
			uuid_unparse(desc->euuid, euuid_str);
			NECPLOG(LOG_NOTICE, "Collected stats - QUIC - epid %d uid %d euuid %s persona id %d", desc->epid, desc->uid, euuid_str, desc->persona_id);
		}
	}
	return true;
}
7539
7540 #endif /* SKYWALK */
7541
7542 // Support functions for NetworkStatistics support for necp_client connections
7543
7544 static void
necp_client_inherit_from_parent(struct necp_client * client,struct necp_client * parent)7545 necp_client_inherit_from_parent(
7546 struct necp_client *client,
7547 struct necp_client *parent)
7548 {
7549 assert(client->original_parameters_source == NULL);
7550
7551 if (parent->original_parameters_source != NULL) {
7552 client->original_parameters_source = parent->original_parameters_source;
7553 } else {
7554 client->original_parameters_source = parent;
7555 }
7556 necp_client_retain(client->original_parameters_source);
7557 }
7558
7559 static void
necp_find_conn_netstat_data(struct necp_client * client,u_int32_t * ntstat_flags,pid_t * effective_pid,uuid_t * puuid,uid_t * uid,uuid_t * euuid,uid_t * persona_id)7560 necp_find_conn_netstat_data(struct necp_client *client,
7561 u_int32_t *ntstat_flags,
7562 pid_t *effective_pid,
7563 uuid_t *puuid,
7564 uid_t *uid,
7565 uuid_t *euuid,
7566 uid_t *persona_id)
7567 {
7568 bool has_remote_address = false;
7569 bool has_ip_protocol = false;
7570 bool has_transport_protocol = false;
7571 size_t offset = 0;
7572 u_int8_t *parameters;
7573 u_int32_t parameters_size;
7574
7575
7576 parameters = client->parameters;
7577 parameters_size = (u_int32_t)client->parameters_length;
7578
7579 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
7580 u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
7581 u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);
7582
7583 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
7584 // If the length is larger than what can fit in the remaining parameters size, bail
7585 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
7586 break;
7587 }
7588
7589 if (length > 0) {
7590 u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
7591 if (value != NULL) {
7592 switch (type) {
7593 case NECP_CLIENT_PARAMETER_APPLICATION: {
7594 if ((euuid) && (length >= sizeof(uuid_t))) {
7595 uuid_copy(*euuid, value);
7596 }
7597 break;
7598 }
7599 case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
7600 if (length >= 1) {
7601 has_ip_protocol = true;
7602 }
7603 break;
7604 }
7605 case NECP_CLIENT_PARAMETER_PID: {
7606 if ((effective_pid) && length >= sizeof(pid_t)) {
7607 memcpy(effective_pid, value, sizeof(pid_t));
7608 }
7609 break;
7610 }
7611 case NECP_CLIENT_PARAMETER_PARENT_ID: {
7612 if ((puuid) && (length == sizeof(uuid_t))) {
7613 uuid_copy(*puuid, value);
7614 }
7615 break;
7616 }
7617 // It is an implementation quirk that the remote address can be found in the necp parameters
7618 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
7619 if (length >= sizeof(struct necp_policy_condition_addr)) {
7620 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
7621 if (necp_client_address_is_valid(&address_struct->address.sa)) {
7622 has_remote_address = true;
7623 }
7624 }
7625 break;
7626 }
7627 case NECP_CLIENT_PARAMETER_TRANSPORT_PROTOCOL: {
7628 if (length >= 1) {
7629 has_transport_protocol = true;
7630 }
7631 break;
7632 }
7633 case NECP_CLIENT_PARAMETER_APPLICATION_ID: {
7634 if (length >= sizeof(necp_application_id_t) && uid && persona_id) {
7635 necp_application_id_t *application_id = (necp_application_id_t *)(void *)value;
7636 memcpy(uid, &application_id->uid, sizeof(uid_t));
7637 uuid_copy(*euuid, application_id->effective_uuid);
7638 memcpy(persona_id, &application_id->persona_id, sizeof(uid_t));
7639 }
7640 break;
7641 }
7642 default: {
7643 break;
7644 }
7645 }
7646 }
7647 }
7648 offset += sizeof(struct necp_tlv_header) + length;
7649 }
7650 if (ntstat_flags) {
7651 *ntstat_flags = (has_remote_address && has_ip_protocol && has_transport_protocol)? NSTAT_NECP_CONN_HAS_NET_ACCESS: 0;
7652 }
7653 }
7654
7655 static bool
necp_request_conn_netstats(nstat_provider_context ctx,u_int32_t * ifflagsp,nstat_counts * countsp,nstat_detailed_counts * detailsp,void * metadatap)7656 necp_request_conn_netstats(nstat_provider_context ctx,
7657 u_int32_t *ifflagsp,
7658 nstat_counts *countsp,
7659 nstat_detailed_counts *detailsp,
7660 void *metadatap)
7661 {
7662 if (ctx == NULL) {
7663 return false;
7664 }
7665 struct necp_client * __single client = (struct necp_client *)(void *)ctx;
7666 nstat_connection_descriptor *desc = (nstat_connection_descriptor *)metadatap;
7667
7668 if (ifflagsp) {
7669 necp_find_conn_netstat_data(client, ifflagsp, NULL, NULL, NULL, NULL, NULL);
7670 }
7671 if (countsp) {
7672 memset(countsp, 0, sizeof(*countsp));
7673 }
7674 if (detailsp) {
7675 memset(detailsp, 0, sizeof(*detailsp));
7676 }
7677 if (desc) {
7678 memset(desc, 0, sizeof(*desc));
7679 // Metadata, that the necp client should have, in TLV format.
7680 pid_t effective_pid = client->proc_pid;
7681 necp_find_conn_netstat_data(client, &desc->ifnet_properties, &effective_pid, &desc->puuid, &desc->uid, &desc->euuid, &desc->persona_id);
7682 desc->epid = (u_int32_t)effective_pid;
7683
7684 // User level should obtain almost all connection information from an extension
7685 // leaving little to do here
7686 uuid_copy(desc->fuuid, client->latest_flow_registration_id);
7687 uuid_copy(desc->cuuid, client->client_id);
7688 }
7689 return true;
7690 }
7691
// Check whether the process/credential may act as a Skywalk nexus controller
// (which also covers Skywalk observer privileges). When SKYWALK is not built
// in there is nothing to gate, so access is always granted (returns 0).
static int
necp_skywalk_priv_check_cred(proc_t p, kauth_cred_t cred)
{
#pragma unused(p, cred)
#if SKYWALK
	/* This includes Nexus controller and Skywalk observer privs */
	return skywalk_nxctl_check_privileges(p, cred);
#else /* !SKYWALK */
	return 0;
#endif /* !SKYWALK */
}
7703
7704 /// System calls
7705
// necp_open system call: allocate a NECP client file descriptor and its
// per-fd state (fd_data), register it on the appropriate global list, and
// return the new fd to the caller.
//
// Observer modes (NECP_OPEN_FLAG_OBSERVER / NECP_OPEN_FLAG_PUSH_OBSERVER)
// require either Skywalk nexus-controller privileges or the privileged
// network statistics entitlement. A push observer is additionally primed
// with every client that already exists at open time.
int
necp_open(struct proc *p, struct necp_open_args *uap, int *retval)
{
#pragma unused(retval)
	// NOTE(review): retval is marked unused above, yet *retval = fd is
	// assigned below — the pragma looks stale; confirm and consider removing.
	int error = 0;
	struct necp_fd_data * __single fd_data = NULL;
	struct fileproc * __single fp = NULL;
	int fd = -1;

	if (uap->flags & NECP_OPEN_FLAG_OBSERVER ||
	    uap->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
		// Observing other processes' clients is privileged.
		if (necp_skywalk_priv_check_cred(p, kauth_cred_get()) != 0 &&
		    priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
			NECPLOG0(LOG_ERR, "Client does not hold necessary entitlement to observe other NECP clients");
			error = EACCES;
			goto done;
		}
	}

#if CONFIG_MACF
	// Give MAC policies a chance to veto the open.
	error = mac_necp_check_open(p, uap->flags);
	if (error) {
		goto done;
	}
#endif /* MACF */

	// Allocate the file descriptor, then the per-fd NECP state.
	error = falloc(p, &fp, &fd);
	if (error != 0) {
		goto done;
	}

	fd_data = kalloc_type(struct necp_fd_data, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	fd_data->necp_fd_type = necp_fd_type_client;
	fd_data->flags = uap->flags;
	RB_INIT(&fd_data->clients);
	RB_INIT(&fd_data->flows);
	TAILQ_INIT(&fd_data->update_list);
	lck_mtx_init(&fd_data->fd_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
	klist_init(&fd_data->si.si_note);
	fd_data->proc_pid = proc_pid(p);
#if SKYWALK
	LIST_INIT(&fd_data->stats_arena_list);
#endif /* SKYWALK */

	// NECP fds are read-only and are closed on exec/fork.
	fp->fp_flags |= FP_CLOEXEC | FP_CLOFORK;
	fp->fp_glob->fg_flag = FREAD;
	fp->fp_glob->fg_ops = &necp_fd_ops;
	fp_set_data(fp, fd_data);

	proc_fdlock(p);

	// Publish the fd in the process fd table and drop our extra reference.
	procfdtbl_releasefd(p, fd, NULL);
	fp_drop(p, fd, fp, 1);

	*retval = fd;

	if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
		NECP_OBSERVER_LIST_LOCK_EXCLUSIVE();
		LIST_INSERT_HEAD(&necp_fd_observer_list, fd_data, chain);
		OSIncrementAtomic(&necp_observer_fd_count);
		NECP_OBSERVER_LIST_UNLOCK();

		// Walk all existing clients and add them
		NECP_CLIENT_TREE_LOCK_SHARED();
		struct necp_client *existing_client = NULL;
		RB_FOREACH(existing_client, _necp_client_global_tree, &necp_client_global_tree) {
			NECP_CLIENT_LOCK(existing_client);
			necp_client_update_observer_add_internal(fd_data, existing_client);
			necp_client_update_observer_update_internal(fd_data, existing_client);
			NECP_CLIENT_UNLOCK(existing_client);
		}
		NECP_CLIENT_TREE_UNLOCK();
	} else {
		NECP_FD_LIST_LOCK_EXCLUSIVE();
		LIST_INSERT_HEAD(&necp_fd_list, fd_data, chain);
		OSIncrementAtomic(&necp_client_fd_count);
		NECP_FD_LIST_UNLOCK();
	}

	proc_fdunlock(p);

done:
	// Failure path: release what was allocated. (The fd_data cleanup is
	// defensive — no error paths currently exist after its allocation.)
	if (error != 0) {
		if (fp != NULL) {
			fp_free(p, fd, fp);
			fp = NULL;
		}
		if (fd_data != NULL) {
			kfree_type(struct necp_fd_data, fd_data);
		}
	}

	return error;
}
7801
7802 // All functions called directly from necp_client_action() to handle one of the
7803 // types should be marked with NECP_CLIENT_ACTION_FUNCTION. This ensures that
7804 // necp_client_action() does not inline all the actions into a single function.
7805 #define NECP_CLIENT_ACTION_FUNCTION __attribute__((noinline))
7806
7807 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_add(struct proc * p,struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)7808 necp_client_add(struct proc *p, struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
7809 {
7810 int error = 0;
7811 struct necp_client * __single client = NULL;
7812 const size_t buffer_size = uap->buffer_size;
7813 const task_t __single task = proc_task(p);
7814
7815 if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
7816 NECPLOG0(LOG_ERR, "NECP client observers with push enabled may not add their own clients");
7817 return EINVAL;
7818 }
7819
7820 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
7821 buffer_size == 0 || buffer_size > NECP_MAX_CLIENT_PARAMETERS_SIZE || uap->buffer == 0) {
7822 return EINVAL;
7823 }
7824
7825 client = kalloc_type(struct necp_client, Z_WAITOK | Z_ZERO | Z_NOFAIL);
7826 client->parameters = kalloc_data(buffer_size, Z_WAITOK | Z_NOFAIL);
7827 client->parameters_length = buffer_size;
7828 lck_mtx_init(&client->lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
7829 lck_mtx_init(&client->route_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
7830
7831 error = copyin(uap->buffer, client->parameters, buffer_size);
7832 if (error) {
7833 NECPLOG(LOG_ERR, "necp_client_add parameters copyin error (%d)", error);
7834 goto done;
7835 }
7836
7837 os_ref_init(&client->reference_count, &necp_client_refgrp); // Hold our reference until close
7838
7839 client->proc_pid = fd_data->proc_pid; // Save off proc pid in case the client will persist past fd
7840 client->agent_handle = (void *)fd_data;
7841 client->platform_binary = ((csproc_get_platform_binary(p) == 0) ? 0 : 1);
7842
7843 necp_generate_client_id(client->client_id, false);
7844 LIST_INIT(&client->assertion_list);
7845 RB_INIT(&client->flow_registrations);
7846
7847 NECP_CLIENT_LOG(client, "Adding client");
7848
7849 error = copyout(client->client_id, uap->client_id, sizeof(uuid_t));
7850 if (error) {
7851 NECPLOG(LOG_ERR, "necp_client_add client_id copyout error (%d)", error);
7852 goto done;
7853 }
7854
7855 #if SKYWALK
7856 struct necp_client_parsed_parameters parsed_parameters = {};
7857 int parse_error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, &parsed_parameters);
7858
7859 if (parse_error == 0 &&
7860 ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID) ||
7861 (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER))) {
7862 bool has_delegation_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0);
7863 if (!has_delegation_entitlement) {
7864 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID) {
7865 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by upid",
7866 proc_name_address(p), proc_pid(p));
7867 }
7868 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER) {
7869 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to set attributed bundle identifier",
7870 proc_name_address(p), proc_pid(p));
7871 }
7872 error = EPERM;
7873 goto done;
7874 }
7875
7876 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID) {
7877 // Save off delegated unique PID
7878 client->delegated_upid = parsed_parameters.delegated_upid;
7879 }
7880 }
7881
7882 if (parse_error == 0 && parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) {
7883 bool has_nexus_entitlement = (necp_skywalk_priv_check_cred(p, kauth_cred_get()) == 0);
7884 if (!has_nexus_entitlement) {
7885 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to open a custom nexus client",
7886 proc_name_address(p), proc_pid(p));
7887 error = EPERM;
7888 goto done;
7889 }
7890 }
7891
7892 if (parse_error == 0 && (parsed_parameters.flags &
7893 (NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER | NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP))) {
7894 bool has_custom_protocol_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_CUSTOM_PROTOCOL, 0) == 0);
7895 if (!has_custom_protocol_entitlement) {
7896 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement for custom protocol APIs",
7897 proc_name_address(p), proc_pid(p));
7898 error = EPERM;
7899 goto done;
7900 }
7901 }
7902
7903 if (parse_error == 0 && (parsed_parameters.extended_flags & NECP_CLIENT_PARAMETER_EXTENDED_FLAG_AOP2_OFFLOAD)) {
7904 bool has_aop_offload_entitlement = IOTaskHasEntitlement(task, "com.apple.private.network.aop2_offload");
7905 if (!has_aop_offload_entitlement) {
7906 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement for aop offload",
7907 proc_name_address(p), proc_pid(p));
7908 error = EPERM;
7909 goto done;
7910 }
7911
7912 if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_MULTIPATH) ||
7913 (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) ||
7914 (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER)) {
7915 NECPLOG0(LOG_INFO, "necp_client_add, aop_offload not supported for multipath/listener");
7916 error = EINVAL;
7917 goto done;
7918 }
7919 }
7920
7921 if (parse_error == 0 && parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER &&
7922 (parsed_parameters.ip_protocol == IPPROTO_TCP || parsed_parameters.ip_protocol == IPPROTO_UDP)) {
7923 uint32_t *netns_addr = NULL;
7924 uint8_t netns_addr_len = 0;
7925 struct ns_flow_info flow_info = {};
7926 uint32_t netns_flags = NETNS_LISTENER;
7927 uuid_copy(flow_info.nfi_flow_uuid, client->client_id);
7928 flow_info.nfi_protocol = parsed_parameters.ip_protocol;
7929 flow_info.nfi_owner_pid = client->proc_pid;
7930 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID) {
7931 flow_info.nfi_effective_pid = parsed_parameters.effective_pid;
7932 } else {
7933 flow_info.nfi_effective_pid = flow_info.nfi_owner_pid;
7934 }
7935 proc_name(flow_info.nfi_owner_pid, flow_info.nfi_owner_name, MAXCOMLEN);
7936 proc_name(flow_info.nfi_effective_pid, flow_info.nfi_effective_name, MAXCOMLEN);
7937
7938 if (parsed_parameters.local_addr.sa.sa_family == AF_UNSPEC) {
7939 // Treat no local address as a wildcard IPv6
7940 // parsed_parameters is already initialized to all zeros
7941 parsed_parameters.local_addr.sin6.sin6_family = AF_INET6;
7942 parsed_parameters.local_addr.sin6.sin6_len = sizeof(struct sockaddr_in6);
7943 }
7944
7945 switch (parsed_parameters.local_addr.sa.sa_family) {
7946 case AF_INET: {
7947 memcpy(&flow_info.nfi_laddr, &parsed_parameters.local_addr.sa, parsed_parameters.local_addr.sa.sa_len);
7948 netns_addr = (uint32_t *)&parsed_parameters.local_addr.sin.sin_addr;
7949 netns_addr_len = 4;
7950 break;
7951 }
7952 case AF_INET6: {
7953 memcpy(&flow_info.nfi_laddr.sin6, &parsed_parameters.local_addr.sin6, parsed_parameters.local_addr.sa.sa_len);
7954 netns_addr = (uint32_t *)&parsed_parameters.local_addr.sin6.sin6_addr;
7955 netns_addr_len = 16;
7956 break;
7957 }
7958
7959 default: {
7960 NECPLOG(LOG_ERR, "necp_client_add listener invalid address family (%d)", parsed_parameters.local_addr.sa.sa_family);
7961 error = EINVAL;
7962 goto done;
7963 }
7964 }
7965 if ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
7966 (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_REUSE_LOCAL)) {
7967 netns_flags |= NETNS_REUSEPORT;
7968 }
7969 if (parsed_parameters.local_addr.sin.sin_port == 0) {
7970 error = netns_reserve_ephemeral(&client->port_reservation, netns_addr, netns_addr_len, parsed_parameters.ip_protocol,
7971 &parsed_parameters.local_addr.sin.sin_port, netns_flags, &flow_info);
7972 if (error) {
7973 NECPLOG(LOG_ERR, "necp_client_add netns_reserve_ephemeral error (%d)", error);
7974 goto done;
7975 }
7976
7977 // Update the parameter TLVs with the assigned port
7978 necp_client_update_local_port_parameters(client->parameters, (u_int32_t)client->parameters_length, parsed_parameters.local_addr.sin.sin_port);
7979 } else {
7980 error = netns_reserve(&client->port_reservation, netns_addr, netns_addr_len, parsed_parameters.ip_protocol,
7981 parsed_parameters.local_addr.sin.sin_port, netns_flags, &flow_info);
7982 if (error) {
7983 NECPLOG(LOG_ERR, "necp_client_add netns_reserve error (%d)", error);
7984 goto done;
7985 }
7986 }
7987 }
7988
7989 struct necp_client *parent = NULL;
7990 uuid_t parent_client_id;
7991 uuid_clear(parent_client_id);
7992 struct necp_client_nexus_parameters parent_parameters = {};
7993 uint16_t num_flow_regs = 0;
7994 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID) {
7995 // The parent "should" be found on fd_data without having to search across the whole necp_fd_list
7996 // It would be nice to do this a little further down where there's another instance of NECP_FD_LOCK
7997 // but the logic here depends on the parse paramters
7998 NECP_FD_LOCK(fd_data);
7999 parent = necp_client_fd_find_client_unlocked(fd_data, parsed_parameters.parent_uuid);
8000 if (parent != NULL) {
8001 necp_client_inherit_from_parent(client, parent);
8002 necp_client_copy_parameters_locked(client, &parent_parameters);
8003 uuid_copy(parent_client_id, parsed_parameters.parent_uuid);
8004 struct necp_client_flow_registration *flow_registration = NULL;
8005 RB_FOREACH(flow_registration, _necp_client_flow_tree, &parent->flow_registrations) {
8006 num_flow_regs++;
8007 }
8008 }
8009 NECP_FD_UNLOCK(fd_data);
8010 if (parent == NULL) {
8011 NECPLOG0(LOG_ERR, "necp_client_add, no necp_client_inherit_from_parent as can't find parent on fd_data");
8012 }
8013 }
8014 if (parse_error == 0 && parent != NULL && parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN) {
8015 do {
8016 if (parsed_parameters.demux_patterns[0].len == 0) {
8017 NECPLOG0(LOG_INFO, "necp_client_add, child does not have a demux pattern");
8018 break;
8019 }
8020
8021 if (uuid_is_null(parent_client_id)) {
8022 NECPLOG0(LOG_INFO, "necp_client_add, parent ID is null");
8023 break;
8024 }
8025
8026 if (num_flow_regs > 1) {
8027 NECPLOG0(LOG_INFO, "necp_client_add, multiple parent flows not supported");
8028 break;
8029 }
8030 if (parsed_parameters.ip_protocol != IPPROTO_UDP) {
8031 NECPLOG(LOG_INFO, "necp_client_add, flow demux pattern not supported for %d protocol",
8032 parsed_parameters.ip_protocol);
8033 break;
8034 }
8035 if (parsed_parameters.ip_protocol != parent_parameters.ip_protocol) {
8036 NECPLOG0(LOG_INFO, "necp_client_add, parent/child ip protocol mismatch");
8037 break;
8038 }
8039 if (parsed_parameters.local_addr.sa.sa_family != AF_INET && parsed_parameters.local_addr.sa.sa_family != AF_INET6) {
8040 NECPLOG(LOG_INFO, "necp_client_add, flow demux pattern not supported for %d family",
8041 parsed_parameters.local_addr.sa.sa_family);
8042 break;
8043 }
8044 if (parsed_parameters.local_addr.sa.sa_family != parsed_parameters.remote_addr.sa.sa_family) {
8045 NECPLOG0(LOG_INFO, "necp_client_add, local/remote address family mismatch");
8046 break;
8047 }
8048 if (parsed_parameters.local_addr.sa.sa_family != parent_parameters.local_addr.sa.sa_family) {
8049 NECPLOG0(LOG_INFO, "necp_client_add, parent/child address family mismatch");
8050 break;
8051 }
8052 if (SOCKADDR_CMP(&parsed_parameters.local_addr.sa, &parent_parameters.local_addr.sa, parsed_parameters.local_addr.sa.sa_len)) {
8053 NECPLOG0(LOG_INFO, "necp_client_add, parent/child local address mismatch");
8054 break;
8055 }
8056 if (SOCKADDR_CMP(&parsed_parameters.remote_addr.sa, &parent_parameters.remote_addr.sa, parsed_parameters.remote_addr.sa.sa_len)) {
8057 NECPLOG0(LOG_INFO, "necp_client_add, parent/child remote address mismatch");
8058 break;
8059 }
8060 if (parsed_parameters.local_addr.sin.sin_port != parent_parameters.local_addr.sin.sin_port) {
8061 NECPLOG0(LOG_INFO, "necp_client_add, parent/child local port mismatch");
8062 break;
8063 }
8064 if (parsed_parameters.remote_addr.sin.sin_port != parent_parameters.remote_addr.sin.sin_port) {
8065 NECPLOG0(LOG_INFO, "necp_client_add, parent/child remote port mismatch");
8066 break;
8067 }
8068 client->validated_parent = 1;
8069 uuid_copy(client->parent_client_id, parent_client_id);
8070 } while (false);
8071 }
8072
8073 #endif /* !SKYWALK */
8074
8075 necp_client_update_observer_add(client);
8076
8077 NECP_FD_LOCK(fd_data);
8078 RB_INSERT(_necp_client_tree, &fd_data->clients, client);
8079 OSIncrementAtomic(&necp_client_count);
8080 NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
8081 RB_INSERT(_necp_client_global_tree, &necp_client_global_tree, client);
8082 NECP_CLIENT_TREE_UNLOCK();
8083
8084 // Prime the client result
8085 NECP_CLIENT_LOCK(client);
8086 (void)necp_update_client_result(current_proc(), fd_data, client, NULL);
8087 necp_client_retain_locked(client);
8088 NECP_CLIENT_UNLOCK(client);
8089 NECP_FD_UNLOCK(fd_data);
8090 // Now everything is set, it's safe to plumb this in to NetworkStatistics
8091 uint32_t ntstat_properties = 0;
8092 necp_find_conn_netstat_data(client, &ntstat_properties, NULL, NULL, NULL, NULL, NULL);
8093
8094 client->nstat_context = nstat_provider_stats_open((nstat_provider_context)client,
8095 NSTAT_PROVIDER_CONN_USERLAND, (u_int64_t)ntstat_properties, necp_request_conn_netstats, necp_find_conn_extension_info);
8096 necp_client_release(client);
8097 done:
8098 if (error != 0 && client != NULL) {
8099 necp_client_free(client);
8100 client = NULL;
8101 }
8102 *retval = error;
8103
8104 return error;
8105 }
8106
// Claim an existing client, created by another process on the caller's
// behalf, and transfer it onto the calling fd. The client is matched by UUID
// across all NECP fds; it is only claimable when its delegated_upid equals
// the caller's unique PID and it has no flow registrations yet.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_claim(struct proc *p, struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	uuid_t client_id = {};
	struct necp_client *client = NULL;

	// Validate the userspace client_id pointer and length before copyin
	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_claim copyin client_id error (%d)", error);
		goto done;
	}

	// Only whole clients can be claimed, not individual flow registrations
	if (necp_client_id_is_flow(client_id)) {
		NECPLOG0(LOG_ERR, "necp_client_claim cannot claim from flow UUID");
		error = EINVAL;
		goto done;
	}

	u_int64_t upid = proc_uniqueid(p);

	// Search every fd for the client. Lock order: fd-list (shared) ->
	// per-fd lock -> per-client lock (taken by ..._find_client_and_lock).
	NECP_FD_LIST_LOCK_SHARED();

	struct necp_fd_data *find_fd = NULL;
	LIST_FOREACH(find_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(find_fd);
		struct necp_client *find_client = necp_client_fd_find_client_and_lock(find_fd, client_id);
		if (find_client != NULL) {
			// Only claim a client delegated to this process's unique
			// PID that has no flow registrations yet
			if (find_client->delegated_upid == upid &&
			    RB_EMPTY(&find_client->flow_registrations)) {
				// Matched the client to claim; remove from the old fd
				client = find_client;
				RB_REMOVE(_necp_client_tree, &find_fd->clients, client);
				// Hold a reference so the client survives once the locks drop
				necp_client_retain_locked(client);
			}
			NECP_CLIENT_UNLOCK(find_client);
		}
		NECP_FD_UNLOCK(find_fd);

		if (client != NULL) {
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();

	if (client == NULL) {
		error = ENOENT;
		goto done;
	}

	client->proc_pid = fd_data->proc_pid; // Transfer client to claiming pid
	client->agent_handle = (void *)fd_data;
	client->platform_binary = ((csproc_get_platform_binary(p) == 0) ? 0 : 1);

	NECP_CLIENT_LOG(client, "Claiming client");

	// Add matched client to our fd and re-run result
	NECP_FD_LOCK(fd_data);
	RB_INSERT(_necp_client_tree, &fd_data->clients, client);
	NECP_CLIENT_LOCK(client);
	(void)necp_update_client_result(current_proc(), fd_data, client, NULL);
	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);

	// Drop the reference taken during the search above
	necp_client_release(client);

done:
	*retval = error;

	return error;
}
8184
// Remove a client (looked up by UUID on the calling fd) and tear down all of
// its flow registrations. Userspace may optionally pass final per-flow ifnet
// stats in uap->buffer; a failed stats copyin is logged but non-fatal.
// Returns 0 on success, EINVAL for bad arguments, ENOENT if no such client.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_remove(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	uuid_t client_id = {};
	struct ifnet_stats_per_flow flow_ifnet_stats = {};
	const size_t buffer_size = uap->buffer_size;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_remove copyin client_id error (%d)", error);
		goto done;
	}

	// Stats are only accepted when the buffer is exactly the expected size
	if (uap->buffer != 0 && buffer_size == sizeof(flow_ifnet_stats)) {
		error = copyin(uap->buffer, &flow_ifnet_stats, buffer_size);
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_remove flow_ifnet_stats copyin error (%d)", error);
			// Not fatal; make sure to zero-out stats in case of partial copy
			memset(&flow_ifnet_stats, 0, sizeof(flow_ifnet_stats));
			error = 0;
		}
	} else if (uap->buffer != 0) {
		NECPLOG(LOG_ERR, "necp_client_remove unexpected parameters length (%zu)", buffer_size);
	}

	NECP_FD_LOCK(fd_data);

	pid_t pid = fd_data->proc_pid;
	struct necp_client *client = necp_client_fd_find_client_unlocked(fd_data, client_id);

	NECP_CLIENT_LOG(client, "Removing client");

	if (client != NULL) {
		// Remove any flow registrations that match
		struct necp_client_flow_registration *flow_registration = NULL;
		struct necp_client_flow_registration *temp_flow_registration = NULL;
		RB_FOREACH_SAFE(flow_registration, _necp_fd_flow_tree, &fd_data->flows, temp_flow_registration) {
			if (flow_registration->client == client) {
#if SKYWALK
				necp_destroy_flow_stats(fd_data, flow_registration, NULL, TRUE);
#endif /* SKYWALK */
				// Unlink from the global flow tree first, then from this fd
				NECP_FLOW_TREE_LOCK_EXCLUSIVE();
				RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
				NECP_FLOW_TREE_UNLOCK();
				RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
			}
		}
#if SKYWALK
		if (client->nstat_context != NULL) {
			// Main path, we expect stats to be in existance at this point
			nstat_provider_stats_close(client->nstat_context);
			client->nstat_context = NULL;
		} else {
			NECPLOG0(LOG_ERR, "necp_client_remove ntstat shutdown finds nstat_context NULL");
		}
#endif /* SKYWALK */
		// Remove client from lists
		NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
		RB_REMOVE(_necp_client_global_tree, &necp_client_global_tree, client);
		NECP_CLIENT_TREE_UNLOCK();
		RB_REMOVE(_necp_client_tree, &fd_data->clients, client);
	}

#if SKYWALK
	// If the currently-active arena is idle (has no more flows referring to it), or if there are defunct
	// arenas lingering in the list, schedule a threadcall to do the clean up. The idle check is done
	// by checking if the reference count is 3: one held by this client (will be released below when we
	// destroy it) when it's non-NULL; the rest held by stats_arena_{active,list}.
	if ((fd_data->stats_arena_active != NULL && fd_data->stats_arena_active->nai_use_count == 3) ||
	    (fd_data->stats_arena_active == NULL && !LIST_EMPTY(&fd_data->stats_arena_list))) {
		uint64_t deadline = 0;
		uint64_t leeway = 0;
		clock_interval_to_deadline(necp_close_arenas_timeout_microseconds, NSEC_PER_USEC, &deadline);
		clock_interval_to_absolutetime_interval(necp_close_arenas_timeout_leeway_microseconds, NSEC_PER_USEC, &leeway);

		thread_call_enter_delayed_with_leeway(necp_close_empty_arenas_tcall, NULL,
		    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
	}
#endif /* SKYWALK */

	NECP_FD_UNLOCK(fd_data);

	if (client != NULL) {
		ASSERT(error == 0);
		// Destroy outside the fd lock once the client is fully unlinked
		necp_destroy_client(client, pid, true);
	} else {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_remove invalid client_id (%d)", error);
	}
done:
	*retval = error;

	return error;
}
8285
8286 static struct necp_client_flow_registration *
necp_client_fd_find_flow(struct necp_fd_data * client_fd,uuid_t flow_id)8287 necp_client_fd_find_flow(struct necp_fd_data *client_fd, uuid_t flow_id)
8288 {
8289 NECP_FD_ASSERT_LOCKED(client_fd);
8290 struct necp_client_flow_registration *flow = NULL;
8291
8292 if (necp_client_id_is_flow(flow_id)) {
8293 struct necp_client_flow_registration find;
8294 uuid_copy(find.registration_id, flow_id);
8295 flow = RB_FIND(_necp_fd_flow_tree, &client_fd->flows, &find);
8296 }
8297
8298 return flow;
8299 }
8300
8301 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_remove_flow(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)8302 necp_client_remove_flow(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
8303 {
8304 int error = 0;
8305 uuid_t flow_id = {};
8306 struct ifnet_stats_per_flow flow_ifnet_stats = {};
8307 const size_t buffer_size = uap->buffer_size;
8308
8309 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
8310 error = EINVAL;
8311 NECPLOG(LOG_ERR, "necp_client_remove_flow invalid client_id (length %zu)", (size_t)uap->client_id_len);
8312 goto done;
8313 }
8314
8315 error = copyin(uap->client_id, flow_id, sizeof(uuid_t));
8316 if (error) {
8317 NECPLOG(LOG_ERR, "necp_client_remove_flow copyin client_id error (%d)", error);
8318 goto done;
8319 }
8320
8321 if (uap->buffer != 0 && buffer_size != 0) {
8322 error = copyin(uap->buffer, &flow_ifnet_stats, MIN(buffer_size, sizeof(flow_ifnet_stats)));
8323 if (error) {
8324 NECPLOG(LOG_ERR, "necp_client_remove flow_ifnet_stats copyin error (%d)", error);
8325 // Not fatal
8326 }
8327 } else if (uap->buffer != 0) {
8328 NECPLOG(LOG_ERR, "necp_client_remove unexpected parameters length (%zu)", buffer_size);
8329 }
8330
8331 NECP_FD_LOCK(fd_data);
8332 struct necp_client *client = NULL;
8333 struct necp_client_flow_registration *flow_registration = necp_client_fd_find_flow(fd_data, flow_id);
8334 if (flow_registration != NULL) {
8335 #if SKYWALK
8336 // Cleanup stats per flow
8337 necp_destroy_flow_stats(fd_data, flow_registration, &flow_ifnet_stats, TRUE);
8338 #endif /* SKYWALK */
8339 NECP_FLOW_TREE_LOCK_EXCLUSIVE();
8340 RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
8341 NECP_FLOW_TREE_UNLOCK();
8342 RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
8343
8344 client = flow_registration->client;
8345 if (client != NULL) {
8346 necp_client_retain(client);
8347 }
8348 }
8349 NECP_FD_UNLOCK(fd_data);
8350
8351 NECP_CLIENT_FLOW_LOG(client, flow_registration, "removing flow");
8352
8353 if (flow_registration != NULL && client != NULL) {
8354 NECP_CLIENT_LOCK(client);
8355 if (flow_registration->client == client) {
8356 bool abort = (flow_registration->aop_offload) ? true : false;
8357 necp_destroy_client_flow_registration(client, flow_registration, fd_data->proc_pid, abort);
8358 }
8359 necp_client_release_locked(client);
8360 NECP_CLIENT_UNLOCK(client);
8361 }
8362
8363 done:
8364 *retval = error;
8365 if (error != 0) {
8366 NECPLOG(LOG_ERR, "Remove flow error (%d)", error);
8367 }
8368
8369 return error;
8370 }
8371
// Don't inline the function since it includes necp_client_parsed_parameters on the stack
//
// Consult the TCP heuristics (ECN and TFO) for the client's current route and
// the flow's local/remote addresses. On success, NECP_CLIENT_RESULT_FLAG_ECN_ENABLED
// and/or NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED may be set in *flags, and a
// TFO cookie of up to tfo_cookie_maxlen bytes may be written to tfo_cookie
// with its length in *tfo_cookie_len (0 when no cookie is available).
// Returns 0, a parse error, EINVAL for non-IP addresses, or ENOENT when the
// client has no current route.
static __attribute__((noinline)) int
necp_client_check_tcp_heuristics(struct necp_client *client, struct necp_client_flow *flow,
    u_int32_t *flags, u_int8_t *__counted_by(tfo_cookie_maxlen) tfo_cookie, u_int8_t tfo_cookie_maxlen,
    u_int8_t *tfo_cookie_len)
{
	struct necp_client_parsed_parameters parsed_parameters;
	int error = 0;

	error = necp_client_parse_parameters(client, client->parameters,
	    (u_int32_t)client->parameters_length,
	    &parsed_parameters);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_parse_parameters error (%d)", error);
		return error;
	}

	// Heuristics only apply to IPv4/IPv6 flows
	if ((flow->remote_addr.sa.sa_family != AF_INET &&
	    flow->remote_addr.sa.sa_family != AF_INET6) ||
	    (flow->local_addr.sa.sa_family != AF_INET &&
	    flow->local_addr.sa.sa_family != AF_INET6)) {
		return EINVAL;
	}

	NECP_CLIENT_ROUTE_LOCK(client);

	if (client->current_route == NULL) {
		error = ENOENT;
		goto do_unlock;
	}

	// Decide whether to consult the ECN heuristic: an explicit per-client
	// enable/disable parameter wins; otherwise fall back to the global
	// tcp_ecn setting
	bool check_ecn = false;
	do {
		if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE) ==
		    NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE) {
			check_ecn = true;
			break;
		}

		if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE) ==
		    NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE) {
			break;
		}

		if (tcp_ecn == 1) {
			check_ecn = true;
		}
	} while (false);

	if (check_ecn) {
		if (tcp_heuristic_do_ecn_with_address(client->current_route->rt_ifp,
		    (union sockaddr_in_4_6 *)&flow->local_addr)) {
			*flags |= NECP_CLIENT_RESULT_FLAG_ECN_ENABLED;
		}
	}

	// TFO is only attempted when the client explicitly enabled it; the
	// heuristic may still veto it, in which case no cookie is returned
	if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE) ==
	    NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE) {
		if (!tcp_heuristic_do_tfo_with_address(client->current_route->rt_ifp,
		    (union sockaddr_in_4_6 *)&flow->local_addr,
		    (union sockaddr_in_4_6 *)&flow->remote_addr,
		    tfo_cookie, tfo_cookie_maxlen, tfo_cookie_len)) {
			*flags |= NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED;
			*tfo_cookie_len = 0;
		}
	} else {
		*flags |= NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED;
		*tfo_cookie_len = 0;
	}
do_unlock:
	NECP_CLIENT_ROUTE_UNLOCK(client);

	return error;
}
8446
8447 static size_t
necp_client_calculate_flow_tlv_size(struct necp_client_flow_registration * flow_registration)8448 necp_client_calculate_flow_tlv_size(struct necp_client_flow_registration *flow_registration)
8449 {
8450 size_t assigned_results_size = 0;
8451 struct necp_client_flow *flow = NULL;
8452 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
8453 if (flow->assigned || flow_registration->defunct || !necp_client_endpoint_is_unspecified((struct necp_client_endpoint *)&flow->remote_addr)) {
8454 size_t header_length = 0;
8455 if (flow->nexus) {
8456 header_length = sizeof(struct necp_client_nexus_flow_header);
8457 } else {
8458 header_length = sizeof(struct necp_client_flow_header);
8459 }
8460 assigned_results_size += (header_length + flow->assigned_results_length);
8461
8462 if (flow->has_protoctl_event) {
8463 assigned_results_size += sizeof(struct necp_client_flow_protoctl_event_header);
8464 }
8465 }
8466 }
8467 return assigned_results_size;
8468 }
8469
// Resolve the link-layer (Ethernet) destination MAC address for a remote
// address scoped to the given interface index. For indirect routes, the
// gateway route is resolved first and its link-layer info is used. Returns 0
// and fills *remote_mac on success; ENOENT when no usable route or
// link-layer entry exists; otherwise the route_to_gwroute error.
static errno_t
necp_client_destination_mac_address(struct sockaddr *remote, uint32_t index,
    struct ether_addr *remote_mac)
{
	struct rtentry *rt = NULL;
	struct rtentry *tgt_rt = NULL;
	struct rtentry *__single gwrt = NULL;
	errno_t err = 0;

	ASSERT(remote_mac != NULL);
	ASSERT(remote != NULL);

	// Scoped route lookup for the remote address; returns a referenced route
	rt = rtalloc1_scoped(remote, 0, 0, index);
	if (rt == NULL) {
		return ENOENT;
	}

	if (IS_DIRECT_HOSTROUTE(rt)) {
		tgt_rt = rt;
	} else {
		// Indirect route: resolve the gateway route, which is returned
		// locked (and referenced) on success
		err = route_to_gwroute(remote, rt, &gwrt);
		if (err != 0) {
			goto done;
		}

		ASSERT(gwrt != NULL);
		RT_LOCK_ASSERT_HELD(gwrt);
		tgt_rt = gwrt;
	}

	// Only a host route with link-layer info whose AF_LINK gateway carries
	// a full-length Ethernet address can supply the MAC
	if ((tgt_rt->rt_flags & RTF_HOST) &&
	    (tgt_rt->rt_flags & RTF_LLINFO) &&
	    (tgt_rt->rt_gateway->sa_family == AF_LINK) &&
	    (SDL(tgt_rt->rt_gateway)->sdl_alen == ETHER_ADDR_LEN)) {
		struct sockaddr_dl *__bidi_indexable sdl =
		    (struct sockaddr_dl *__bidi_indexable)SDL(tgt_rt->rt_gateway);
		bcopy(LLADDR(sdl), remote_mac->octet, ETHER_ADDR_LEN);
	} else {
		err = ENOENT;
	}
done:
	// Drop the lock and reference acquired via route_to_gwroute
	if (gwrt != NULL) {
		RT_UNLOCK(gwrt);
		rtfree(gwrt);
		gwrt = NULL;
	}

	// Drop the reference from rtalloc1_scoped
	if (rt != NULL) {
		rtfree(rt);
		rt = NULL;
	}

	return err;
}
8524
// Build a TLV buffer containing the local and/or remote Ethernet MAC
// addresses for a flow on an Ethernet interface. On success, returns a
// kalloc_data buffer (ownership transfers to the caller) and stores its size
// in *buflen. Returns NULL with *buflen == 0 when the interface cannot be
// found, is not Ethernet, or no MAC address could be gathered.
static uint8_t *
__sized_by(*buflen)
necp_client_flow_mac_and_gateway(struct necp_client_flow *flow, size_t *buflen)
{
	u_int8_t * __indexable buffer = NULL;
	u_int8_t * __indexable cursor = NULL;
	size_t valsize = 0;

	ASSERT(flow != NULL);
	ASSERT(buflen != NULL);

	*buflen = 0;

	// Translate the flow's interface index to an ifnet under the head lock
	ifnet_t ifp = NULL;
	ifnet_head_lock_shared();
	if (flow->interface_index != IFSCOPE_NONE && flow->interface_index <= if_index) {
		ifp = ifindex2ifnet[flow->interface_index];
	}
	ifnet_head_done();

	if (ifp == NULL) {
		NECPLOG0(LOG_ERR, "necp_client_flow_mac_and_gateway: ifp is NULL");
		return NULL;
	}

	// MAC TLVs only make sense on Ethernet interfaces
	if (!IFNET_IS_ETHERNET(ifp)) {
		return NULL;
	}

	// First pass: determine which TLVs can be emitted and total their size

	/* local MAC */
	struct ether_addr local_ether = {};
	bool local_ether_set = false;
	if (ifnet_lladdr_copy_bytes(ifp, local_ether.octet, ETHER_ADDR_LEN) == 0) {
		local_ether_set = true;
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct ether_addr);
	}

	/* remote MAC */
	struct ether_addr remote_ether = {};
	bool remote_ether_set = false;
	if (necp_client_destination_mac_address(SA(&flow->remote_addr),
	    flow->interface_index, &remote_ether) == 0) {
		remote_ether_set = true;
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct ether_addr);
	}

	// Nothing to report
	if (valsize == 0) {
		return NULL;
	}

	buffer = kalloc_data(valsize, Z_WAITOK | Z_ZERO);
	if (buffer == NULL) {
		return NULL;
	}

	// Second pass: serialize the TLVs into the exactly-sized buffer
	cursor = buffer;
	if (local_ether_set) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_LOCAL_ETHER_ADDR,
		    sizeof(struct ether_addr), (uint8_t *)(struct ether_addr * __bidi_indexable)&local_ether,
		    buffer, valsize);
	}
	if (remote_ether_set) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_REMOTE_ETHER_ADDR,
		    sizeof(struct ether_addr), (uint8_t *)(struct ether_addr * __bidi_indexable)&remote_ether,
		    buffer, valsize);
	}
	*buflen = valsize;
	return buffer;
}
8594
8595 static int
necp_client_fillout_flow_tlvs(struct necp_client * client,bool client_is_observed,struct necp_client_flow_registration * flow_registration,struct necp_client_action_args * uap,size_t * assigned_results_cursor)8596 necp_client_fillout_flow_tlvs(struct necp_client *client,
8597 bool client_is_observed,
8598 struct necp_client_flow_registration *flow_registration,
8599 struct necp_client_action_args *uap,
8600 size_t *assigned_results_cursor)
8601 {
8602 int error = 0;
8603 struct necp_client_flow *flow = NULL;
8604 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
8605 if (flow->assigned || flow_registration->defunct || !necp_client_endpoint_is_unspecified((struct necp_client_endpoint *)&flow->remote_addr)) {
8606 // Write TLV headers
8607 struct necp_client_nexus_flow_header header = {};
8608 u_int32_t length = 0;
8609 u_int32_t flags = 0;
8610 u_int8_t tfo_cookie_len = 0;
8611 u_int8_t type = 0;
8612 size_t buflen = 0;
8613 uint8_t *buffer = NULL;
8614
8615 type = NECP_CLIENT_RESULT_FLOW_ID;
8616 length = sizeof(header.flow_header.flow_id);
8617 header.flow_header.flow_id_tlv_header.type = type;
8618 header.flow_header.flow_id_tlv_header.length = length;
8619 uuid_copy(header.flow_header.flow_id, flow_registration->registration_id);
8620
8621 if (flow->nexus) {
8622 if (flow->check_tcp_heuristics) {
8623 u_int8_t tfo_cookie[NECP_TFO_COOKIE_LEN_MAX];
8624 tfo_cookie_len = NECP_TFO_COOKIE_LEN_MAX;
8625
8626 if (necp_client_check_tcp_heuristics(client, flow, &flags,
8627 tfo_cookie, tfo_cookie_len, &tfo_cookie_len) != 0) {
8628 tfo_cookie_len = 0;
8629 } else {
8630 flow->check_tcp_heuristics = FALSE;
8631
8632 if (tfo_cookie_len != 0) {
8633 type = NECP_CLIENT_RESULT_TFO_COOKIE;
8634 length = tfo_cookie_len;
8635 header.tfo_cookie_tlv_header.type = type;
8636 header.tfo_cookie_tlv_header.length = length;
8637 memcpy(&header.tfo_cookie_value, tfo_cookie, tfo_cookie_len);
8638 }
8639 }
8640 }
8641 }
8642
8643 size_t header_length = 0;
8644 if (flow->nexus) {
8645 if (tfo_cookie_len != 0) {
8646 header_length = sizeof(struct necp_client_nexus_flow_header) - (NECP_TFO_COOKIE_LEN_MAX - tfo_cookie_len);
8647 } else {
8648 header_length = sizeof(struct necp_client_nexus_flow_header) - sizeof(struct necp_tlv_header) - NECP_TFO_COOKIE_LEN_MAX;
8649 }
8650 } else {
8651 header_length = sizeof(struct necp_client_flow_header);
8652 }
8653
8654 type = NECP_CLIENT_RESULT_FLAGS;
8655 length = sizeof(header.flow_header.flags_value);
8656 header.flow_header.flags_tlv_header.type = type;
8657 header.flow_header.flags_tlv_header.length = length;
8658 if (flow->assigned) {
8659 flags |= NECP_CLIENT_RESULT_FLAG_FLOW_ASSIGNED;
8660 }
8661 if (flow->viable) {
8662 flags |= NECP_CLIENT_RESULT_FLAG_FLOW_VIABLE;
8663 }
8664 if (flow_registration->defunct) {
8665 flags |= NECP_CLIENT_RESULT_FLAG_DEFUNCT;
8666 }
8667 flags |= flow->necp_flow_flags;
8668 header.flow_header.flags_value = flags;
8669
8670 type = NECP_CLIENT_RESULT_INTERFACE;
8671 length = sizeof(header.flow_header.interface_value);
8672 header.flow_header.interface_tlv_header.type = type;
8673 header.flow_header.interface_tlv_header.length = length;
8674
8675 struct necp_client_result_interface interface_struct;
8676 interface_struct.generation = 0;
8677 interface_struct.index = flow->interface_index;
8678
8679 header.flow_header.interface_value = interface_struct;
8680 if (flow->nexus) {
8681 type = NECP_CLIENT_RESULT_NETAGENT;
8682 length = sizeof(header.agent_value);
8683 header.agent_tlv_header.type = type;
8684 header.agent_tlv_header.length = length;
8685
8686 struct necp_client_result_netagent agent_struct;
8687 uuid_copy(agent_struct.netagent_uuid, flow->u.nexus_agent);
8688 agent_struct.generation = netagent_get_generation(agent_struct.netagent_uuid);
8689
8690 header.agent_value = agent_struct;
8691 }
8692
8693 // Don't include outer TLV header in length field
8694 type = NECP_CLIENT_RESULT_FLOW;
8695 length = (header_length - sizeof(struct necp_tlv_header) + flow->assigned_results_length);
8696 if (flow->has_protoctl_event) {
8697 length += sizeof(struct necp_client_flow_protoctl_event_header);
8698 }
8699 if (flow->nexus && flow->aop_offload) {
8700 buffer = necp_client_flow_mac_and_gateway(flow, &buflen);
8701 length += buflen;
8702
8703 if (flow->aop_stat_index_valid) {
8704 length += sizeof(struct necp_client_flow_stats_index_header);
8705 }
8706 }
8707 header.flow_header.outer_header.type = type;
8708 header.flow_header.outer_header.length = length;
8709
8710 error = copyout(&header, uap->buffer + client->result_length + *assigned_results_cursor, header_length);
8711 if (error) {
8712 NECPLOG(LOG_ERR, "necp_client_copy assigned results tlv_header copyout error (%d)", error);
8713 return error;
8714 }
8715 *assigned_results_cursor += header_length;
8716
8717 if (flow->assigned_results && flow->assigned_results_length) {
8718 // Write inner TLVs
8719 error = copyout(flow->assigned_results, uap->buffer + client->result_length + *assigned_results_cursor,
8720 flow->assigned_results_length);
8721 if (error) {
8722 NECPLOG(LOG_ERR, "necp_client_copy assigned results copyout error (%d)", error);
8723 return error;
8724 }
8725 }
8726 *assigned_results_cursor += flow->assigned_results_length;
8727
8728 /* Read the protocol event and reset it */
8729 if (flow->has_protoctl_event) {
8730 struct necp_client_flow_protoctl_event_header protoctl_event_header = {};
8731
8732 type = NECP_CLIENT_RESULT_PROTO_CTL_EVENT;
8733 length = sizeof(protoctl_event_header.protoctl_event);
8734
8735 protoctl_event_header.protoctl_tlv_header.type = type;
8736 protoctl_event_header.protoctl_tlv_header.length = length;
8737 protoctl_event_header.protoctl_event = flow->protoctl_event;
8738
8739 error = copyout(&protoctl_event_header, uap->buffer + client->result_length + *assigned_results_cursor,
8740 sizeof(protoctl_event_header));
8741
8742 if (error) {
8743 NECPLOG(LOG_ERR, "necp_client_copy protocol control event results"
8744 " tlv_header copyout error (%d)", error);
8745 return error;
8746 }
8747 *assigned_results_cursor += sizeof(protoctl_event_header);
8748 flow->has_protoctl_event = FALSE;
8749 flow->protoctl_event.protoctl_event_code = 0;
8750 flow->protoctl_event.protoctl_event_val = 0;
8751 flow->protoctl_event.protoctl_event_tcp_seq_num = 0;
8752 }
8753
8754 if (flow->nexus && flow->aop_offload) {
8755 if (buffer != NULL) {
8756 ASSERT(buflen > 0);
8757 error = copyout(buffer, uap->buffer + client->result_length + *assigned_results_cursor,
8758 buflen);
8759 *assigned_results_cursor += buflen;
8760 kfree_data_counted_by(buffer, buflen);
8761 if (error) {
8762 NECPLOG(LOG_ERR, "necp_client_copy mac address results"
8763 " tlv_header copyout error (%d)", error);
8764 return error;
8765 }
8766 }
8767
8768 if (flow->aop_stat_index_valid) {
8769 struct necp_client_flow_stats_index_header flow_stats_header = {};
8770
8771 type = NECP_CLIENT_RESULT_FLOW_STATS_INDEX;
8772 length = sizeof(flow_stats_header.stats_index);
8773
8774 flow_stats_header.stats_index_tlv_header.type = type;
8775 flow_stats_header.stats_index_tlv_header.length = length;
8776 flow_stats_header.stats_index = flow->stats_index;
8777
8778 error = copyout(&flow_stats_header, uap->buffer +
8779 client->result_length + *assigned_results_cursor, sizeof(flow_stats_header));
8780 if (error) {
8781 NECPLOG(LOG_ERR, "necp_client_copy flow stats index "
8782 "tlv header copyout error (%d)", error);
8783 return error;
8784 }
8785 *assigned_results_cursor += sizeof(flow_stats_header);
8786 }
8787 }
8788 }
8789 }
8790 if (!client_is_observed) {
8791 flow_registration->flow_result_read = TRUE;
8792 }
8793 return 0;
8794 }
8795
/*
 * Copy a client's parameters or result TLVs out to the user buffer in uap.
 * The client lock must be held by the caller (asserted below).
 *
 * For COPY_PARAMETERS, the raw parameters blob is copied out.
 * For COPY_RESULT / COPY_UPDATED_RESULT / COPY_UPDATED_RESULT_FINAL, the
 * output layout is, in order: the client result TLVs, the assigned group
 * members, then one flow TLV section per flow registration (or only the
 * single registration when client_id names a specific flow).
 *
 * client_is_observed indicates the caller is an observer fd looking at
 * another fd's client; in that case the "read" markers are not updated.
 *
 * Returns 0 or an errno; on success *retval is the byte count written.
 */
static int
necp_client_copy_internal(struct necp_client *client, uuid_t client_id, bool client_is_observed, struct necp_client_action_args *uap, int *retval)
{
	NECP_CLIENT_ASSERT_LOCKED(client);
	int error = 0;
	// Copy results out
	if (uap->action == NECP_CLIENT_ACTION_COPY_PARAMETERS) {
		if (uap->buffer_size < client->parameters_length) {
			return EINVAL;
		}
		error = copyout(client->parameters, uap->buffer, client->parameters_length);
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_copy parameters copyout error (%d)", error);
			return error;
		}
		*retval = client->parameters_length;
	} else if ((uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT || uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) &&
	    client->result_read && client->group_members_read && !necp_client_has_unread_flows(client)) {
		// Copy updates only, but nothing to read
		// Just return 0 for bytes read
		*retval = 0;
	} else if (uap->action == NECP_CLIENT_ACTION_COPY_RESULT ||
	    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT ||
	    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) {
		// Pre-compute the total size of the flow TLVs so the buffer can
		// be validated before any copyout is attempted.
		size_t assigned_results_size = client->assigned_group_members_length;

		bool some_flow_is_defunct = false;
		struct necp_client_flow_registration *single_flow_registration = NULL;
		if (necp_client_id_is_flow(client_id)) {
			// The caller asked for one specific flow registration
			single_flow_registration = necp_client_find_flow(client, client_id);
			if (single_flow_registration != NULL) {
				assigned_results_size += necp_client_calculate_flow_tlv_size(single_flow_registration);
			}
		} else {
			// This request is for the client, so copy everything
			struct necp_client_flow_registration *flow_registration = NULL;
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
				if (flow_registration->defunct) {
					some_flow_is_defunct = true;
				}
				assigned_results_size += necp_client_calculate_flow_tlv_size(flow_registration);
			}
		}
		if (uap->buffer_size < (client->result_length + assigned_results_size)) {
			if (uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) {
				// Mark the client and all flows as read to prevent looping
				client->result_read = true;
				struct necp_client_flow_registration *flow_registration = NULL;
				RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
					flow_registration->flow_result_read = true;
				}
			}
			return EINVAL;
		}

		// Legacy clients treat the client itself as the flow: if any flow
		// went defunct, temporarily fold the DEFUNCT flag into the stored
		// client-level flags TLV, copy out, then revert the stored value.
		u_int32_t original_flags = 0;
		bool flags_updated = false;
		if (some_flow_is_defunct && client->legacy_client_is_flow) {
			// If our client expects the defunct flag in the client, add it now
			u_int32_t client_flags = 0;
			u_int32_t value_size = 0;
			u_int8_t *flags_pointer = necp_buffer_get_tlv_value(client->result, client->result_length, 0, &value_size);
			if (flags_pointer != NULL && value_size == sizeof(client_flags)) {
				memcpy(&client_flags, flags_pointer, value_size);
				original_flags = client_flags;
				client_flags |= NECP_CLIENT_RESULT_FLAG_DEFUNCT;
				(void)necp_buffer_write_tlv_if_different(client->result, NECP_CLIENT_RESULT_FLAGS,
				    sizeof(client_flags), &client_flags, &flags_updated,
				    client->result, sizeof(client->result));
			}
		}

		error = copyout(client->result, uap->buffer, client->result_length);

		if (flags_updated) {
			// Revert stored flags
			(void)necp_buffer_write_tlv_if_different(client->result, NECP_CLIENT_RESULT_FLAGS,
			    sizeof(original_flags), &original_flags, &flags_updated,
			    client->result, sizeof(client->result));
		}

		// Only check the copyout error after the stored flags have been
		// reverted, so the in-kernel result buffer is never left modified.
		if (error != 0) {
			NECPLOG(LOG_ERR, "necp_client_copy result copyout error (%d)", error);
			return error;
		}

		if (client->assigned_group_members != NULL && client->assigned_group_members_length > 0) {
			error = copyout(client->assigned_group_members, uap->buffer + client->result_length, client->assigned_group_members_length);
			if (error != 0) {
				NECPLOG(LOG_ERR, "necp_client_copy group members copyout error (%d)", error);
				return error;
			}
		}

		size_t assigned_results_cursor = client->assigned_group_members_length; // Start with an offset based on the group members
		if (necp_client_id_is_flow(client_id)) {
			if (single_flow_registration != NULL) {
				error = necp_client_fillout_flow_tlvs(client, client_is_observed, single_flow_registration, uap, &assigned_results_cursor);
				if (error != 0) {
					return error;
				}
			}
		} else {
			// This request is for the client, so copy everything
			struct necp_client_flow_registration *flow_registration = NULL;
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
				error = necp_client_fillout_flow_tlvs(client, client_is_observed, flow_registration, uap, &assigned_results_cursor);
				if (error != 0) {
					return error;
				}
			}
		}

		*retval = client->result_length + assigned_results_cursor;

		if (!client_is_observed) {
			// Mark the result as consumed so updated-result polling stops
			client->result_read = TRUE;
			client->group_members_read = TRUE;
		}
	}

	return 0;
}
8919
/*
 * NECP_CLIENT_ACTION_COPY_* entry point: look up the client named by
 * uap->client_id on this fd (or, for a null/wildcard id with the
 * COPY_RESULT/COPY_UPDATED_RESULT* actions, the first client on the fd
 * with unread state) and copy its parameters or results to user space
 * via necp_client_copy_internal().
 *
 * If no client needs an update but the fd has a pending in-process
 * flow-divert request, a single REQUEST_IN_PROCESS_FLOW_DIVERT TLV is
 * copied out instead.
 *
 * Observer fds (NECP_OPEN_FLAG_OBSERVER) may fall back to looking up
 * clients owned by other fds in the global client tree.
 */
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	uuid_clear(client_id);

	*retval = 0;

	if (uap->buffer_size == 0 || uap->buffer == 0) {
		return EINVAL;
	}

	if (uap->action != NECP_CLIENT_ACTION_COPY_PARAMETERS &&
	    uap->action != NECP_CLIENT_ACTION_COPY_RESULT &&
	    uap->action != NECP_CLIENT_ACTION_COPY_UPDATED_RESULT &&
	    uap->action != NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) {
		return EINVAL;
	}

	// client_id is optional; when absent it stays cleared and the
	// request is treated as a wildcard below.
	if (uap->client_id) {
		if (uap->client_id_len != sizeof(uuid_t)) {
			NECPLOG(LOG_ERR, "Incorrect length (got %zu, expected %zu)", (size_t)uap->client_id_len, sizeof(uuid_t));
			return ERANGE;
		}

		error = copyin(uap->client_id, client_id, sizeof(uuid_t));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_copy client_id copyin error (%d)", error);
			return error;
		}
	}

	const bool is_wildcard = (bool)uuid_is_null(client_id);

	NECP_FD_LOCK(fd_data);

	bool send_in_process_flow_divert_message = false;
	if (is_wildcard) {
		if (uap->action == NECP_CLIENT_ACTION_COPY_RESULT ||
		    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT ||
		    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) {
			// Wildcard: pick the first client on this fd with unread state
			struct necp_client *find_client = NULL;
			RB_FOREACH(find_client, _necp_client_tree, &fd_data->clients) {
				NECP_CLIENT_LOCK(find_client);
				if (!find_client->result_read || !find_client->group_members_read || necp_client_has_unread_flows(find_client)) {
					client = find_client;
					// Leave the client locked, and break
					break;
				}
				NECP_CLIENT_UNLOCK(find_client);
			}

			if (client == NULL && fd_data->request_in_process_flow_divert) {
				// No client found that needs update. Check for an event requesting in-process flow divert.
				send_in_process_flow_divert_message = true;
			}
		}
	} else {
		client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	}

	if (client != NULL) {
		if (!send_in_process_flow_divert_message) {
			// If client is set, it is locked
			error = necp_client_copy_internal(client, client_id, FALSE, uap, retval);
		}
		NECP_CLIENT_UNLOCK(client);
	}

	if (send_in_process_flow_divert_message) {
		// Consume the pending request and report it to user space as a
		// single zero-length TLV.
		fd_data->request_in_process_flow_divert = false;

		struct necp_tlv_header request_tlv = {
			.type = NECP_CLIENT_RESULT_REQUEST_IN_PROCESS_FLOW_DIVERT,
			.length = 0,
		};
		if (uap->buffer_size < sizeof(request_tlv)) {
			error = EINVAL;
		} else {
			error = copyout(&request_tlv, uap->buffer, sizeof(request_tlv));
			if (error) {
				NECPLOG(LOG_ERR, "necp_client_copy request flow divert TLV copyout error (%d)", error);
			} else {
				*retval = sizeof(request_tlv);
			}
		}
	}

	// Unlock our own fd before moving on or returning
	NECP_FD_UNLOCK(fd_data);

	// NOTE(review): fd_data->flags is read below after NECP_FD_UNLOCK;
	// this assumes the flags are immutable once the fd is open — confirm.
	if (client == NULL && !send_in_process_flow_divert_message) {
		if (fd_data->flags & NECP_OPEN_FLAG_OBSERVER) {
			// Observers are allowed to lookup clients on other fds

			// Lock tree
			NECP_CLIENT_TREE_LOCK_SHARED();

			bool found_client = FALSE;

			client = necp_find_client_and_lock(client_id);
			if (client != NULL) {
				// Matched, copy out data
				found_client = TRUE;
				error = necp_client_copy_internal(client, client_id, TRUE, uap, retval);
				NECP_CLIENT_UNLOCK(client);
			}

			// Unlock tree
			NECP_CLIENT_TREE_UNLOCK();

			// No client found, fail
			if (!found_client) {
				return ENOENT;
			}
		} else {
			// No client found, and not allowed to search other fds, fail
			return ENOENT;
		}
	}

	return error;
}
9045
9046 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_client_update(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)9047 necp_client_copy_client_update(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
9048 {
9049 int error = 0;
9050
9051 *retval = 0;
9052
9053 if (!(fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER)) {
9054 NECPLOG0(LOG_ERR, "NECP fd is not observer, cannot copy client update");
9055 return EINVAL;
9056 }
9057
9058 if (uap->client_id_len != sizeof(uuid_t) || uap->client_id == 0) {
9059 NECPLOG0(LOG_ERR, "Client id invalid, cannot copy client update");
9060 return EINVAL;
9061 }
9062
9063 if (uap->buffer_size == 0 || uap->buffer == 0) {
9064 NECPLOG0(LOG_ERR, "Buffer invalid, cannot copy client update");
9065 return EINVAL;
9066 }
9067
9068 NECP_FD_LOCK(fd_data);
9069 struct necp_client_update *client_update = TAILQ_FIRST(&fd_data->update_list);
9070 if (client_update != NULL) {
9071 TAILQ_REMOVE(&fd_data->update_list, client_update, chain);
9072 VERIFY(fd_data->update_count > 0);
9073 fd_data->update_count--;
9074 }
9075 NECP_FD_UNLOCK(fd_data);
9076
9077 if (client_update != NULL) {
9078 error = copyout(client_update->client_id, uap->client_id, sizeof(uuid_t));
9079 if (error) {
9080 NECPLOG(LOG_ERR, "Copy client update copyout client id error (%d)", error);
9081 } else {
9082 if (uap->buffer_size < client_update->update_length) {
9083 NECPLOG(LOG_ERR, "Buffer size cannot hold update (%zu < %zu)", (size_t)uap->buffer_size, client_update->update_length);
9084 error = EINVAL;
9085 } else {
9086 error = copyout(client_update->update, uap->buffer, client_update->update_length);
9087 if (error) {
9088 NECPLOG(LOG_ERR, "Copy client update copyout error (%d)", error);
9089 } else {
9090 *retval = client_update->update_length;
9091 }
9092 }
9093 }
9094
9095 necp_client_update_free(client_update);
9096 client_update = NULL;
9097 } else {
9098 error = ENOENT;
9099 }
9100
9101 return error;
9102 }
9103
9104 static int
necp_client_copy_parameters_locked(struct necp_client * client,struct necp_client_nexus_parameters * parameters)9105 necp_client_copy_parameters_locked(struct necp_client *client,
9106 struct necp_client_nexus_parameters *parameters)
9107 {
9108 VERIFY(parameters != NULL);
9109
9110 struct necp_client_parsed_parameters parsed_parameters = {};
9111 int error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, &parsed_parameters);
9112
9113 parameters->pid = client->proc_pid;
9114 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID) {
9115 parameters->epid = parsed_parameters.effective_pid;
9116 } else {
9117 parameters->epid = parameters->pid;
9118 }
9119 #if SKYWALK
9120 parameters->port_reservation = client->port_reservation;
9121 #endif /* !SKYWALK */
9122 memcpy(¶meters->local_addr, &parsed_parameters.local_addr, sizeof(parameters->local_addr));
9123 memcpy(¶meters->remote_addr, &parsed_parameters.remote_addr, sizeof(parameters->remote_addr));
9124 parameters->ip_protocol = parsed_parameters.ip_protocol;
9125 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL) {
9126 parameters->transport_protocol = parsed_parameters.transport_protocol;
9127 } else {
9128 parameters->transport_protocol = parsed_parameters.ip_protocol;
9129 }
9130 parameters->ethertype = parsed_parameters.ethertype;
9131 parameters->traffic_class = parsed_parameters.traffic_class;
9132 if (uuid_is_null(client->override_euuid)) {
9133 uuid_copy(parameters->euuid, parsed_parameters.effective_uuid);
9134 } else {
9135 uuid_copy(parameters->euuid, client->override_euuid);
9136 }
9137 parameters->is_listener = (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) ? 1 : 0;
9138 parameters->is_interpose = (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) ? 1 : 0;
9139 parameters->is_custom_ether = (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) ? 1 : 0;
9140 parameters->policy_id = client->policy_id;
9141 parameters->skip_policy_id = client->skip_policy_id;
9142
9143 // parse client result flag
9144 u_int32_t client_result_flags = 0;
9145 u_int32_t value_size = 0;
9146 u_int8_t *flags_pointer = NULL;
9147 flags_pointer = necp_buffer_get_tlv_value(client->result, client->result_length, 0, &value_size);
9148 if (flags_pointer && value_size == sizeof(client_result_flags)) {
9149 memcpy(&client_result_flags, flags_pointer, value_size);
9150 }
9151 parameters->allow_qos_marking = (client_result_flags & NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING) ? 1 : 0;
9152
9153 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE) {
9154 if (parsed_parameters.local_address_preference == NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE_DEFAULT) {
9155 parameters->override_address_selection = false;
9156 } else if (parsed_parameters.local_address_preference == NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE_TEMPORARY) {
9157 parameters->override_address_selection = true;
9158 parameters->use_stable_address = false;
9159 } else if (parsed_parameters.local_address_preference == NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE_STABLE) {
9160 parameters->override_address_selection = true;
9161 parameters->use_stable_address = true;
9162 }
9163 } else {
9164 parameters->override_address_selection = false;
9165 }
9166
9167 if ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
9168 (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_NO_WAKE_FROM_SLEEP)) {
9169 parameters->no_wake_from_sleep = true;
9170 }
9171
9172 if ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
9173 (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_REUSE_LOCAL)) {
9174 parameters->reuse_port = true;
9175 }
9176
9177 #if SKYWALK
9178 if (!parameters->is_listener) {
9179 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN) {
9180 if (parsed_parameters.demux_patterns[0].len == 0) {
9181 parameters->is_demuxable_parent = 1;
9182 } else {
9183 if (client->validated_parent) {
9184 ASSERT(!uuid_is_null(client->parent_client_id));
9185
9186 NECP_CLIENT_TREE_LOCK_SHARED();
9187 struct necp_client *parent = necp_find_client_and_lock(client->parent_client_id);
9188 if (parent != NULL) {
9189 struct necp_client_flow_registration *parent_flow_registration = NULL;
9190 RB_FOREACH(parent_flow_registration, _necp_client_flow_tree, &parent->flow_registrations) {
9191 uuid_copy(parameters->parent_flow_uuid, parent_flow_registration->registration_id);
9192 break;
9193 }
9194
9195 NECP_CLIENT_UNLOCK(parent);
9196 }
9197 NECP_CLIENT_TREE_UNLOCK();
9198
9199 if (parsed_parameters.demux_pattern_count > 0) {
9200 for (int i = 0; i < parsed_parameters.demux_pattern_count; i++) {
9201 memcpy(¶meters->demux_patterns[i], &parsed_parameters.demux_patterns[i], sizeof(struct necp_demux_pattern));
9202 }
9203 parameters->demux_pattern_count = parsed_parameters.demux_pattern_count;
9204 }
9205 }
9206 }
9207 }
9208
9209 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_EXTENDED_FLAGS) {
9210 if (parsed_parameters.extended_flags & NECP_CLIENT_PARAMETER_EXTENDED_FLAG_AOP2_OFFLOAD) {
9211 parameters->use_aop_offload = true;
9212 }
9213 }
9214 }
9215 #endif // SKYWALK
9216
9217 return error;
9218 }
9219
9220 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_list(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)9221 necp_client_list(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
9222 {
9223 int error = 0;
9224 struct necp_client *find_client = NULL;
9225 size_t copy_buffer_size = 0;
9226 uuid_t *list = NULL;
9227 u_int32_t requested_client_count = 0;
9228 u_int32_t client_count = 0;
9229
9230 if (uap->buffer_size < sizeof(requested_client_count) || uap->buffer == 0) {
9231 error = EINVAL;
9232 goto done;
9233 }
9234
9235 if (!(fd_data->flags & NECP_OPEN_FLAG_OBSERVER)) {
9236 NECPLOG0(LOG_ERR, "Client does not hold necessary entitlement to list other NECP clients");
9237 error = EACCES;
9238 goto done;
9239 }
9240
9241 error = copyin(uap->buffer, &requested_client_count, sizeof(requested_client_count));
9242 if (error) {
9243 goto done;
9244 }
9245
9246 if (os_mul_overflow(sizeof(uuid_t), requested_client_count, ©_buffer_size)) {
9247 error = ERANGE;
9248 goto done;
9249 }
9250
9251 if (uap->buffer_size - sizeof(requested_client_count) != copy_buffer_size) {
9252 error = EINVAL;
9253 goto done;
9254 }
9255
9256 if (copy_buffer_size > NECP_MAX_CLIENT_LIST_SIZE) {
9257 error = EINVAL;
9258 goto done;
9259 }
9260
9261 if (requested_client_count > 0) {
9262 list = (uuid_t*)kalloc_data(copy_buffer_size, Z_WAITOK | Z_ZERO);
9263 if (list == NULL) {
9264 error = ENOMEM;
9265 goto done;
9266 }
9267 }
9268
9269 // Lock tree
9270 NECP_CLIENT_TREE_LOCK_SHARED();
9271
9272 find_client = NULL;
9273 RB_FOREACH(find_client, _necp_client_global_tree, &necp_client_global_tree) {
9274 NECP_CLIENT_LOCK(find_client);
9275 if (!uuid_is_null(find_client->client_id)) {
9276 if (client_count < requested_client_count) {
9277 uuid_copy(list[client_count], find_client->client_id);
9278 }
9279 client_count++;
9280 }
9281 NECP_CLIENT_UNLOCK(find_client);
9282 }
9283
9284 // Unlock tree
9285 NECP_CLIENT_TREE_UNLOCK();
9286
9287 error = copyout(&client_count, uap->buffer, sizeof(client_count));
9288 if (error) {
9289 NECPLOG(LOG_ERR, "necp_client_list buffer copyout error (%d)", error);
9290 goto done;
9291 }
9292
9293 if (requested_client_count > 0 &&
9294 client_count > 0 &&
9295 list != NULL) {
9296 error = copyout(list, uap->buffer + sizeof(client_count), copy_buffer_size);
9297 if (error) {
9298 NECPLOG(LOG_ERR, "necp_client_list client count copyout error (%d)", error);
9299 goto done;
9300 }
9301 }
9302 done:
9303 if (list != NULL) {
9304 kfree_data(list, copy_buffer_size);
9305 }
9306 *retval = error;
9307
9308 return error;
9309 }
9310
9311 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_add_flow(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)9312 necp_client_add_flow(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
9313 {
9314 int error = 0;
9315 struct necp_client *client = NULL;
9316 uuid_t client_id;
9317 struct necp_client_nexus_parameters parameters = {};
9318 struct proc *proc = PROC_NULL;
9319 struct necp_client_add_flow * __indexable add_request = NULL;
9320 struct necp_client_add_flow * __indexable allocated_add_request = NULL;
9321 struct necp_client_add_flow_default default_add_request = {};
9322 const size_t buffer_size = uap->buffer_size;
9323
9324 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
9325 error = EINVAL;
9326 NECPLOG(LOG_ERR, "necp_client_add_flow invalid client_id (length %zu)", (size_t)uap->client_id_len);
9327 goto done;
9328 }
9329
9330 if (uap->buffer == 0 || buffer_size < sizeof(struct necp_client_add_flow) ||
9331 buffer_size > sizeof(struct necp_client_add_flow_default) * 4) {
9332 error = EINVAL;
9333 NECPLOG(LOG_ERR, "necp_client_add_flow invalid buffer (length %zu)", buffer_size);
9334 goto done;
9335 }
9336
9337 error = copyin(uap->client_id, client_id, sizeof(uuid_t));
9338 if (error) {
9339 NECPLOG(LOG_ERR, "necp_client_add_flow copyin client_id error (%d)", error);
9340 goto done;
9341 }
9342
9343 if (buffer_size <= sizeof(struct necp_client_add_flow_default)) {
9344 // Fits in default size
9345 error = copyin(uap->buffer, &default_add_request, buffer_size);
9346 if (error) {
9347 NECPLOG(LOG_ERR, "necp_client_add_flow copyin default_add_request error (%d)", error);
9348 goto done;
9349 }
9350
9351 add_request = (struct necp_client_add_flow *)&default_add_request;
9352 } else {
9353 allocated_add_request = (struct necp_client_add_flow *)kalloc_data(buffer_size, Z_WAITOK | Z_ZERO);
9354 if (allocated_add_request == NULL) {
9355 error = ENOMEM;
9356 goto done;
9357 }
9358
9359 error = copyin(uap->buffer, allocated_add_request, buffer_size);
9360 if (error) {
9361 NECPLOG(LOG_ERR, "necp_client_add_flow copyin default_add_request error (%d)", error);
9362 goto done;
9363 }
9364
9365 add_request = allocated_add_request;
9366 }
9367
9368 NECP_FD_LOCK(fd_data);
9369 pid_t pid = fd_data->proc_pid;
9370 proc = proc_find(pid);
9371 if (proc == PROC_NULL) {
9372 NECP_FD_UNLOCK(fd_data);
9373 NECPLOG(LOG_ERR, "necp_client_add_flow process not found for pid %d error (%d)", pid, error);
9374 error = ESRCH;
9375 goto done;
9376 }
9377
9378 client = necp_client_fd_find_client_and_lock(fd_data, client_id);
9379 if (client == NULL) {
9380 error = ENOENT;
9381 NECP_FD_UNLOCK(fd_data);
9382 goto done;
9383 }
9384
9385 // Using ADD_FLOW indicates that the client supports multiple flows per client
9386 client->legacy_client_is_flow = false;
9387
9388 necp_client_retain_locked(client);
9389 necp_client_copy_parameters_locked(client, ¶meters);
9390
9391 struct necp_client_flow_registration *new_registration = necp_client_create_flow_registration(fd_data, client);
9392 if (new_registration == NULL) {
9393 error = ENOMEM;
9394 NECP_CLIENT_UNLOCK(client);
9395 NECP_FD_UNLOCK(fd_data);
9396 NECPLOG0(LOG_ERR, "Failed to allocate flow registration");
9397 goto done;
9398 }
9399
9400 new_registration->flags = add_request->flags;
9401
9402 // If NECP_CLIENT_FLOW_FLAGS_OPEN_FLOW_ON_BEHALF_OF_CLIENT is set, then set registration_id_to_add to the old
9403 // value in add_request->registration_id, otherwise use the new value in new_registration->registration_id.
9404 bool open_flow_on_behalf_of_client = (add_request->flags & NECP_CLIENT_FLOW_FLAGS_OPEN_FLOW_ON_BEHALF_OF_CLIENT);
9405 uuid_t registration_id_to_add = {};
9406 if (open_flow_on_behalf_of_client && !uuid_is_null(add_request->registration_id)) {
9407 uuid_copy(registration_id_to_add, add_request->registration_id);
9408 } else {
9409 uuid_copy(registration_id_to_add, new_registration->registration_id);
9410 }
9411
9412 // Copy new ID out to caller
9413 uuid_copy(add_request->registration_id, new_registration->registration_id);
9414 new_registration->aop_offload = parameters.use_aop_offload;
9415
9416 NECP_CLIENT_FLOW_LOG(client, new_registration, "adding flow");
9417
9418 size_t trailer_offset = (sizeof(struct necp_client_add_flow) +
9419 add_request->stats_request_count * sizeof(struct necp_client_flow_stats));
9420
9421 // Copy override address
9422 struct sockaddr * __single override_address = NULL;
9423 if (add_request->flags & NECP_CLIENT_FLOW_FLAGS_OVERRIDE_ADDRESS) {
9424 size_t offset_of_address = trailer_offset;
9425 if (buffer_size >= offset_of_address + sizeof(struct sockaddr_in)) {
9426 override_address = flow_req_get_address(add_request, offset_of_address);
9427 if (buffer_size >= offset_of_address + override_address->sa_len &&
9428 override_address->sa_len <= sizeof(parameters.remote_addr)) {
9429 SOCKADDR_COPY(override_address, ¶meters.remote_addr, override_address->sa_len);
9430 trailer_offset += override_address->sa_len;
9431
9432 // Clear out any local address if the remote address is overridden
9433 if (parameters.remote_addr.sa.sa_family == AF_INET) {
9434 parameters.local_addr.sin.sin_family = AF_INET;
9435 parameters.local_addr.sin.sin_len = sizeof(struct sockaddr_in);
9436 parameters.local_addr.sin.sin_addr.s_addr = 0;
9437 } else if (parameters.remote_addr.sa.sa_family == AF_INET6) {
9438 parameters.local_addr.sin6.sin6_family = AF_INET6;
9439 parameters.local_addr.sin6.sin6_len = sizeof(struct sockaddr_in6);
9440 memset((uint8_t *)¶meters.local_addr.sin6.sin6_addr, 0, sizeof(struct in6_addr));
9441 parameters.local_addr.sin6.sin6_scope_id = 0;
9442 }
9443 } else {
9444 override_address = NULL;
9445 }
9446 }
9447 }
9448
9449 // Copy override IP protocol
9450 if (add_request->flags & NECP_CLIENT_FLOW_FLAGS_OVERRIDE_IP_PROTOCOL) {
9451 size_t offset_of_ip_protocol = trailer_offset;
9452 if (buffer_size >= offset_of_ip_protocol + sizeof(uint8_t)) {
9453 uint8_t * __single ip_protocol_p = flow_req_get_proto(add_request, offset_of_ip_protocol);
9454 memcpy(¶meters.ip_protocol, ip_protocol_p, sizeof(uint8_t));
9455 }
9456 }
9457
9458 // If opening the flow on behalf of the client, then replace the pid and parameters.pid with the effective PID
9459 // so that the client's PID is used for this flow instead of the PID of the process making the requests.
9460 if (open_flow_on_behalf_of_client) {
9461 parameters.pid = parameters.epid;
9462 pid = parameters.epid;
9463 }
9464
9465 #if SKYWALK
9466 if (add_request->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS) {
9467 size_t assigned_results_length = 0;
9468 void * __sized_by(assigned_results_length) assigned_results = NULL;
9469 uint32_t interface_index = 0;
9470
9471 // Validate that the nexus UUID is assigned
9472 bool found_nexus = false;
9473 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
9474 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
9475 struct necp_client_interface_option *option = &client->interface_options[option_i];
9476 if (uuid_compare(option->nexus_agent, add_request->agent_uuid) == 0) {
9477 interface_index = option->interface_index;
9478 found_nexus = true;
9479 break;
9480 }
9481 } else {
9482 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
9483 if (uuid_compare(option->nexus_agent, add_request->agent_uuid) == 0) {
9484 interface_index = option->interface_index;
9485 found_nexus = true;
9486 break;
9487 }
9488 }
9489 }
9490
9491 if (!found_nexus) {
9492 NECPLOG0(LOG_ERR, "Requested nexus not found");
9493 } else {
9494 necp_client_add_nexus_flow_if_needed(new_registration, add_request->agent_uuid, interface_index, parameters.use_aop_offload);
9495
9496 error = netagent_client_message_with_params(add_request->agent_uuid,
9497 ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
9498 client->client_id :
9499 registration_id_to_add),
9500 pid, client->agent_handle,
9501 NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS,
9502 (struct necp_client_agent_parameters *)¶meters,
9503 &assigned_results, &assigned_results_length);
9504 if (error != 0) {
9505 VERIFY(assigned_results == NULL);
9506 VERIFY(assigned_results_length == 0);
9507 NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error);
9508 } else if (assigned_results != NULL) {
9509 if (!necp_assign_client_result_locked(proc, fd_data, client, new_registration, add_request->agent_uuid,
9510 assigned_results, assigned_results_length, false, false)) {
9511 kfree_data_sized_by(assigned_results, assigned_results_length);
9512 }
9513 } else if (override_address != NULL) {
9514 // Save the overridden address in the flow. Find the correct flow,
9515 // and assign just the address TLV. Don't set the assigned flag.
9516 struct necp_client_flow *flow = NULL;
9517 LIST_FOREACH(flow, &new_registration->flow_list, flow_chain) {
9518 if (flow->nexus &&
9519 uuid_compare(flow->u.nexus_agent, add_request->agent_uuid) == 0) {
9520 if (flow->assigned_results == NULL) {
9521 SOCKADDR_COPY(override_address, &flow->remote_addr, override_address->sa_len);
9522 uuid_t empty_uuid;
9523 uuid_clear(empty_uuid);
9524 size_t message_length;
9525 void *message = necp_create_nexus_assign_message(empty_uuid, 0, NULL, 0,
9526 (struct necp_client_endpoint *)&flow->local_addr,
9527 (struct necp_client_endpoint *)&flow->remote_addr,
9528 NULL, 0, NULL, 0, &message_length);
9529 flow->assigned_results = message;
9530 flow->assigned_results_length = message_length;
9531 }
9532 break;
9533 }
9534 }
9535 }
9536 }
9537 }
9538
9539 // Don't request stats if nexus creation fails
9540 if (error == 0 && add_request->stats_request_count > 0 && necp_arena_initialize(fd_data, true) == 0) {
9541 struct necp_client_flow_stats * __single stats_request = &(necp_client_get_flow_stats(add_request))[0];
9542 struct necp_stats_bufreq bufreq = {};
9543
9544 NECP_CLIENT_FLOW_LOG(client, new_registration, "Initializing stats");
9545
9546 bufreq.necp_stats_bufreq_id = NECP_CLIENT_STATISTICS_BUFREQ_ID;
9547 bufreq.necp_stats_bufreq_type = stats_request->stats_type;
9548 bufreq.necp_stats_bufreq_ver = stats_request->stats_version;
9549 bufreq.necp_stats_bufreq_size = stats_request->stats_size;
9550 bufreq.necp_stats_bufreq_uaddr = stats_request->stats_addr;
9551 (void)necp_stats_initialize(fd_data, client, new_registration, &bufreq);
9552 stats_request->stats_type = bufreq.necp_stats_bufreq_type;
9553 stats_request->stats_version = bufreq.necp_stats_bufreq_ver;
9554 stats_request->stats_size = bufreq.necp_stats_bufreq_size;
9555 stats_request->stats_addr = bufreq.necp_stats_bufreq_uaddr;
9556 }
9557
9558 if (error == 0 && parameters.use_aop_offload) {
9559 error = necp_aop_offload_stats_initialize(
9560 new_registration, add_request->agent_uuid);
9561 }
9562 #endif /* !SKYWALK */
9563
9564 if (error == 0 &&
9565 (add_request->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE ||
9566 add_request->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) {
9567 uint32_t interface_index = IFSCOPE_NONE;
9568 ifnet_head_lock_shared();
9569 struct ifnet *interface = NULL;
9570 TAILQ_FOREACH(interface, &ifnet_head, if_link) {
9571 ifnet_lock_shared(interface);
9572 if (interface->if_agentids != NULL) {
9573 for (u_int32_t i = 0; i < interface->if_agentcount; i++) {
9574 if (uuid_compare(interface->if_agentids[i], add_request->agent_uuid) == 0) {
9575 interface_index = interface->if_index;
9576 break;
9577 }
9578 }
9579 }
9580 ifnet_lock_done(interface);
9581 if (interface_index != IFSCOPE_NONE) {
9582 break;
9583 }
9584 }
9585 ifnet_head_done();
9586
9587 necp_client_add_nexus_flow_if_needed(new_registration, add_request->agent_uuid, interface_index, parameters.use_aop_offload);
9588
9589 size_t dummy_length = 0;
9590 void * __sized_by(dummy_length) dummy_results = NULL;
9591 error = netagent_client_message_with_params(add_request->agent_uuid,
9592 ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
9593 client->client_id :
9594 new_registration->registration_id),
9595 pid, client->agent_handle,
9596 NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT,
9597 (struct necp_client_agent_parameters *)¶meters,
9598 &dummy_results, &dummy_length);
9599 if (error != 0) {
9600 NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error);
9601 }
9602 }
9603
9604 if (error != 0) {
9605 // Encountered an error in adding the flow, destroy the flow registration
9606 #if SKYWALK
9607 necp_destroy_flow_stats(fd_data, new_registration, NULL, false);
9608 #endif /* SKYWALK */
9609 NECP_FLOW_TREE_LOCK_EXCLUSIVE();
9610 RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, new_registration);
9611 NECP_FLOW_TREE_UNLOCK();
9612 RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, new_registration);
9613 necp_destroy_client_flow_registration(client, new_registration, fd_data->proc_pid, true);
9614 new_registration = NULL;
9615 }
9616
9617 NECP_CLIENT_UNLOCK(client);
9618 NECP_FD_UNLOCK(fd_data);
9619
9620 necp_client_release(client);
9621
9622 if (error != 0) {
9623 goto done;
9624 }
9625
9626 // Copy the request back out to the caller with assigned fields
9627 error = copyout(add_request, uap->buffer, buffer_size);
9628 if (error != 0) {
9629 NECPLOG(LOG_ERR, "necp_client_add_flow copyout add_request error (%d)", error);
9630 }
9631
9632 done:
9633 *retval = error;
9634 if (error != 0) {
9635 NECPLOG(LOG_ERR, "Add flow error (%d)", error);
9636 }
9637
9638 if (allocated_add_request != NULL) {
9639 kfree_data(allocated_add_request, buffer_size);
9640 }
9641
9642 if (proc != PROC_NULL) {
9643 proc_rele(proc);
9644 }
9645 return error;
9646 }
9647
9648 #if SKYWALK
9649
// Legacy NECP client action: request a nexus for an existing client.
// Clients using this action support only one flow per client, so the flow
// registration is keyed by the client ID rather than a registration ID.
// Returns 0 on success; EINVAL/ESRCH/ENOENT/ENETDOWN/ENOMEM or a netagent
// error otherwise. The error is also written to *retval.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_request_nexus(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	struct necp_client_nexus_parameters parameters = {};
	struct proc *proc = PROC_NULL;
	const size_t buffer_size = uap->buffer_size;

	// The client_id argument must carry exactly one UUID.
	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_request_nexus copyin client_id error (%d)", error);
		goto done;
	}

	NECP_FD_LOCK(fd_data);
	pid_t pid = fd_data->proc_pid;
	// Take a proc reference for the lifetime of the call; released at done.
	proc = proc_find(pid);
	if (proc == PROC_NULL) {
		NECP_FD_UNLOCK(fd_data);
		NECPLOG(LOG_ERR, "necp_client_request_nexus process not found for pid %d error (%d)", pid, error);
		error = ESRCH;
		goto done;
	}

	// Returns with the client lock held on success; fd lock is still held.
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client == NULL) {
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
		goto done;
	}

	// Using REQUEST_NEXUS indicates that the client only supports one flow per client
	client->legacy_client_is_flow = true;

	// Extra retain so the client survives until the unified release below.
	necp_client_retain_locked(client);
	necp_client_copy_parameters_locked(client, &parameters);

	// do { } while (false) lets error paths either break to the common
	// unlock/release below, or goto done after unlocking inline.
	do {
		size_t assigned_results_length = 0;
		void * __sized_by(assigned_results_length) assigned_results = NULL;
		uuid_t nexus_uuid;
		uint32_t interface_index = 0;

		// Validate that the nexus UUID is assigned
		bool found_nexus = false;
		for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
			// Options are split between a small static array and an
			// overflow array; index accordingly.
			if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
				struct necp_client_interface_option *option = &client->interface_options[option_i];
				if (!uuid_is_null(option->nexus_agent)) {
					uuid_copy(nexus_uuid, option->nexus_agent);
					interface_index = option->interface_index;
					found_nexus = true;
					break;
				}
			} else {
				struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
				if (!uuid_is_null(option->nexus_agent)) {
					uuid_copy(nexus_uuid, option->nexus_agent);
					interface_index = option->interface_index;
					found_nexus = true;
					break;
				}
			}
		}

		if (!found_nexus) {
			// No nexus agent available for this client; unlock and bail.
			NECP_CLIENT_UNLOCK(client);
			NECP_FD_UNLOCK(fd_data);
			necp_client_release(client);
			// Break the loop
			error = ENETDOWN;
			goto done;
		}

		struct necp_client_flow_registration *new_registration = necp_client_create_flow_registration(fd_data, client);
		if (new_registration == NULL) {
			error = ENOMEM;
			NECP_CLIENT_UNLOCK(client);
			NECP_FD_UNLOCK(fd_data);
			necp_client_release(client);
			NECPLOG0(LOG_ERR, "Failed to allocate flow registration");
			goto done;
		}

		new_registration->flags = (NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS | NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID);

		necp_client_add_nexus_flow_if_needed(new_registration, nexus_uuid, interface_index, parameters.use_aop_offload);

		// Note: Any clients using "request_nexus" are not flow-registration aware.
		// Register the Client ID rather than the Registration ID with the nexus, since
		// the client will send traffic based on the client ID.
		error = netagent_client_message_with_params(nexus_uuid,
		    ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
		    client->client_id :
		    new_registration->registration_id),
		    pid, client->agent_handle,
		    NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS,
		    (struct necp_client_agent_parameters *)&parameters,
		    &assigned_results, &assigned_results_length);
		if (error) {
			NECP_CLIENT_UNLOCK(client);
			NECP_FD_UNLOCK(fd_data);
			necp_client_release(client);
			// On failure the agent must not have handed back results.
			VERIFY(assigned_results == NULL);
			VERIFY(assigned_results_length == 0);
			NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error);
			goto done;
		}

		if (assigned_results != NULL) {
			// On assignment failure, ownership of the results stays here; free them.
			if (!necp_assign_client_result_locked(proc, fd_data, client, new_registration, nexus_uuid,
			    assigned_results, assigned_results_length, false, false)) {
				kfree_data_sized_by(assigned_results, assigned_results_length);
			}
		}

		// Optionally initialize stats if the caller passed a bufreq struct.
		if (uap->buffer != 0 && buffer_size == sizeof(struct necp_stats_bufreq) &&
		    necp_arena_initialize(fd_data, true) == 0) {
			struct necp_stats_bufreq bufreq = {};
			// Copy errors here are logged but do not fail the nexus request.
			int copy_error = copyin(uap->buffer, &bufreq, buffer_size);
			if (copy_error) {
				NECPLOG(LOG_ERR, "necp_client_request_nexus copyin bufreq error (%d)", copy_error);
			} else {
				(void)necp_stats_initialize(fd_data, client, new_registration, &bufreq);
				copy_error = copyout(&bufreq, uap->buffer, buffer_size);
				if (copy_error != 0) {
					NECPLOG(LOG_ERR, "necp_client_request_nexus copyout bufreq error (%d)", copy_error);
				}
			}
		}
	} while (false);

	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);

	// Drops the retain taken above (find+retain pair).
	necp_client_release(client);

done:
	*retval = error;
	if (error != 0) {
		NECPLOG(LOG_ERR, "Request nexus error (%d)", error);
	}

	if (proc != PROC_NULL) {
		proc_rele(proc);
	}
	return error;
}
9805 #endif /* !SKYWALK */
9806
9807 static void
necp_client_add_assertion(struct necp_client * client,uuid_t netagent_uuid)9808 necp_client_add_assertion(struct necp_client *client, uuid_t netagent_uuid)
9809 {
9810 struct necp_client_assertion *new_assertion = NULL;
9811
9812 new_assertion = kalloc_type(struct necp_client_assertion,
9813 Z_WAITOK | Z_NOFAIL);
9814
9815 uuid_copy(new_assertion->asserted_netagent, netagent_uuid);
9816
9817 LIST_INSERT_HEAD(&client->assertion_list, new_assertion, assertion_chain);
9818 }
9819
9820 static bool
necp_client_remove_assertion(struct necp_client * client,uuid_t netagent_uuid)9821 necp_client_remove_assertion(struct necp_client *client, uuid_t netagent_uuid)
9822 {
9823 struct necp_client_assertion * __single found_assertion = NULL;
9824 struct necp_client_assertion *search_assertion = NULL;
9825 LIST_FOREACH(search_assertion, &client->assertion_list, assertion_chain) {
9826 if (uuid_compare(search_assertion->asserted_netagent, netagent_uuid) == 0) {
9827 found_assertion = search_assertion;
9828 break;
9829 }
9830 }
9831
9832 if (found_assertion == NULL) {
9833 NECPLOG0(LOG_ERR, "Netagent uuid not previously asserted");
9834 return false;
9835 }
9836
9837 LIST_REMOVE(found_assertion, assertion_chain);
9838 kfree_type(struct necp_client_assertion, found_assertion);
9839 return true;
9840 }
9841
// NECP client action: deliver one or more agent messages on behalf of a
// client. The user buffer is a TLV stream; each recognized TLV carries an
// agent UUID (and possibly extra payload) and is translated into a
// netagent_client_message_with_params() call. Returns 0 if at least one
// agent was acted on, ENOENT if none matched, or the first failing error.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_agent_action(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	bool acted_on_agent = FALSE;
	u_int8_t *parameters = NULL;
	const size_t buffer_size = uap->buffer_size;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
	    buffer_size == 0 || uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_agent_action invalid parameters");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_agent_action copyin client_id error (%d)", error);
		goto done;
	}

	// Bound the kernel allocation for the TLV buffer.
	if (buffer_size > NECP_MAX_AGENT_ACTION_SIZE) {
		NECPLOG(LOG_ERR, "necp_client_agent_action invalid buffer size (>%u)", NECP_MAX_AGENT_ACTION_SIZE);
		error = EINVAL;
		goto done;
	}

	parameters = (u_int8_t *)kalloc_data(buffer_size, Z_WAITOK | Z_ZERO);
	if (parameters == NULL) {
		error = ENOMEM;
		goto done;
	}

	error = copyin(uap->buffer, parameters, buffer_size);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_agent_action parameters copyin error (%d)", error);
		goto done;
	}

	NECP_FD_LOCK(fd_data);
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client != NULL) {
		// Walk the TLV stream; stop at the first malformed TLV or error.
		size_t offset = 0;
		while ((offset + sizeof(struct necp_tlv_header)) <= buffer_size) {
			u_int8_t type = necp_buffer_get_tlv_type(parameters, buffer_size, offset);
			u_int32_t length = necp_buffer_get_tlv_length(parameters, buffer_size, offset);

			if (length > (buffer_size - (offset + sizeof(struct necp_tlv_header)))) {
				// If the length is larger than what can fit in the remaining parameters size, bail
				NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
				break;
			}

			// Every handled TLV begins with an agent UUID; shorter values are skipped.
			if (length >= sizeof(uuid_t)) {
				u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, buffer_size, offset, NULL);
				if (value == NULL) {
					NECPLOG0(LOG_ERR, "Invalid TLV value");
					break;
				}
				if (type == NECP_CLIENT_PARAMETER_TRIGGER_AGENT ||
				    type == NECP_CLIENT_PARAMETER_ASSERT_AGENT ||
				    type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) {
					// Trigger/assert/unassert: map the TLV type to the
					// corresponding netagent message type.
					uuid_t agent_uuid;
					uuid_copy(agent_uuid, value);
					u_int8_t netagent_message_type = 0;
					if (type == NECP_CLIENT_PARAMETER_TRIGGER_AGENT) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_TRIGGER;
					} else if (type == NECP_CLIENT_PARAMETER_ASSERT_AGENT) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT;
					} else if (type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
					}

					// Before unasserting, verify that the assertion was already taken
					if (type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) {
						if (!necp_client_remove_assertion(client, agent_uuid)) {
							error = ENOENT;
							break;
						}
					}

					struct necp_client_nexus_parameters parsed_parameters = {};
					necp_client_copy_parameters_locked(client, &parsed_parameters);
					// Results are not used for these message types.
					size_t dummy_length = 0;
					void * __sized_by(dummy_length) dummy_results = NULL;

					error = netagent_client_message_with_params(agent_uuid,
					    client_id,
					    fd_data->proc_pid,
					    client->agent_handle,
					    netagent_message_type,
					    (struct necp_client_agent_parameters *)&parsed_parameters,
					    &dummy_results, &dummy_length);
					if (error == 0) {
						acted_on_agent = TRUE;
					} else {
						break;
					}

					// Only save the assertion if the action succeeded
					if (type == NECP_CLIENT_PARAMETER_ASSERT_AGENT) {
						necp_client_add_assertion(client, agent_uuid);
					}
				} else if (type == NECP_CLIENT_PARAMETER_AGENT_ADD_GROUP_MEMBERS ||
				    type == NECP_CLIENT_PARAMETER_AGENT_REMOVE_GROUP_MEMBERS) {
					// Group membership changes: payload after the UUID is the
					// member list, passed through to the agent.
					uuid_t agent_uuid;
					uuid_copy(agent_uuid, value);
					u_int8_t netagent_message_type = 0;
					if (type == NECP_CLIENT_PARAMETER_AGENT_ADD_GROUP_MEMBERS) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_ADD_GROUP_MEMBERS;
					} else if (type == NECP_CLIENT_PARAMETER_AGENT_REMOVE_GROUP_MEMBERS) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_REMOVE_GROUP_MEMBERS;
					}

					struct necp_client_group_members group_members = {};
					group_members.group_members_length = (length - sizeof(uuid_t));
					group_members.group_members = (value + sizeof(uuid_t));
					size_t dummy_length = 0;
					void * __sized_by(dummy_length) dummy_results = NULL;
					error = netagent_client_message_with_params(agent_uuid,
					    client_id,
					    fd_data->proc_pid,
					    client->agent_handle,
					    netagent_message_type,
					    (struct necp_client_agent_parameters *)&group_members,
					    &dummy_results, &dummy_length);
					if (error == 0) {
						acted_on_agent = TRUE;
					} else {
						break;
					}
				} else if (type == NECP_CLIENT_PARAMETER_REPORT_AGENT_ERROR) {
					// Error report: optional error payload follows the UUID.
					uuid_t agent_uuid;
					uuid_copy(agent_uuid, value);
					struct necp_client_agent_parameters agent_params = {};
					if ((length - sizeof(uuid_t)) >= sizeof(agent_params.u.error.error)) {
						memcpy(&agent_params.u.error.error,
						    (value + sizeof(uuid_t)),
						    sizeof(agent_params.u.error.error));
					}
					// Force a report only the first time an agent is seen on this fd.
					bool agent_reported = false;
					for (int agent_i = 0; agent_i < NECP_FD_REPORTED_AGENT_COUNT; agent_i++) {
						if (uuid_compare(agent_uuid, fd_data->reported_agents.agent_uuid[agent_i]) == 0) {
							// Found a match, already reported
							agent_reported = true;
							break;
						}
					}
					agent_params.u.error.force_report = !agent_reported;
					if (!agent_reported) {
						// Save this agent as having been reported
						bool saved_agent_uuid = false;
						for (int agent_i = 0; agent_i < NECP_FD_REPORTED_AGENT_COUNT; agent_i++) {
							if (uuid_is_null(fd_data->reported_agents.agent_uuid[agent_i])) {
								uuid_copy(fd_data->reported_agents.agent_uuid[agent_i], agent_uuid);
								saved_agent_uuid = true;
								break;
							}
						}
						if (!saved_agent_uuid) {
							// Reported agent UUIDs full, move over and insert at the end
							for (int agent_i = 0; agent_i < NECP_FD_REPORTED_AGENT_COUNT; agent_i++) {
								if (agent_i + 1 < NECP_FD_REPORTED_AGENT_COUNT) {
									uuid_copy(fd_data->reported_agents.agent_uuid[agent_i], fd_data->reported_agents.agent_uuid[agent_i + 1]);
								} else {
									uuid_copy(fd_data->reported_agents.agent_uuid[agent_i], agent_uuid);
								}
							}
						}
					}
					size_t dummy_length = 0;
					void * __sized_by(dummy_length) dummy_results = NULL;
					error = netagent_client_message_with_params(agent_uuid,
					    client_id,
					    fd_data->proc_pid,
					    client->agent_handle,
					    NETAGENT_MESSAGE_TYPE_CLIENT_ERROR,
					    &agent_params,
					    &dummy_results, &dummy_length);
					if (error == 0) {
						acted_on_agent = TRUE;
					} else {
						break;
					}
				}
			}

			// Advance past this TLV (header + value).
			offset += sizeof(struct necp_tlv_header) + length;
		}

		NECP_CLIENT_UNLOCK(client);
	}
	NECP_FD_UNLOCK(fd_data);

	// No TLV produced a successful agent action: report ENOENT.
	if (!acted_on_agent &&
	    error == 0) {
		error = ENOENT;
	}
done:
	*retval = error;
	if (parameters != NULL) {
		kfree_data(parameters, buffer_size);
		parameters = NULL;
	}

	return error;
}
10051
10052 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_agent(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)10053 necp_client_copy_agent(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
10054 {
10055 int error = 0;
10056 uuid_t agent_uuid;
10057 const size_t buffer_size = uap->buffer_size;
10058
10059 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
10060 buffer_size == 0 || uap->buffer == 0) {
10061 NECPLOG0(LOG_ERR, "necp_client_copy_agent bad input");
10062 error = EINVAL;
10063 goto done;
10064 }
10065
10066 error = copyin(uap->client_id, agent_uuid, sizeof(uuid_t));
10067 if (error) {
10068 NECPLOG(LOG_ERR, "necp_client_copy_agent copyin agent_uuid error (%d)", error);
10069 goto done;
10070 }
10071
10072 error = netagent_copyout(agent_uuid, uap->buffer, buffer_size);
10073 if (error) {
10074 // netagent_copyout already logs appropriate errors
10075 goto done;
10076 }
10077 done:
10078 *retval = error;
10079
10080 return error;
10081 }
10082
10083 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_agent_use(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)10084 necp_client_agent_use(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
10085 {
10086 int error = 0;
10087 struct necp_client *client = NULL;
10088 uuid_t client_id;
10089 struct necp_agent_use_parameters parameters = {};
10090 const size_t buffer_size = uap->buffer_size;
10091
10092 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
10093 buffer_size != sizeof(parameters) || uap->buffer == 0) {
10094 error = EINVAL;
10095 goto done;
10096 }
10097
10098 error = copyin(uap->client_id, client_id, sizeof(uuid_t));
10099 if (error) {
10100 NECPLOG(LOG_ERR, "Copyin client_id error (%d)", error);
10101 goto done;
10102 }
10103
10104 error = copyin(uap->buffer, ¶meters, buffer_size);
10105 if (error) {
10106 NECPLOG(LOG_ERR, "Parameters copyin error (%d)", error);
10107 goto done;
10108 }
10109
10110 NECP_FD_LOCK(fd_data);
10111 client = necp_client_fd_find_client_and_lock(fd_data, client_id);
10112 if (client != NULL) {
10113 error = netagent_use(parameters.agent_uuid, ¶meters.out_use_count);
10114 NECP_CLIENT_UNLOCK(client);
10115 } else {
10116 error = ENOENT;
10117 }
10118
10119 NECP_FD_UNLOCK(fd_data);
10120
10121 if (error == 0) {
10122 error = copyout(¶meters, uap->buffer, buffer_size);
10123 if (error) {
10124 NECPLOG(LOG_ERR, "Parameters copyout error (%d)", error);
10125 goto done;
10126 }
10127 }
10128
10129 done:
10130 *retval = error;
10131
10132 return error;
10133 }
10134
10135 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_acquire_agent_token(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)10136 necp_client_acquire_agent_token(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
10137 {
10138 int error = 0;
10139 uuid_t agent_uuid = {};
10140 const size_t buffer_size = uap->buffer_size;
10141
10142 *retval = 0;
10143
10144 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
10145 buffer_size == 0 || uap->buffer == 0) {
10146 NECPLOG0(LOG_ERR, "necp_client_copy_agent bad input");
10147 error = EINVAL;
10148 goto done;
10149 }
10150
10151 error = copyin(uap->client_id, agent_uuid, sizeof(uuid_t));
10152 if (error) {
10153 NECPLOG(LOG_ERR, "necp_client_copy_agent copyin agent_uuid error (%d)", error);
10154 goto done;
10155 }
10156
10157 error = netagent_acquire_token(agent_uuid, uap->buffer, buffer_size, retval);
10158 done:
10159 return error;
10160 }
10161
// NECP client action: fill out a struct necp_interface_details for the
// interface index passed in via the "client_id" argument, and copy it to the
// caller's buffer. Gathers name, delegate, flags, signatures, default-route
// reachability, netmask/broadcast, and radio type. Returns 0 on success;
// EINVAL/ENOENT or a copyin/copyout error otherwise (mirrored in *retval).
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	u_int32_t interface_index = 0;
	struct necp_interface_details interface_details = {};

	// The "client_id" argument carries a u_int32_t interface index here.
	if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
	    uap->buffer_size < sizeof(interface_details) ||
	    uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_copy_interface bad input");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, &interface_index, sizeof(u_int32_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_copy_interface copyin interface_index error (%d)", error);
		goto done;
	}

	if (interface_index == 0) {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_copy_interface bad interface_index (%d)", interface_index);
		goto done;
	}

	// rnh_lock is taken before the ifnet head lock because the scoped
	// route lookups below use the *_locked routing functions.
	lck_mtx_lock(rnh_lock);
	ifnet_head_lock_shared();
	ifnet_t interface = NULL;
	if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
		interface = ifindex2ifnet[interface_index];
	}

	if (interface != NULL) {
		if (interface->if_xname != NULL) {
			strlcpy((char *)&interface_details.name, interface->if_xname, sizeof(interface_details.name));
		}
		interface_details.index = interface->if_index;
		interface_details.generation = ifnet_get_generation(interface);
		if (interface->if_delegated.ifp != NULL) {
			interface_details.delegate_index = interface->if_delegated.ifp->if_index;
		}
		interface_details.functional_type = if_functional_type(interface, TRUE);
		// Translate interface properties into NECP interface flags.
		if (IFNET_IS_EXPENSIVE(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_EXPENSIVE;
		}
		if (IFNET_IS_CONSTRAINED(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_CONSTRAINED;
		}
		if (IFNET_IS_ULTRA_CONSTRAINED(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_ULTRA_CONSTRAINED;
		}
		if ((interface->if_eflags & IFEF_TXSTART) == IFEF_TXSTART) {
			interface_details.flags |= NECP_INTERFACE_FLAG_TXSTART;
		}
		if ((interface->if_eflags & IFEF_NOACKPRI) == IFEF_NOACKPRI) {
			interface_details.flags |= NECP_INTERFACE_FLAG_NOACKPRI;
		}
		if ((interface->if_eflags & IFEF_3CA) == IFEF_3CA) {
			interface_details.flags |= NECP_INTERFACE_FLAG_3CARRIERAGG;
		}
		if (IFNET_IS_LOW_POWER(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_IS_LOW_POWER;
		}
		if (interface->if_xflags & IFXF_MPK_LOG) {
			interface_details.flags |= NECP_INTERFACE_FLAG_MPK_LOG;
		}
		if (interface->if_flags & IFF_MULTICAST) {
			interface_details.flags |= NECP_INTERFACE_FLAG_SUPPORTS_MULTICAST;
		}
		if (IS_INTF_CLAT46(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_HAS_NAT64;
		}
		if (interface->if_xflags & IFXF_LOW_POWER_WAKE) {
			interface_details.flags |= NECP_INTERFACE_FLAG_LOW_POWER_WAKE;
		}
		interface_details.l4s_mode = interface->if_l4s_mode;
		interface_details.mtu = interface->if_mtu;
#if SKYWALK
		fsw_get_tso_capabilities(interface, &interface_details.tso_max_segment_size_v4,
		    &interface_details.tso_max_segment_size_v6);

		interface_details.hwcsum_flags = interface->if_hwassist & IFNET_CHECKSUMF;
#endif /* SKYWALK */

		// IPv4 network signature; length is zeroed if unavailable.
		u_int8_t ipv4_signature_len = sizeof(interface_details.ipv4_signature.signature);
		u_int16_t ipv4_signature_flags;
		if (ifnet_get_netsignature(interface, AF_INET, &ipv4_signature_len, &ipv4_signature_flags,
		    (u_int8_t *)&interface_details.ipv4_signature) != 0) {
			ipv4_signature_len = 0;
		}
		interface_details.ipv4_signature.signature_len = ipv4_signature_len;

		// Check for default scoped routes for IPv4 and IPv6
		union necp_sockaddr_union default_address;
		struct rtentry *v4Route = NULL;
		memset(&default_address, 0, sizeof(default_address));
		default_address.sa.sa_family = AF_INET;
		default_address.sa.sa_len = sizeof(struct sockaddr_in);
		v4Route = rtalloc1_scoped_locked(SA(&default_address), 0, 0,
		    interface->if_index);
		if (v4Route != NULL) {
			// A CLAT46 route does not count as native IPv4 routability.
			if (v4Route->rt_ifp != NULL && !IS_INTF_CLAT46(v4Route->rt_ifp)) {
				interface_details.flags |= NECP_INTERFACE_FLAG_IPV4_ROUTABLE;
			}
			rtfree_locked(v4Route);
			v4Route = NULL;
		}

		struct rtentry *v6Route = NULL;
		memset(&default_address, 0, sizeof(default_address));
		default_address.sa.sa_family = AF_INET6;
		default_address.sa.sa_len = sizeof(struct sockaddr_in6);
		v6Route = rtalloc1_scoped_locked(SA(&default_address), 0, 0,
		    interface->if_index);
		if (v6Route != NULL) {
			if (v6Route->rt_ifp != NULL) {
				interface_details.flags |= NECP_INTERFACE_FLAG_IPV6_ROUTABLE;
			}
			rtfree_locked(v6Route);
			v6Route = NULL;
		}

		// IPv6 network signature; length is zeroed if unavailable.
		u_int8_t ipv6_signature_len = sizeof(interface_details.ipv6_signature.signature);
		u_int16_t ipv6_signature_flags;
		if (ifnet_get_netsignature(interface, AF_INET6, &ipv6_signature_len, &ipv6_signature_flags,
		    (u_int8_t *)&interface_details.ipv6_signature) != 0) {
			ipv6_signature_len = 0;
		}
		interface_details.ipv6_signature.signature_len = ipv6_signature_len;

		// Walk the address list for IPv4 netmask/broadcast; last AF_INET
		// address wins if there are several.
		ifnet_lock_shared(interface);
		struct ifaddr * __single ifa = NULL;
		TAILQ_FOREACH(ifa, &interface->if_addrhead, ifa_link) {
			IFA_LOCK(ifa);
			if (ifa->ifa_addr->sa_family == AF_INET) {
				interface_details.flags |= NECP_INTERFACE_FLAG_HAS_NETMASK;
				interface_details.ipv4_netmask = (ifatoia(ifa))->ia_sockmask.sin_addr.s_addr;
				if (interface->if_flags & IFF_BROADCAST) {
					interface_details.flags |= NECP_INTERFACE_FLAG_HAS_BROADCAST;
					interface_details.ipv4_broadcast = (ifatoia(ifa))->ia_broadaddr.sin_addr.s_addr;
				}
			}
			IFA_UNLOCK(ifa);
		}

		// Fall back to the delegate's radio type if this interface has none.
		interface_details.radio_type = interface->if_radio_type;
		if (interface_details.radio_type == 0 && interface->if_delegated.ifp) {
			interface_details.radio_type = interface->if_delegated.ifp->if_radio_type;
		}
		ifnet_lock_done(interface);
	}

	ifnet_head_done();
	lck_mtx_unlock(rnh_lock);

	// If the client is using an older version of the struct, copy that length
	error = copyout(&interface_details, uap->buffer, sizeof(interface_details));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_copy_interface copyout error (%d)", error);
		goto done;
	}
done:
	*retval = error;

	return error;
}
10330
10331 #if SKYWALK
10332
// NECP client action (SKYWALK): given an interface index (in "client_id")
// and a remote sockaddr (in the buffer), select the local address that would
// be used to reach that remote over the interface, and copy it back out.
// Uses a scoped route lookup plus flow_route_select_laddr(). Returns 0 on
// success; EINVAL/ENOENT/EMSGSIZE or a copy error otherwise (also *retval).
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_get_interface_address(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	u_int32_t interface_index = IFSCOPE_NONE;
	struct sockaddr_storage address = {};
	const size_t buffer_size = uap->buffer_size;

	// Buffer must hold at least a sockaddr_in and at most a sockaddr_storage.
	if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
	    buffer_size < sizeof(struct sockaddr_in) ||
	    buffer_size > sizeof(struct sockaddr_storage) ||
	    uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_get_interface_address bad input");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, &interface_index, sizeof(u_int32_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_get_interface_address copyin interface_index error (%d)", error);
		goto done;
	}

	if (interface_index == IFSCOPE_NONE) {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_get_interface_address bad interface_index (%d)", interface_index);
		goto done;
	}

	error = copyin(uap->buffer, &address, buffer_size);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_get_interface_address copyin address error (%d)", error);
		goto done;
	}

	// Only IPv4/IPv6 destinations are supported, and the embedded length
	// must match the buffer exactly.
	if (address.ss_family != AF_INET && address.ss_family != AF_INET6) {
		error = EINVAL;
		NECPLOG(LOG_ERR, "necp_client_get_interface_address invalid address family (%u)", address.ss_family);
		goto done;
	}

	if (address.ss_len != buffer_size) {
		error = EINVAL;
		NECPLOG(LOG_ERR, "necp_client_get_interface_address invalid address length (%u)", address.ss_len);
		goto done;
	}

	// Resolve the index to an ifnet under the head lock.
	// NOTE(review): ifp is used after the head lock is dropped — presumably
	// safe because the lookup path holds rnh/route references; confirm.
	ifnet_head_lock_shared();
	ifnet_t ifp = NULL;
	if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
		ifp = ifindex2ifnet[interface_index];
	}
	ifnet_head_done();
	if (ifp == NULL) {
		error = ENOENT;
		NECPLOG0(LOG_ERR, "necp_client_get_interface_address no matching interface found");
		goto done;
	}

	// Scoped route lookup to the destination; the route carries a reference
	// that must be released below.
	struct rtentry *rt = rtalloc1_scoped(SA(&address), 0, 0, interface_index);
	if (rt == NULL) {
		error = EINVAL;
		NECPLOG0(LOG_ERR, "necp_client_get_interface_address route lookup failed");
		goto done;
	}

	uint32_t gencount = 0;
	struct sockaddr_storage local_address = {};
	error = flow_route_select_laddr((union sockaddr_in_4_6 *)&local_address,
	    (union sockaddr_in_4_6 *)&address, ifp, rt, &gencount, 1);
	rtfree(rt);
	rt = NULL;

	if (error) {
		NECPLOG(LOG_ERR, "necp_client_get_interface_address local address selection failed (%d)", error);
		goto done;
	}

	// The selected local address must fit in the caller's buffer.
	if (local_address.ss_len > buffer_size) {
		error = EMSGSIZE;
		NECPLOG(LOG_ERR, "necp_client_get_interface_address local address too long for buffer (%u)",
		    local_address.ss_len);
		goto done;
	}

	error = copyout(&local_address, uap->buffer, local_address.ss_len);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_get_interface_address copyout error (%d)", error);
		goto done;
	}
done:
	*retval = error;

	return error;
}
10428
10429 extern const char *proc_name_address(void *p);
10430
10431 int
necp_stats_ctor(struct skmem_obj_info * oi,struct skmem_obj_info * oim,void * arg,uint32_t skmflag)10432 necp_stats_ctor(struct skmem_obj_info *oi, struct skmem_obj_info *oim,
10433 void *arg, uint32_t skmflag)
10434 {
10435 #pragma unused(arg, skmflag)
10436 struct necp_all_kstats * __single kstats = SKMEM_OBJ_ADDR(oi);
10437
10438 ASSERT(oim != NULL && SKMEM_OBJ_ADDR(oim) != NULL);
10439 ASSERT(SKMEM_OBJ_SIZE(oi) == SKMEM_OBJ_SIZE(oim));
10440
10441 kstats->necp_stats_ustats = SKMEM_OBJ_ADDR(oim);
10442
10443 return 0;
10444 }
10445
10446 int
necp_stats_dtor(void * addr,void * arg)10447 necp_stats_dtor(void *addr, void *arg)
10448 {
10449 #pragma unused(addr, arg)
10450 struct necp_all_kstats * __single kstats = addr;
10451
10452 kstats->necp_stats_ustats = NULL;
10453
10454 return 0;
10455 }
10456
// Links a stats arena onto the fd's arena list and marks it attached.
// Caller must hold the fd lock.  The list takes its own use-count
// reference on the arena (dropped in necp_fd_remove_stats_arena()).
static void
necp_fd_insert_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai)
{
	NECP_FD_ASSERT_LOCKED(fd_data);
	// Must not already be attached, and its chain pointers must be clean.
	VERIFY(!(nai->nai_flags & NAIF_ATTACHED));
	VERIFY(nai->nai_chain.le_next == NULL && nai->nai_chain.le_prev == NULL);

	LIST_INSERT_HEAD(&fd_data->stats_arena_list, nai, nai_chain);
	nai->nai_flags |= NAIF_ATTACHED;
	necp_arena_info_retain(nai); // for the list
}
10468
// Unlinks a stats arena from the fd's arena list.  The chain pointers are
// cleared so a later insert's VERIFY checks hold, and the list's reference
// is dropped last — that release may free the arena outright.
static void
necp_fd_remove_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai)
{
#pragma unused(fd_data)
	NECP_FD_ASSERT_LOCKED(fd_data);
	VERIFY(nai->nai_flags & NAIF_ATTACHED);
	VERIFY(nai->nai_use_count >= 1);

	LIST_REMOVE(nai, nai_chain);
	nai->nai_flags &= ~NAIF_ATTACHED;
	nai->nai_chain.le_next = NULL;
	nai->nai_chain.le_prev = NULL;
	necp_arena_info_release(nai); // for the list
}
10483
// Redirects the fd's currently-active stats arena mapping away from the
// process `proc' and detaches the arena from the active slot.  Returns the
// redirected arena (still held on the fd's arena list) or NULL if no arena
// was active.  Caller must hold the fd lock.
static struct necp_arena_info *
necp_fd_mredirect_stats_arena(struct necp_fd_data *fd_data, struct proc *proc)
{
	struct necp_arena_info *nai, *nai_ret = NULL;

	NECP_FD_ASSERT_LOCKED(fd_data);

	// Redirect currently-active stats arena and remove it from the active state;
	// upon process resumption, new flow request would trigger the creation of
	// another active arena.
	if ((nai = fd_data->stats_arena_active) != NULL) {
		boolean_t need_defunct = FALSE;

		// Active arena must be mapped, healthy, and referenced by both
		// the fd and the list (hence use count >= 2).
		ASSERT(!(nai->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT)));
		VERIFY(nai->nai_use_count >= 2);
		ASSERT(nai->nai_arena != NULL);
		ASSERT(nai->nai_mmap.ami_mapref != NULL);

		int err = skmem_arena_mredirect(nai->nai_arena, &nai->nai_mmap, proc, &need_defunct);
		VERIFY(err == 0);
		// must be TRUE since we don't mmap the arena more than once
		VERIFY(need_defunct == TRUE);

		nai->nai_flags |= NAIF_REDIRECT;
		nai_ret = nai; // return to caller

		necp_arena_info_release(nai); // for fd_data
		fd_data->stats_arena_active = nai = NULL;
	}

#if (DEVELOPMENT || DEBUG)
	// make sure this list now contains nothing but redirected/defunct arenas
	LIST_FOREACH(nai, &fd_data->stats_arena_list, nai_chain) {
		ASSERT(nai->nai_use_count >= 1);
		ASSERT(nai->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT));
	}
#endif /* (DEVELOPMENT || DEBUG) */

	return nai_ret;
}
10524
10525 static void
necp_arena_info_retain(struct necp_arena_info * nai)10526 necp_arena_info_retain(struct necp_arena_info *nai)
10527 {
10528 nai->nai_use_count++;
10529 VERIFY(nai->nai_use_count != 0);
10530 }
10531
10532 static void
necp_arena_info_release(struct necp_arena_info * nai)10533 necp_arena_info_release(struct necp_arena_info *nai)
10534 {
10535 VERIFY(nai->nai_use_count > 0);
10536 if (--nai->nai_use_count == 0) {
10537 necp_arena_info_free(nai);
10538 }
10539 }
10540
10541 static struct necp_arena_info *
necp_arena_info_alloc(void)10542 necp_arena_info_alloc(void)
10543 {
10544 return zalloc_flags(necp_arena_info_zone, Z_WAITOK | Z_ZERO);
10545 }
10546
// Final teardown of an arena info once its use count has reached zero and
// it is off the fd's list.  The task mapping is torn down (munmap) before
// the arena reference is released.
static void
necp_arena_info_free(struct necp_arena_info *nai)
{
	VERIFY(nai->nai_chain.le_next == NULL && nai->nai_chain.le_prev == NULL);
	VERIFY(nai->nai_use_count == 0);

	// NOTE: destroying the arena requires that all outstanding objects
	// that were allocated have been freed, else it will assert.
	if (nai->nai_arena != NULL) {
		skmem_arena_munmap(nai->nai_arena, &nai->nai_mmap);
		skmem_arena_release(nai->nai_arena);
		OSDecrementAtomic(&necp_arena_count);
		nai->nai_arena = NULL;
		nai->nai_roff = 0;
	}

	// Mapping state must be fully cleared before the zone element is freed.
	ASSERT(nai->nai_arena == NULL);
	ASSERT(nai->nai_mmap.ami_mapref == NULL);
	ASSERT(nai->nai_mmap.ami_arena == NULL);
	ASSERT(nai->nai_mmap.ami_maptask == TASK_NULL);

	zfree(necp_arena_info_zone, nai);
}
10570
// Creates a new stats arena (mirrored ustats/kstats regions, obj_cnt
// objects of obj_size each) for the fd's owning process `p', maps it into
// the task, and installs it as the fd's active stats arena on success.
// Caller must hold the fd lock and must not already have an active arena.
// Returns 0 on success or an errno value.
static int
necp_arena_create(struct necp_fd_data *fd_data, size_t obj_size, size_t obj_cnt, struct proc *p)
{
	struct skmem_region_params srp_ustats = {};
	struct skmem_region_params srp_kstats = {};
	struct necp_arena_info *nai;
	char name[32];
	const char *__null_terminated name_ptr = NULL;
	int error = 0;

	NECP_FD_ASSERT_LOCKED(fd_data);
	ASSERT(fd_data->stats_arena_active == NULL);
	ASSERT(p != PROC_NULL);
	ASSERT(proc_pid(p) == fd_data->proc_pid);

	// inherit the default parameters for the stats region
	srp_ustats = *skmem_get_default(SKMEM_REGION_USTATS);
	srp_kstats = *skmem_get_default(SKMEM_REGION_KSTATS);

	// enable multi-segment mode
	srp_ustats.srp_cflags &= ~SKMEM_REGION_CR_MONOLITHIC;
	srp_kstats.srp_cflags &= ~SKMEM_REGION_CR_MONOLITHIC;

	// configure and adjust the region parameters
	srp_ustats.srp_r_obj_cnt = srp_kstats.srp_r_obj_cnt = obj_cnt;
	srp_ustats.srp_r_obj_size = srp_kstats.srp_r_obj_size = obj_size;
	skmem_region_params_config(&srp_ustats);
	skmem_region_params_config(&srp_kstats);

	nai = necp_arena_info_alloc();

	nai->nai_proc_pid = fd_data->proc_pid;
	// Arena name encodes generation, process name, and pid for debugging.
	name_ptr = tsnprintf(name, sizeof(name), "stats-%u.%s.%d", fd_data->stats_arena_gencnt, proc_name_address(p), fd_data->proc_pid);
	nai->nai_arena = skmem_arena_create_for_necp(name_ptr, &srp_ustats, &srp_kstats, &error);
	ASSERT(nai->nai_arena != NULL || error != 0);
	if (error != 0) {
		NECPLOG(LOG_ERR, "failed to create stats arena for pid %d\n", fd_data->proc_pid);
	} else {
		OSIncrementAtomic(&necp_arena_count);

		// Get region offsets from base of mmap span; the arena
		// doesn't need to be mmap'd at this point, since we simply
		// compute the relative offset.
		nai->nai_roff = skmem_arena_get_region_offset(nai->nai_arena, SKMEM_REGION_USTATS);

		// map to the task/process; upon success, the base address of the region
		// will be returned in nai_mmap.ami_mapaddr; this can be communicated to
		// the process.
		error = skmem_arena_mmap(nai->nai_arena, p, &nai->nai_mmap);
		if (error != 0) {
			NECPLOG(LOG_ERR, "failed to map stats arena for pid %d\n", fd_data->proc_pid);
		}
	}

	if (error == 0) {
		// Install as active (one reference for fd_data, one for the list)
		// and bump the generation for the next arena's name.
		fd_data->stats_arena_active = nai;
		necp_arena_info_retain(nai); // for fd_data
		necp_fd_insert_stats_arena(fd_data, nai);
		++fd_data->stats_arena_gencnt;
	} else {
		// nai holds no references yet; free it directly.
		necp_arena_info_free(nai);
	}

	return error;
}
10636
// Allocates one kstats object (with its mirrored userland ustats object)
// from the fd's active stats arena.  On success, returns through the out
// parameters: the arena the object came from, the kernel VA of the kstats
// object, and the object's offset relative to the arena's mmap base (the
// ustats mirror sits at the same offset).  `cansleep' selects whether the
// cache allocation may block.  Returns 0 or ENOMEM.
static int
necp_arena_stats_obj_alloc(struct necp_fd_data *fd_data,
    mach_vm_offset_t *off,
    struct necp_arena_info **stats_arena,
    void **kstats_kaddr,
    boolean_t cansleep)
{
	struct skmem_cache *kstats_cp = NULL;
	struct skmem_obj_info kstats_oi = {};
	uint32_t ustats_obj_sz = 0;
	void *__sized_by(ustats_obj_sz) ustats_obj = NULL;
	uint32_t kstats_obj_sz = 0;
	void *__sized_by(kstats_obj_sz) kstats_obj = NULL;
	void * __indexable kstats_obj_tmp = NULL;
	struct necp_all_kstats * __single kstats = NULL;

	ASSERT(off != NULL);
	ASSERT(stats_arena != NULL && *stats_arena == NULL);
	ASSERT(kstats_kaddr != NULL && *kstats_kaddr == NULL);

	NECP_FD_ASSERT_LOCKED(fd_data);
	ASSERT(fd_data->stats_arena_active != NULL);
	ASSERT(fd_data->stats_arena_active->nai_arena != NULL);

	kstats_cp = skmem_arena_necp(fd_data->stats_arena_active->nai_arena)->arc_kstats_cache;
	if ((kstats_obj_tmp = skmem_cache_alloc(kstats_cp, (cansleep ? SKMEM_SLEEP : SKMEM_NOSLEEP))) == NULL) {
		return ENOMEM;
	}
	skmem_cache_get_obj_info(kstats_cp, kstats_obj_tmp, &kstats_oi, NULL);
	ASSERT(SKMEM_OBJ_SIZE(&kstats_oi) >= sizeof(struct necp_all_stats));
	kstats_obj = kstats_obj_tmp;
	kstats_obj_sz = SKMEM_OBJ_SIZE(&kstats_oi);

	// The ctor linked the object to its userland mirror of equal size;
	// forge a sized pointer to it so it can be zeroed here.
	kstats = (struct necp_all_kstats*)kstats_obj;
	ustats_obj = __unsafe_forge_bidi_indexable(uint8_t *, kstats->necp_stats_ustats, kstats_obj_sz);
	ustats_obj_sz = kstats_obj_sz;

	// Start both the userland mirror and the kernel copy zeroed.
	bzero(ustats_obj, ustats_obj_sz);
	bzero(&kstats->necp_stats_comm, sizeof(struct necp_all_stats));
	*stats_arena = fd_data->stats_arena_active;
	*kstats_kaddr = kstats_obj;
	// kstats and ustats are mirrored and have the same offset
	*off = fd_data->stats_arena_active->nai_roff + SKMEM_OBJ_ROFF(&kstats_oi);

	return 0;
}
10683
// Returns a stats object to its arena's kstats cache and clears the
// caller's kernel/user address bookkeeping.  Does not drop any arena
// reference the caller may hold on `stats_arena'.
static void
necp_arena_stats_obj_free(struct necp_fd_data *fd_data, struct necp_arena_info *stats_arena, void **kstats_kaddr, mach_vm_address_t *ustats_uaddr)
{
#pragma unused(fd_data)
	NECP_FD_ASSERT_LOCKED(fd_data);

	ASSERT(stats_arena != NULL);
	ASSERT(stats_arena->nai_arena != NULL);
	ASSERT(kstats_kaddr != NULL && *kstats_kaddr != NULL);
	ASSERT(ustats_uaddr != NULL);

	skmem_cache_free(skmem_arena_necp(stats_arena->nai_arena)->arc_kstats_cache, *kstats_kaddr);
	*kstats_kaddr = NULL;
	*ustats_uaddr = 0;
}
10699
10700 // This routine returns the KVA of the sysctls object, as well as the
10701 // offset of that object relative to the mmap base address for the
10702 // task/process.
10703 static void *
necp_arena_sysctls_obj(struct necp_fd_data * fd_data,mach_vm_offset_t * off,size_t * size)10704 necp_arena_sysctls_obj(struct necp_fd_data *fd_data, mach_vm_offset_t *off, size_t *size)
10705 {
10706 void * __single objaddr;
10707
10708 NECP_FD_ASSERT_LOCKED(fd_data);
10709 ASSERT(fd_data->sysctl_arena != NULL);
10710
10711 // kernel virtual address of the sysctls object
10712 objaddr = skmem_arena_system_sysctls_obj_addr(fd_data->sysctl_arena);
10713 ASSERT(objaddr != NULL);
10714
10715 // Return the relative offset of the sysctls object; there is
10716 // only 1 object in the entire sysctls region, and therefore the
10717 // object's offset is simply the region's offset in the arena.
10718 // (sysctl_mmap.ami_mapaddr + offset) is the address of this object
10719 // in the task/process.
10720 if (off != NULL) {
10721 *off = fd_data->system_sysctls_roff;
10722 }
10723
10724 if (size != NULL) {
10725 *size = skmem_arena_system_sysctls_obj_size(fd_data->sysctl_arena);
10726 ASSERT(*size != 0);
10727 }
10728
10729 return objaddr;
10730 }
10731
// Detaches stats arenas from the fd.  When `closing' is TRUE, all fd-held
// and list-held references are dropped unconditionally; when reaping
// (closing == FALSE), only arenas no longer referenced by any
// clients/flows are released.  Caller must hold the fd lock.
static void
necp_stats_arenas_destroy(struct necp_fd_data *fd_data, boolean_t closing)
{
	struct necp_arena_info *nai, *nai_tmp;

	NECP_FD_ASSERT_LOCKED(fd_data);

	// If reaping (not closing), release reference only for idle active arena; the reference
	// count must be 2 by now, when it's not being referred to by any clients/flows.
	if ((nai = fd_data->stats_arena_active) != NULL && (closing || nai->nai_use_count == 2)) {
		VERIFY(nai->nai_use_count >= 2);
		necp_arena_info_release(nai); // for fd_data
		fd_data->stats_arena_active = NULL;
	}

	// clean up any defunct arenas left in the list
	LIST_FOREACH_SAFE(nai, &fd_data->stats_arena_list, nai_chain, nai_tmp) {
		// If reaping, release reference if the list holds the last one
		if (closing || nai->nai_use_count == 1) {
			VERIFY(nai->nai_use_count >= 1);
			// callee unchains nai (and may free it)
			necp_fd_remove_stats_arena(fd_data, nai);
		}
	}
}
10757
// Tears down the fd's sysctl arena, if any: unmap from the process first,
// then drop the arena reference and clear the fd's bookkeeping.  Caller
// must hold the fd lock.
static void
necp_sysctl_arena_destroy(struct necp_fd_data *fd_data)
{
	NECP_FD_ASSERT_LOCKED(fd_data);

	// NOTE: destroying the arena requires that all outstanding objects
	// that were allocated have been freed, else it will assert.
	if (fd_data->sysctl_arena != NULL) {
		skmem_arena_munmap(fd_data->sysctl_arena, &fd_data->sysctl_mmap);
		skmem_arena_release(fd_data->sysctl_arena);
		OSDecrementAtomic(&necp_sysctl_arena_count);
		fd_data->sysctl_arena = NULL;
		fd_data->system_sysctls_roff = 0;
	}
}
10773
10774 static int
necp_arena_initialize(struct necp_fd_data * fd_data,bool locked)10775 necp_arena_initialize(struct necp_fd_data *fd_data, bool locked)
10776 {
10777 int error = 0;
10778 size_t stats_obj_size = MAX(sizeof(struct necp_all_stats), sizeof(struct necp_all_kstats));
10779
10780 if (!locked) {
10781 NECP_FD_LOCK(fd_data);
10782 }
10783 if (fd_data->stats_arena_active == NULL) {
10784 error = necp_arena_create(fd_data, stats_obj_size,
10785 NECP_MAX_PER_PROCESS_CLIENT_STATISTICS_STRUCTS,
10786 current_proc());
10787 }
10788 if (!locked) {
10789 NECP_FD_UNLOCK(fd_data);
10790 }
10791
10792 return error;
10793 }
10794
// Lazily creates the per-process sysctl arena for this fd and maps it
// into the owning task.  No-op if the arena already exists.  `locked'
// tells whether the caller already holds the fd lock.  On mapping
// failure the partially-constructed arena is destroyed again.
static int
necp_sysctl_arena_initialize(struct necp_fd_data *fd_data, bool locked)
{
	int error = 0;

	if (!locked) {
		NECP_FD_LOCK(fd_data);
	}

	NECP_FD_ASSERT_LOCKED(fd_data);

	if (fd_data->sysctl_arena == NULL) {
		char name[32];
		const char *__null_terminated name_ptr = NULL;
		struct proc *p = current_proc();

		ASSERT(p != PROC_NULL);
		ASSERT(proc_pid(p) == fd_data->proc_pid);

		// Arena name encodes the process name and pid for debugging.
		name_ptr = tsnprintf(name, sizeof(name), "sysctl.%s.%d", proc_name_address(p), fd_data->proc_pid);
		fd_data->sysctl_arena = skmem_arena_create_for_system(name_ptr, &error);
		ASSERT(fd_data->sysctl_arena != NULL || error != 0);
		if (error != 0) {
			NECPLOG(LOG_ERR, "failed to create arena for pid %d\n", fd_data->proc_pid);
		} else {
			OSIncrementAtomic(&necp_sysctl_arena_count);

			// Get region offsets from base of mmap span; the arena
			// doesn't need to be mmap'd at this point, since we simply
			// compute the relative offset.
			fd_data->system_sysctls_roff = skmem_arena_get_region_offset(fd_data->sysctl_arena, SKMEM_REGION_SYSCTLS);

			// map to the task/process; upon success, the base address of the region
			// will be returned in nai_mmap.ami_mapaddr; this can be communicated to
			// the process.
			error = skmem_arena_mmap(fd_data->sysctl_arena, p, &fd_data->sysctl_mmap);
			if (error != 0) {
				NECPLOG(LOG_ERR, "failed to map sysctl arena for pid %d\n", fd_data->proc_pid);
				necp_sysctl_arena_destroy(fd_data);
			}
		}
	}

	if (!locked) {
		NECP_FD_UNLOCK(fd_data);
	}

	return error;
}
10844
// Handles a stats buffer request from userspace: validates the request id,
// the (type, version) pair, and the requested size, allocates the shared
// mirrored stats object for the flow on first request, and reports the
// object's user address back through `bufreq'.  Repeated requests for the
// same flow return the already-allocated buffer.  On success the stats
// header type/version are stamped into the userland object and optionally
// copied to `out_header'.  Caller must hold both the client and fd locks.
static int
necp_client_stats_bufreq(struct necp_fd_data *fd_data,
    struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    struct necp_stats_bufreq *bufreq,
    struct necp_stats_hdr *out_header)
{
	int error = 0;
	NECP_CLIENT_ASSERT_LOCKED(client);
	NECP_FD_ASSERT_LOCKED(fd_data);

	// Accept only known (type, current-version) combinations and the
	// exact necp_all_stats size.
	if ((bufreq->necp_stats_bufreq_id == NECP_CLIENT_STATISTICS_BUFREQ_ID) &&
	    ((bufreq->necp_stats_bufreq_type == NECP_CLIENT_STATISTICS_TYPE_TCP &&
	    bufreq->necp_stats_bufreq_ver == NECP_CLIENT_STATISTICS_TYPE_TCP_CURRENT_VER) ||
	    (bufreq->necp_stats_bufreq_type == NECP_CLIENT_STATISTICS_TYPE_UDP &&
	    bufreq->necp_stats_bufreq_ver == NECP_CLIENT_STATISTICS_TYPE_UDP_CURRENT_VER) ||
	    (bufreq->necp_stats_bufreq_type == NECP_CLIENT_STATISTICS_TYPE_QUIC &&
	    bufreq->necp_stats_bufreq_ver == NECP_CLIENT_STATISTICS_TYPE_QUIC_CURRENT_VER)) &&
	    (bufreq->necp_stats_bufreq_size == sizeof(struct necp_all_stats))) {
		// There should be one and only one stats allocation per client.
		// If asked more than once, we just repeat ourselves.
		if (flow_registration->ustats_uaddr == 0) {
			mach_vm_offset_t off;
			ASSERT(flow_registration->stats_arena == NULL);
			ASSERT(flow_registration->kstats_kaddr == NULL);
			ASSERT(flow_registration->ustats_uaddr == 0);
			error = necp_arena_stats_obj_alloc(fd_data, &off, &flow_registration->stats_arena, &flow_registration->kstats_kaddr, FALSE);
			if (error == 0) {
				// upon success, hold a reference for the client; this is released when the client is removed/closed
				ASSERT(flow_registration->stats_arena != NULL);
				necp_arena_info_retain(flow_registration->stats_arena);

				// compute user address based on mapping info and object offset
				flow_registration->ustats_uaddr = flow_registration->stats_arena->nai_mmap.ami_mapaddr + off;

				// add to collect_stats list
				NECP_STATS_LIST_LOCK_EXCLUSIVE();
				necp_client_retain_locked(client); // Add a reference to the client
				LIST_INSERT_HEAD(&necp_collect_stats_flow_list, flow_registration, collect_stats_chain);
				NECP_STATS_LIST_UNLOCK();
				necp_schedule_collect_stats_clients(FALSE);
			} else {
				ASSERT(flow_registration->stats_arena == NULL);
				ASSERT(flow_registration->kstats_kaddr == NULL);
			}
		}
		if (flow_registration->ustats_uaddr != 0) {
			ASSERT(error == 0);
			ASSERT(flow_registration->stats_arena != NULL);
			ASSERT(flow_registration->kstats_kaddr != NULL);

			// Stamp type/version into the userland mirror's header.
			struct necp_all_kstats *kstats = (struct necp_all_kstats *)flow_registration->kstats_kaddr;
			kstats->necp_stats_ustats->all_stats_u.tcp_stats.necp_tcp_hdr.necp_stats_type = bufreq->necp_stats_bufreq_type;
			kstats->necp_stats_ustats->all_stats_u.tcp_stats.necp_tcp_hdr.necp_stats_ver = bufreq->necp_stats_bufreq_ver;

			if (out_header) {
				out_header->necp_stats_type = bufreq->necp_stats_bufreq_type;
				out_header->necp_stats_ver = bufreq->necp_stats_bufreq_ver;
			}

			bufreq->necp_stats_bufreq_uaddr = flow_registration->ustats_uaddr;
		}
	} else {
		error = EINVAL;
	}

	return error;
}
10913
10914 static int
necp_client_stats_initial(struct necp_client_flow_registration * flow_registration,uint32_t stats_type,uint32_t stats_ver)10915 necp_client_stats_initial(struct necp_client_flow_registration *flow_registration, uint32_t stats_type, uint32_t stats_ver)
10916 {
10917 // An attempted create
10918 assert(flow_registration->stats_handler_context == NULL);
10919 assert(flow_registration->stats_arena);
10920 assert(flow_registration->ustats_uaddr);
10921 assert(flow_registration->kstats_kaddr);
10922
10923 int error = 0;
10924 uint64_t ntstat_properties = necp_find_netstat_initial_properties(flow_registration->client);
10925
10926 switch (stats_type) {
10927 case NECP_CLIENT_STATISTICS_TYPE_TCP: {
10928 if (stats_ver == NECP_CLIENT_STATISTICS_TYPE_TCP_VER_1) {
10929 flow_registration->stats_handler_context = ntstat_userland_stats_open((userland_stats_provider_context *)flow_registration,
10930 NSTAT_PROVIDER_TCP_USERLAND, ntstat_properties, necp_request_tcp_netstats, necp_find_extension_info);
10931 if (flow_registration->stats_handler_context == NULL) {
10932 error = EIO;
10933 }
10934 } else {
10935 error = ENOTSUP;
10936 }
10937 break;
10938 }
10939 case NECP_CLIENT_STATISTICS_TYPE_UDP: {
10940 if (stats_ver == NECP_CLIENT_STATISTICS_TYPE_UDP_VER_1) {
10941 flow_registration->stats_handler_context = ntstat_userland_stats_open((userland_stats_provider_context *)flow_registration,
10942 NSTAT_PROVIDER_UDP_USERLAND, ntstat_properties, necp_request_udp_netstats, necp_find_extension_info);
10943 if (flow_registration->stats_handler_context == NULL) {
10944 error = EIO;
10945 }
10946 } else {
10947 error = ENOTSUP;
10948 }
10949 break;
10950 }
10951 case NECP_CLIENT_STATISTICS_TYPE_QUIC: {
10952 if (stats_ver == NECP_CLIENT_STATISTICS_TYPE_QUIC_VER_1 && flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS) {
10953 flow_registration->stats_handler_context = ntstat_userland_stats_open((userland_stats_provider_context *)flow_registration,
10954 NSTAT_PROVIDER_QUIC_USERLAND, ntstat_properties, necp_request_quic_netstats, necp_find_extension_info);
10955 if (flow_registration->stats_handler_context == NULL) {
10956 error = EIO;
10957 }
10958 } else {
10959 error = ENOTSUP;
10960 }
10961 break;
10962 }
10963 default: {
10964 error = ENOTSUP;
10965 break;
10966 }
10967 }
10968 return error;
10969 }
10970
10971 static int
necp_stats_initialize(struct necp_fd_data * fd_data,struct necp_client * client,struct necp_client_flow_registration * flow_registration,struct necp_stats_bufreq * bufreq)10972 necp_stats_initialize(struct necp_fd_data *fd_data,
10973 struct necp_client *client,
10974 struct necp_client_flow_registration *flow_registration,
10975 struct necp_stats_bufreq *bufreq)
10976 {
10977 int error = 0;
10978 struct necp_stats_hdr stats_hdr = {};
10979
10980 NECP_CLIENT_ASSERT_LOCKED(client);
10981 NECP_FD_ASSERT_LOCKED(fd_data);
10982 VERIFY(fd_data->stats_arena_active != NULL);
10983 VERIFY(fd_data->stats_arena_active->nai_arena != NULL);
10984 VERIFY(!(fd_data->stats_arena_active->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT)));
10985
10986 if (bufreq == NULL) {
10987 return EINVAL;
10988 }
10989
10990 // Setup stats region
10991 error = necp_client_stats_bufreq(fd_data, client, flow_registration, bufreq, &stats_hdr);
10992 if (error) {
10993 return error;
10994 }
10995 // Notify ntstat about new flow
10996 if (flow_registration->stats_handler_context == NULL) {
10997 error = necp_client_stats_initial(flow_registration, stats_hdr.necp_stats_type, stats_hdr.necp_stats_ver);
10998 if (flow_registration->stats_handler_context != NULL) {
10999 ntstat_userland_stats_event(flow_registration->stats_handler_context, NECP_CLIENT_STATISTICS_EVENT_INIT);
11000 }
11001 NECP_CLIENT_FLOW_LOG(client, flow_registration, "Initialized stats <error %d>", error);
11002 }
11003
11004 return error;
11005 }
11006
11007 static int
necp_aop_offload_stats_initialize(struct necp_client_flow_registration * flow_registration,uuid_t netagent_uuid)11008 necp_aop_offload_stats_initialize(struct necp_client_flow_registration *flow_registration,
11009 uuid_t netagent_uuid)
11010 {
11011 int error = 0;
11012
11013 struct necp_client_flow *flow = NULL;
11014 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
11015 // Verify that the client nexus agent matches
11016 if (flow->nexus &&
11017 uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) {
11018 ASSERT(flow->flow_tag != 0);
11019 ASSERT(flow->aop_offload);
11020
11021 error = net_aop_setup_flow(flow->flow_tag,
11022 true, &flow->stats_index);
11023 if (error != 0) {
11024 NECPLOG(LOG_ERR, "failed to setup aop flow "
11025 "stats area, error %d", error);
11026 } else {
11027 flow->aop_stat_index_valid = true;
11028 }
11029 break;
11030 }
11031 }
11032
11033 return error;
11034 }
11035
11036 static void
necp_aop_offload_stats_destroy(struct necp_client_flow * flow)11037 necp_aop_offload_stats_destroy(struct necp_client_flow *flow)
11038 {
11039 int error = 0;
11040
11041 if (flow->flow_tag != 0 && flow->aop_stat_index_valid) {
11042 error = net_aop_setup_flow(flow->flow_tag,
11043 false, &flow->stats_index);
11044 if (error != 0) {
11045 NECPLOG(LOG_ERR, "failed to cleanup aop offload stats with error %d", error);
11046 }
11047 flow->aop_stat_index_valid = false;
11048 }
11049 return;
11050 }
11051
11052 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_map_sysctls(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11053 necp_client_map_sysctls(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11054 {
11055 int result = 0;
11056 if (!retval) {
11057 retval = &result;
11058 }
11059
11060 do {
11061 mach_vm_address_t uaddr = 0;
11062 if (uap->buffer_size != sizeof(uaddr)) {
11063 *retval = EINVAL;
11064 break;
11065 }
11066
11067 *retval = necp_sysctl_arena_initialize(fd_data, false);
11068 if (*retval != 0) {
11069 break;
11070 }
11071
11072 mach_vm_offset_t off = 0;
11073 void * __single location = NULL;
11074 NECP_FD_LOCK(fd_data);
11075 location = necp_arena_sysctls_obj(fd_data, &off, NULL);
11076 NECP_FD_UNLOCK(fd_data);
11077
11078 if (location == NULL) {
11079 *retval = ENOENT;
11080 break;
11081 }
11082
11083 uaddr = fd_data->sysctl_mmap.ami_mapaddr + off;
11084 *retval = copyout(&uaddr, uap->buffer, sizeof(uaddr));
11085 } while (false);
11086
11087 return *retval;
11088 }
11089
11090 #endif /* !SKYWALK */
11091
// Copies the nstat counters of the client's current route out to the
// caller's buffer as a struct necp_stat_counts.  RTT fields come either
// from the route's nstat counters or, when the
// necp_client_stats_use_route_metrics sysctl is set, from the route's
// own rmx metrics.  A client with no current route yields zeroed counts.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_route_statistics(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
	    uap->buffer_size < sizeof(struct necp_stat_counts) || uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_copy_route_statistics bad input");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_copy_route_statistics copyin client_id error (%d)", error);
		goto done;
	}

	// Lock
	NECP_FD_LOCK(fd_data);
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client != NULL) {
		NECP_CLIENT_ROUTE_LOCK(client);
		struct necp_stat_counts route_stats = {};
		if (client->current_route != NULL && client->current_route->rt_stats != NULL) {
			struct nstat_counts *rt_stats = client->current_route->rt_stats;
			// Packet/byte counters are updated atomically elsewhere,
			// so read them with atomic loads.
			route_stats.necp_stat_rxpackets = os_atomic_load(&rt_stats->nstat_rxpackets, relaxed);
			route_stats.necp_stat_rxbytes = os_atomic_load(&rt_stats->nstat_rxbytes, relaxed);
			route_stats.necp_stat_txpackets = os_atomic_load(&rt_stats->nstat_txpackets, relaxed);
			route_stats.necp_stat_txbytes = os_atomic_load(&rt_stats->nstat_txbytes, relaxed);
			route_stats.necp_stat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
			route_stats.necp_stat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
			route_stats.necp_stat_txretransmit = rt_stats->nstat_txretransmit;
			route_stats.necp_stat_connectattempts = rt_stats->nstat_connectattempts;
			route_stats.necp_stat_connectsuccesses = rt_stats->nstat_connectsuccesses;
			if (__probable(necp_client_stats_use_route_metrics == 0)) {
				route_stats.necp_stat_min_rtt = rt_stats->nstat_min_rtt;
				route_stats.necp_stat_avg_rtt = rt_stats->nstat_avg_rtt;
				route_stats.necp_stat_var_rtt = rt_stats->nstat_var_rtt;
			} else {
				route_stats.necp_stat_min_rtt = client->current_route->rtt_min;
				route_stats.necp_stat_avg_rtt = client->current_route->rt_rmx.rmx_rtt;
				route_stats.necp_stat_var_rtt = client->current_route->rt_rmx.rmx_rttvar;
			}
			route_stats.necp_stat_route_flags = client->current_route->rt_flags;
		}

		// Unlock before copying out
		NECP_CLIENT_ROUTE_UNLOCK(client);
		NECP_CLIENT_UNLOCK(client);
		NECP_FD_UNLOCK(fd_data);

		error = copyout(&route_stats, uap->buffer, sizeof(route_stats));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_copy_route_statistics copyout error (%d)", error);
		}
	} else {
		// Unlock
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
	}


done:
	*retval = error;
	return error;
}
11161
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_update_cache(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	// Handles NECP_CLIENT_ACTION_UPDATE_CACHE: copies a user-supplied
	// necp_cache_buffer describing ECN or TFO heuristics results and feeds
	// it into the TCP heuristics cache for the client's current route/flow.
	// Returns 0 on success or an errno; the result is also stored in *retval.
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;

	// The client ID argument must be exactly one UUID
	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_update_cache copyin client_id error (%d)", error);
		goto done;
	}

	// Look up the client under the fd lock; on success the client lock is
	// held as well, and both must be released on every exit path below.
	NECP_FD_LOCK(fd_data);
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client == NULL) {
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
		goto done;
	}

	struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
	if (flow_registration == NULL) {
		NECP_CLIENT_UNLOCK(client);
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
		goto done;
	}

	NECP_CLIENT_ROUTE_LOCK(client);
	// This needs to be changed when TFO/ECN is supported by multiple flows
	struct necp_client_flow *flow = LIST_FIRST(&flow_registration->flow_list);
	// Only IPv4/IPv6 flows carry addresses the TCP heuristics cache can key on
	if (flow == NULL ||
	    (flow->remote_addr.sa.sa_family != AF_INET &&
	    flow->remote_addr.sa.sa_family != AF_INET6) ||
	    (flow->local_addr.sa.sa_family != AF_INET &&
	    flow->local_addr.sa.sa_family != AF_INET6)) {
		error = EINVAL;
		NECPLOG(LOG_ERR, "necp_client_update_cache no flow error (%d)", error);
		goto done_unlock;
	}

	necp_cache_buffer cache_buffer;
	memset(&cache_buffer, 0, sizeof(cache_buffer));

	// The outer buffer must be exactly one necp_cache_buffer header
	if (uap->buffer_size != sizeof(necp_cache_buffer) ||
	    uap->buffer == USER_ADDR_NULL) {
		error = EINVAL;
		goto done_unlock;
	}

	error = copyin(uap->buffer, &cache_buffer, sizeof(cache_buffer));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_update_cache copyin cache buffer error (%d)", error);
		goto done_unlock;
	}

	if (cache_buffer.necp_cache_buf_type == NECP_CLIENT_CACHE_TYPE_ECN &&
	    cache_buffer.necp_cache_buf_ver == NECP_CLIENT_CACHE_TYPE_ECN_VER_1) {
		// ECN heuristics update: validate and copy in the nested buffer
		if (cache_buffer.necp_cache_buf_size != sizeof(necp_tcp_ecn_cache) ||
		    cache_buffer.necp_cache_buf_addr == USER_ADDR_NULL) {
			error = EINVAL;
			goto done_unlock;
		}

		necp_tcp_ecn_cache ecn_cache_buffer;
		memset(&ecn_cache_buffer, 0, sizeof(ecn_cache_buffer));

		error = copyin(cache_buffer.necp_cache_buf_addr, &ecn_cache_buffer, sizeof(necp_tcp_ecn_cache));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_update_cache copyin ecn cache buffer error (%d)", error);
			goto done_unlock;
		}

		if (client->current_route != NULL && client->current_route->rt_ifp != NULL) {
			// Non-platform binaries are not trusted to report success
			if (!client->platform_binary) {
				ecn_cache_buffer.necp_tcp_ecn_heuristics_success = 0;
			}
			tcp_heuristics_ecn_update(&ecn_cache_buffer, client->current_route->rt_ifp,
			    (union sockaddr_in_4_6 *)&flow->local_addr);
		}
	} else if (cache_buffer.necp_cache_buf_type == NECP_CLIENT_CACHE_TYPE_TFO &&
	    cache_buffer.necp_cache_buf_ver == NECP_CLIENT_CACHE_TYPE_TFO_VER_1) {
		// TFO heuristics update: validate and copy in the nested buffer
		if (cache_buffer.necp_cache_buf_size != sizeof(necp_tcp_tfo_cache) ||
		    cache_buffer.necp_cache_buf_addr == USER_ADDR_NULL) {
			error = EINVAL;
			goto done_unlock;
		}

		necp_tcp_tfo_cache tfo_cache_buffer;
		memset(&tfo_cache_buffer, 0, sizeof(tfo_cache_buffer));

		error = copyin(cache_buffer.necp_cache_buf_addr, &tfo_cache_buffer, sizeof(necp_tcp_tfo_cache));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_update_cache copyin tfo cache buffer error (%d)", error);
			goto done_unlock;
		}

		if (client->current_route != NULL && client->current_route->rt_ifp != NULL) {
			// Non-platform binaries are not trusted to report success
			if (!client->platform_binary) {
				tfo_cache_buffer.necp_tcp_tfo_heuristics_success = 0;
			}
			tcp_heuristics_tfo_update(&tfo_cache_buffer, client->current_route->rt_ifp,
			    (union sockaddr_in_4_6 *)&flow->local_addr,
			    (union sockaddr_in_4_6 *)&flow->remote_addr);
		}
	} else {
		// Unknown cache type/version combination
		error = EINVAL;
	}
done_unlock:
	NECP_CLIENT_ROUTE_UNLOCK(client);
	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);
done:
	*retval = error;
	return error;
}
11284
// Most results will fit into this size
// Stack-allocated template used by necp_client_sign() to avoid a heap
// allocation for typical signable payloads.
struct necp_client_signable_default {
	uuid_t client_id;     // Client UUID covered by the signature
	u_int32_t sign_type;  // One of the NECP_CLIENT_SIGN_TYPE_* values
	u_int8_t signable_data[NECP_CLIENT_ACTION_SIGN_DEFAULT_DATA_LENGTH]; // Inline payload buffer
} __attribute__((__packed__));
11291
11292 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_sign(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11293 necp_client_sign(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11294 {
11295 int error = 0;
11296 u_int8_t tag[NECP_CLIENT_ACTION_SIGN_TAG_LENGTH] = {};
11297 struct necp_client_signable * __indexable signable = NULL;
11298 struct necp_client_signable * __indexable allocated_signable = NULL;
11299 struct necp_client_signable_default default_signable = {};
11300 size_t tag_size = sizeof(tag);
11301
11302 const size_t signable_length = uap->client_id_len;
11303 const size_t return_tag_length = uap->buffer_size;
11304
11305 *retval = 0;
11306
11307 const bool has_resolver_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_VALIDATED_RESOLVER, 0) == 0);
11308 if (!has_resolver_entitlement) {
11309 NECPLOG0(LOG_ERR, "Process does not hold the necessary entitlement to sign resolver answers");
11310 error = EPERM;
11311 goto done;
11312 }
11313
11314 if (uap->client_id == 0 || signable_length < sizeof(*signable) || signable_length > NECP_CLIENT_ACTION_SIGN_MAX_TOTAL_LENGTH) {
11315 error = EINVAL;
11316 goto done;
11317 }
11318
11319 if (uap->buffer == 0 || return_tag_length != NECP_CLIENT_ACTION_SIGN_TAG_LENGTH) {
11320 error = EINVAL;
11321 goto done;
11322 }
11323
11324 if (signable_length <= sizeof(default_signable)) {
11325 signable = (struct necp_client_signable *)&default_signable;
11326 } else {
11327 if ((allocated_signable = (struct necp_client_signable *)kalloc_data(signable_length, Z_WAITOK | Z_ZERO)) == NULL) {
11328 NECPLOG(LOG_ERR, "necp_client_sign allocate signable %zu failed", signable_length);
11329 error = ENOMEM;
11330 goto done;
11331 }
11332 signable = allocated_signable;
11333 }
11334
11335 error = copyin(uap->client_id, signable, signable_length);
11336 if (error) {
11337 NECPLOG(LOG_ERR, "necp_client_sign copyin signable error (%d)", error);
11338 goto done;
11339 }
11340
11341 size_t data_length = 0;
11342 switch (signable->sign_type) {
11343 case NECP_CLIENT_SIGN_TYPE_RESOLVER_ANSWER:
11344 case NECP_CLIENT_SIGN_TYPE_SYSTEM_RESOLVER_ANSWER: {
11345 data_length = (sizeof(struct necp_client_host_resolver_answer) - sizeof(struct necp_client_signable));
11346 if (signable_length < (sizeof(struct necp_client_signable) + data_length)) {
11347 error = EINVAL;
11348 goto done;
11349 }
11350 struct necp_client_host_resolver_answer * __single signable_struct = (struct necp_client_host_resolver_answer *)signable;
11351 if (signable_struct->hostname_length > NECP_CLIENT_ACTION_SIGN_MAX_STRING_LENGTH ||
11352 signable_length != (sizeof(struct necp_client_signable) + data_length + signable_struct->hostname_length)) {
11353 error = EINVAL;
11354 goto done;
11355 }
11356 data_length += signable_struct->hostname_length;
11357 break;
11358 }
11359 case NECP_CLIENT_SIGN_TYPE_BROWSE_RESULT:
11360 case NECP_CLIENT_SIGN_TYPE_SYSTEM_BROWSE_RESULT: {
11361 data_length = (sizeof(struct necp_client_browse_result) - sizeof(struct necp_client_signable));
11362 if (signable_length < (sizeof(struct necp_client_signable) + data_length)) {
11363 error = EINVAL;
11364 goto done;
11365 }
11366 struct necp_client_browse_result *signable_struct = (struct necp_client_browse_result *)signable;
11367 if (signable_struct->service_length > NECP_CLIENT_ACTION_SIGN_MAX_STRING_LENGTH ||
11368 signable_length != (sizeof(struct necp_client_signable) + data_length + signable_struct->service_length)) {
11369 error = EINVAL;
11370 goto done;
11371 }
11372 data_length += signable_struct->service_length;
11373 break;
11374 }
11375 case NECP_CLIENT_SIGN_TYPE_SERVICE_RESOLVER_ANSWER:
11376 case NECP_CLIENT_SIGN_TYPE_SYSTEM_SERVICE_RESOLVER_ANSWER: {
11377 data_length = (sizeof(struct necp_client_service_resolver_answer) - sizeof(struct necp_client_signable));
11378 if (signable_length < (sizeof(struct necp_client_signable) + data_length)) {
11379 error = EINVAL;
11380 goto done;
11381 }
11382 struct necp_client_service_resolver_answer * __single signable_struct = (struct necp_client_service_resolver_answer *)signable;
11383 if (signable_struct->service_length > NECP_CLIENT_ACTION_SIGN_MAX_STRING_LENGTH ||
11384 signable_struct->hostname_length > NECP_CLIENT_ACTION_SIGN_MAX_STRING_LENGTH ||
11385 signable_length != (sizeof(struct necp_client_signable) + data_length + signable_struct->service_length + signable_struct->hostname_length)) {
11386 error = EINVAL;
11387 goto done;
11388 }
11389 data_length += signable_struct->service_length;
11390 data_length += signable_struct->hostname_length;
11391 break;
11392 }
11393 default: {
11394 NECPLOG(LOG_ERR, "necp_client_sign unknown signable type (%u)", signable->sign_type);
11395 error = EINVAL;
11396 goto done;
11397 }
11398 }
11399
11400 error = necp_sign_resolver_answer(signable->client_id, signable->sign_type,
11401 signable_get_data(signable, data_length), data_length,
11402 tag, &tag_size);
11403 if (tag_size != sizeof(tag)) {
11404 NECPLOG(LOG_ERR, "necp_client_sign unexpected tag size %zu", tag_size);
11405 error = EINVAL;
11406 goto done;
11407 }
11408 error = copyout(tag, uap->buffer, tag_size);
11409 if (error) {
11410 NECPLOG(LOG_ERR, "necp_client_sign copyout error (%d)", error);
11411 goto done;
11412 }
11413
11414 done:
11415 if (allocated_signable != NULL) {
11416 kfree_data(allocated_signable, signable_length);
11417 allocated_signable = NULL;
11418 }
11419 *retval = error;
11420 return error;
11421 }
11422
// Most results will fit into this size
// Stack-allocated template used by necp_client_validate(): a signature
// followed immediately by the default-sized signable payload.
struct necp_client_validatable_default {
	struct necp_client_signature signature;        // Tag to verify
	struct necp_client_signable_default signable;  // Signed payload
} __attribute__((__packed__));
11428
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_validate(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	// Handles NECP_CLIENT_ACTION_VALIDATE: copies in a signature+signable
	// blob and verifies the signature. Requires PRIV_NET_VALIDATED_RESOLVER.
	// Returns 0 when the signature validates, EAUTH when it does not,
	// or another errno on input/permission failures; also stored in *retval.
	int error = 0;
	struct necp_client_validatable *validatable = NULL;
	struct necp_client_validatable * __single allocated_validatable = NULL;
	struct necp_client_validatable_default default_validatable = {};

	const size_t validatable_length = uap->client_id_len;

	*retval = 0;

	const bool has_resolver_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_VALIDATED_RESOLVER, 0) == 0);
	if (!has_resolver_entitlement) {
		NECPLOG0(LOG_ERR, "Process does not hold the necessary entitlement to directly validate resolver answers");
		error = EPERM;
		goto done;
	}

	// Must hold at least the header; capped at max signable + tag
	if (uap->client_id == 0 || validatable_length < sizeof(*validatable) ||
	    validatable_length > (NECP_CLIENT_ACTION_SIGN_MAX_TOTAL_LENGTH + NECP_CLIENT_ACTION_SIGN_TAG_LENGTH)) {
		error = EINVAL;
		goto done;
	}

	// Use the stack template when the blob fits; otherwise heap-allocate
	if (validatable_length <= sizeof(default_validatable)) {
		validatable = (struct necp_client_validatable *)&default_validatable;
	} else {
		if ((allocated_validatable = (struct necp_client_validatable *)kalloc_data(validatable_length, Z_WAITOK | Z_ZERO)) == NULL) {
			NECPLOG(LOG_ERR, "necp_client_validate allocate struct %zu failed", validatable_length);
			error = ENOMEM;
			goto done;
		}
		validatable = allocated_validatable;
	}

	error = copyin(uap->client_id, validatable, validatable_length);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_validate copyin error (%d)", error);
		goto done;
	}

	// Everything after the fixed header is the signed data
	size_t signable_data_len = validatable_length - sizeof(struct necp_client_validatable);
	const bool validated = necp_validate_resolver_answer(validatable->signable.client_id, validatable->signable.sign_type,
	    signable_get_data(&validatable->signable, signable_data_len), signable_data_len,
	    validatable->signature.signed_tag, sizeof(validatable->signature.signed_tag));
	if (!validated) {
		// Return EAUTH to indicate that the signature failed
		error = EAUTH;
	}

done:
	if (allocated_validatable != NULL) {
		kfree_data(allocated_validatable, validatable_length);
		allocated_validatable = NULL;
	}
	*retval = error;
	return error;
}
11488
11489 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_get_signed_client_id(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11490 necp_client_get_signed_client_id(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11491 {
11492 int error = 0;
11493 *retval = 0;
11494 u_int32_t request_type = 0;
11495 struct necp_client_signed_client_id_uuid client_id = { 0 };
11496 const size_t buffer_size = uap->buffer_size;
11497 u_int8_t tag[NECP_CLIENT_ACTION_SIGN_TAG_LENGTH] = {};
11498 size_t tag_size = sizeof(tag);
11499 proc_t proc = current_proc();
11500 if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
11501 buffer_size < sizeof(struct necp_client_signed_client_id_uuid) ||
11502 uap->buffer == 0) {
11503 NECPLOG0(LOG_ERR, "necp_client_get_signed_client_id bad input");
11504 error = EINVAL;
11505 goto done;
11506 }
11507
11508 error = copyin(uap->client_id, &request_type, sizeof(u_int32_t));
11509 if (error) {
11510 NECPLOG(LOG_ERR, "necp_client_get_signed_client_id copyin request_type error (%d)", error);
11511 goto done;
11512 }
11513
11514 if (request_type != NECP_CLIENT_SIGNED_CLIENT_ID_TYPE_UUID) {
11515 error = ENOENT;
11516 NECPLOG(LOG_ERR, "necp_client_get_signed_client_id bad request_type (%d)", request_type);
11517 goto done;
11518 }
11519
11520 uuid_t application_uuid;
11521 uuid_clear(application_uuid);
11522 proc_getexecutableuuid(proc, application_uuid, sizeof(application_uuid));
11523
11524 error = necp_sign_application_id(application_uuid,
11525 NECP_CLIENT_SIGNED_CLIENT_ID_TYPE_UUID,
11526 tag, &tag_size);
11527 if (tag_size != sizeof(tag)) {
11528 NECPLOG(LOG_ERR, "necp_client_get_signed_client_id unexpected tag size %zu", tag_size);
11529 error = EINVAL;
11530 goto done;
11531 }
11532 uuid_copy(client_id.client_id, application_uuid);
11533 client_id.signature_length = tag_size;
11534 memcpy(client_id.signature_data, tag, tag_size);
11535
11536 error = copyout(&client_id, uap->buffer, sizeof(client_id));
11537 if (error != 0) {
11538 NECPLOG(LOG_ERR, "necp_client_get_signed_client_id copyout error (%d)", error);
11539 goto done;
11540 }
11541
11542 done:
11543 *retval = error;
11544 return error;
11545 }
11546
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_set_signed_client_id(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	// Handles NECP_CLIENT_ACTION_SET_SIGNED_CLIENT_ID: validates a signed
	// client UUID (as produced by necp_client_get_signed_client_id) and, on
	// success, sets it as the calling process's responsible UUID.
	// Requires the web-browser network entitlement or the privileged socket
	// delegate privilege. Returns 0 or an errno; also stored in *retval.
	int error = 0;
	*retval = 0;
	u_int32_t request_type = 0;
	struct necp_client_signed_client_id_uuid client_id = { 0 };
	const size_t buffer_size = uap->buffer_size;

	// Only allow entitled processes to set the client ID.
	proc_t proc = current_proc();
	task_t __single task = proc_task(proc);
	bool has_delegation_entitlement = task != NULL && IOTaskHasEntitlement(task, kCSWebBrowserNetworkEntitlement);
	if (!has_delegation_entitlement) {
		// Fall back to the socket-delegate privilege
		has_delegation_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0);
	}
	if (!has_delegation_entitlement) {
		NECPLOG0(LOG_ERR, "necp_client_set_signed_client_id client lacks the necessary entitlement");
		error = EAUTH;
		goto done;
	}

	if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
	    buffer_size < sizeof(struct necp_client_signed_client_id_uuid) ||
	    uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_set_signed_client_id bad input");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, &request_type, sizeof(u_int32_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_set_signed_client_id copyin request_type error (%d)", error);
		goto done;
	}

	// Only the UUID request type is currently supported
	if (request_type != NECP_CLIENT_SIGNED_CLIENT_ID_TYPE_UUID) {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_set_signed_client_id bad request_type (%d)", request_type);
		goto done;
	}

	error = copyin(uap->buffer, &client_id, sizeof(struct necp_client_signed_client_id_uuid));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_set_signed_client_id copyin request error (%d)", error);
		goto done;
	}

	const bool validated = necp_validate_application_id(client_id.client_id,
	    NECP_CLIENT_SIGNED_CLIENT_ID_TYPE_UUID,
	    client_id.signature_data, sizeof(client_id.signature_data));
	if (!validated) {
		// Return EAUTH to indicate that the signature failed
		error = EAUTH;
		NECPLOG(LOG_ERR, "necp_client_set_signed_client_id signature validation failed (%d)", error);
		goto done;
	}

	proc_setresponsibleuuid(proc, client_id.client_id, sizeof(client_id.client_id));

done:
	*retval = error;
	return error;
}
11611
11612 static int
necp_client_copy_flow_stats(struct necp_client_flow_registration * flow_registration,struct necp_flow_statistics * flow_stats)11613 necp_client_copy_flow_stats(struct necp_client_flow_registration *flow_registration,
11614 struct necp_flow_statistics *flow_stats)
11615 {
11616 struct aop_flow_stats aop_flow_stats = {};
11617 int error = 0;
11618
11619 struct necp_client_flow *flow = LIST_FIRST(&flow_registration->flow_list);
11620 if (flow == NULL || !flow->aop_offload || !flow->aop_stat_index_valid) {
11621 NECPLOG0(LOG_ERR, "necp_client_copy_flow_stats only supported for aop flows");
11622 return EINVAL;
11623 }
11624 error = net_aop_get_flow_stats(flow->stats_index, &aop_flow_stats);
11625 if (error != 0) {
11626 NECPLOG(LOG_ERR, "net_aop_get_flow_stats failed (%d)", error);
11627 return error;
11628 }
11629
11630 if (flow_stats->transport_proto == IPPROTO_TCP) {
11631 struct tcp_info *tcpi = &flow_stats->transport.tcpi;
11632 struct tcp_info *a_tcpi = &aop_flow_stats.transport.tcp_stats.tcp_info;
11633 memcpy(tcpi, a_tcpi, sizeof(*tcpi));
11634 }
11635
11636 return 0;
11637 }
11638
11639 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_get_flow_statistics(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11640 necp_client_get_flow_statistics(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11641 {
11642 int error = 0;
11643 uuid_t flow_id = {};
11644 struct necp_flow_statistics flow_stats = {};
11645
11646 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
11647 error = EINVAL;
11648 NECPLOG(LOG_ERR, "necp_client_remove_flow invalid client_id (length %zu)", (size_t)uap->client_id_len);
11649 goto done;
11650 }
11651
11652 error = copyin(uap->client_id, flow_id, sizeof(uuid_t));
11653 if (error) {
11654 NECPLOG(LOG_ERR, "necp_client_get_flow_statistics copyin client_id error (%d)", error);
11655 goto done;
11656 }
11657
11658 if (uap->buffer_size < sizeof(flow_stats) || uap->buffer == 0) {
11659 error = EINVAL;
11660 goto done;
11661 }
11662
11663 error = copyin(uap->buffer, &flow_stats, sizeof(flow_stats));
11664 if (error) {
11665 NECPLOG(LOG_ERR, "necp_client_get_flow_statistics copyin protocol error (%d)", error);
11666 goto done;
11667 }
11668
11669 if (flow_stats.transport_proto != IPPROTO_TCP) {
11670 NECPLOG(LOG_ERR, "necp_client_get_flow_statistics, transport proto %u not supported",
11671 flow_stats.transport_proto);
11672 error = ENOTSUP;
11673 goto done;
11674 }
11675
11676 NECP_FD_LOCK(fd_data);
11677 struct necp_client *client = NULL;
11678 struct necp_client_flow_registration *flow_registration = necp_client_fd_find_flow(fd_data, flow_id);
11679 if (flow_registration != NULL) {
11680 client = flow_registration->client;
11681 if (client != NULL) {
11682 necp_client_retain(client);
11683 }
11684 }
11685 NECP_FD_UNLOCK(fd_data);
11686
11687 if (flow_registration != NULL && client != NULL) {
11688 NECP_CLIENT_LOCK(client);
11689 if (flow_registration->client == client) {
11690 error = necp_client_copy_flow_stats(flow_registration, &flow_stats);
11691 if (error == 0) {
11692 error = copyout(&flow_stats, uap->buffer, sizeof(flow_stats));
11693 if (error != 0) {
11694 NECPLOG(LOG_ERR, "necp_client_get_flow_statistics copyout failed (%d)", error);
11695 }
11696 }
11697 }
11698
11699 necp_client_release_locked(client);
11700 NECP_CLIENT_UNLOCK(client);
11701 }
11702
11703 done:
11704 *retval = error;
11705 if (error != 0) {
11706 NECPLOG(LOG_ERR, "get flow statistics error (%d)", error);
11707 }
11708
11709 return error;
11710 }
11711
/*
 * System-call entry point for NECP client actions.
 *
 * Resolves the NECP fd, runs the MAC policy check for the requested action,
 * and dispatches to the per-action handler. Each handler stores its result
 * in *retval; this function returns the handler's error code (or the lookup/
 * MAC error). The fileproc reference taken by necp_find_fd_data() is dropped
 * on all paths via fp_drop().
 */
int
necp_client_action(struct proc *p, struct necp_client_action_args *uap, int *retval)
{
	struct fileproc * __single fp;
	int error = 0;
	int return_value = 0;
	struct necp_fd_data * __single fd_data = NULL;

	// Resolve the NECP fd; on success this holds a fileproc reference
	error = necp_find_fd_data(p, uap->necp_fd, &fp, &fd_data);
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_client_action find fd error (%d)", error);
		return error;
	}

	u_int32_t action = uap->action;

#if CONFIG_MACF
	// MAC policy hook: may veto the action for this process/fd
	error = mac_necp_check_client_action(p, fp->fp_glob, action);
	if (error) {
		return_value = error;
		goto done;
	}
#endif /* MACF */

	// Dispatch to the per-action handler
	switch (action) {
	case NECP_CLIENT_ACTION_ADD: {
		return_value = necp_client_add(p, fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_CLAIM: {
		return_value = necp_client_claim(p, fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_REMOVE: {
		return_value = necp_client_remove(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_PARAMETERS:
	case NECP_CLIENT_ACTION_COPY_RESULT:
	case NECP_CLIENT_ACTION_COPY_UPDATED_RESULT:
	case NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL: {
		return_value = necp_client_copy(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_LIST: {
		return_value = necp_client_list(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_ADD_FLOW: {
		return_value = necp_client_add_flow(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_REMOVE_FLOW: {
		return_value = necp_client_remove_flow(fd_data, uap, retval);
		break;
	}
#if SKYWALK
	case NECP_CLIENT_ACTION_REQUEST_NEXUS_INSTANCE: {
		return_value = necp_client_request_nexus(fd_data, uap, retval);
		break;
	}
#endif /* SKYWALK */
	case NECP_CLIENT_ACTION_AGENT: {
		return_value = necp_client_agent_action(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_AGENT: {
		return_value = necp_client_copy_agent(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_AGENT_USE: {
		return_value = necp_client_agent_use(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_ACQUIRE_AGENT_TOKEN: {
		return_value = necp_client_acquire_agent_token(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_INTERFACE: {
		return_value = necp_client_copy_interface(fd_data, uap, retval);
		break;
	}
#if SKYWALK
	case NECP_CLIENT_ACTION_GET_INTERFACE_ADDRESS: {
		return_value = necp_client_get_interface_address(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_SET_STATISTICS: {
		// Deprecated action; no longer supported
		return_value = ENOTSUP;
		break;
	}
	case NECP_CLIENT_ACTION_MAP_SYSCTLS: {
		return_value = necp_client_map_sysctls(fd_data, uap, retval);
		break;
	}
#endif /* SKYWALK */
	case NECP_CLIENT_ACTION_COPY_ROUTE_STATISTICS: {
		return_value = necp_client_copy_route_statistics(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_UPDATE_CACHE: {
		return_value = necp_client_update_cache(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_CLIENT_UPDATE: {
		return_value = necp_client_copy_client_update(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_SIGN: {
		return_value = necp_client_sign(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_VALIDATE: {
		return_value = necp_client_validate(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_GET_SIGNED_CLIENT_ID: {
		return_value = necp_client_get_signed_client_id(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_SET_SIGNED_CLIENT_ID: {
		return_value = necp_client_set_signed_client_id(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_GET_FLOW_STATISTICS: {
		return_value = necp_client_get_flow_statistics(fd_data, uap, retval);
		break;
	}
	default: {
		NECPLOG(LOG_ERR, "necp_client_action unknown action (%u)", action);
		return_value = EINVAL;
		break;
	}
	}

done:
	// Drop the fileproc reference taken by necp_find_fd_data()
	fp_drop(p, uap->necp_fd, fp, 0);
	return return_value;
}
11851
11852 #define NECP_MAX_MATCH_POLICY_PARAMETER_SIZE 1024
11853
/*
 * System-call entry point for one-shot NECP policy matching.
 *
 * Copies in a TLV parameter blob (bounded by
 * NECP_MAX_MATCH_POLICY_PARAMETER_SIZE), runs the policy match for the
 * calling process, and copies the aggregate result back to user space.
 * Returns 0 on success or an errno. retval is unused.
 */
int
necp_match_policy(struct proc *p, struct necp_match_policy_args *uap, int32_t *retval)
{
#pragma unused(retval)
	size_t buffer_size = 0;
	u_int8_t * __sized_by(buffer_size) parameters = NULL;
	struct necp_aggregate_result returned_result;
	int error = 0;

	if (uap == NULL) {
		error = EINVAL;
		goto done;
	}

	// Validate both buffers and cap the parameter size
	if (uap->parameters == 0 || uap->parameters_size == 0 || uap->parameters_size > NECP_MAX_MATCH_POLICY_PARAMETER_SIZE || uap->returned_result == 0) {
		error = EINVAL;
		goto done;
	}

	// Allocate first, then record the size for the __sized_by annotation
	parameters = (u_int8_t *)kalloc_data(uap->parameters_size, Z_WAITOK | Z_ZERO);
	buffer_size = uap->parameters_size;
	if (parameters == NULL) {
		error = ENOMEM;
		goto done;
	}
	// Copy parameters in
	error = copyin(uap->parameters, parameters, buffer_size);
	if (error) {
		goto done;
	}

	error = necp_application_find_policy_match_internal(p, parameters, buffer_size,
	    &returned_result, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, false, false, NULL);
	if (error) {
		goto done;
	}

	// Copy return value back
	error = copyout(&returned_result, uap->returned_result, sizeof(struct necp_aggregate_result));
	if (error) {
		goto done;
	}
done:
	if (parameters != NULL) {
		kfree_data_sized_by(parameters, buffer_size);
	}
	return error;
}
11902
11903 /// Socket operations
11904
11905 static errno_t
necp_set_socket_attribute(u_int8_t * __sized_by (buffer_length)buffer,size_t buffer_length,u_int8_t type,char * __null_terminated * buffer_p,bool * single_tlv)11906 necp_set_socket_attribute(u_int8_t * __sized_by(buffer_length)buffer, size_t buffer_length, u_int8_t type, char *__null_terminated *buffer_p, bool *single_tlv)
11907 {
11908 int error = 0;
11909 int cursor = 0;
11910 size_t string_size = 0;
11911 size_t local_string_length = 0;
11912 char * __sized_by(local_string_length) local_string = NULL;
11913 u_int8_t * __indexable value = NULL;
11914 char * __indexable buffer_to_free = NULL;
11915
11916 cursor = necp_buffer_find_tlv(buffer, buffer_length, 0, type, NULL, 0);
11917 if (cursor < 0) {
11918 // This will clear out the parameter
11919 goto done;
11920 }
11921
11922 string_size = necp_buffer_get_tlv_length(buffer, buffer_length, cursor);
11923 if (single_tlv != NULL && (buffer_length == sizeof(struct necp_tlv_header) + string_size)) {
11924 *single_tlv = true;
11925 }
11926 if (string_size == 0 || string_size > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
11927 // This will clear out the parameter
11928 goto done;
11929 }
11930
11931 local_string = (char *)kalloc_data(string_size + 1, Z_WAITOK | Z_ZERO);
11932 local_string_length = string_size + 1;
11933 if (local_string == NULL) {
11934 NECPLOG(LOG_ERR, "Failed to allocate a socket attribute buffer (size %zu)", string_size);
11935 goto fail;
11936 }
11937
11938 value = necp_buffer_get_tlv_value(buffer, buffer_length, cursor, NULL);
11939 if (value == NULL) {
11940 NECPLOG0(LOG_ERR, "Failed to get socket attribute");
11941 goto fail;
11942 }
11943
11944 memcpy(local_string, value, string_size);
11945 local_string[string_size] = 0;
11946
11947 done:
11948 if (*buffer_p != NULL) {
11949 buffer_to_free = __unsafe_null_terminated_to_indexable(*buffer_p);
11950 }
11951
11952 // Protect switching of buffer pointer
11953 necp_lock_socket_attributes();
11954 if (local_string != NULL) {
11955 *buffer_p = __unsafe_null_terminated_from_indexable(local_string, &local_string[string_size]);
11956 } else {
11957 *buffer_p = NULL;
11958 }
11959 necp_unlock_socket_attributes();
11960
11961 if (buffer_to_free != NULL) {
11962 kfree_data_addr(buffer_to_free);
11963 }
11964 return 0;
11965 fail:
11966 if (local_string != NULL) {
11967 kfree_data_sized_by(local_string, local_string_length);
11968 }
11969 return error;
11970 }
11971
/*
 * Apply socket-attribute TLVs from a setsockopt buffer to an inpcb's NECP
 * attributes (domain, domain owner, domain context, tracker domain, account).
 *
 * A zero-length or oversized option is ignored. When the buffer contains
 * only the DOMAIN_CONTEXT TLV, only that attribute is updated; otherwise
 * every attribute is set or cleared from the buffer.
 * Returns 0 on success or an errno from copyin/attribute parsing.
 */
errno_t
necp_set_socket_attributes(struct inp_necp_attributes *attributes, struct sockopt *sopt)
{
	int error = 0;
	u_int8_t *buffer = NULL;
	bool single_tlv = false;
	size_t valsize = sopt->sopt_valsize;
	// Cap at four maximum-length attribute TLVs
	if (valsize == 0 ||
	    valsize > ((sizeof(struct necp_tlv_header) + NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) * 4)) {
		goto done;
	}

	buffer = (u_int8_t *)kalloc_data(valsize, Z_WAITOK | Z_ZERO);
	if (buffer == NULL) {
		goto done;
	}

	error = sooptcopyin(sopt, buffer, valsize, 0);
	if (error) {
		goto done;
	}

	// If NECP_TLV_ATTRIBUTE_DOMAIN_CONTEXT is being set/cleared separately from the other attributes,
	// do not clear other attributes.
	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN_CONTEXT, &attributes->inp_domain_context, &single_tlv);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set domain context TLV for socket attributes");
		goto done;
	}
	if (single_tlv == true) {
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN, &attributes->inp_domain, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set domain TLV for socket attributes");
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN_OWNER, &attributes->inp_domain_owner, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set domain owner TLV for socket attributes");
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_TRACKER_DOMAIN, &attributes->inp_tracker_domain, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set tracker domain TLV for socket attributes");
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_ACCOUNT, &attributes->inp_account, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set account TLV for socket attributes");
		goto done;
	}

done:
	NECP_SOCKET_ATTRIBUTE_LOG("NECP ATTRIBUTES SOCKET - domain <%s> owner <%s> context <%s> tracker domain <%s> account <%s>",
	    attributes->inp_domain,
	    attributes->inp_domain_owner,
	    attributes->inp_domain_context,
	    attributes->inp_tracker_domain,
	    attributes->inp_account);

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Set on socket: Domain %s, Domain owner %s, Domain context %s, Tracker domain %s, Account %s",
		    attributes->inp_domain,
		    attributes->inp_domain_owner,
		    attributes->inp_domain_context,
		    attributes->inp_tracker_domain,
		    attributes->inp_account);
	}

	if (buffer != NULL) {
		kfree_data(buffer, valsize);
	}

	return error;
}
12052
12053 errno_t
necp_get_socket_attributes(struct inp_necp_attributes * attributes,struct sockopt * sopt)12054 necp_get_socket_attributes(struct inp_necp_attributes *attributes, struct sockopt *sopt)
12055 {
12056 int error = 0;
12057 size_t valsize = 0;
12058 u_int8_t *buffer = NULL;
12059 u_int8_t * __indexable cursor = NULL;
12060
12061 if (attributes->inp_domain != NULL) {
12062 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_domain);
12063 }
12064 if (attributes->inp_domain_owner != NULL) {
12065 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_domain_owner);
12066 }
12067 if (attributes->inp_domain_context != NULL) {
12068 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_domain_context);
12069 }
12070 if (attributes->inp_tracker_domain != NULL) {
12071 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_tracker_domain);
12072 }
12073 if (attributes->inp_account != NULL) {
12074 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_account);
12075 }
12076 if (valsize == 0) {
12077 goto done;
12078 }
12079
12080 buffer = (u_int8_t *)kalloc_data(valsize, Z_WAITOK | Z_ZERO);
12081 if (buffer == NULL) {
12082 goto done;
12083 }
12084
12085 cursor = buffer;
12086 if (attributes->inp_domain != NULL) {
12087 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN, strlen(attributes->inp_domain), __terminated_by_to_indexable(attributes->inp_domain),
12088 buffer, valsize);
12089 }
12090
12091 if (attributes->inp_domain_owner != NULL) {
12092 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN_OWNER, strlen(attributes->inp_domain_owner), __terminated_by_to_indexable(attributes->inp_domain_owner),
12093 buffer, valsize);
12094 }
12095
12096 if (attributes->inp_domain_context != NULL) {
12097 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN_CONTEXT, strlen(attributes->inp_domain_context), __terminated_by_to_indexable(attributes->inp_domain_context),
12098 buffer, valsize);
12099 }
12100
12101 if (attributes->inp_tracker_domain != NULL) {
12102 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_TRACKER_DOMAIN, strlen(attributes->inp_tracker_domain), __terminated_by_to_indexable(attributes->inp_tracker_domain),
12103 buffer, valsize);
12104 }
12105
12106 if (attributes->inp_account != NULL) {
12107 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_ACCOUNT, strlen(attributes->inp_account), __terminated_by_to_indexable(attributes->inp_account),
12108 buffer, valsize);
12109 }
12110
12111 error = sooptcopyout(sopt, buffer, valsize);
12112 if (error) {
12113 goto done;
12114 }
12115 done:
12116 if (buffer != NULL) {
12117 kfree_data(buffer, valsize);
12118 }
12119
12120 return error;
12121 }
12122
12123 int
necp_set_socket_resolver_signature(struct inpcb * inp,struct sockopt * sopt)12124 necp_set_socket_resolver_signature(struct inpcb *inp, struct sockopt *sopt)
12125 {
12126 const size_t valsize = sopt->sopt_valsize;
12127 if (valsize > NECP_CLIENT_ACTION_SIGN_MAX_TOTAL_LENGTH + NECP_CLIENT_ACTION_SIGN_TAG_LENGTH) {
12128 return EINVAL;
12129 }
12130
12131 necp_lock_socket_attributes();
12132 if (inp->inp_resolver_signature != NULL) {
12133 kfree_data_sized_by(inp->inp_resolver_signature, inp->inp_resolver_signature_length);
12134 }
12135
12136 int error = 0;
12137 if (valsize > 0) {
12138 inp->inp_resolver_signature = kalloc_data(valsize, Z_WAITOK | Z_ZERO);
12139 inp->inp_resolver_signature_length = valsize;
12140 if ((error = sooptcopyin(sopt, inp->inp_resolver_signature, valsize,
12141 valsize)) != 0) {
12142 // Free the signature buffer if the copyin failed
12143 kfree_data_sized_by(inp->inp_resolver_signature, inp->inp_resolver_signature_length);
12144 }
12145 }
12146 necp_unlock_socket_attributes();
12147
12148 return error;
12149 }
12150
12151 int
necp_get_socket_resolver_signature(struct inpcb * inp,struct sockopt * sopt)12152 necp_get_socket_resolver_signature(struct inpcb *inp, struct sockopt *sopt)
12153 {
12154 int error = 0;
12155 necp_lock_socket_attributes();
12156 if (inp->inp_resolver_signature == NULL ||
12157 inp->inp_resolver_signature_length == 0) {
12158 error = ENOENT;
12159 } else {
12160 error = sooptcopyout(sopt, inp->inp_resolver_signature,
12161 inp->inp_resolver_signature_length);
12162 }
12163 necp_unlock_socket_attributes();
12164 return error;
12165 }
12166
12167 bool
necp_socket_has_resolver_signature(struct inpcb * inp)12168 necp_socket_has_resolver_signature(struct inpcb *inp)
12169 {
12170 necp_lock_socket_attributes();
12171 bool has_signature = (inp->inp_resolver_signature != NULL && inp->inp_resolver_signature_length != 0);
12172 necp_unlock_socket_attributes();
12173 return has_signature;
12174 }
12175
/*
 * necp_socket_resolver_signature_matches_address
 * Check whether the resolver signature stored on the socket is a validated
 * system-resolver answer for the given address. Matching requires the
 * family, sa_len, and address bytes to agree (port must agree unless the
 * answer's port is 0, i.e. a wildcard), and the signature tag to pass
 * necp_validate_resolver_answer().
 */
bool
necp_socket_resolver_signature_matches_address(struct inpcb *inp, union necp_sockaddr_union *address)
{
	bool matches_address = false;
	necp_lock_socket_attributes();
	if (inp->inp_resolver_signature != NULL && inp->inp_resolver_signature_length > 0 && address->sa.sa_len > 0) {
		struct necp_client_validatable *validatable = (struct necp_client_validatable *)inp->inp_resolver_signature;
		// The stored blob must be strictly larger than the validatable
		// header and carry a system-resolver-answer sign type.
		if (inp->inp_resolver_signature_length > sizeof(struct necp_client_validatable) &&
		    validatable->signable.sign_type == NECP_CLIENT_SIGN_TYPE_SYSTEM_RESOLVER_ANSWER) {
			// Length of the signable payload that follows the header.
			size_t data_length = inp->inp_resolver_signature_length - sizeof(struct necp_client_validatable);
			if (data_length >= (sizeof(struct necp_client_host_resolver_answer) - sizeof(struct necp_client_signable))) {
				struct necp_client_host_resolver_answer * __single answer_struct = (struct necp_client_host_resolver_answer *)&validatable->signable;
				// Copy out the sockaddr_in6 so the IPv6 memcmp below reads
				// from an aligned local rather than the packed answer.
				struct sockaddr_in6 sin6 = answer_struct->address_answer.sin6;
				// Exact-length check (answer + trailing hostname), then the
				// family/len/port/address comparisons described above.
				if (data_length == (sizeof(struct necp_client_host_resolver_answer) + answer_struct->hostname_length - sizeof(struct necp_client_signable)) &&
				    answer_struct->address_answer.sa.sa_family == address->sa.sa_family &&
				    answer_struct->address_answer.sa.sa_len == address->sa.sa_len &&
				    (answer_struct->address_answer.sin.sin_port == 0 ||
				    answer_struct->address_answer.sin.sin_port == address->sin.sin_port) &&
				    ((answer_struct->address_answer.sa.sa_family == AF_INET &&
				    answer_struct->address_answer.sin.sin_addr.s_addr == address->sin.sin_addr.s_addr) ||
				    (answer_struct->address_answer.sa.sa_family == AF_INET6 &&
				    memcmp(&sin6.sin6_addr, &address->sin6.sin6_addr, sizeof(struct in6_addr)) == 0))) {
					// Address matches
					const bool validated = necp_validate_resolver_answer(validatable->signable.client_id,
					    validatable->signable.sign_type,
					    signable_get_data(&validatable->signable, data_length), data_length,
					    validatable->signature.signed_tag, sizeof(validatable->signature.signed_tag));
					if (validated) {
						// Answer is validated
						matches_address = true;
					}
				}
			}
		}
	}
	necp_unlock_socket_attributes();
	return matches_address;
}
12214
12215 /*
12216 * necp_set_socket_domain_attributes
12217 * Called from soconnectlock/soconnectxlock to directly set the tracker domain and owner for
12218 * a newly marked tracker socket.
12219 */
/*
 * Sets the NECP domain (and, for known trackers, the domain owner) on a
 * socket. Only PF_INET/PF_INET6 sockets are accepted. A previously set
 * domain is never overwritten: if the new domain differs, it is stored as
 * the tracker domain instead. Attribute pointer swaps are done under the
 * socket-attributes lock; the freed old buffers are released after
 * unlocking.
 *
 * Returns 0 on success, EINVAL for bad domain/owner lengths or socket
 * family, ENOMEM on allocation failure.
 */
errno_t
necp_set_socket_domain_attributes(struct socket *so, const char *domain __null_terminated, const char *domain_owner __null_terminated)
{
	int error = 0;
	struct inpcb * __single inp = NULL;
	size_t valsize = 0;
	size_t buffer_size = 0;
	u_int8_t * __sized_by(buffer_size) buffer = NULL;
	char * __indexable buffer_to_free = NULL;

	if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
		error = EINVAL;
		goto fail;
	}

	// Set domain (required)

	valsize = strlen(domain);
	if (valsize == 0 || valsize > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
		error = EINVAL;
		goto fail;
	}

	// Allocate valsize + 1 so the stored string is always NUL-terminated.
	buffer = (u_int8_t *)kalloc_data(valsize + 1, Z_WAITOK | Z_ZERO);
	buffer_size = valsize + 1;
	if (buffer == NULL) {
		error = ENOMEM;
		goto fail;
	}
	strlcpy((char *)buffer, domain, buffer_size);
	buffer[valsize] = 0;

	inp = sotoinpcb(so);
	// Do not overwrite a previously set domain if tracker domain is different.
	if (inp->inp_necp_attributes.inp_domain != NULL) {
		if (strlen(inp->inp_necp_attributes.inp_domain) != strlen(domain) ||
		    strcmp(inp->inp_necp_attributes.inp_domain, domain) != 0) {
			// New domain differs from the existing one: store it as the
			// tracker domain, remembering the old tracker-domain buffer
			// so it can be freed outside the lock.
			buffer_to_free = (inp->inp_necp_attributes.inp_tracker_domain != NULL) ? __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_tracker_domain) : NULL;
			// Protect switching of buffer pointer
			necp_lock_socket_attributes();
			inp->inp_necp_attributes.inp_tracker_domain = __unsafe_null_terminated_from_indexable((char *)buffer, (char *)&buffer[valsize]);
			necp_unlock_socket_attributes();
			if (buffer_to_free != NULL) {
				kfree_data_addr(buffer_to_free);
			}
		} else {
			// Same domain as already stored: discard the new copy.
			kfree_data_sized_by(buffer, buffer_size);
		}
	} else {
		// Protect switching of buffer pointer
		necp_lock_socket_attributes();
		inp->inp_necp_attributes.inp_domain = __unsafe_null_terminated_from_indexable((char *)buffer, (char *)&buffer[valsize]);
		necp_unlock_socket_attributes();
	}
	// Ownership of the buffer has transferred (or it was freed above).
	buffer = NULL;
	buffer_size = 0;

	// set domain_owner (required only for tracker)
	if (!(so->so_flags1 & SOF1_KNOWN_TRACKER)) {
		goto done;
	}

	valsize = strlen(domain_owner);
	if (valsize == 0 || valsize > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
		error = EINVAL;
		goto fail;
	}

	buffer = (u_int8_t *)kalloc_data(valsize + 1, Z_WAITOK | Z_ZERO);
	buffer_size = valsize + 1;
	if (buffer == NULL) {
		error = ENOMEM;
		goto fail;
	}
	strlcpy((char *)buffer, domain_owner, buffer_size);
	buffer[valsize] = 0;

	inp = sotoinpcb(so);

	// Remember the old owner buffer so it can be freed outside the lock.
	buffer_to_free = (inp->inp_necp_attributes.inp_domain_owner != NULL) ? __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_domain_owner) : NULL;
	// Protect switching of buffer pointer
	necp_lock_socket_attributes();
	inp->inp_necp_attributes.inp_domain_owner = __unsafe_null_terminated_from_indexable((char *)buffer, (char *)&buffer[valsize]);
	necp_unlock_socket_attributes();
	buffer = NULL;
	buffer_size = 0;

	if (buffer_to_free != NULL) {
		kfree_data_addr(buffer_to_free);
	}

done:
	NECP_SOCKET_PARAMS_LOG(so, "NECP ATTRIBUTES SOCKET - domain <%s> owner <%s> context <%s> tracker domain <%s> account <%s> "
	    "<so flags - is_tracker %X non-app-initiated %X app-approved-domain %X",
	    inp->inp_necp_attributes.inp_domain,
	    inp->inp_necp_attributes.inp_domain_owner,
	    inp->inp_necp_attributes.inp_domain_context,
	    inp->inp_necp_attributes.inp_tracker_domain,
	    inp->inp_necp_attributes.inp_account,
	    so->so_flags1 & SOF1_KNOWN_TRACKER,
	    so->so_flags1 & SOF1_TRACKER_NON_APP_INITIATED,
	    so->so_flags1 & SOF1_APPROVED_APP_DOMAIN);

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Set on socket: Domain <%s> Domain owner <%s> Domain context <%s> Tracker domain <%s> Account <%s> ",
		    inp->inp_necp_attributes.inp_domain,
		    inp->inp_necp_attributes.inp_domain_owner,
		    inp->inp_necp_attributes.inp_domain_context,
		    inp->inp_necp_attributes.inp_tracker_domain,
		    inp->inp_necp_attributes.inp_account);
	}
fail:
	// Reached directly on error; buffer is non-NULL only if ownership was
	// never transferred to the inp attributes.
	if (buffer != NULL) {
		kfree_data_sized_by(buffer, buffer_size);
	}
	return error;
}
12337
/*
 * necp_create_nexus_assign_message
 * Build a TLV message describing a nexus flow assignment: optional nexus
 * instance/port, flow advisory index, nexus key, local/remote endpoints,
 * local ethernet address, flow stats pointer, and flow ID. Each TLV is
 * included only when the corresponding argument is provided.
 *
 * Returns a kalloc_data buffer (caller frees with kfree_data using
 * *message_length) and sets *message_length to its size, or returns NULL
 * with *message_length = 0 if nothing was requested or allocation failed.
 */
void *
__sized_by(*message_length)
necp_create_nexus_assign_message(uuid_t nexus_instance, nexus_port_t nexus_port, void * __sized_by(key_length) key, uint32_t key_length,
    struct necp_client_endpoint *local_endpoint, struct necp_client_endpoint *remote_endpoint, struct ether_addr *local_ether_addr,
    u_int32_t flow_adv_index, void *flow_stats, uint32_t flow_id, size_t *message_length)
{
	u_int8_t * __indexable buffer = NULL;
	u_int8_t * __indexable cursor = NULL;
	size_t valsize = 0;
	bool has_nexus_assignment = FALSE;

	// Size pass: accumulate one TLV per requested field.
	if (!uuid_is_null(nexus_instance)) {
		has_nexus_assignment = TRUE;
		valsize += sizeof(struct necp_tlv_header) + sizeof(uuid_t);
		valsize += sizeof(struct necp_tlv_header) + sizeof(nexus_port_t);
	}
	if (flow_adv_index != NECP_FLOWADV_IDX_INVALID) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(u_int32_t);
	}
	if (key != NULL && key_length > 0) {
		valsize += sizeof(struct necp_tlv_header) + key_length;
	}
	if (local_endpoint != NULL) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct necp_client_endpoint);
	}
	if (remote_endpoint != NULL) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct necp_client_endpoint);
	}
	if (local_ether_addr != NULL) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct ether_addr);
	}
	if (flow_stats != NULL) {
		// Note: the TLV payload is the pointer value itself, not the
		// pointed-to stats.
		valsize += sizeof(struct necp_tlv_header) + sizeof(void *);
	}
	if (flow_id != 0) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(u_int32_t);
	}
	if (valsize == 0) {
		*message_length = 0;
		return NULL;
	}

	buffer = kalloc_data(valsize, Z_WAITOK | Z_ZERO);
	if (buffer == NULL) {
		*message_length = 0;
		return NULL;
	}

	// Fill pass: emit the TLVs counted above, in the same order.
	cursor = buffer;
	if (has_nexus_assignment) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_INSTANCE, sizeof(uuid_t), nexus_instance, buffer, valsize);
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_PORT, sizeof(nexus_port_t), &nexus_port, buffer, valsize);
	}
	if (flow_adv_index != NECP_FLOWADV_IDX_INVALID) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_PORT_FLOW_INDEX, sizeof(u_int32_t), &flow_adv_index, buffer, valsize);
	}
	if (key != NULL && key_length > 0) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_PARAMETER_NEXUS_KEY, key_length, key, buffer, valsize);
	}
	if (local_endpoint != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_LOCAL_ENDPOINT, sizeof(struct necp_client_endpoint), (uint8_t *)(struct necp_client_endpoint * __bidi_indexable)local_endpoint, buffer, valsize);
	}
	if (remote_endpoint != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_REMOTE_ENDPOINT, sizeof(struct necp_client_endpoint), (uint8_t *)(struct necp_client_endpoint * __bidi_indexable)remote_endpoint, buffer, valsize);
	}
	if (local_ether_addr != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_LOCAL_ETHER_ADDR, sizeof(struct ether_addr), (uint8_t *)(struct ether_addr * __bidi_indexable)local_ether_addr, buffer, valsize);
	}
	if (flow_stats != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_FLOW_STATS, sizeof(void *), &flow_stats, buffer, valsize);
	}
	if (flow_id != 0) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_UNIQUE_FLOW_TAG, sizeof(u_int32_t), &flow_id, buffer, valsize);
	}

	*message_length = valsize;

	return buffer;
}
12417
12418 void
necp_inpcb_remove_cb(struct inpcb * inp)12419 necp_inpcb_remove_cb(struct inpcb *inp)
12420 {
12421 if (!uuid_is_null(inp->necp_client_uuid)) {
12422 necp_client_unregister_socket_flow(inp->necp_client_uuid, inp);
12423 uuid_clear(inp->necp_client_uuid);
12424 }
12425 }
12426
12427 void
necp_inpcb_dispose(struct inpcb * inp)12428 necp_inpcb_dispose(struct inpcb *inp)
12429 {
12430 char * __indexable buffer = NULL;
12431
12432 necp_inpcb_remove_cb(inp); // Clear out socket registrations if not yet done
12433 if (inp->inp_necp_attributes.inp_domain != NULL) {
12434 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_domain);
12435 kfree_data_addr(buffer);
12436 inp->inp_necp_attributes.inp_domain = NULL;
12437 }
12438 if (inp->inp_necp_attributes.inp_account != NULL) {
12439 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_account);
12440 kfree_data_addr(buffer);
12441 inp->inp_necp_attributes.inp_account = NULL;
12442 }
12443 if (inp->inp_necp_attributes.inp_domain_owner != NULL) {
12444 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_domain_owner);
12445 kfree_data_addr(buffer);
12446 inp->inp_necp_attributes.inp_domain_owner = NULL;
12447 }
12448 if (inp->inp_necp_attributes.inp_domain_context != NULL) {
12449 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_domain_context);
12450 kfree_data_addr(buffer);
12451 inp->inp_necp_attributes.inp_domain_context = NULL;
12452 }
12453 if (inp->inp_necp_attributes.inp_tracker_domain != NULL) {
12454 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_tracker_domain);
12455 kfree_data_addr(buffer);
12456 inp->inp_necp_attributes.inp_tracker_domain = NULL;
12457 }
12458 if (inp->inp_resolver_signature != NULL) {
12459 kfree_data_sized_by(inp->inp_resolver_signature, inp->inp_resolver_signature_length);
12460 }
12461 }
12462
12463 void
necp_mppcb_dispose(struct mppcb * mpp)12464 necp_mppcb_dispose(struct mppcb *mpp)
12465 {
12466 char * __indexable buffer = NULL;
12467
12468 if (!uuid_is_null(mpp->necp_client_uuid)) {
12469 necp_client_unregister_multipath_cb(mpp->necp_client_uuid, mpp);
12470 uuid_clear(mpp->necp_client_uuid);
12471 }
12472
12473 if (mpp->inp_necp_attributes.inp_domain != NULL) {
12474 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_domain);
12475 kfree_data_addr(buffer);
12476 mpp->inp_necp_attributes.inp_domain = NULL;
12477 }
12478 if (mpp->inp_necp_attributes.inp_account != NULL) {
12479 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_account);
12480 kfree_data_addr(buffer);
12481 mpp->inp_necp_attributes.inp_account = NULL;
12482 }
12483 if (mpp->inp_necp_attributes.inp_domain_owner != NULL) {
12484 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_domain_owner);
12485 kfree_data_addr(buffer);
12486 mpp->inp_necp_attributes.inp_domain_owner = NULL;
12487 }
12488 if (mpp->inp_necp_attributes.inp_tracker_domain != NULL) {
12489 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_tracker_domain);
12490 kfree_data_addr(buffer);
12491 mpp->inp_necp_attributes.inp_tracker_domain = NULL;
12492 }
12493 if (mpp->inp_necp_attributes.inp_domain_context != NULL) {
12494 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_domain_context);
12495 kfree_data_addr(buffer);
12496 mpp->inp_necp_attributes.inp_domain_context = NULL;
12497 }
12498 }
12499
12500 /// Module init
12501
/*
 * necp_client_init
 * One-time NECP client subsystem initialization: allocate the deferred
 * thread calls, then initialize the global fd/observer/stats lists and
 * the client/flow red-black trees. VERIFY panics on allocation failure.
 */
void
necp_client_init(void)
{
	// Deferred "update all clients" work.
	necp_client_update_tcall = thread_call_allocate_with_options(necp_update_all_clients_callout, NULL,
	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
	VERIFY(necp_client_update_tcall != NULL);
#if SKYWALK

	// Deferred flow-stats collection.
	necp_client_collect_stats_tcall = thread_call_allocate_with_options(necp_collect_stats_client_callout, NULL,
	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
	VERIFY(necp_client_collect_stats_tcall != NULL);

	// Deferred reaping of empty stats arenas.
	necp_close_empty_arenas_tcall = thread_call_allocate_with_options(necp_close_empty_arenas_callout, NULL,
	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
	VERIFY(necp_close_empty_arenas_tcall != NULL);
#endif /* SKYWALK */

	LIST_INIT(&necp_fd_list);
	LIST_INIT(&necp_fd_observer_list);
	LIST_INIT(&necp_collect_stats_flow_list);

	RB_INIT(&necp_client_global_tree);
	RB_INIT(&necp_client_flow_global_tree);
}
12526
#if SKYWALK
/*
 * necp_client_get_proc_pid_from_arena_info
 * Map an arena mmap info back to the pid of the owning process. Only
 * NECP and SYSTEM arena types are valid here (asserted).
 */
pid_t
necp_client_get_proc_pid_from_arena_info(struct skmem_arena_mmap_info *arena_info)
{
	ASSERT((arena_info->ami_arena->ar_type == SKMEM_ARENA_TYPE_NECP) || (arena_info->ami_arena->ar_type == SKMEM_ARENA_TYPE_SYSTEM));

	if (arena_info->ami_arena->ar_type == SKMEM_ARENA_TYPE_NECP) {
		// NECP arenas embed the mmap info inside a struct necp_arena_info.
		struct necp_arena_info * __single nai = __unsafe_forge_single(struct necp_arena_info *, container_of(arena_info, struct necp_arena_info, nai_mmap));
		return nai->nai_proc_pid;
	} else {
		// SYSTEM arenas embed it inside the owning necp_fd_data.
		struct necp_fd_data * __single fd_data = __unsafe_forge_single(struct necp_fd_data *, container_of(arena_info, struct necp_fd_data, sysctl_mmap));
		return fd_data->proc_pid;
	}
}
#endif /* SKYWALK */
12542