1 /*
2 * Copyright (c) 2015-2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30
31 #include <kern/thread_call.h>
32 #include <kern/uipc_domain.h>
33 #include <kern/zalloc.h>
34
35 #include <net/if.h>
36 #include <net/if_types.h>
37 #include <net/if_var.h>
38 #include <net/net_api_stats.h>
39 #include <net/necp.h>
40 #include <net/network_agent.h>
41 #include <net/ntstat.h>
42 #include <net/aop/kpi_aop.h>
43 #include <net/aop/aop_stats.h>
44
45 #include <netinet/in_pcb.h>
46 #include <netinet/in_var.h>
47 #include <netinet/ip.h>
48 #include <netinet/ip6.h>
49 #include <netinet/mp_pcb.h>
50 #include <netinet/tcp_cc.h>
51 #include <netinet/tcp_fsm.h>
52 #include <netinet/tcp_cache.h>
53 #include <netinet6/in6_var.h>
54
55 #include <sys/domain.h>
56 #include <sys/file_internal.h>
57 #include <sys/kauth.h>
58 #include <sys/kernel.h>
59 #include <sys/malloc.h>
60 #include <sys/poll.h>
61 #include <sys/priv.h>
62 #include <sys/protosw.h>
63 #include <sys/queue.h>
64 #include <sys/socket.h>
65 #include <sys/socketvar.h>
66 #include <sys/sysproto.h>
67 #include <sys/systm.h>
68 #include <sys/types.h>
69 #include <sys/codesign.h>
70 #include <libkern/section_keywords.h>
71 #include <IOKit/IOBSD.h>
72
73 #include <os/refcnt.h>
74
75 #include <CodeSignature/Entitlements.h>
76
77 #if SKYWALK
78 #include <skywalk/os_skywalk_private.h>
79 #include <skywalk/nexus/flowswitch/flow/flow_var.h>
80 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
81 #endif /* SKYWALK */
82
83 #if CONFIG_MACF
84 #include <security/mac_framework.h>
85 #endif
86
87 #include <net/sockaddr_utils.h>
88
89 /*
90 * NECP Client Architecture
91 * ------------------------------------------------
92 * See <net/necp.c> for a discussion on NECP database architecture.
93 *
94 * Each client of NECP provides a set of parameters for a connection or network state
95 * evaluation, on which NECP policy evaluation is run. This produces a policy result
 * which can be accessed by the originating process, along with events for when policy
 * results have changed.
98 *
99 * ------------------------------------------------
100 * NECP Client FD
101 * ------------------------------------------------
102 * A process opens an NECP file descriptor using necp_open(). This is a very simple
103 * file descriptor, upon which the process may do the following operations:
104 * - necp_client_action(...), to add/remove/query clients
105 * - kqueue, to watch for readable events
106 * - close(), to close the client session and release all clients
107 *
108 * Client objects are allocated structures that hang off of the file descriptor. Each
109 * client contains:
110 * - Client ID, a UUID that references the client across the system
111 * - Parameters, a buffer of TLVs that describe the client's connection parameters,
112 * such as the remote and local endpoints, interface requirements, etc.
113 * - Result, a buffer of TLVs containing the current policy evaluation for the client.
114 * This result will be updated whenever a network change occurs that impacts the
115 * policy result for that client.
116 *
117 * +--------------+
118 * | NECP fd |
119 * +--------------+
120 * ||
121 * ==================================
122 * || || ||
123 * +--------------+ +--------------+ +--------------+
124 * | Client ID | | Client ID | | Client ID |
125 * | ---- | | ---- | | ---- |
126 * | Parameters | | Parameters | | Parameters |
127 * | ---- | | ---- | | ---- |
128 * | Result | | Result | | Result |
129 * +--------------+ +--------------+ +--------------+
130 *
131 * ------------------------------------------------
132 * Client Actions
133 * ------------------------------------------------
134 * - Add. Input parameters as a buffer of TLVs, and output a client ID. Allocates a
135 * new client structure on the file descriptor.
136 * - Remove. Input a client ID. Removes a client structure from the file descriptor.
137 * - Copy Parameters. Input a client ID, and output parameter TLVs.
138 * - Copy Result. Input a client ID, and output result TLVs. Alternatively, input empty
139 * client ID and get next unread client result.
140 * - Copy List. List all client IDs.
141 *
142 * ------------------------------------------------
143 * Client Policy Evaluation
144 * ------------------------------------------------
145 * Policies are evaluated for clients upon client creation, and upon update events,
146 * which are network/agent/policy changes coalesced by a timer.
147 *
148 * The policy evaluation goes through the following steps:
149 * 1. Parse client parameters.
150 * 2. Select a scoped interface if applicable. This involves using require/prohibit
151 * parameters, along with the local address, to select the most appropriate interface
152 * if not explicitly set by the client parameters.
 * 3. Run NECP application-level policy evaluation
154 * 4. Set policy result into client result buffer.
155 *
156 * ------------------------------------------------
157 * Client Observers
158 * ------------------------------------------------
159 * If necp_open() is called with the NECP_OPEN_FLAG_OBSERVER flag, and the process
160 * passes the necessary privilege check, the fd is allowed to use necp_client_action()
161 * to copy client state attached to the file descriptors of other processes, and to
162 * list all client IDs on the system.
163 */
164
extern u_int32_t necp_debug;

// fileops handlers for NECP file descriptors (installed in necp_fd_ops below)
static int necpop_select(struct fileproc *, int, void *, vfs_context_t);
static int necpop_close(struct fileglob *, vfs_context_t);
static int necpop_kqfilter(struct fileproc *, struct knote *, struct kevent_qos_s *);

// Timer functions
static int necp_timeout_microseconds = 1000 * 100; // 100ms
static int necp_timeout_leeway_microseconds = 1000 * 50; // 50ms
#if SKYWALK
static int necp_collect_stats_timeout_microseconds = 1000 * 1000 * 1; // 1s
static int necp_collect_stats_timeout_leeway_microseconds = 1000 * 500; // 500ms
static int necp_close_arenas_timeout_microseconds = 1000 * 1000 * 10; // 10s
static int necp_close_arenas_timeout_leeway_microseconds = 1000 * 1000 * 1; // 1s
#endif /* SKYWALK */

// Global object counters, exported read-only through the sysctls below
static int necp_client_fd_count = 0;
static int necp_observer_fd_count = 0;
static int necp_client_count = 0;
static int necp_socket_flow_count = 0;
static int necp_if_flow_count = 0;
static int necp_observer_message_limit = 256; // writable via sysctl

/*
 * NECP client tracing control -
 *
 * necp_client_tracing_level : 1 for client trace, 2 for flow trace, 3 for parameter details
 * necp_client_tracing_pid : match client with pid
 */
static int necp_client_tracing_level = 0;
static int necp_client_tracing_pid = 0;

// Tracing verbosity levels, compared against necp_client_tracing_level
#define NECP_CLIENT_TRACE_LEVEL_CLIENT 1
#define NECP_CLIENT_TRACE_LEVEL_FLOW 2
#define NECP_CLIENT_TRACE_LEVEL_PARAMS 3
200
// True when the given pid matches the configured tracing pid filter.
// The argument is parenthesized so the expansion stays correct for any
// expression argument (CERT PRE01-C).
#define NECP_CLIENT_TRACE_PID_MATCHED(pid) \
	((pid) == necp_client_tracing_pid)
203
// Evaluates to the current tracing level when tracing is enabled at (level)
// or above and the pid filter matches; otherwise 0. NOTE: deliberately
// unhygienic — it references `client` from the expansion site (the log
// macros below all name their first parameter `client`).
// The level argument is parenthesized for macro safety (CERT PRE01-C).
#define NECP_ENABLE_CLIENT_TRACE(level) \
	((necp_client_tracing_level >= (level) && \
	(!necp_client_tracing_pid || NECP_CLIENT_TRACE_PID_MATCHED(client->proc_pid))) ? necp_client_tracing_level : 0)
207
// Log a client-level trace message when client tracing is enabled.
// Wrapped in do { } while (0) so the macro behaves as a single statement
// (safe in unbraced if/else bodies); the previous bare if-block form was a
// dangling-else hazard.
#define NECP_CLIENT_LOG(client, fmt, ...) \
	do { \
	        if (client && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_CLIENT)) { \
	                uuid_string_t client_uuid_str = { }; \
	                uuid_unparse_lower(client->client_id, client_uuid_str); \
	                NECPLOG(LOG_NOTICE, "NECP_CLIENT_LOG <pid %d %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, ##__VA_ARGS__); \
	        } \
	} while (0)
214
// Log a flow-level trace message when flow tracing is enabled.
// do { } while (0) makes the multi-statement macro a single statement and
// removes the dangling-else hazard of the previous bare if-block form.
#define NECP_CLIENT_FLOW_LOG(client, flow, fmt, ...) \
	do { \
	        if (client && flow && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) { \
	                uuid_string_t client_uuid_str = { }; \
	                uuid_unparse_lower(client->client_id, client_uuid_str); \
	                uuid_string_t flow_uuid_str = { }; \
	                uuid_unparse_lower(flow->registration_id, flow_uuid_str); \
	                NECPLOG(LOG_NOTICE, "NECP CLIENT FLOW TRACE <pid %d %s> <flow %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, flow_uuid_str, ##__VA_ARGS__); \
	        } \
	} while (0)
223
// Log client parameter details when parameter-level tracing is enabled.
// do { } while (0) wrapper removes the dangling-else hazard.
#define NECP_CLIENT_PARAMS_LOG(client, fmt, ...) \
	do { \
	        if (client && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) { \
	                uuid_string_t client_uuid_str = { }; \
	                uuid_unparse_lower(client->client_id, client_uuid_str); \
	                NECPLOG(LOG_NOTICE, "NECP_CLIENT_PARAMS_LOG <pid %d %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, ##__VA_ARGS__); \
	        } \
	} while (0)
230
// Effective pid for a socket: the delegated pid when the socket is delegated,
// otherwise the pid that last used it. Argument parenthesized for macro
// safety (CERT PRE01-C).
#define NECP_SOCKET_PID(so) \
	(((so)->so_flags & SOF_DELEGATED) ? (so)->e_pid : (so)->last_pid)
233
// Socket analogue of NECP_ENABLE_CLIENT_TRACE: evaluates to the current
// tracing level when enabled at (level) and the pid filter matches the
// socket's pid. NOTE: references `so` from the expansion site.
// The level argument is parenthesized for macro safety.
#define NECP_ENABLE_SOCKET_TRACE(level) \
	((necp_client_tracing_level >= (level) && \
	(!necp_client_tracing_pid || NECP_CLIENT_TRACE_PID_MATCHED(NECP_SOCKET_PID(so)))) ? necp_client_tracing_level : 0)
237
// Log socket parameter details when parameter-level tracing is enabled.
// do { } while (0) wrapper removes the dangling-else hazard.
#define NECP_SOCKET_PARAMS_LOG(so, fmt, ...) \
	do { \
	        if (so && NECP_ENABLE_SOCKET_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) { \
	                NECPLOG(LOG_NOTICE, "NECP_SOCKET_PARAMS_LOG <pid %d>: " fmt "\n", NECP_SOCKET_PID(so), ##__VA_ARGS__); \
	        } \
	} while (0)
242
// Log socket attribute details when parameter-level tracing is enabled
// (no pid filter here — this is gated on the level alone).
// do { } while (0) wrapper removes the dangling-else hazard.
#define NECP_SOCKET_ATTRIBUTE_LOG(fmt, ...) \
	do { \
	        if (necp_client_tracing_level >= NECP_CLIENT_TRACE_LEVEL_PARAMS) { \
	                NECPLOG(LOG_NOTICE, "NECP_SOCKET_ATTRIBUTE_LOG: " fmt "\n", ##__VA_ARGS__); \
	        } \
	} while (0)
247
// Log tracker events for a given pid (skipped when pid is 0).
// do { } while (0) wrapper removes the dangling-else hazard.
#define NECP_CLIENT_TRACKER_LOG(pid, fmt, ...) \
	do { \
	        if (pid) { \
	                NECPLOG(LOG_NOTICE, "NECP_CLIENT_TRACKER_LOG <pid %d>: " fmt "\n", pid, ##__VA_ARGS__); \
	        } \
	} while (0)
252
#if SKYWALK
// Counters for skywalk-backed objects, exported read-only via sysctl below
static int necp_arena_count = 0;
static int necp_sysctl_arena_count = 0;
static int necp_nexus_flow_count = 0;

/* userspace stats sanity check range, same unit as TCP (see TCP_RTT_SCALE) */
static uint32_t necp_client_stats_rtt_floor = 1; // 32us
static uint32_t necp_client_stats_rtt_ceiling = 1920000; // 60s
// All-zero sk_stats_flow template (zero-initialized at file scope)
const static struct sk_stats_flow ntstat_sk_stats_zero;
#endif /* SKYWALK */

// When non-zero, route metrics are used in place of client-reported stats
static int necp_client_stats_use_route_metrics = 0;

/*
 * Global lock to protect socket inp_necp_attributes across updates.
 * NECP updating these attributes and clients accessing these attributes
 * must take this lock.
 */
static LCK_GRP_DECLARE(necp_socket_attr_lock_grp, "necpSocketAttrGroup");
LCK_MTX_DECLARE(necp_socket_attr_lock, &necp_socket_attr_lock_grp);

// Refcount group used for necp_client.reference_count
os_refgrp_decl(static, necp_client_refgrp, "NECPClientRefGroup", NULL);
275
// Export the counters and tracing knobs above under net.necp.*.
// Counters are read-only; the observer message limit and tracing knobs are RW.
SYSCTL_INT(_net_necp, NECPCTL_CLIENT_FD_COUNT, client_fd_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_client_fd_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_OBSERVER_FD_COUNT, observer_fd_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_observer_fd_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_CLIENT_COUNT, client_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_client_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_SOCKET_FLOW_COUNT, socket_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_socket_flow_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_IF_FLOW_COUNT, if_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_if_flow_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_OBSERVER_MESSAGE_LIMIT, observer_message_limit, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_observer_message_limit, 256, "");
SYSCTL_INT(_net_necp, NECPCTL_CLIENT_TRACING_LEVEL, necp_client_tracing_level, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_client_tracing_level, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_CLIENT_TRACING_PID, necp_client_tracing_pid, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_client_tracing_pid, 0, "");

#if SKYWALK
SYSCTL_INT(_net_necp, NECPCTL_ARENA_COUNT, arena_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_arena_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_SYSCTL_ARENA_COUNT, sysctl_arena_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_sysctl_arena_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_NEXUS_FLOW_COUNT, nexus_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_nexus_flow_count, 0, "");
#if (DEVELOPMENT || DEBUG)
// NOTE(review): necp_collect_stats_timeout_microseconds is declared `static int`
// but exported via SYSCTL_UINT — confirm the signedness mismatch is intentional.
SYSCTL_UINT(_net_necp, OID_AUTO, collect_stats_interval_us, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_collect_stats_timeout_microseconds, 0, "");
SYSCTL_UINT(_net_necp, OID_AUTO, necp_client_stats_rtt_floor, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_rtt_floor, 0, "");
SYSCTL_UINT(_net_necp, OID_AUTO, necp_client_stats_rtt_ceiling, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_rtt_ceiling, 0, "");
SYSCTL_INT(_net_necp, OID_AUTO, necp_client_stats_use_route_metrics, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_use_route_metrics, 0, "");
#endif /* (DEVELOPMENT || DEBUG) */
#endif /* SKYWALK */
296
// Upper bounds for copied buffers. The values are parenthesized so the
// macros remain correct inside larger arithmetic expressions
// (e.g. `x / NECP_MAX_AGENT_ACTION_SIZE`), per CERT PRE02-C.
#define NECP_MAX_CLIENT_LIST_SIZE               (1024 * 1024) // 1MB
#define NECP_MAX_AGENT_ACTION_SIZE              (10 * 1024) // 10K
299
// Prototypes provided elsewhere in the networking stack
extern int tvtohz(struct timeval *);
extern unsigned int get_maxmtu(struct rtentry *);

// Parsed parameters
// Bits recorded in necp_client_parsed_parameters.valid_fields to indicate
// which parameter TLVs were present for a client.
#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR 0x00001
#define NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR 0x00002
#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF 0x00004
#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF 0x00008
#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE 0x00010
#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE 0x00020
#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT 0x00040
#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT 0x00080
#define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT 0x00100
#define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT 0x00200
#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE 0x00400
#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE 0x00800
#define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE 0x01000
#define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE 0x02000
#define NECP_PARSED_PARAMETERS_FIELD_FLAGS 0x04000
#define NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL 0x08000
#define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID 0x10000
#define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID 0x20000
#define NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS 0x40000
#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT 0x80000
#define NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID 0x100000
#define NECP_PARSED_PARAMETERS_FIELD_ETHERTYPE 0x200000
#define NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL 0x400000
#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE 0x800000
#define NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER 0x1000000
#define NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID 0x2000000
#define NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN 0x4000000
#define NECP_PARSED_PARAMETERS_FIELD_UID 0x8000000
#define NECP_PARSED_PARAMETERS_FIELD_PERSONA_ID 0x10000000
#define NECP_PARSED_PARAMETERS_FIELD_EXTENDED_FLAGS 0x20000000
335
#define NECP_MAX_INTERFACE_PARAMETERS 16
#define NECP_MAX_AGENT_PARAMETERS 4
// Decoded form of a client's parameter TLV buffer. valid_fields is a bitmask
// of NECP_PARSED_PARAMETERS_FIELD_* values indicating which members below
// were actually supplied by the client.
struct necp_client_parsed_parameters {
	u_int32_t valid_fields; // NECP_PARSED_PARAMETERS_FIELD_* bitmask
	u_int32_t flags;
	u_int64_t delegated_upid;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	u_int32_t required_interface_index;
	char prohibited_interfaces[NECP_MAX_INTERFACE_PARAMETERS][IFXNAMSIZ];
	u_int8_t required_interface_type;
	u_int8_t local_address_preference;
	u_int8_t prohibited_interface_types[NECP_MAX_INTERFACE_PARAMETERS];
	// Fixed-capacity lists of required/prohibited/preferred/avoided agents,
	// both by agent type and by agent UUID
	struct necp_client_parameter_netagent_type required_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	struct necp_client_parameter_netagent_type prohibited_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	struct necp_client_parameter_netagent_type preferred_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	struct necp_client_parameter_netagent_type avoided_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	uuid_t required_netagents[NECP_MAX_AGENT_PARAMETERS];
	uuid_t prohibited_netagents[NECP_MAX_AGENT_PARAMETERS];
	uuid_t preferred_netagents[NECP_MAX_AGENT_PARAMETERS];
	uuid_t avoided_netagents[NECP_MAX_AGENT_PARAMETERS];
	u_int8_t ip_protocol;
	u_int8_t transport_protocol;
	u_int16_t ethertype;
	pid_t effective_pid;
	uuid_t effective_uuid;
	uuid_t parent_uuid;
	u_int32_t traffic_class;
	struct necp_demux_pattern demux_patterns[NECP_MAX_DEMUX_PATTERNS];
	u_int8_t demux_pattern_count; // number of valid entries in demux_patterns
	uid_t uid;
	uid_t persona_id;
	u_int64_t extended_flags;
};
370
// Interface-selection helpers (defined later in this file).
// necp_find_matching_interface_index: choose the interface index best matching
// the parsed parameters; *validate_agents signals whether agent checks remain
// to be performed — TODO confirm against the definition further down the file.
static bool
necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_parameters,
    u_int *return_ifindex, bool *validate_agents);

// True when ifp carries the given local address — presumably an ownership
// check; verify against the definition.
static bool
necp_ifnet_matches_local_address(struct ifnet *ifp, struct sockaddr *sa);

// Full parameter match for one interface; *preferred_count receives a count
// related to preferred-agent matches.
static bool
necp_ifnet_matches_parameters(struct ifnet *ifp,
    struct necp_client_parsed_parameters *parsed_parameters,
    u_int32_t override_flags,
    u_int32_t *preferred_count,
    bool secondary_interface,
    bool require_scoped_field);
385
// File-operations table for DTYPE_NETPOLICY descriptors created by necp_open().
// Read/write/ioctl/drain are explicitly unsupported; only select, close and
// kqfilter are wired to the necpop_* handlers above.
static const struct fileops necp_fd_ops = {
	.fo_type = DTYPE_NETPOLICY,
	.fo_read = fo_no_read,
	.fo_write = fo_no_write,
	.fo_ioctl = fo_no_ioctl,
	.fo_select = necpop_select,
	.fo_close = necpop_close,
	.fo_drain = fo_no_drain,
	.fo_kqfilter = necpop_kqfilter,
};
396
// One netagent assertion held by a client (see necp_client_add_assertion()).
struct necp_client_assertion {
	LIST_ENTRY(necp_client_assertion) assertion_chain;
	uuid_t asserted_netagent;
};

// The structs below are wire-format TLV sequences written into client result
// buffers; they are packed, so their layout must not change.

// Flow result: flow id + flags + interface, wrapped in an outer TLV.
struct necp_client_flow_header {
	struct necp_tlv_header outer_header;
	struct necp_tlv_header flow_id_tlv_header;
	uuid_t flow_id;
	struct necp_tlv_header flags_tlv_header;
	u_int32_t flags_value;
	struct necp_tlv_header interface_tlv_header;
	struct necp_client_result_interface interface_value;
} __attribute__((__packed__));

// Protocol-control event TLV appended to a flow result.
struct necp_client_flow_protoctl_event_header {
	struct necp_tlv_header protoctl_tlv_header;
	struct necp_client_flow_protoctl_event protoctl_event;
} __attribute__((__packed__));

// Stats-index TLV appended to a flow result.
struct necp_client_flow_stats_index_header {
	struct necp_tlv_header stats_index_tlv_header;
	uint32_t stats_index;
} __attribute__((__packed__));

// Nexus flow result: base flow header plus agent and TFO-cookie TLVs.
struct necp_client_nexus_flow_header {
	struct necp_client_flow_header flow_header;
	struct necp_tlv_header agent_tlv_header;
	struct necp_client_result_netagent agent_value;
	struct necp_tlv_header tfo_cookie_tlv_header;
	u_int8_t tfo_cookie_value[NECP_TFO_COOKIE_LEN_MAX];
} __attribute__((__packed__));
429
#if SKYWALK
struct necp_arena_info; // forward declaration; defined below
#endif

// A single flow belonging to a flow registration. Per the `nexus` bit, a flow
// is either a nexus (skywalk) flow identified by an agent UUID, or it is
// attached to a socket via a handle + callback.
struct necp_client_flow {
	LIST_ENTRY(necp_client_flow) flow_chain; // linkage in the registration's flow_list
	unsigned invalid : 1;
	unsigned nexus : 1; // If true, flow is a nexus; if false, flow is attached to socket
	unsigned socket : 1;
	unsigned viable : 1;
	unsigned assigned : 1;
	unsigned has_protoctl_event : 1;
	unsigned check_tcp_heuristics : 1;
	unsigned aop_offload : 1;
	unsigned aop_stat_index_valid : 1; // set when stats_index below is meaningful
	union {
		uuid_t nexus_agent; // valid when nexus == 1
		struct { // valid when socket == 1
			void *socket_handle;
			necp_client_flow_cb cb;
		};
	} u;
	uint32_t interface_index;
	u_short delegated_interface_index;
	uint32_t interface_flags;
	uint32_t necp_flow_flags;
	struct necp_client_flow_protoctl_event protoctl_event;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	uint32_t flow_tag;
	uint32_t stats_index; // Index associated with AOP flows

	// Serialized result TLVs assigned to this flow
	size_t assigned_results_length;
	u_int8_t *__counted_by(assigned_results_length) assigned_results;
};
465
// A flow registration: identified by registration_id and linked into three
// red-black trees (per-fd, global, and per-client) plus the global
// stats-collection list. Owns a list of necp_client_flow entries.
struct necp_client_flow_registration {
	RB_ENTRY(necp_client_flow_registration) fd_link; // in necp_fd_data.flows
	RB_ENTRY(necp_client_flow_registration) global_link; // in necp_client_flow_global_tree
	RB_ENTRY(necp_client_flow_registration) client_link; // in necp_client.flow_registrations
	LIST_ENTRY(necp_client_flow_registration) collect_stats_chain; // in necp_collect_stats_flow_list
	uuid_t registration_id;
	u_int32_t flags;
	unsigned flow_result_read : 1;
	unsigned defunct : 1;
	unsigned aop_offload : 1;
	void *interface_handle;
	necp_client_flow_cb interface_cb;
	struct necp_client *client; // back pointer to the owning client
	LIST_HEAD(_necp_registration_flow_list, necp_client_flow) flow_list;
#if SKYWALK
	struct necp_arena_info *stats_arena; /* arena where the stats objects came from */
	void * kstats_kaddr; /* kernel snapshot of untrusted userspace stats, for calculating delta */
	mach_vm_address_t ustats_uaddr; /* userspace stats (untrusted) */
	nstat_userland_context stats_handler_context;
	struct flow_stats *nexus_stats; /* shared stats objects between necp_client and skywalk */
#endif /* SKYWALK */
	u_int64_t last_interface_details __attribute__((aligned(sizeof(u_int64_t))));
};
489
// Comparator for flow-registration trees (keyed by registration_id)
static int necp_client_flow_id_cmp(struct necp_client_flow_registration *flow0, struct necp_client_flow_registration *flow1);

// Per-client tree of flow registrations, threaded through client_link
RB_HEAD(_necp_client_flow_tree, necp_client_flow_registration);
RB_PROTOTYPE_PREV(_necp_client_flow_tree, necp_client_flow_registration, client_link, necp_client_flow_id_cmp);
RB_GENERATE_PREV(_necp_client_flow_tree, necp_client_flow_registration, client_link, necp_client_flow_id_cmp);

// Interface options: a small static array embedded in struct necp_client,
// with an overflow allocation for up to the maximum count
#define NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT 4
#define NECP_CLIENT_MAX_INTERFACE_OPTIONS 32

#define NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT (NECP_CLIENT_MAX_INTERFACE_OPTIONS - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT)
500
// One NECP client: the parameters supplied at add time, the current policy
// result, and the flow registrations derived from it. Reference-counted;
// linked into a per-fd tree (link) and the global tree (global_link).
struct necp_client {
	RB_ENTRY(necp_client) link; // in necp_fd_data.clients
	RB_ENTRY(necp_client) global_link; // in necp_client_global_tree

	decl_lck_mtx_data(, lock); // protects client state (NECP_CLIENT_LOCK)
	decl_lck_mtx_data(, route_lock); // protects current_route (NECP_CLIENT_ROUTE_LOCK)
	os_refcnt_t reference_count;

	uuid_t client_id;
	unsigned result_read : 1; // result already consumed by userspace
	unsigned group_members_read : 1;
	unsigned allow_multiple_flows : 1;
	unsigned legacy_client_is_flow : 1;

	unsigned platform_binary : 1;
	unsigned validated_parent : 1;

	// Current policy-result TLVs for this client
	size_t result_length;
	u_int8_t result[NECP_BASE_CLIENT_RESULT_SIZE];

	necp_policy_id policy_id;
	necp_policy_id skip_policy_id;

	necp_kernel_policy_result policy_result;
	necp_kernel_policy_routing_result_parameter policy_result_parameter;
	u_int32_t flow_divert_control_unit;
	u_int32_t filter_control_unit;

	u_int8_t ip_protocol;
	int proc_pid; // owning process

	u_int64_t delegated_upid;

	struct _necp_client_flow_tree flow_registrations;
	LIST_HEAD(_necp_client_assertion_list, necp_client_assertion) assertion_list;

	size_t assigned_group_members_length;
	u_int8_t *__counted_by(assigned_group_members_length) assigned_group_members;

	struct rtentry *current_route; // guarded by route_lock

	// Static slots plus overflow allocation; see
	// NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT above
	struct necp_client_interface_option interface_options[NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
	struct necp_client_interface_option * __indexable extra_interface_options;
	u_int8_t interface_option_count; // Number in interface_options + extra_interface_options

	struct necp_client_result_netagent failed_trigger_agent;

	void *agent_handle;

	uuid_t override_euuid;

#if SKYWALK
	netns_token port_reservation;
	nstat_context nstat_context;
	uuid_t latest_flow_registration_id;
	uuid_t parent_client_id;
	struct necp_client *original_parameters_source;
#endif /* SKYWALK */

	// Raw parameter TLVs supplied at client add time
	size_t parameters_length;
	u_int8_t * __sized_by(parameters_length) parameters;
};
563
// Per-client mutex helpers. The argument is parenthesized so the macros
// expand correctly for any expression argument (CERT PRE01-C).
#define NECP_CLIENT_LOCK(_c) lck_mtx_lock(&(_c)->lock)
#define NECP_CLIENT_UNLOCK(_c) lck_mtx_unlock(&(_c)->lock)
#define NECP_CLIENT_ASSERT_LOCKED(_c) LCK_MTX_ASSERT(&(_c)->lock, LCK_MTX_ASSERT_OWNED)
#define NECP_CLIENT_ASSERT_UNLOCKED(_c) LCK_MTX_ASSERT(&(_c)->lock, LCK_MTX_ASSERT_NOTOWNED)

// Per-client route mutex helpers (protects necp_client.current_route)
#define NECP_CLIENT_ROUTE_LOCK(_c) lck_mtx_lock(&(_c)->route_lock)
#define NECP_CLIENT_ROUTE_UNLOCK(_c) lck_mtx_unlock(&(_c)->route_lock)
571
// Client reference counting (definitions later in this file)
static void necp_client_retain_locked(struct necp_client *client);
static void necp_client_retain(struct necp_client *client);

// Release helpers — presumably return true when the final reference was
// dropped; confirm against the definitions.
static bool necp_client_release_locked(struct necp_client *client);
static bool necp_client_release(struct necp_client *client);

// Agent assertion management for a client
static void
necp_client_add_assertion(struct necp_client *client, uuid_t netagent_uuid);

static bool
necp_client_remove_assertion(struct necp_client *client, uuid_t netagent_uuid);

static int
necp_client_copy_parameters_locked(struct necp_client *client,
    struct necp_client_nexus_parameters *parameters);

// Global list of flow registrations with stats to collect; guarded by
// NECP_STATS_LIST_LOCK (see "Locking Notes" below)
LIST_HEAD(_necp_flow_registration_list, necp_client_flow_registration);
static struct _necp_flow_registration_list necp_collect_stats_flow_list;

// Snapshot describing a flow being defuncted
struct necp_flow_defunct {
	LIST_ENTRY(necp_flow_defunct) chain;

	uuid_t flow_id;
	uuid_t nexus_agent;
	void *agent_handle;
	int proc_pid;
	u_int32_t flags;
	struct necp_client_agent_parameters close_parameters;
	bool has_close_parameters; // set when close_parameters is populated
};

LIST_HEAD(_necp_flow_defunct_list, necp_flow_defunct);
604
// Comparator for client trees (keyed by client_id)
static int necp_client_id_cmp(struct necp_client *client0, struct necp_client *client1);

// Per-fd client tree, threaded through necp_client.link
RB_HEAD(_necp_client_tree, necp_client);
RB_PROTOTYPE_PREV(_necp_client_tree, necp_client, link, necp_client_id_cmp);
RB_GENERATE_PREV(_necp_client_tree, necp_client, link, necp_client_id_cmp);

// System-wide client tree, threaded through necp_client.global_link
RB_HEAD(_necp_client_global_tree, necp_client);
RB_PROTOTYPE_PREV(_necp_client_global_tree, necp_client, global_link, necp_client_id_cmp);
RB_GENERATE_PREV(_necp_client_global_tree, necp_client, global_link, necp_client_id_cmp);

// Per-fd flow-registration tree, threaded through fd_link
RB_HEAD(_necp_fd_flow_tree, necp_client_flow_registration);
RB_PROTOTYPE_PREV(_necp_fd_flow_tree, necp_client_flow_registration, fd_link, necp_client_flow_id_cmp);
RB_GENERATE_PREV(_necp_fd_flow_tree, necp_client_flow_registration, fd_link, necp_client_flow_id_cmp);

// System-wide flow-registration tree, threaded through global_link
RB_HEAD(_necp_client_flow_global_tree, necp_client_flow_registration);
RB_PROTOTYPE_PREV(_necp_client_flow_global_tree, necp_client_flow_registration, global_link, necp_client_flow_id_cmp);
RB_GENERATE_PREV(_necp_client_flow_global_tree, necp_client_flow_registration, global_link, necp_client_flow_id_cmp);

static struct _necp_client_global_tree necp_client_global_tree;
static struct _necp_client_flow_global_tree necp_client_flow_global_tree;

// A queued update message destined for an observer fd's update_list
struct necp_client_update {
	TAILQ_ENTRY(necp_client_update) chain;

	uuid_t client_id;

	size_t update_length;
	struct necp_client_observer_update *__sized_by(update_length) update;
};
634
#if SKYWALK
// One stats arena and its mmap state for a process; see the discussion on
// stats arenas in struct necp_fd_data below.
struct necp_arena_info {
	LIST_ENTRY(necp_arena_info) nai_chain; // in necp_fd_data.stats_arena_list
	u_int32_t nai_flags; // NAIF_* flags below
	pid_t nai_proc_pid;
	struct skmem_arena *nai_arena;
	struct skmem_arena_mmap_info nai_mmap;
	mach_vm_offset_t nai_roff;
	u_int32_t nai_use_count;
};
#endif /* SKYWALK */

// Values for necp_arena_info.nai_flags
#define NAIF_ATTACHED   0x1     // arena is attached to list
#define NAIF_REDIRECT   0x2     // arena mmap has been redirected
#define NAIF_DEFUNCT    0x4     // arena is now defunct

#define NECP_FD_REPORTED_AGENT_COUNT 2

// Fixed-size record of agent UUIDs already reported on an fd
struct necp_fd_reported_agents {
	uuid_t agent_uuid[NECP_FD_REPORTED_AGENT_COUNT];
};
656
// Per-descriptor state created by necp_open(): the trees of clients and flow
// registrations owned by this fd, queued observer updates, and (with skywalk)
// the per-process stats arenas.
struct necp_fd_data {
	u_int8_t necp_fd_type;
	LIST_ENTRY(necp_fd_data) chain; // in necp_fd_list or necp_fd_observer_list
	struct _necp_client_tree clients;
	struct _necp_fd_flow_tree flows;
	TAILQ_HEAD(_necp_client_update_list, necp_client_update) update_list;
	int update_count; // entries queued in update_list (bounded by necp_observer_message_limit — TODO confirm)
	int flags; // NECP_OPEN_FLAG_* values — confirm against necp_open()

	unsigned background : 1;
	unsigned request_in_process_flow_divert : 1;

	int proc_pid; // owning process
	decl_lck_mtx_data(, fd_lock); // protects this structure (NECP_FD_LOCK)
	struct selinfo si; // select/kqueue wait info

	struct necp_fd_reported_agents reported_agents;
#if SKYWALK
	// Arenas and their mmap info for per-process stats. Stats objects are allocated from an active arena
	// that is not redirected/defunct. The stats_arena_active keeps track of such an arena, and it also
	// holds a reference count on the object. Each flow allocating a stats object also holds a reference
	// the necp_arena_info (where the object got allocated from). During defunct, we redirect the mapping
	// of the arena such that any attempt to access (read/write) will result in getting zero-filled pages.
	// We then go thru all of the flows for the process and free the stats objects associated with them,
	// followed by destroying the skmem region(s) associated with the arena. The stats_arena_list keeps
	// track of all current and defunct stats arenas; there could be more than one arena created for the
	// process as the arena destruction happens when its reference count drops to 0.
	struct necp_arena_info *stats_arena_active;
	LIST_HEAD(_necp_arena_info_list, necp_arena_info) stats_arena_list;
	u_int32_t stats_arena_gencnt;

	struct skmem_arena *sysctl_arena;
	struct skmem_arena_mmap_info sysctl_mmap;
	mach_vm_offset_t system_sysctls_roff;
#endif /* SKYWALK */
};
693
// Per-fd mutex helpers. The argument is parenthesized so the macros expand
// correctly for any expression argument (CERT PRE01-C).
#define NECP_FD_LOCK(_f) lck_mtx_lock(&(_f)->fd_lock)
#define NECP_FD_UNLOCK(_f) lck_mtx_unlock(&(_f)->fd_lock)
#define NECP_FD_ASSERT_LOCKED(_f) LCK_MTX_ASSERT(&(_f)->fd_lock, LCK_MTX_ASSERT_OWNED)
#define NECP_FD_ASSERT_UNLOCKED(_f) LCK_MTX_ASSERT(&(_f)->fd_lock, LCK_MTX_ASSERT_NOTOWNED)
698
// Global lists of open NECP fds (regular and observer)
static LIST_HEAD(_necp_fd_list, necp_fd_data) necp_fd_list;
static LIST_HEAD(_necp_fd_observer_list, necp_fd_data) necp_fd_observer_list;

#if SKYWALK
static KALLOC_TYPE_DEFINE(necp_arena_info_zone, struct necp_arena_info, NET_KT_DEFAULT);
#endif /* SKYWALK */

static LCK_ATTR_DECLARE(necp_fd_mtx_attr, 0, 0);
static LCK_GRP_DECLARE(necp_fd_mtx_grp, "necp_fd");

// Global reader/writer locks; see "Locking Notes" below for ordering rules
static LCK_RW_DECLARE_ATTR(necp_fd_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
static LCK_RW_DECLARE_ATTR(necp_observer_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
static LCK_RW_DECLARE_ATTR(necp_client_tree_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
static LCK_RW_DECLARE_ATTR(necp_flow_tree_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
static LCK_RW_DECLARE_ATTR(necp_collect_stats_list_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);


#define NECP_STATS_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_collect_stats_list_lock)
#define NECP_STATS_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_collect_stats_list_lock)
#define NECP_STATS_LIST_UNLOCK() lck_rw_done(&necp_collect_stats_list_lock)

#define NECP_CLIENT_TREE_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_client_tree_lock)
#define NECP_CLIENT_TREE_LOCK_SHARED() lck_rw_lock_shared(&necp_client_tree_lock)
#define NECP_CLIENT_TREE_UNLOCK() lck_rw_done(&necp_client_tree_lock)
#define NECP_CLIENT_TREE_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_client_tree_lock, LCK_RW_ASSERT_HELD)

#define NECP_FLOW_TREE_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_flow_tree_lock)
#define NECP_FLOW_TREE_LOCK_SHARED() lck_rw_lock_shared(&necp_flow_tree_lock)
#define NECP_FLOW_TREE_UNLOCK() lck_rw_done(&necp_flow_tree_lock)
#define NECP_FLOW_TREE_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_flow_tree_lock, LCK_RW_ASSERT_HELD)

#define NECP_FD_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_fd_lock)
#define NECP_FD_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_fd_lock)
#define NECP_FD_LIST_UNLOCK() lck_rw_done(&necp_fd_lock)
#define NECP_FD_LIST_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_fd_lock, LCK_RW_ASSERT_HELD)

#define NECP_OBSERVER_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_observer_lock)
#define NECP_OBSERVER_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_observer_lock)
#define NECP_OBSERVER_LIST_UNLOCK() lck_rw_done(&necp_observer_lock)
738
739 // Locking Notes
740
741 // Take NECP_FD_LIST_LOCK when accessing or modifying the necp_fd_list
742 // Take NECP_CLIENT_TREE_LOCK when accessing or modifying the necp_client_global_tree
743 // Take NECP_FLOW_TREE_LOCK when accessing or modifying the necp_client_flow_global_tree
744 // Take NECP_STATS_LIST_LOCK when accessing or modifying the necp_collect_stats_flow_list
745 // Take NECP_FD_LOCK when accessing or modifying an necp_fd_data entry
746 // Take NECP_CLIENT_LOCK when accessing or modifying a single necp_client
747 // Take NECP_CLIENT_ROUTE_LOCK when accessing or modifying a client's route
748
749 // Precedence, where 1 is the first lock that must be taken
750 // 1. NECP_FD_LIST_LOCK
751 // 2. NECP_FD_LOCK (any)
752 // 3. NECP_CLIENT_TREE_LOCK
753 // 4. NECP_CLIENT_LOCK (any)
754 // 5. NECP_FLOW_TREE_LOCK
755 // 6. NECP_STATS_LIST_LOCK
756 // 7. NECP_CLIENT_ROUTE_LOCK (any)
757
// Deferred thread call used to batch "update all clients" work
static thread_call_t necp_client_update_tcall;
// Pending-update count and the absolute deadline of the scheduled call.
// NOTE(review): presumably serialized by necp_update_all_clients_lock — confirm
static uint32_t necp_update_all_clients_sched_cnt = 0;
static uint64_t necp_update_all_clients_sched_abstime = 0;
static LCK_RW_DECLARE_ATTR(necp_update_all_clients_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
#define NECP_UPDATE_ALL_CLIENTS_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_update_all_clients_lock)
#define NECP_UPDATE_ALL_CLIENTS_SHARED_TO_EXCLUSIVE() lck_rw_lock_shared_to_exclusive(&necp_update_all_clients_lock)
#define NECP_UPDATE_ALL_CLIENTS_SHARED() lck_rw_lock_shared(&necp_update_all_clients_lock)
#define NECP_UPDATE_ALL_CLIENTS_UNLOCK() lck_rw_done(&necp_update_all_clients_lock)

// Array of PIDs that will trigger in-process flow divert, protected by NECP_FD_LIST_LOCK
#define NECP_MAX_FLOW_DIVERT_NEEDED_PIDS 4
static pid_t necp_flow_divert_needed_pids[NECP_MAX_FLOW_DIVERT_NEEDED_PIDS];
770
#if SKYWALK
// Deferred work: periodic stats collection and reaping of empty stats arenas
static thread_call_t necp_client_collect_stats_tcall;
static thread_call_t necp_close_empty_arenas_tcall;

// Per-fd stats arena list management
static void necp_fd_insert_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai);
static void necp_fd_remove_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai);
static struct necp_arena_info *necp_fd_mredirect_stats_arena(struct necp_fd_data *fd_data, struct proc *proc);

// Reference counting and allocation for arena bookkeeping structures
static void necp_arena_info_retain(struct necp_arena_info *nai);
static void necp_arena_info_release(struct necp_arena_info *nai);
static struct necp_arena_info *necp_arena_info_alloc(void);
static void necp_arena_info_free(struct necp_arena_info *nai);

// Arena setup/teardown for per-flow stats shared with userspace
static int necp_arena_initialize(struct necp_fd_data *fd_data, bool locked);
static int necp_stats_initialize(struct necp_fd_data *fd_data, struct necp_client *client,
    struct necp_client_flow_registration *flow_registration, struct necp_stats_bufreq *bufreq);
static int necp_arena_create(struct necp_fd_data *fd_data, size_t obj_size, size_t obj_cnt, struct proc *p);
static int necp_arena_stats_obj_alloc(struct necp_fd_data *fd_data, mach_vm_offset_t *off, struct necp_arena_info **stats_arena, void **kstats_kaddr, boolean_t cansleep);
static void necp_arena_stats_obj_free(struct necp_fd_data *fd_data, struct necp_arena_info *stats_arena, void **kstats_kaddr, mach_vm_address_t *ustats_uaddr);
static void necp_stats_arenas_destroy(struct necp_fd_data *fd_data, boolean_t closing);

// Sysctl arena setup/teardown (see necp_arena_sysctls_obj for the object layout)
static int necp_sysctl_arena_initialize(struct necp_fd_data *fd_data, bool locked);
static void necp_sysctl_arena_destroy(struct necp_fd_data *fd_data);
static void *necp_arena_sysctls_obj(struct necp_fd_data *fd_data, mach_vm_offset_t *off, size_t *size);
#endif /* !SKYWALK */

// Stats bookkeeping for flows offloaded to the AOP
static int necp_aop_offload_stats_initialize(struct necp_client_flow_registration *flow_registration, uuid_t netagent_uuid);
static void necp_aop_offload_stats_destroy(struct necp_client_flow *flow);

// Exposed to ntstat for reading domain information from an inpcb/socket
void necp_copy_inp_domain_info(struct inpcb *, struct socket *, nstat_domain_info *);
void necp_with_inp_domain_name(struct socket *so, void *ctx, void (*with_func)(char *domain_name __null_terminated, void *ctx));
802
#if __has_ptrcheck
/*
 * Bounds-safety helper: expose the request's stats_requests pointer as an
 * indexable pointer bounded by stats_request_count entries.
 * Returns NULL when req is NULL.
 * NOTE(review): trusts req->stats_request_count — caller must have validated it.
 */
static inline
__attribute__((always_inline)) __pure
struct necp_client_flow_stats * __indexable
necp_client_get_flow_stats(const struct necp_client_add_flow *req)
{
	if (req == NULL) {
		return NULL;
	}

	return __unsafe_forge_bidi_indexable(struct necp_client_flow_stats *, req->stats_requests, sizeof(struct necp_client_flow_stats) * req->stats_request_count);
}
#else
// Without pointer bounds checking, just point at the trailing array
#define necp_client_get_flow_stats(req) ((struct necp_client_flow_stats *)&(req)->stats_requests[0])
#endif
818
#if __has_ptrcheck
/*
 * Bounds-safety helper: expose signable->signable_data as an indexable
 * pointer of data_length bytes. Returns NULL when signable is NULL.
 * NOTE(review): data_length is caller-supplied — caller must have validated it.
 */
static inline
__attribute__((always_inline)) __pure
uint8_t * __bidi_indexable
signable_get_data(const struct necp_client_signable *signable, size_t data_length)
{
	if (signable == NULL) {
		return NULL;
	}

	return __unsafe_forge_bidi_indexable(uint8_t *, signable->signable_data, data_length);
}
#else
#define signable_get_data(signable, data_length) ((signable)->signable_data)
#endif
834
#if __has_ptrcheck
/*
 * Bounds-safety helper: return a pointer to a sockaddr embedded at
 * offset_of_address within the add-flow request. Returns NULL when req is NULL.
 * NOTE(review): the offset is trusted here — callers must ensure it stays
 * within the request buffer before dereferencing the result.
 */
static inline
__attribute__((always_inline)) __pure
struct sockaddr * __single
flow_req_get_address(const struct necp_client_add_flow *req, size_t offset_of_address)
{
	if (req == NULL) {
		return NULL;
	}

	uint8_t * __indexable req_ptr = __unsafe_forge_bidi_indexable(uint8_t *, req, sizeof(struct necp_client_add_flow));
	return __unsafe_forge_single(struct sockaddr *, req_ptr + offset_of_address);
}
#else
#define flow_req_get_address(req, offset_of_address) ((struct sockaddr *)(((uint8_t *)req) + offset_of_address))
#endif
851
#if __has_ptrcheck
/*
 * Bounds-safety helper: return a pointer to the protocol byte embedded at
 * offset_of_proto within the add-flow request. Returns NULL when req is NULL.
 * NOTE(review): offset is trusted — callers must keep it within the request.
 */
static inline
__attribute__((always_inline)) __pure
uint8_t * __single
flow_req_get_proto(const struct necp_client_add_flow *req, size_t offset_of_proto)
{
	if (req == NULL) {
		return NULL;
	}

	uint8_t * __indexable req_ptr = __unsafe_forge_bidi_indexable(uint8_t *, req, sizeof(struct necp_client_add_flow));
	return __unsafe_forge_single(uint8_t *, req_ptr + offset_of_proto);
}
#else
#define flow_req_get_proto(req, offset_of_proto) ((uint8_t *)(((uint8_t *)req) + offset_of_proto))
#endif
868
#if __has_ptrcheck
/*
 * Bounds-safety helper: expose an observer update's TLV buffer as an
 * indexable pointer of buffer_size bytes. Returns NULL when update is NULL.
 */
static inline
__attribute__((always_inline)) __pure
uint8_t * __bidi_indexable
necp_update_get_tlv_buffer(const struct necp_client_observer_update *update, size_t buffer_size)
{
	if (update == NULL) {
		return NULL;
	}

	return __unsafe_forge_bidi_indexable(uint8_t *, update->tlv_buffer, buffer_size);
}
#else
#define necp_update_get_tlv_buffer(update, buffer_size) ((update)->tlv_buffer)
#endif
884
#if __has_ptrcheck
/*
 * Bounds-safety helper: expose a resolver answer's hostname as an indexable
 * pointer of hostname_length bytes. Returns NULL when answer is NULL.
 * NOTE(review): no NUL termination is implied by this accessor.
 */
static inline
__attribute__((always_inline)) __pure
char * __bidi_indexable
necp_answer_get_hostname(const struct necp_client_host_resolver_answer *answer, size_t hostname_length)
{
	if (answer == NULL) {
		return NULL;
	}

	return __unsafe_forge_bidi_indexable(char *, answer->hostname, hostname_length);
}
#else
#define necp_answer_get_hostname(answer, hostname_length) ((answer)->hostname)
#endif
900
// Acquire the global mutex guarding NECP socket attribute state.
static void
necp_lock_socket_attributes(void)
{
	lck_mtx_lock(&necp_socket_attr_lock);
}
906
// Release the global mutex guarding NECP socket attribute state.
static void
necp_unlock_socket_attributes(void)
{
	lck_mtx_unlock(&necp_socket_attr_lock);
}
912
913 /// NECP file descriptor functions
914
/*
 * Wake anyone waiting on this NECP fd: select()/poll() sleepers via
 * selwakeup() and kqueue knotes via KNOTE(). If the caller already holds
 * the fd lock it passes locked == true; otherwise the lock is taken here.
 */
static void
necp_fd_notify(struct necp_fd_data *fd_data, bool locked)
{
	struct selinfo *si = &fd_data->si;

	if (!locked) {
		NECP_FD_LOCK(fd_data);
	}

	selwakeup(si);

	// use a non-zero hint to tell the notification from the
	// call done in kqueue_scan() which uses 0
	KNOTE(&si->si_note, 1); // notification

	if (!locked) {
		NECP_FD_UNLOCK(fd_data);
	}
}
934
935 static inline bool
necp_client_has_unread_flows(struct necp_client * client)936 necp_client_has_unread_flows(struct necp_client *client)
937 {
938 NECP_CLIENT_ASSERT_LOCKED(client);
939 struct necp_client_flow_registration *flow_registration = NULL;
940 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
941 if (!flow_registration->flow_result_read) {
942 return true;
943 }
944 }
945 return false;
946 }
947
948 static int
necp_fd_poll(struct necp_fd_data * fd_data,int events,void * wql,struct proc * p,int is_kevent)949 necp_fd_poll(struct necp_fd_data *fd_data, int events, void *wql, struct proc *p, int is_kevent)
950 {
951 #pragma unused(wql, p, is_kevent)
952 u_int revents = 0;
953
954 u_int want_rx = events & (POLLIN | POLLRDNORM);
955 if (want_rx) {
956 if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
957 // Push-mode observers are readable when they have a new update
958 if (!TAILQ_EMPTY(&fd_data->update_list)) {
959 revents |= want_rx;
960 }
961 } else {
962 // Standard fds are readable when some client is unread
963 struct necp_client *client = NULL;
964 bool has_unread_clients = FALSE;
965 RB_FOREACH(client, _necp_client_tree, &fd_data->clients) {
966 NECP_CLIENT_LOCK(client);
967 if (!client->result_read || !client->group_members_read || necp_client_has_unread_flows(client)) {
968 has_unread_clients = TRUE;
969 }
970 NECP_CLIENT_UNLOCK(client);
971 if (has_unread_clients) {
972 break;
973 }
974 }
975
976 if (has_unread_clients || fd_data->request_in_process_flow_divert) {
977 revents |= want_rx;
978 }
979 }
980 }
981
982 return revents;
983 }
984
985 static inline void
necp_generate_client_id(uuid_t client_id,bool is_flow)986 necp_generate_client_id(uuid_t client_id, bool is_flow)
987 {
988 uuid_generate_random(client_id);
989
990 if (is_flow) {
991 client_id[9] |= 0x01;
992 } else {
993 client_id[9] &= ~0x01;
994 }
995 }
996
997 static inline bool
necp_client_id_is_flow(uuid_t client_id)998 necp_client_id_is_flow(uuid_t client_id)
999 {
1000 return client_id[9] & 0x01;
1001 }
1002
/*
 * Look up a client by ID in the global trees and return it with its lock
 * held, or NULL if not found. Caller must hold the client tree lock
 * (shared or exclusive). IDs tagged as flow registration IDs are resolved
 * through the global flow tree (taken shared here) back to their owning
 * client; plain IDs are looked up directly in the global client tree.
 */
static struct necp_client *
necp_find_client_and_lock(uuid_t client_id)
{
	NECP_CLIENT_TREE_ASSERT_LOCKED();

	struct necp_client *client = NULL;

	if (necp_client_id_is_flow(client_id)) {
		NECP_FLOW_TREE_LOCK_SHARED();
		struct necp_client_flow_registration find;
		uuid_copy(find.registration_id, client_id);
		struct necp_client_flow_registration *flow = RB_FIND(_necp_client_flow_global_tree, &necp_client_flow_global_tree, &find);
		if (flow != NULL) {
			client = flow->client;
		}
		NECP_FLOW_TREE_UNLOCK();
	} else {
		struct necp_client find;
		uuid_copy(find.client_id, client_id);
		client = RB_FIND(_necp_client_global_tree, &necp_client_global_tree, &find);
	}

	if (client != NULL) {
		NECP_CLIENT_LOCK(client);
	}

	return client;
}
1031
/*
 * Find a flow registration on a client. If flow_id is tagged as a flow
 * registration ID, look up that specific registration; otherwise return
 * the root of the client's registration tree (the implicit registration
 * for single-flow clients). Caller must hold the client lock.
 * Returns NULL if no matching registration exists.
 */
static struct necp_client_flow_registration *
necp_client_find_flow(struct necp_client *client, uuid_t flow_id)
{
	NECP_CLIENT_ASSERT_LOCKED(client);
	struct necp_client_flow_registration *flow = NULL;

	if (necp_client_id_is_flow(flow_id)) {
		struct necp_client_flow_registration find;
		uuid_copy(find.registration_id, flow_id);
		flow = RB_FIND(_necp_client_flow_tree, &client->flow_registrations, &find);
	} else {
		flow = RB_ROOT(&client->flow_registrations);
	}

	return flow;
}
1048
/*
 * Look up a client owned by this fd, by client ID or flow registration ID,
 * without taking the client lock. Caller must hold the fd lock.
 * Returns NULL if the ID does not belong to this fd.
 */
static struct necp_client *
necp_client_fd_find_client_unlocked(struct necp_fd_data *client_fd, uuid_t client_id)
{
	NECP_FD_ASSERT_LOCKED(client_fd);
	struct necp_client *client = NULL;

	if (necp_client_id_is_flow(client_id)) {
		struct necp_client_flow_registration find;
		uuid_copy(find.registration_id, client_id);
		struct necp_client_flow_registration *flow = RB_FIND(_necp_fd_flow_tree, &client_fd->flows, &find);
		if (flow != NULL) {
			client = flow->client;
		}
	} else {
		struct necp_client find;
		uuid_copy(find.client_id, client_id);
		client = RB_FIND(_necp_client_tree, &client_fd->clients, &find);
	}

	return client;
}
1070
1071 static struct necp_client *
necp_client_fd_find_client_and_lock(struct necp_fd_data * client_fd,uuid_t client_id)1072 necp_client_fd_find_client_and_lock(struct necp_fd_data *client_fd, uuid_t client_id)
1073 {
1074 struct necp_client *client = necp_client_fd_find_client_unlocked(client_fd, client_id);
1075 if (client != NULL) {
1076 NECP_CLIENT_LOCK(client);
1077 }
1078
1079 return client;
1080 }
1081
// RB-tree comparator for clients, ordered by client ID (uuid_compare semantics).
static inline int
necp_client_id_cmp(struct necp_client *client0, struct necp_client *client1)
{
	return uuid_compare(client0->client_id, client1->client_id);
}
1087
// RB-tree comparator for flow registrations, ordered by registration ID.
static inline int
necp_client_flow_id_cmp(struct necp_client_flow_registration *flow0, struct necp_client_flow_registration *flow1)
{
	return uuid_compare(flow0->registration_id, flow1->registration_id);
}
1093
1094 static int
necpop_select(struct fileproc * fp,int which,void * wql,vfs_context_t ctx)1095 necpop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
1096 {
1097 #pragma unused(fp, which, wql, ctx)
1098 return 0;
1099 struct necp_fd_data *fd_data = NULL;
1100 int revents = 0;
1101 int events = 0;
1102 proc_t procp;
1103
1104 fd_data = (struct necp_fd_data *)fp_get_data(fp);
1105 if (fd_data == NULL) {
1106 return 0;
1107 }
1108
1109 procp = vfs_context_proc(ctx);
1110
1111 switch (which) {
1112 case FREAD: {
1113 events = POLLIN;
1114 break;
1115 }
1116
1117 default: {
1118 return 1;
1119 }
1120 }
1121
1122 NECP_FD_LOCK(fd_data);
1123 revents = necp_fd_poll(fd_data, events, wql, procp, 0);
1124 NECP_FD_UNLOCK(fd_data);
1125
1126 return (events & revents) ? 1 : 0;
1127 }
1128
/*
 * kqueue filter detach: remove the knote from the fd's selinfo note list.
 * The fd pointer was stashed in the knote hook by necpop_kqfilter().
 */
static void
necp_fd_knrdetach(struct knote *kn)
{
	struct necp_fd_data *fd_data = (struct necp_fd_data *)knote_kn_hook_get_raw(kn);
	struct selinfo *si = &fd_data->si;

	NECP_FD_LOCK(fd_data);
	KNOTE_DETACH(&si->si_note, kn);
	NECP_FD_UNLOCK(fd_data);
}
1139
/*
 * kqueue filter event callback: always claim readiness; the real readiness
 * check happens in necp_fd_knrprocess()/necp_fd_knrtouch() via necp_fd_poll().
 */
static int
necp_fd_knread(struct knote *kn, long hint)
{
#pragma unused(kn, hint)
	return 1; /* assume we are ready */
}
1146
1147 static int
necp_fd_knrprocess(struct knote * kn,struct kevent_qos_s * kev)1148 necp_fd_knrprocess(struct knote *kn, struct kevent_qos_s *kev)
1149 {
1150 struct necp_fd_data *fd_data;
1151 int revents;
1152 int res;
1153
1154 fd_data = (struct necp_fd_data *)knote_kn_hook_get_raw(kn);
1155
1156 NECP_FD_LOCK(fd_data);
1157 revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
1158 res = ((revents & POLLIN) != 0);
1159 if (res) {
1160 knote_fill_kevent(kn, kev, 0);
1161 }
1162 NECP_FD_UNLOCK(fd_data);
1163 return res;
1164 }
1165
1166 static int
necp_fd_knrtouch(struct knote * kn,struct kevent_qos_s * kev)1167 necp_fd_knrtouch(struct knote *kn, struct kevent_qos_s *kev)
1168 {
1169 #pragma unused(kev)
1170 struct necp_fd_data *fd_data;
1171 int revents;
1172
1173 fd_data = (struct necp_fd_data *)knote_kn_hook_get_raw(kn);
1174
1175 NECP_FD_LOCK(fd_data);
1176 revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
1177 NECP_FD_UNLOCK(fd_data);
1178
1179 return (revents & POLLIN) != 0;
1180 }
1181
// Read-filter operations vector for NECP fds (EVFILT_READ via EVFILTID_NECP_FD)
SECURITY_READ_ONLY_EARLY(struct filterops) necp_fd_rfiltops = {
	.f_isfd = 1,
	.f_detach = necp_fd_knrdetach,
	.f_event = necp_fd_knread,
	.f_touch = necp_fd_knrtouch,
	.f_process = necp_fd_knrprocess,
};
1189
/*
 * kqueue attach entry point for NECP fds. Only EVFILT_READ is supported.
 * Stashes the fd in the knote hook, attaches the knote to the fd's selinfo,
 * and returns whether the fd is already readable (so an initial event can
 * fire immediately).
 */
static int
necpop_kqfilter(struct fileproc *fp, struct knote *kn,
    __unused struct kevent_qos_s *kev)
{
	struct necp_fd_data *fd_data = NULL;
	int revents;

	if (kn->kn_filter != EVFILT_READ) {
		NECPLOG(LOG_ERR, "bad filter request %d", kn->kn_filter);
		knote_set_error(kn, EINVAL);
		return 0;
	}

	fd_data = (struct necp_fd_data *)fp_get_data(fp);
	if (fd_data == NULL) {
		NECPLOG0(LOG_ERR, "No channel for kqfilter");
		knote_set_error(kn, ENOENT);
		return 0;
	}

	NECP_FD_LOCK(fd_data);
	kn->kn_filtid = EVFILTID_NECP_FD;
	knote_kn_hook_set_raw(kn, fd_data);
	KNOTE_ATTACH(&fd_data->si.si_note, kn);

	revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);

	NECP_FD_UNLOCK(fd_data);

	return (revents & POLLIN) != 0;
}
1221
// Layout of the packed 64-bit interface details value:
// high 32 bits = interface flags, low 32 bits = interface index.
#define INTERFACE_FLAGS_SHIFT 32
#define INTERFACE_FLAGS_MASK 0xffffffff
#define INTERFACE_INDEX_SHIFT 0
#define INTERFACE_INDEX_MASK 0xffffffff

/*
 * Pack an interface index and its flags into one 64-bit value so both can
 * be stored/loaded atomically (see necp_flow_save_current_interface_details).
 */
static uint64_t
combine_interface_details(uint32_t interface_index, uint32_t interface_flags)
{
	const uint64_t flags_bits = ((uint64_t)interface_flags & INTERFACE_FLAGS_MASK) << INTERFACE_FLAGS_SHIFT;
	const uint64_t index_bits = ((uint64_t)interface_index & INTERFACE_INDEX_MASK) << INTERFACE_INDEX_SHIFT;
	return flags_bits | index_bits;
}
1233
1234 #if SKYWALK
1235
1236 static void
split_interface_details(uint64_t combined_details,uint32_t * interface_index,uint32_t * interface_flags)1237 split_interface_details(uint64_t combined_details, uint32_t *interface_index, uint32_t *interface_flags)
1238 {
1239 *interface_index = (combined_details >> INTERFACE_INDEX_SHIFT) & INTERFACE_INDEX_MASK;
1240 *interface_flags = (combined_details >> INTERFACE_FLAGS_SHIFT) & INTERFACE_FLAGS_MASK;
1241 }
1242
/*
 * Cache the interface index + flags of the registration's first nexus flow
 * in last_interface_details as a single packed 64-bit atomic store
 * (release ordering), so readers can fetch both fields consistently
 * without taking the client lock.
 */
static void
necp_flow_save_current_interface_details(struct necp_client_flow_registration *flow_registration)
{
	struct necp_client_flow *flow = NULL;
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		if (flow->nexus) {
			uint64_t combined_details = combine_interface_details(flow->interface_index, flow->interface_flags);
			os_atomic_store(&flow_registration->last_interface_details, combined_details, release);
			break;
		}
	}
}
1255
/*
 * Fold userspace-reported per-flow interface stats into the kernel-owned
 * per-ifnet counters. Skips entirely if either packet counter is zero
 * (e.g. the app crashed before publishing stats). Only the first flow in
 * the registration is processed; currently a registration shares one
 * stats region.
 */
static void
necp_client_collect_interface_stats(struct necp_client_flow_registration *flow_registration, struct ifnet_stats_per_flow *ifs)
{
	struct necp_client_flow *flow = NULL;

	if (ifs == NULL || ifs->txpackets == 0 || ifs->rxpackets == 0) {
		return; // App might have crashed without publishing ifs
	}

	// Do malicious stats detection here

	// Fold userspace stats into (trusted) kernel stats (stored in ifp).
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		uint32_t if_idx = flow->interface_index;
		ifnet_t ifp = NULL;
		ifnet_head_lock_shared();
		// Resolve the scoped index to an ifnet under the head lock
		if (if_idx != IFSCOPE_NONE && if_idx <= (uint32_t)if_index) {
			ifp = ifindex2ifnet[if_idx];
			ifnet_update_stats_per_flow(ifs, ifp);
		}
		ifnet_head_done();

		// Currently there is only one flow that uses the shared necp
		// stats region, so this loop should exit after updating an ifp
		break;
	}
}
1283
1284 static void
necp_client_collect_aop_flow_stats(struct necp_client_flow_registration * flow_registration)1285 necp_client_collect_aop_flow_stats(struct necp_client_flow_registration *flow_registration)
1286 {
1287 struct aop_flow_stats flow_stats = {};
1288 struct tcp_info *tcpi = &flow_stats.transport.tcp_stats.tcp_info;
1289 uint32_t aop_flow_count = 0;
1290 int err = 0;
1291
1292 ASSERT(flow_registration->aop_offload);
1293 struct necp_all_kstats *kstats = (struct necp_all_kstats *)flow_registration->kstats_kaddr;
1294 if (kstats == NULL) {
1295 return;
1296 }
1297
1298 struct necp_stat_counts *prev_tcpstats = &(((struct necp_tcp_stats *)&kstats->necp_stats_comm)->necp_tcp_counts);
1299 struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
1300
1301 struct necp_client_flow *flow = NULL;
1302 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
1303 aop_flow_count++;
1304 ASSERT(flow->aop_offload && aop_flow_count == 1);
1305 if (flow->flow_tag > 0 && flow->aop_stat_index_valid) {
1306 err = net_aop_get_flow_stats(flow->stats_index, &flow_stats);
1307 if (err != 0) {
1308 NECPLOG(LOG_ERR, "failed to get aop flow stats "
1309 "for flow id %u with error %d", flow->flow_tag, err);
1310 continue;
1311 }
1312
1313 if (__improbable(flow->flow_tag != flow_stats.flow_id)) {
1314 NECPLOG(LOG_NOTICE, "aop flow stats, flow tag 0x%x != 0x%x",
1315 flow->flow_tag, flow_stats.flow_id);
1316 continue;
1317 }
1318
1319 if ((prev_tcpstats->necp_stat_rxpackets == tcpi->tcpi_rxpackets) &&
1320 prev_tcpstats->necp_stat_txpackets == tcpi->tcpi_txpackets) {
1321 continue;
1322 }
1323
1324 uint32_t d_rxpackets = tcpi->tcpi_rxpackets - prev_tcpstats->necp_stat_rxpackets;
1325 prev_tcpstats->necp_stat_rxpackets += d_rxpackets;
1326
1327 uint32_t d_txpackets = tcpi->tcpi_txpackets - prev_tcpstats->necp_stat_txpackets;
1328 prev_tcpstats->necp_stat_txpackets += d_txpackets;
1329
1330 uint32_t d_rxbytes = tcpi->tcpi_rxbytes - prev_tcpstats->necp_stat_rxbytes;
1331 prev_tcpstats->necp_stat_rxbytes += d_rxbytes;
1332
1333 uint32_t d_txbytes = tcpi->tcpi_txbytes - prev_tcpstats->necp_stat_txbytes;
1334 prev_tcpstats->necp_stat_txbytes += d_txbytes;
1335
1336 uint32_t d_rxduplicatebytes = tcpi->tcpi_rxduplicatebytes - prev_tcpstats->necp_stat_rxduplicatebytes;
1337 prev_tcpstats->necp_stat_rxduplicatebytes += d_rxduplicatebytes;
1338
1339 uint32_t d_rxoutoforderbytes = tcpi->tcpi_rxoutoforderbytes - prev_tcpstats->necp_stat_rxoutoforderbytes;
1340 prev_tcpstats->necp_stat_rxoutoforderbytes += d_rxoutoforderbytes;
1341
1342 uint32_t d_txretransmit = tcpi->tcpi_txretransmitbytes - prev_tcpstats->necp_stat_txretransmit;
1343 prev_tcpstats->necp_stat_txretransmit += d_txretransmit;
1344
1345 uint32_t d_connectattempts = prev_tcpstats->necp_stat_connectattempts - (tcpi->tcpi_state >= TCPS_SYN_SENT ? 1 : 0);
1346 prev_tcpstats->necp_stat_connectattempts += d_connectattempts;
1347
1348 uint32_t d_connectsuccesses = prev_tcpstats->necp_stat_connectsuccesses - (tcpi->tcpi_state >= TCPS_ESTABLISHED ? 1 : 0);
1349 prev_tcpstats->necp_stat_connectsuccesses += d_connectsuccesses;
1350
1351 prev_tcpstats->necp_stat_avg_rtt = tcpi->tcpi_srtt;
1352 prev_tcpstats->necp_stat_var_rtt = tcpi->tcpi_rttvar;
1353
1354 /* Update route stats */
1355 NECP_CLIENT_ROUTE_LOCK(flow_registration->client);
1356 struct rtentry *route = flow_registration->client->current_route;
1357 if (route != NULL) {
1358 nstat_route_update(route, d_connectattempts,
1359 d_connectsuccesses, d_rxpackets, d_rxbytes,
1360 d_rxduplicatebytes, d_rxoutoforderbytes,
1361 d_txpackets, d_txbytes, d_txretransmit,
1362 prev_tcpstats->necp_stat_avg_rtt, prev_tcpstats->necp_stat_var_rtt);
1363 }
1364 NECP_CLIENT_ROUTE_UNLOCK(flow_registration->client);
1365
1366 /* Update nexus flow stats */
1367 if (sf != NULL) {
1368 sf->sf_ibytes = flow_stats.rxbytes;
1369 sf->sf_obytes = flow_stats.txbytes;
1370 sf->sf_ipackets = flow_stats.rxpkts;
1371 sf->sf_opackets = flow_stats.txpkts;
1372 sf->sf_lseq = tcpi->tcpi_snd_nxt - 1;
1373 sf->sf_rseq = tcpi->tcpi_rcv_nxt - 1;
1374 sf->sf_lrtt = tcpi->tcpi_srtt;
1375 sf->sf_rrtt = tcpi->tcpi_rcv_srtt;
1376 sf->sf_ltrack.sft_state = tcpi->tcpi_state;
1377 sf->sf_lwscale = tcpi->tcpi_snd_wscale;
1378 sf->sf_rwscale = tcpi->tcpi_rcv_wscale;
1379
1380 memcpy(&sf->sf_activity, &flow_stats.activity_bitmap,
1381 sizeof(sf->sf_activity));
1382 }
1383 }
1384 }
1385 }
1386
/*
 * Collect stats for a nexus (userspace shared-memory) flow. The userspace
 * side publishes running totals into the ustats block; this computes the
 * deltas against the kernel-held previous totals, sanity-checks the RTT,
 * and folds the deltas into the client's route stats.
 */
static void
necp_client_collect_nexus_flow_stats(struct necp_client_flow_registration *flow_registration)
{
	ASSERT(!flow_registration->aop_offload);

	struct necp_all_kstats *kstats = (struct necp_all_kstats *)flow_registration->kstats_kaddr;
	if (kstats == NULL) {
		return;
	}

	// Grab userspace stats delta (untrusted).
	struct necp_tcp_stats *curr_tcpstats = (struct necp_tcp_stats *)kstats->necp_stats_ustats;
	struct necp_tcp_stats *prev_tcpstats = (struct necp_tcp_stats *)&kstats->necp_stats_comm;
	// Per-counter delta: d_<field> = current - previous, then fold into previous
#define diff_n_update(field)    \
	u_int32_t d_##field = (curr_tcpstats->necp_tcp_counts.necp_stat_##field - prev_tcpstats->necp_tcp_counts.necp_stat_##field); \
	prev_tcpstats->necp_tcp_counts.necp_stat_##field += d_##field;
	diff_n_update(rxpackets);
	diff_n_update(txpackets);
	if (d_rxpackets == 0 && d_txpackets == 0) {
		return; // no activity since last collection, stop here
	}
	diff_n_update(rxbytes);
	diff_n_update(txbytes);
	diff_n_update(rxduplicatebytes);
	diff_n_update(rxoutoforderbytes);
	diff_n_update(txretransmit);
	diff_n_update(connectattempts);
	diff_n_update(connectsuccesses);
	uint32_t rtt = prev_tcpstats->necp_tcp_counts.necp_stat_avg_rtt = curr_tcpstats->necp_tcp_counts.necp_stat_avg_rtt;
	uint32_t rtt_var = prev_tcpstats->necp_tcp_counts.necp_stat_var_rtt = curr_tcpstats->necp_tcp_counts.necp_stat_var_rtt;
#undef diff_n_update

	// Do malicious stats detection with the deltas here.
	// RTT check (not necessarily attacks, might just be not measured since we report stats async periodically).
	if (rtt < necp_client_stats_rtt_floor || rtt > necp_client_stats_rtt_ceiling) {
		rtt = rtt_var = 0; // nstat_route_update to skip 0 rtt
	}

	// Fold userspace stats into (trusted) kernel stats (stored in route).
	NECP_CLIENT_ROUTE_LOCK(flow_registration->client);
	struct rtentry *route = flow_registration->client->current_route;
	if (route != NULL) {
		nstat_route_update(route, d_connectattempts, d_connectsuccesses, d_rxpackets, d_rxbytes, d_rxduplicatebytes,
		    d_rxoutoforderbytes, d_txpackets, d_txbytes, d_txretransmit, rtt, rtt_var);
	}
	NECP_CLIENT_ROUTE_UNLOCK(flow_registration->client);
}
1434
1435 static void
necp_client_collect_stats(struct necp_client_flow_registration * flow_registration)1436 necp_client_collect_stats(struct necp_client_flow_registration *flow_registration)
1437 {
1438 if (__probable(!flow_registration->aop_offload)) {
1439 necp_client_collect_nexus_flow_stats(flow_registration);
1440 } else {
1441 necp_client_collect_aop_flow_stats(flow_registration);
1442 }
1443 }
1444
// This is called from various places; "closing" here implies the client being closed/removed if true, otherwise being
// defunct. In the former, we expect the caller to not hold the lock; for the latter it must have acquired it.
/*
 * Tear down the stats state of a flow registration: do a final collection,
 * remove it from the global collection list (dropping the list's client
 * reference), close its ntstat handler, free its shared stats object, and
 * release any nexus stats. Caller must hold the fd lock.
 */
static void
necp_destroy_flow_stats(struct necp_fd_data *fd_data,
    struct necp_client_flow_registration *flow_registration,
    struct ifnet_stats_per_flow *flow_ifnet_stats,
    boolean_t closing)
{
	NECP_FD_ASSERT_LOCKED(fd_data);

	struct necp_client *client = flow_registration->client;

	if (closing) {
		NECP_CLIENT_ASSERT_UNLOCKED(client);
		NECP_CLIENT_LOCK(client);
	} else {
		NECP_CLIENT_ASSERT_LOCKED(client);
	}

	// the interface stats are independent of the flow stats, hence we check here
	if (flow_ifnet_stats != NULL) {
		necp_client_collect_interface_stats(flow_registration, flow_ifnet_stats);
	}

	if (flow_registration->kstats_kaddr != NULL) {
		NECP_STATS_LIST_LOCK_EXCLUSIVE();
		// Final collection while the kstats mapping is still valid
		necp_client_collect_stats(flow_registration);
		const bool destroyed = necp_client_release_locked(client); // Drop the reference held by the stats list
		ASSERT(!destroyed);
		(void)destroyed;
		LIST_REMOVE(flow_registration, collect_stats_chain);
		NECP_STATS_LIST_UNLOCK();
		if (flow_registration->stats_handler_context != NULL) {
			ntstat_userland_stats_close(flow_registration->stats_handler_context);
			flow_registration->stats_handler_context = NULL;
		}
		necp_arena_stats_obj_free(fd_data, flow_registration->stats_arena, &flow_registration->kstats_kaddr, &flow_registration->ustats_uaddr);
		ASSERT(flow_registration->kstats_kaddr == NULL);
		ASSERT(flow_registration->ustats_uaddr == 0);
	}

	if (flow_registration->nexus_stats != NULL) {
		flow_stats_release(flow_registration->nexus_stats);
		flow_registration->nexus_stats = NULL;
	}

	if (closing) {
		NECP_CLIENT_UNLOCK(client);
	}
}
1495
/*
 * Arm the periodic stats-collection thread call. With recur == false this
 * is a no-op if a call is already pending; with recur == true (called from
 * the callout itself) it re-arms unconditionally to keep the cycle going.
 */
static void
necp_schedule_collect_stats_clients(bool recur)
{
	if (necp_client_collect_stats_tcall == NULL ||
	    (!recur && thread_call_isactive(necp_client_collect_stats_tcall))) {
		return;
	}

	uint64_t deadline = 0;
	uint64_t leeway = 0;
	// NOTE(review): timeouts are named *_microseconds but converted with
	// NSEC_PER_USEC as the scale factor — confirm intended units.
	clock_interval_to_deadline(necp_collect_stats_timeout_microseconds, NSEC_PER_USEC, &deadline);
	clock_interval_to_absolutetime_interval(necp_collect_stats_timeout_leeway_microseconds, NSEC_PER_USEC, &leeway);

	thread_call_enter_delayed_with_leeway(necp_client_collect_stats_tcall, NULL,
	    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
}
1512
/*
 * Thread-call callout: walk every flow registration on the global
 * collection list and fold in its latest stats, then re-arm the callout.
 * If the list is empty, the cycle stops until a new flow is registered.
 */
static void
necp_collect_stats_client_callout(__unused thread_call_param_t dummy,
    __unused thread_call_param_t arg)
{
	struct necp_client_flow_registration *flow_registration;

	net_update_uptime();
	NECP_STATS_LIST_LOCK_SHARED();
	if (LIST_EMPTY(&necp_collect_stats_flow_list)) {
		NECP_STATS_LIST_UNLOCK();
		return;
	}
	LIST_FOREACH(flow_registration, &necp_collect_stats_flow_list, collect_stats_chain) {
		// Collecting stats should be cheap (atomic increments)
		// Values like flow_registration->kstats_kaddr are guaranteed to be valid
		// as long as the flow_registration is in the stats list
		necp_client_collect_stats(flow_registration);
	}
	NECP_STATS_LIST_UNLOCK();

	necp_schedule_collect_stats_clients(TRUE); // recurring collection
}
1535
1536 #endif /* !SKYWALK */
1537
/*
 * Mark a flow registration defunct if it has at least one assigned nexus
 * flow. For each such flow, optionally record the parameters needed to
 * later tell the nexus agent to tear the flow down (appended to
 * `defunct_list`, which the caller processes outside the client lock).
 * Caller must hold the client lock. No-op if already defunct.
 */
static void
necp_defunct_flow_registration(struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    struct _necp_flow_defunct_list *defunct_list)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	if (!flow_registration->defunct) {
		bool needs_defunct = false;
		struct necp_client_flow *search_flow = NULL;
		LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
			if (search_flow->nexus &&
			    !uuid_is_null(search_flow->u.nexus_agent)) {
				// Save defunct values for the nexus
				if (defunct_list != NULL) {
					// Sleeping alloc won't fail; copy only what's necessary
					struct necp_flow_defunct *flow_defunct = kalloc_type(struct necp_flow_defunct,
					    Z_WAITOK | Z_ZERO);
					uuid_copy(flow_defunct->nexus_agent, search_flow->u.nexus_agent);
					// Flow is identified either by the client id or the
					// per-registration id, depending on registration flags.
					uuid_copy(flow_defunct->flow_id, ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
					    client->client_id :
					    flow_registration->registration_id));
					flow_defunct->proc_pid = client->proc_pid;
					flow_defunct->agent_handle = client->agent_handle;
					flow_defunct->flags = flow_registration->flags;
#if SKYWALK
					// Capture the QUIC stateless-reset token as close parameters.
					// NOTE(review): unlike necp_destroy_client_flow_registration(),
					// the stats type is not checked for
					// NECP_CLIENT_STATISTICS_TYPE_QUIC here — confirm whether
					// non-QUIC flows can reach this path with kstats mapped.
					if (flow_registration->kstats_kaddr != NULL) {
						struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
						struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
						if (quicstats != NULL) {
							memcpy(flow_defunct->close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(flow_defunct->close_parameters.u.close_token));
							flow_defunct->has_close_parameters = true;
						}
					}
#endif /* SKYWALK */
					// Add to the list provided by caller
					LIST_INSERT_HEAD(defunct_list, flow_defunct, chain);
				}

				needs_defunct = true;
			}
		}

		if (needs_defunct) {
#if SKYWALK
			// Close the stats early
			if (flow_registration->stats_handler_context != NULL) {
				ntstat_userland_stats_event(flow_registration->stats_handler_context,
				    NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT);
			}
#endif /* SKYWALK */

			// Only set defunct if there was some assigned flow
			flow_registration->defunct = true;
		}
	}
}
1595
1596 static void
necp_defunct_client_for_policy(struct necp_client * client,struct _necp_flow_defunct_list * defunct_list)1597 necp_defunct_client_for_policy(struct necp_client *client,
1598 struct _necp_flow_defunct_list *defunct_list)
1599 {
1600 NECP_CLIENT_ASSERT_LOCKED(client);
1601
1602 struct necp_client_flow_registration *flow_registration = NULL;
1603 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
1604 necp_defunct_flow_registration(client, flow_registration, defunct_list);
1605 }
1606 }
1607
/*
 * Final teardown of a client after the last reference is dropped.
 * Frees the client's heap-allocated fields, destroys its locks, and
 * releases the client structure itself. The client lock must NOT be
 * held (the structure is about to be destroyed).
 */
static void
necp_client_free(struct necp_client *client)
{
	NECP_CLIENT_ASSERT_UNLOCKED(client);

	// extra_interface_options is always sized for EXTRA_COUNT entries
	// when allocated; kfree_data tolerates a NULL pointer.
	kfree_data(client->extra_interface_options,
	    sizeof(struct necp_client_interface_option) * NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT);
	client->extra_interface_options = NULL;

	kfree_data_sized_by(client->parameters, client->parameters_length);
	kfree_data_counted_by(client->assigned_group_members, client->assigned_group_members_length);

	// Locks must be destroyed before the enclosing structure is freed.
	lck_mtx_destroy(&client->route_lock, &necp_fd_mtx_grp);
	lck_mtx_destroy(&client->lock, &necp_fd_mtx_grp);

	kfree_type(struct necp_client, client);
}
1625
/*
 * Take an additional reference on a client. Caller must already hold
 * the client lock.
 */
static void
necp_client_retain_locked(struct necp_client *client)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	os_ref_retain_locked(&client->reference_count);
}
1633
/*
 * Take an additional reference on a client, acquiring and releasing the
 * client lock around the refcount bump.
 */
static void
necp_client_retain(struct necp_client *client)
{
	NECP_CLIENT_LOCK(client);
	necp_client_retain_locked(client);
	NECP_CLIENT_UNLOCK(client);
}
1641
/*
 * Drop a reference on a client whose lock the caller holds.
 *
 * Returns true when this was the last reference: in that case the
 * client lock has been RELEASED and the client freed, so the caller
 * must not touch the client (or unlock it) afterwards. Returns false
 * when references remain; the caller still holds the lock.
 */
static bool
necp_client_release_locked(struct necp_client *client)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	os_ref_count_t count = os_ref_release_locked(&client->reference_count);
	if (count == 0) {
		// Last reference: unlock before freeing the structure
		// (the lock lives inside the client and is destroyed there).
		NECP_CLIENT_UNLOCK(client);
		necp_client_free(client);
	}

	return count == 0;
}
1655
1656 static bool
necp_client_release(struct necp_client * client)1657 necp_client_release(struct necp_client *client)
1658 {
1659 bool last_ref;
1660
1661 NECP_CLIENT_LOCK(client);
1662 if (!(last_ref = necp_client_release_locked(client))) {
1663 NECP_CLIENT_UNLOCK(client);
1664 }
1665
1666 return last_ref;
1667 }
1668
1669 static struct necp_client_update *
necp_client_update_alloc(const void * __sized_by (length)data,size_t length)1670 necp_client_update_alloc(const void * __sized_by(length)data, size_t length)
1671 {
1672 struct necp_client_update *client_update;
1673 struct necp_client_observer_update *buffer;
1674 size_t alloc_size;
1675
1676 if (os_add_overflow(length, sizeof(*buffer), &alloc_size)) {
1677 return NULL;
1678 }
1679 buffer = kalloc_data(alloc_size, Z_WAITOK);
1680 if (buffer == NULL) {
1681 return NULL;
1682 }
1683
1684 client_update = kalloc_type(struct necp_client_update,
1685 Z_WAITOK | Z_ZERO | Z_NOFAIL);
1686 client_update->update_length = alloc_size;
1687 client_update->update = buffer;
1688 memcpy(necp_update_get_tlv_buffer(buffer, alloc_size), data, length);
1689 return client_update;
1690 }
1691
/*
 * Free an observer update message allocated by necp_client_update_alloc():
 * first the TLV buffer, then the update structure itself.
 */
static void
necp_client_update_free(struct necp_client_update *client_update)
{
	kfree_data_sized_by(client_update->update, client_update->update_length);
	kfree_type(struct necp_client_update, client_update);
}
1698
1699 static void
necp_client_update_observer_add_internal(struct necp_fd_data * observer_fd,struct necp_client * client)1700 necp_client_update_observer_add_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1701 {
1702 struct necp_client_update *client_update;
1703
1704 NECP_FD_LOCK(observer_fd);
1705
1706 if (observer_fd->update_count >= necp_observer_message_limit) {
1707 NECP_FD_UNLOCK(observer_fd);
1708 return;
1709 }
1710
1711 client_update = necp_client_update_alloc(client->parameters, client->parameters_length);
1712 if (client_update != NULL) {
1713 uuid_copy(client_update->client_id, client->client_id);
1714 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_PARAMETERS;
1715 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1716 observer_fd->update_count++;
1717
1718 necp_fd_notify(observer_fd, true);
1719 }
1720
1721 NECP_FD_UNLOCK(observer_fd);
1722 }
1723
/*
 * Queue a RESULT update for a client onto one observer fd and wake the
 * observer. Drops the update when the observer's queue is at the
 * message limit or the allocation fails.
 */
static void
necp_client_update_observer_update_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
{
	NECP_FD_LOCK(observer_fd);

	// Bound the per-observer queue to avoid unbounded kernel memory use.
	if (observer_fd->update_count >= necp_observer_message_limit) {
		NECP_FD_UNLOCK(observer_fd);
		return;
	}

	struct necp_client_update *client_update = necp_client_update_alloc(client->result, client->result_length);
	if (client_update != NULL) {
		uuid_copy(client_update->client_id, client->client_id);
		client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_RESULT;
		TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
		observer_fd->update_count++;

		necp_fd_notify(observer_fd, true);
	}

	NECP_FD_UNLOCK(observer_fd);
}
1746
/*
 * Queue a REMOVE update (no payload) for a client onto one observer fd
 * and wake the observer. Drops the update when the observer's queue is
 * at the message limit or the allocation fails.
 */
static void
necp_client_update_observer_remove_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
{
	NECP_FD_LOCK(observer_fd);

	if (observer_fd->update_count >= necp_observer_message_limit) {
		NECP_FD_UNLOCK(observer_fd);
		return;
	}

	// REMOVE updates carry no TLV payload; only the header is allocated.
	struct necp_client_update *client_update = necp_client_update_alloc(NULL, 0);
	if (client_update != NULL) {
		uuid_copy(client_update->client_id, client->client_id);
		client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_REMOVE;
		TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
		observer_fd->update_count++;

		necp_fd_notify(observer_fd, true);
	}

	NECP_FD_UNLOCK(observer_fd);
}
1769
1770 static void
necp_client_update_observer_add(struct necp_client * client)1771 necp_client_update_observer_add(struct necp_client *client)
1772 {
1773 NECP_OBSERVER_LIST_LOCK_SHARED();
1774
1775 if (LIST_EMPTY(&necp_fd_observer_list)) {
1776 // No observers, bail
1777 NECP_OBSERVER_LIST_UNLOCK();
1778 return;
1779 }
1780
1781 struct necp_fd_data *observer_fd = NULL;
1782 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1783 necp_client_update_observer_add_internal(observer_fd, client);
1784 }
1785
1786 NECP_OBSERVER_LIST_UNLOCK();
1787 }
1788
1789 static void
necp_client_update_observer_update(struct necp_client * client)1790 necp_client_update_observer_update(struct necp_client *client)
1791 {
1792 NECP_OBSERVER_LIST_LOCK_SHARED();
1793
1794 if (LIST_EMPTY(&necp_fd_observer_list)) {
1795 // No observers, bail
1796 NECP_OBSERVER_LIST_UNLOCK();
1797 return;
1798 }
1799
1800 struct necp_fd_data *observer_fd = NULL;
1801 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1802 necp_client_update_observer_update_internal(observer_fd, client);
1803 }
1804
1805 NECP_OBSERVER_LIST_UNLOCK();
1806 }
1807
1808 static void
necp_client_update_observer_remove(struct necp_client * client)1809 necp_client_update_observer_remove(struct necp_client *client)
1810 {
1811 NECP_OBSERVER_LIST_LOCK_SHARED();
1812
1813 if (LIST_EMPTY(&necp_fd_observer_list)) {
1814 // No observers, bail
1815 NECP_OBSERVER_LIST_UNLOCK();
1816 return;
1817 }
1818
1819 struct necp_fd_data *observer_fd = NULL;
1820 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1821 necp_client_update_observer_remove_internal(observer_fd, client);
1822 }
1823
1824 NECP_OBSERVER_LIST_UNLOCK();
1825 }
1826
/*
 * Destroy one flow registration belonging to a client: notify each
 * nexus agent (close or abort, or unassert for browse/resolve-only
 * registrations), free every flow on the registration's list, remove
 * the registration from the client's tree, and free it.
 * Caller must hold the client lock. `abort` selects ABORT_NEXUS over
 * CLOSE_NEXUS for the teardown message.
 */
static void
necp_destroy_client_flow_registration(struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    pid_t pid, bool abort)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	bool has_close_parameters = false;
	struct necp_client_agent_parameters close_parameters = {};
	memset(close_parameters.u.close_token, 0, sizeof(close_parameters.u.close_token));
#if SKYWALK
	// If this flow kept QUIC stats, capture the stateless-reset token so
	// the nexus can close the connection cleanly.
	if (flow_registration->kstats_kaddr != NULL) {
		struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
		struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
		if (quicstats != NULL &&
		    quicstats->necp_quic_udp_stats.necp_udp_hdr.necp_stats_type == NECP_CLIENT_STATISTICS_TYPE_QUIC) {
			memcpy(close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(close_parameters.u.close_token));
			has_close_parameters = true;
		}
	}

	// Release reference held on the stats arena
	if (flow_registration->stats_arena != NULL) {
		necp_arena_info_release(flow_registration->stats_arena);
		flow_registration->stats_arena = NULL;
	}
#endif /* SKYWALK */

	struct necp_client_flow * __single search_flow = NULL;
	struct necp_client_flow *temp_flow = NULL;
	LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
		if (search_flow->nexus &&
		    !uuid_is_null(search_flow->u.nexus_agent)) {
			// Don't unregister for defunct flows
			if (!flow_registration->defunct) {
				u_int8_t message_type = (abort ? NETAGENT_MESSAGE_TYPE_ABORT_NEXUS :
				    NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS);
				// Browse/resolve-only registrations asserted the agent
				// rather than opening a nexus; unassert instead.
				if (((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) ||
				    (flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) &&
				    !(flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) {
					message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
				}
				size_t dummy_length = 0;
				void * __sized_by(dummy_length) dummy_results = NULL;
				int netagent_error = netagent_client_message_with_params(search_flow->u.nexus_agent,
				    ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
				    client->client_id :
				    flow_registration->registration_id),
				    pid, client->agent_handle,
				    message_type,
				    has_close_parameters ? &close_parameters : NULL,
				    &dummy_results, &dummy_length);
				// ENOENT just means the agent is already gone; not an error.
				if (netagent_error != 0 && netagent_error != ENOENT) {
					NECPLOG(LOG_ERR, "necp_client_remove close nexus error (%d) MESSAGE TYPE %u", netagent_error, message_type);
				}
			}
			uuid_clear(search_flow->u.nexus_agent);
		}
		if (search_flow->assigned_results != NULL) {
			kfree_data_counted_by(search_flow->assigned_results, search_flow->assigned_results_length);
		}
		LIST_REMOVE(search_flow, flow_chain);
		// Decrement the counter matching the flow's kind:
		// nexus (SKYWALK only), socket, or plain interface flow.
#if SKYWALK
		if (search_flow->nexus) {
			OSDecrementAtomic(&necp_nexus_flow_count);
		} else
#endif /* SKYWALK */
		if (search_flow->socket) {
			OSDecrementAtomic(&necp_socket_flow_count);
		} else {
			OSDecrementAtomic(&necp_if_flow_count);
		}

		necp_aop_offload_stats_destroy(search_flow);

		kfree_type(struct necp_client_flow, search_flow);
	}

	RB_REMOVE(_necp_client_flow_tree, &client->flow_registrations, flow_registration);
	// Break the back-pointer before freeing.
	flow_registration->client = NULL;

	kfree_type(struct necp_client_flow_registration, flow_registration);
}
1910
/*
 * Fully destroy a client: notify observers, drop the cached route,
 * destroy all flow registrations, release the port reservation
 * (SKYWALK), unassert any netagents, and drop the caller's reference
 * (which frees the client if it was the last one).
 * Called with the client lock NOT held; `abort` is forwarded to the
 * per-registration nexus teardown.
 */
static void
necp_destroy_client(struct necp_client *client, pid_t pid, bool abort)
{
	NECP_CLIENT_ASSERT_UNLOCKED(client);

#if SKYWALK
	if (client->nstat_context != NULL) {
		// This is a catch-all that should be rarely used.
		nstat_provider_stats_close(client->nstat_context);
		client->nstat_context = NULL;
	}
	if (client->original_parameters_source != NULL) {
		necp_client_release(client->original_parameters_source);
		client->original_parameters_source = NULL;
	}
#endif /* SKYWALK */
	necp_client_update_observer_remove(client);

	NECP_CLIENT_LOCK(client);

	// Free route
	NECP_CLIENT_ROUTE_LOCK(client);
	if (client->current_route != NULL) {
		rtfree(client->current_route);
		client->current_route = NULL;
	}
	NECP_CLIENT_ROUTE_UNLOCK(client);

	// Remove flow assignments
	struct necp_client_flow_registration *flow_registration = NULL;
	struct necp_client_flow_registration *temp_flow_registration = NULL;
	RB_FOREACH_SAFE(flow_registration, _necp_client_flow_tree, &client->flow_registrations, temp_flow_registration) {
		necp_destroy_client_flow_registration(client, flow_registration, pid, abort);
	}

#if SKYWALK
	// Remove port reservation
	if (NETNS_TOKEN_VALID(&client->port_reservation)) {
		netns_release(&client->port_reservation);
	}
#endif /* SKYWALK */

	// Remove agent assertions
	struct necp_client_assertion * __single search_assertion = NULL;
	struct necp_client_assertion *temp_assertion = NULL;
	LIST_FOREACH_SAFE(search_assertion, &client->assertion_list, assertion_chain, temp_assertion) {
		int netagent_error = netagent_client_message(search_assertion->asserted_netagent, client->client_id, pid,
		    client->agent_handle, NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT);
		if (netagent_error != 0) {
			// ENOENT (agent already gone) is only noteworthy at debug level.
			NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR),
			    "necp_client_remove unassert agent error (%d)", netagent_error);
		}
		LIST_REMOVE(search_assertion, assertion_chain);
		kfree_type(struct necp_client_assertion, search_assertion);
	}

	// Drop our reference; if it was the last one the client was freed
	// and the lock already released inside necp_client_release_locked().
	if (!necp_client_release_locked(client)) {
		NECP_CLIENT_UNLOCK(client);
	}

	OSDecrementAtomic(&necp_client_count);
}
1973
1974 static bool
1975 necp_defunct_client_fd_locked_inner(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, bool destroy_stats);
1976
/*
 * Drain a list of defunct-flow records (built under client locks by
 * necp_defunct_flow_registration) and send each nexus agent its
 * teardown message. Must be called without client locks held, since
 * netagent messaging can block. Frees every entry; the list is empty
 * on return.
 */
static void
necp_process_defunct_list(struct _necp_flow_defunct_list *defunct_list)
{
	if (!LIST_EMPTY(defunct_list)) {
		struct necp_flow_defunct * __single flow_defunct = NULL;
		struct necp_flow_defunct *temp_flow_defunct = NULL;

		// For each newly defunct client, send a message to the nexus to remove the flow
		LIST_FOREACH_SAFE(flow_defunct, defunct_list, chain, temp_flow_defunct) {
			if (!uuid_is_null(flow_defunct->nexus_agent)) {
				u_int8_t message_type = NETAGENT_MESSAGE_TYPE_ABORT_NEXUS;
				// Browse/resolve-only registrations asserted the agent
				// rather than opening a nexus; unassert instead.
				if (((flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) ||
				    (flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) &&
				    !(flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) {
					message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
				}
				size_t dummy_length = 0;
				void * __sized_by(dummy_length) dummy_results = NULL;
				int netagent_error = netagent_client_message_with_params(flow_defunct->nexus_agent,
				    flow_defunct->flow_id,
				    flow_defunct->proc_pid,
				    flow_defunct->agent_handle,
				    message_type,
				    flow_defunct->has_close_parameters ? &flow_defunct->close_parameters : NULL,
				    &dummy_results, &dummy_length);
				if (netagent_error != 0) {
					// Include the process name for diagnosability.
					char namebuf[MAXCOMLEN + 1];
					(void) strlcpy(namebuf, "unknown", sizeof(namebuf));
					proc_name(flow_defunct->proc_pid, namebuf, sizeof(namebuf));
					NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR), "necp_update_client abort nexus error (%d) for pid %d %s", netagent_error, flow_defunct->proc_pid, namebuf);
				}
			}
			LIST_REMOVE(flow_defunct, chain);
			kfree_type(struct necp_flow_defunct, flow_defunct);
		}
	}
	ASSERT(LIST_EMPTY(defunct_list));
}
2015
/*
 * File-operations close handler for an NECP fd. Detaches the fd_data
 * from the global (or observer) list, defuncts and unlinks every flow
 * and client, flushes pending observer updates, tears down stats/sysctl
 * arenas (SKYWALK), frees the fd_data, and finally destroys the
 * collected clients and processes the defunct list outside the fd lock
 * (netagent messaging can block). Always returns 0.
 */
static int
necpop_close(struct fileglob *fg, vfs_context_t ctx)
{
#pragma unused(ctx)
	struct necp_fd_data * __single fd_data = NULL;
	int error = 0;

	fd_data = (struct necp_fd_data *)fg_get_data(fg);
	fg_set_data(fg, NULL);

	if (fd_data != NULL) {
		// Clients are collected here and destroyed after the fd lock
		// is dropped, since destruction can send netagent messages.
		struct _necp_client_tree clients_to_close;
		RB_INIT(&clients_to_close);

		// Remove from list quickly
		if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
			NECP_OBSERVER_LIST_LOCK_EXCLUSIVE();
			LIST_REMOVE(fd_data, chain);
			NECP_OBSERVER_LIST_UNLOCK();
		} else {
			NECP_FD_LIST_LOCK_EXCLUSIVE();
			LIST_REMOVE(fd_data, chain);
			NECP_FD_LIST_UNLOCK();
		}

		NECP_FD_LOCK(fd_data);
		pid_t pid = fd_data->proc_pid;

		// Defunct all flows first so nexus teardown happens via the
		// defunct list, processed after the fd lock is released.
		struct _necp_flow_defunct_list defunct_list;
		LIST_INIT(&defunct_list);

		(void)necp_defunct_client_fd_locked_inner(fd_data, &defunct_list, false);

		// Unlink every flow registration from the global and per-fd trees.
		struct necp_client_flow_registration *flow_registration = NULL;
		struct necp_client_flow_registration *temp_flow_registration = NULL;
		RB_FOREACH_SAFE(flow_registration, _necp_fd_flow_tree, &fd_data->flows, temp_flow_registration) {
#if SKYWALK
			necp_destroy_flow_stats(fd_data, flow_registration, NULL, TRUE);
#endif /* SKYWALK */
			NECP_FLOW_TREE_LOCK_EXCLUSIVE();
			RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
			NECP_FLOW_TREE_UNLOCK();
			RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
		}

		struct necp_client *client = NULL;
		struct necp_client *temp_client = NULL;
		RB_FOREACH_SAFE(client, _necp_client_tree, &fd_data->clients, temp_client) {
			// Clear out the agent_handle to avoid dangling pointers back to fd_data
			NECP_CLIENT_LOCK(client);
			client->agent_handle = NULL;
			NECP_CLIENT_UNLOCK(client);

			NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
			RB_REMOVE(_necp_client_global_tree, &necp_client_global_tree, client);
			NECP_CLIENT_TREE_UNLOCK();
			RB_REMOVE(_necp_client_tree, &fd_data->clients, client);
			RB_INSERT(_necp_client_tree, &clients_to_close, client);
		}

		struct necp_client_update *client_update = NULL;
		struct necp_client_update *temp_update = NULL;
		TAILQ_FOREACH_SAFE(client_update, &fd_data->update_list, chain, temp_update) {
			// Flush pending updates
			TAILQ_REMOVE(&fd_data->update_list, client_update, chain);
			necp_client_update_free(client_update);
		}
		fd_data->update_count = 0;

#if SKYWALK
		// Cleanup stats arena(s); indicate that we're closing
		necp_stats_arenas_destroy(fd_data, TRUE);
		ASSERT(fd_data->stats_arena_active == NULL);
		ASSERT(LIST_EMPTY(&fd_data->stats_arena_list));

		// Cleanup sysctl arena
		necp_sysctl_arena_destroy(fd_data);
		ASSERT(fd_data->sysctl_arena == NULL);
#endif /* SKYWALK */

		NECP_FD_UNLOCK(fd_data);

		selthreadclear(&fd_data->si);

		lck_mtx_destroy(&fd_data->fd_lock, &necp_fd_mtx_grp);

		if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
			OSDecrementAtomic(&necp_observer_fd_count);
		} else {
			OSDecrementAtomic(&necp_client_fd_count);
		}

		kfree_type(struct necp_fd_data, fd_data);

		// Now that no locks are held, destroy the collected clients
		// and message the nexus agents for defunct flows.
		RB_FOREACH_SAFE(client, _necp_client_tree, &clients_to_close, temp_client) {
			RB_REMOVE(_necp_client_tree, &clients_to_close, client);
			necp_destroy_client(client, pid, true);
		}

		necp_process_defunct_list(&defunct_list);
	}

	return error;
}
2120
2121 /// NECP client utilities
2122
2123 static inline bool
necp_address_is_wildcard(const union necp_sockaddr_union * const addr)2124 necp_address_is_wildcard(const union necp_sockaddr_union * const addr)
2125 {
2126 return (addr->sa.sa_family == AF_INET && addr->sin.sin_addr.s_addr == INADDR_ANY) ||
2127 (addr->sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&addr->sin6.sin6_addr));
2128 }
2129
2130 static int
necp_find_fd_data(struct proc * p,int fd,struct fileproc ** fpp,struct necp_fd_data ** fd_data)2131 necp_find_fd_data(struct proc *p, int fd,
2132 struct fileproc **fpp, struct necp_fd_data **fd_data)
2133 {
2134 struct fileproc * __single fp;
2135 int error = fp_get_ftype(p, fd, DTYPE_NETPOLICY, ENODEV, &fp);
2136
2137 if (error == 0) {
2138 *fd_data = (struct necp_fd_data *)fp_get_data(fp);
2139 *fpp = fp;
2140
2141 if ((*fd_data)->necp_fd_type != necp_fd_type_client) {
2142 // Not a client fd, ignore
2143 fp_drop(p, fd, fp, 0);
2144 error = EINVAL;
2145 }
2146 }
2147 return error;
2148 }
2149
2150 static void
necp_client_add_nexus_flow(struct necp_client_flow_registration * flow_registration,uuid_t nexus_agent,uint32_t interface_index,uint32_t interface_flags,bool aop_offload)2151 necp_client_add_nexus_flow(struct necp_client_flow_registration *flow_registration,
2152 uuid_t nexus_agent,
2153 uint32_t interface_index,
2154 uint32_t interface_flags,
2155 bool aop_offload)
2156 {
2157 struct necp_client_flow *new_flow = kalloc_type(struct necp_client_flow, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2158
2159 new_flow->nexus = TRUE;
2160 uuid_copy(new_flow->u.nexus_agent, nexus_agent);
2161 new_flow->interface_index = interface_index;
2162 new_flow->interface_flags = interface_flags;
2163 new_flow->check_tcp_heuristics = TRUE;
2164 new_flow->aop_offload = aop_offload ? TRUE : FALSE;
2165 #if SKYWALK
2166 OSIncrementAtomic(&necp_nexus_flow_count);
2167 #endif /* SKYWALK */
2168
2169 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
2170
2171 #if SKYWALK
2172 necp_flow_save_current_interface_details(flow_registration);
2173 #endif /* SKYWALK */
2174 }
2175
/*
 * Add a nexus flow for `nexus_agent` unless the registration already
 * has one for the same agent. Snapshots the interface's nstat flags
 * (under the ifnet locks) before creating the flow.
 */
static void
necp_client_add_nexus_flow_if_needed(struct necp_client_flow_registration *flow_registration,
    uuid_t nexus_agent, uint32_t interface_index, bool aop_offload)
{
	// Bail if a flow for this nexus agent already exists.
	struct necp_client_flow *flow = NULL;
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		if (flow->nexus &&
		    uuid_compare(flow->u.nexus_agent, nexus_agent) == 0) {
			return;
		}
	}

	// Look up the interface (bounds-checked against if_index) and capture
	// its current flags while holding the ifnet locks.
	uint32_t interface_flags = 0;
	ifnet_t ifp = NULL;
	ifnet_head_lock_shared();
	if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
		ifp = ifindex2ifnet[interface_index];
		if (ifp != NULL) {
			ifnet_lock_shared(ifp);
			interface_flags = nstat_ifnet_to_flags(ifp);
			ifnet_lock_done(ifp);
		}
	}
	ifnet_head_done();
	necp_client_add_nexus_flow(flow_registration, nexus_agent, interface_index, interface_flags, aop_offload);
}
2202
2203 static struct necp_client_flow *
necp_client_add_interface_flow(struct necp_client_flow_registration * flow_registration,uint32_t interface_index)2204 necp_client_add_interface_flow(struct necp_client_flow_registration *flow_registration,
2205 uint32_t interface_index)
2206 {
2207 struct necp_client_flow *new_flow = kalloc_type(struct necp_client_flow, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2208
2209 // Neither nexus nor socket
2210 new_flow->interface_index = interface_index;
2211 new_flow->u.socket_handle = flow_registration->interface_handle;
2212 new_flow->u.cb = flow_registration->interface_cb;
2213
2214 OSIncrementAtomic(&necp_if_flow_count);
2215
2216 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
2217
2218 return new_flow;
2219 }
2220
2221 static struct necp_client_flow *
necp_client_add_interface_flow_if_needed(struct necp_client * client,struct necp_client_flow_registration * flow_registration,uint32_t interface_index)2222 necp_client_add_interface_flow_if_needed(struct necp_client *client,
2223 struct necp_client_flow_registration *flow_registration,
2224 uint32_t interface_index)
2225 {
2226 if (!client->allow_multiple_flows ||
2227 interface_index == IFSCOPE_NONE) {
2228 // Interface not set, or client not allowed to use this mode
2229 return NULL;
2230 }
2231
2232 struct necp_client_flow *flow = NULL;
2233 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
2234 if (!flow->nexus && !flow->socket && flow->interface_index == interface_index) {
2235 // Already have the flow
2236 flow->invalid = FALSE;
2237 flow->u.socket_handle = flow_registration->interface_handle;
2238 flow->u.cb = flow_registration->interface_cb;
2239 return NULL;
2240 }
2241 }
2242 return necp_client_add_interface_flow(flow_registration, interface_index);
2243 }
2244
2245 static void
necp_client_add_interface_option_if_needed(struct necp_client * client,uint32_t interface_index,uint32_t interface_generation,uuid_t * nexus_agent,bool network_provider)2246 necp_client_add_interface_option_if_needed(struct necp_client *client,
2247 uint32_t interface_index,
2248 uint32_t interface_generation,
2249 uuid_t *nexus_agent,
2250 bool network_provider)
2251 {
2252 if ((interface_index == IFSCOPE_NONE && !network_provider) ||
2253 (client->interface_option_count != 0 && !client->allow_multiple_flows)) {
2254 // Interface not set, or client not allowed to use this mode
2255 return;
2256 }
2257
2258 if (client->interface_option_count >= NECP_CLIENT_MAX_INTERFACE_OPTIONS) {
2259 // Cannot take any more interface options
2260 return;
2261 }
2262
2263 // Check if already present
2264 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
2265 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2266 struct necp_client_interface_option *option = &client->interface_options[option_i];
2267 if (option->interface_index == interface_index) {
2268 if (nexus_agent == NULL) {
2269 return;
2270 }
2271 if (uuid_compare(option->nexus_agent, *nexus_agent) == 0) {
2272 return;
2273 }
2274 if (uuid_is_null(option->nexus_agent)) {
2275 uuid_copy(option->nexus_agent, *nexus_agent);
2276 return;
2277 }
2278 // If we get to this point, this is a new nexus flow
2279 }
2280 } else {
2281 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2282 if (option->interface_index == interface_index) {
2283 if (nexus_agent == NULL) {
2284 return;
2285 }
2286 if (uuid_compare(option->nexus_agent, *nexus_agent) == 0) {
2287 return;
2288 }
2289 if (uuid_is_null(option->nexus_agent)) {
2290 uuid_copy(option->nexus_agent, *nexus_agent);
2291 return;
2292 }
2293 // If we get to this point, this is a new nexus flow
2294 }
2295 }
2296 }
2297
2298 // Add a new entry
2299 if (client->interface_option_count < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2300 // Add to static
2301 struct necp_client_interface_option *option = &client->interface_options[client->interface_option_count];
2302 option->interface_index = interface_index;
2303 option->interface_generation = interface_generation;
2304 if (nexus_agent != NULL) {
2305 uuid_copy(option->nexus_agent, *nexus_agent);
2306 } else {
2307 uuid_clear(option->nexus_agent);
2308 }
2309 client->interface_option_count++;
2310 } else {
2311 // Add to extra
2312 if (client->extra_interface_options == NULL) {
2313 client->extra_interface_options = (struct necp_client_interface_option *)kalloc_data(
2314 sizeof(struct necp_client_interface_option) * NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT, Z_WAITOK | Z_ZERO);
2315 }
2316 if (client->extra_interface_options != NULL) {
2317 struct necp_client_interface_option *option = &client->extra_interface_options[client->interface_option_count - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2318 option->interface_index = interface_index;
2319 option->interface_generation = interface_generation;
2320 if (nexus_agent != NULL) {
2321 uuid_copy(option->nexus_agent, *nexus_agent);
2322 } else {
2323 uuid_clear(option->nexus_agent);
2324 }
2325 client->interface_option_count++;
2326 }
2327 }
2328 }
2329
2330 static bool
necp_client_flow_is_viable(proc_t proc,struct necp_client * client,struct necp_client_flow * flow)2331 necp_client_flow_is_viable(proc_t proc, struct necp_client *client,
2332 struct necp_client_flow *flow)
2333 {
2334 struct necp_aggregate_result result;
2335 bool ignore_address = (client->allow_multiple_flows && !flow->nexus && !flow->socket);
2336
2337 flow->necp_flow_flags = 0;
2338 int error = necp_application_find_policy_match_internal(proc, client->parameters,
2339 (u_int32_t)client->parameters_length,
2340 &result, &flow->necp_flow_flags, NULL,
2341 flow->interface_index,
2342 &flow->local_addr, &flow->remote_addr, NULL, NULL,
2343 NULL, ignore_address, true, NULL);
2344
2345 // Check for blocking agents
2346 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
2347 if (uuid_is_null(result.netagents[i])) {
2348 // Passed end of valid agents
2349 break;
2350 }
2351 if (result.netagent_use_flags[i] & NECP_AGENT_USE_FLAG_REMOVE) {
2352 // A removed agent, ignore
2353 continue;
2354 }
2355 u_int32_t flags = netagent_get_flags(result.netagents[i]);
2356 if ((flags & NETAGENT_FLAG_REGISTERED) &&
2357 !(flags & NETAGENT_FLAG_VOLUNTARY) &&
2358 !(flags & NETAGENT_FLAG_ACTIVE) &&
2359 !(flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY)) {
2360 // A required agent is not active, cause the flow to be marked non-viable
2361 return false;
2362 }
2363 }
2364
2365 if (flow->interface_index != IFSCOPE_NONE) {
2366 ifnet_head_lock_shared();
2367
2368 struct ifnet *ifp = ifindex2ifnet[flow->interface_index];
2369 if (ifp && ifp->if_delegated.ifp != IFSCOPE_NONE) {
2370 flow->delegated_interface_index = ifp->if_delegated.ifp->if_index;
2371 }
2372
2373 ifnet_head_done();
2374 }
2375
2376 return error == 0 &&
2377 result.routed_interface_index != IFSCOPE_NONE &&
2378 result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP;
2379 }
2380
2381 static void
necp_flow_add_interface_flows(proc_t proc,struct necp_client * client,struct necp_client_flow_registration * flow_registration,bool send_initial)2382 necp_flow_add_interface_flows(proc_t proc,
2383 struct necp_client *client,
2384 struct necp_client_flow_registration *flow_registration,
2385 bool send_initial)
2386 {
2387 // Traverse all interfaces and add a tracking flow if needed
2388 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
2389 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2390 struct necp_client_interface_option *option = &client->interface_options[option_i];
2391 struct necp_client_flow *flow = necp_client_add_interface_flow_if_needed(client, flow_registration, option->interface_index);
2392 if (flow != NULL && send_initial) {
2393 flow->viable = necp_client_flow_is_viable(proc, client, flow);
2394 if (flow->viable && flow->u.cb) {
2395 bool viable = flow->viable;
2396 flow->u.cb(flow_registration->interface_handle, NECP_CLIENT_CBACTION_INITIAL, flow->interface_index, flow->necp_flow_flags, &viable);
2397 flow->viable = viable;
2398 }
2399 }
2400 } else {
2401 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2402 struct necp_client_flow *flow = necp_client_add_interface_flow_if_needed(client, flow_registration, option->interface_index);
2403 if (flow != NULL && send_initial) {
2404 flow->viable = necp_client_flow_is_viable(proc, client, flow);
2405 if (flow->viable && flow->u.cb) {
2406 bool viable = flow->viable;
2407 flow->u.cb(flow_registration->interface_handle, NECP_CLIENT_CBACTION_INITIAL, flow->interface_index, flow->necp_flow_flags, &viable);
2408 flow->viable = viable;
2409 }
2410 }
2411 }
2412 }
2413 }
2414
/*
 * Re-evaluate every flow on every flow registration of the client.
 * Caller must hold the client lock (asserted below).
 *
 * For each flow: re-run the policy match, fire VIABLE/NONVIABLE callbacks on
 * state transitions, optionally mark registrations defunct (SKYWALK), and free
 * interface flows that are no longer viable and carry no assigned data.
 *
 * Returns true if any flow's observable state changed.
 */
static bool
necp_client_update_flows(proc_t proc,
    struct necp_client *client,
    struct _necp_flow_defunct_list *defunct_list)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	bool any_client_updated = FALSE;
	struct necp_client_flow * __single flow = NULL;
	struct necp_client_flow *temp_flow = NULL;
	struct necp_client_flow_registration *flow_registration = NULL;
	RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
		if (flow_registration->interface_cb != NULL) {
			// Add any interface flows that are not already tracked
			necp_flow_add_interface_flows(proc, client, flow_registration, false);
		}

		// SAFE variant: the loop body may LIST_REMOVE and free the current flow.
		LIST_FOREACH_SAFE(flow, &flow_registration->flow_list, flow_chain, temp_flow) {
			bool client_updated = FALSE;

			// Check policy result for flow
			// Snapshot pre-match state so changes can be detected below
			// (necp_client_flow_is_viable rewrites necp_flow_flags and
			// may update delegated_interface_index).
			u_short old_delegated_ifindex = flow->delegated_interface_index;

			int old_flags = flow->necp_flow_flags;
			bool viable = necp_client_flow_is_viable(proc, client, flow);

			// TODO: Defunct nexus flows that are blocked by policy

			if (flow->viable != viable) {
				flow->viable = viable;
				client_updated = TRUE;
			}

			if ((old_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE) !=
			    (flow->necp_flow_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE)) {
				client_updated = TRUE;
			}

			if (flow->delegated_interface_index != old_delegated_ifindex) {
				client_updated = TRUE;
			}

			// Callbacks fire only for socket flows and pure interface flows
			// (nexus flows use flow->u differently).
			if (flow->viable && client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) {
				bool flow_viable = flow->viable;
				flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_VIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable);
				flow->viable = flow_viable;
			}

			if (!flow->viable || flow->invalid) {
				if (client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) {
					bool flow_viable = flow->viable;
					flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_NONVIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable);
					flow->viable = flow_viable;
				}
				// The callback might change the viable-flag of the
				// flow depending on its policy. Thus, we need to
				// check the flags again after the callback.
			}

#if SKYWALK
			if (defunct_list != NULL) {
				if (flow->invalid && flow->nexus && flow->assigned && !uuid_is_null(flow->u.nexus_agent)) {
					// This is a nexus flow that was assigned, but not found on path
					u_int32_t flags = netagent_get_flags(flow->u.nexus_agent);
					if (!(flags & NETAGENT_FLAG_REGISTERED)) {
						// The agent is no longer registered! Mark defunct.
						necp_defunct_flow_registration(client, flow_registration, defunct_list);
						client_updated = TRUE;
					}
				}
			}
#else /* !SKYWALK */
			(void)defunct_list;
#endif /* !SKYWALK */

			// Handle flows that no longer match
			if (!flow->viable || flow->invalid) {
				// Drop them as long as they aren't assigned data
				if (!flow->nexus && !flow->assigned) {
					if (flow->assigned_results != NULL) {
						kfree_data_counted_by(flow->assigned_results, flow->assigned_results_length);
						client_updated = TRUE;
					}
					LIST_REMOVE(flow, flow_chain);
#if SKYWALK
					// NOTE(review): this branch is unreachable here — the
					// enclosing condition requires !flow->nexus. Kept as-is.
					if (flow->nexus) {
						OSDecrementAtomic(&necp_nexus_flow_count);
					} else
#endif /* SKYWALK */
					if (flow->socket) {
						OSDecrementAtomic(&necp_socket_flow_count);
					} else {
						OSDecrementAtomic(&necp_if_flow_count);
					}

					necp_aop_offload_stats_destroy(flow);

					kfree_type(struct necp_client_flow, flow);
				}
			}

			any_client_updated |= client_updated;
		}
#if SKYWALK
		necp_flow_save_current_interface_details(flow_registration);
#endif /* SKYWALK */
	}

	return any_client_updated;
}
2525
2526 static void
necp_client_mark_all_nonsocket_flows_as_invalid(struct necp_client * client)2527 necp_client_mark_all_nonsocket_flows_as_invalid(struct necp_client *client)
2528 {
2529 struct necp_client_flow_registration *flow_registration = NULL;
2530 struct necp_client_flow *flow = NULL;
2531 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
2532 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
2533 if (!flow->socket) { // Socket flows are not marked as invalid
2534 flow->invalid = TRUE;
2535 }
2536 }
2537 }
2538
2539 // Reset option count every update
2540 client->interface_option_count = 0;
2541 }
2542
/*
 * Determine whether the client's parsed parameters request the given network
 * agent, either directly by UUID or by (domain, type) pair, across both the
 * required and preferred parameter lists.  Used to decide whether a
 * SPECIFIC_USE_ONLY agent applies to a client.
 *
 * Returns true if any required/preferred entry matches; false if parameters
 * is NULL or nothing matches.
 */
static inline bool
necp_netagent_is_requested(const struct necp_client_parsed_parameters *parameters,
    uuid_t *netagent_uuid)
{
	// Specific use agents only apply when requested
	bool requested = false;
	if (parameters != NULL) {
		// Check required agent UUIDs
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parameters->required_netagents[i])) {
				// Lists are packed; first null UUID terminates
				break;
			}
			if (uuid_compare(parameters->required_netagents[i], *netagent_uuid) == 0) {
				requested = true;
				break;
			}
		}

		if (!requested) {
			// Check required agent types
			bool fetched_type = false;
			char netagent_domain[NETAGENT_DOMAINSIZE];
			char netagent_type[NETAGENT_TYPESIZE];
			memset(&netagent_domain, 0, NETAGENT_DOMAINSIZE);
			memset(&netagent_type, 0, NETAGENT_TYPESIZE);

			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				// First entry with an empty domain or type terminates the list
				if (strbuflen(parameters->required_netagent_types[i].netagent_domain, sizeof(parameters->required_netagent_types[i].netagent_domain)) == 0 ||
				    strbuflen(parameters->required_netagent_types[i].netagent_type, sizeof(parameters->required_netagent_types[i].netagent_type)) == 0) {
					break;
				}

				// Lazily fetch this agent's domain/type once, on first need
				if (!fetched_type) {
					if (netagent_get_agent_domain_and_type(*netagent_uuid, netagent_domain, netagent_type)) {
						fetched_type = TRUE;
					} else {
						break;
					}
				}

				// An empty domain/type on the parameter side acts as a wildcard
				if ((strbuflen(parameters->required_netagent_types[i].netagent_domain, sizeof(parameters->required_netagent_types[i].netagent_domain)) == 0 ||
				    strbufcmp(netagent_domain, NETAGENT_DOMAINSIZE, parameters->required_netagent_types[i].netagent_domain, sizeof(parameters->required_netagent_types[i].netagent_domain)) == 0) &&
				    (strbuflen(parameters->required_netagent_types[i].netagent_type, sizeof(parameters->required_netagent_types[i].netagent_type)) == 0 ||
				    strbufcmp(netagent_type, NETAGENT_TYPESIZE, parameters->required_netagent_types[i].netagent_type, sizeof(parameters->required_netagent_types[i].netagent_type)) == 0)) {
					requested = true;
					break;
				}
			}
		}

		// Check preferred agent UUIDs
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parameters->preferred_netagents[i])) {
				break;
			}
			if (uuid_compare(parameters->preferred_netagents[i], *netagent_uuid) == 0) {
				requested = true;
				break;
			}
		}

		if (!requested) {
			// Check preferred agent types (same wildcard semantics as above)
			bool fetched_type = false;
			char netagent_domain[NETAGENT_DOMAINSIZE];
			char netagent_type[NETAGENT_TYPESIZE];
			memset(&netagent_domain, 0, NETAGENT_DOMAINSIZE);
			memset(&netagent_type, 0, NETAGENT_TYPESIZE);

			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strbuflen(parameters->preferred_netagent_types[i].netagent_domain, sizeof(parameters->preferred_netagent_types[i].netagent_domain)) == 0 ||
				    strbuflen(parameters->preferred_netagent_types[i].netagent_type, sizeof(parameters->preferred_netagent_types[i].netagent_type)) == 0) {
					break;
				}

				if (!fetched_type) {
					if (netagent_get_agent_domain_and_type(*netagent_uuid, netagent_domain, netagent_type)) {
						fetched_type = TRUE;
					} else {
						break;
					}
				}

				if ((strbuflen(parameters->preferred_netagent_types[i].netagent_domain, sizeof(parameters->preferred_netagent_types[i].netagent_domain)) == 0 ||
				    strbufcmp(netagent_domain, NETAGENT_DOMAINSIZE, parameters->preferred_netagent_types[i].netagent_domain, sizeof(parameters->preferred_netagent_types[i].netagent_domain)) == 0) &&
				    (strbuflen(parameters->preferred_netagent_types[i].netagent_type, sizeof(parameters->preferred_netagent_types[i].netagent_type)) == 0 ||
				    strbufcmp(netagent_type, NETAGENT_TYPESIZE, parameters->preferred_netagent_types[i].netagent_type, sizeof(parameters->preferred_netagent_types[i].netagent_type)) == 0)) {
					requested = true;
					break;
				}
			}
		}
	}

	return requested;
}
2639
2640 static bool
necp_netagent_applies_to_client(struct necp_client * client,const struct necp_client_parsed_parameters * parameters,uuid_t * netagent_uuid,bool allow_nexus,uint32_t interface_index,uint32_t interface_generation)2641 necp_netagent_applies_to_client(struct necp_client *client,
2642 const struct necp_client_parsed_parameters *parameters,
2643 uuid_t *netagent_uuid, bool allow_nexus,
2644 uint32_t interface_index, uint32_t interface_generation)
2645 {
2646 #pragma unused(interface_index, interface_generation)
2647 bool applies = FALSE;
2648 u_int32_t flags = netagent_get_flags(*netagent_uuid);
2649 if (!(flags & NETAGENT_FLAG_REGISTERED)) {
2650 // Unregistered agents never apply
2651 return applies;
2652 }
2653
2654 const bool is_nexus_agent = ((flags & NETAGENT_FLAG_NEXUS_PROVIDER) ||
2655 (flags & NETAGENT_FLAG_NEXUS_LISTENER) ||
2656 (flags & NETAGENT_FLAG_CUSTOM_ETHER_NEXUS) ||
2657 (flags & NETAGENT_FLAG_CUSTOM_IP_NEXUS) ||
2658 (flags & NETAGENT_FLAG_INTERPOSE_NEXUS));
2659 if (is_nexus_agent) {
2660 if (!allow_nexus) {
2661 // Hide nexus providers unless allowed
2662 // Direct interfaces and direct policies are allowed to use a nexus
2663 // Delegate interfaces or re-scoped interfaces are not allowed
2664 return applies;
2665 }
2666
2667 if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) &&
2668 !(flags & NETAGENT_FLAG_CUSTOM_ETHER_NEXUS)) {
2669 // Client requested a custom ether nexus, but this nexus isn't one
2670 return applies;
2671 }
2672
2673 if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP) &&
2674 !(flags & NETAGENT_FLAG_CUSTOM_IP_NEXUS)) {
2675 // Client requested a custom IP nexus, but this nexus isn't one
2676 return applies;
2677 }
2678
2679 if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) &&
2680 !(flags & NETAGENT_FLAG_INTERPOSE_NEXUS)) {
2681 // Client requested an interpose nexus, but this nexus isn't one
2682 return applies;
2683 }
2684
2685 if (!(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) &&
2686 !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP) &&
2687 !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) &&
2688 !(flags & NETAGENT_FLAG_NEXUS_PROVIDER)) {
2689 // Client requested default parameters, but this nexus isn't generic
2690 return applies;
2691 }
2692 }
2693
2694 if (uuid_compare(client->failed_trigger_agent.netagent_uuid, *netagent_uuid) == 0) {
2695 if (client->failed_trigger_agent.generation == netagent_get_generation(*netagent_uuid)) {
2696 // If this agent was triggered, and failed, and hasn't changed, keep hiding it
2697 return applies;
2698 } else {
2699 // Mismatch generation, clear out old trigger
2700 uuid_clear(client->failed_trigger_agent.netagent_uuid);
2701 client->failed_trigger_agent.generation = 0;
2702 }
2703 }
2704
2705 if (flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY) {
2706 // Specific use agents only apply when requested
2707 applies = necp_netagent_is_requested(parameters, netagent_uuid);
2708 } else {
2709 applies = TRUE;
2710 }
2711
2712 #if SKYWALK
2713 // Add nexus agent if it is a nexus, and either is not a listener, or the nexus supports listeners
2714 if (applies && is_nexus_agent &&
2715 !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) && // Don't add for browse paths
2716 ((flags & NETAGENT_FLAG_NEXUS_LISTENER) || !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER))) {
2717 necp_client_add_interface_option_if_needed(client, interface_index,
2718 interface_generation, netagent_uuid,
2719 (flags & NETAGENT_FLAG_NETWORK_PROVIDER));
2720 }
2721 #endif /* SKYWALK */
2722
2723 return applies;
2724 }
2725
2726 static void
necp_client_add_agent_interface_options(struct necp_client * client,const struct necp_client_parsed_parameters * parsed_parameters,ifnet_t ifp)2727 necp_client_add_agent_interface_options(struct necp_client *client,
2728 const struct necp_client_parsed_parameters *parsed_parameters,
2729 ifnet_t ifp)
2730 {
2731 if (ifp == NULL) {
2732 return;
2733 }
2734
2735 ifnet_lock_shared(ifp);
2736 if (ifp->if_agentids != NULL) {
2737 for (u_int32_t i = 0; i < ifp->if_agentcount; i++) {
2738 if (uuid_is_null(ifp->if_agentids[i])) {
2739 continue;
2740 }
2741 // Relies on the side effect that nexus agents that apply will create flows
2742 (void)necp_netagent_applies_to_client(client, parsed_parameters, &ifp->if_agentids[i], TRUE,
2743 ifp->if_index, ifnet_get_generation(ifp));
2744 }
2745 }
2746 ifnet_lock_done(ifp);
2747 }
2748
2749 static void
necp_client_add_browse_interface_options(struct necp_client * client,const struct necp_client_parsed_parameters * parsed_parameters,ifnet_t ifp)2750 necp_client_add_browse_interface_options(struct necp_client *client,
2751 const struct necp_client_parsed_parameters *parsed_parameters,
2752 ifnet_t ifp)
2753 {
2754 if (ifp == NULL) {
2755 return;
2756 }
2757
2758 ifnet_lock_shared(ifp);
2759 if (ifp->if_agentids != NULL) {
2760 for (u_int32_t i = 0; i < ifp->if_agentcount; i++) {
2761 if (uuid_is_null(ifp->if_agentids[i])) {
2762 continue;
2763 }
2764
2765 u_int32_t flags = netagent_get_flags(ifp->if_agentids[i]);
2766 if ((flags & NETAGENT_FLAG_REGISTERED) &&
2767 (flags & NETAGENT_FLAG_ACTIVE) &&
2768 (flags & NETAGENT_FLAG_SUPPORTS_BROWSE) &&
2769 (!(flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY) ||
2770 necp_netagent_is_requested(parsed_parameters, &ifp->if_agentids[i]))) {
2771 necp_client_add_interface_option_if_needed(client, ifp->if_index, ifnet_get_generation(ifp), &ifp->if_agentids[i], (flags & NETAGENT_FLAG_NETWORK_PROVIDER));
2772
2773 // Finding one is enough
2774 break;
2775 }
2776 }
2777 }
2778 ifnet_lock_done(ifp);
2779 }
2780
2781 static inline bool
_necp_client_address_is_valid(struct sockaddr * address)2782 _necp_client_address_is_valid(struct sockaddr *address)
2783 {
2784 if (address->sa_family == AF_INET) {
2785 return address->sa_len == sizeof(struct sockaddr_in);
2786 } else if (address->sa_family == AF_INET6) {
2787 return address->sa_len == sizeof(struct sockaddr_in6);
2788 } else {
2789 return FALSE;
2790 }
2791 }
2792
2793 #define necp_client_address_is_valid(S) _necp_client_address_is_valid(SA(S))
2794
2795 static inline bool
necp_client_endpoint_is_unspecified(struct necp_client_endpoint * endpoint)2796 necp_client_endpoint_is_unspecified(struct necp_client_endpoint *endpoint)
2797 {
2798 if (necp_client_address_is_valid(&endpoint->u.sa)) {
2799 if (endpoint->u.sa.sa_family == AF_INET) {
2800 return endpoint->u.sin.sin_addr.s_addr == INADDR_ANY;
2801 } else if (endpoint->u.sa.sa_family == AF_INET6) {
2802 return IN6_IS_ADDR_UNSPECIFIED(&endpoint->u.sin6.sin6_addr);
2803 } else {
2804 return TRUE;
2805 }
2806 } else {
2807 return TRUE;
2808 }
2809 }
2810
2811 #if SKYWALK
/*
 * Walk the serialized client parameter TLVs and rewrite the port field of any
 * LOCAL_ADDRESS / LOCAL_ENDPOINT entry in place with the given local_port.
 * Used after a local port is assigned so the stored parameters reflect it.
 * local_port is written as-is into sin_port/sin6_port (presumably already in
 * network byte order — confirm with callers).
 */
static void
necp_client_update_local_port_parameters(u_int8_t * __sized_by(parameters_size)parameters,
    u_int32_t parameters_size,
    uint16_t local_port)
{
	size_t offset = 0;
	// Iterate TLVs; stop when a full header no longer fits
	while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);

		if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
			// If the length is larger than what can fit in the remaining parameters size, bail
			NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
			break;
		}

		if (length > 0) {
			u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
			if (value != NULL) {
				switch (type) {
				case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						// Only rewrite well-formed IPv4/IPv6 addresses
						if (necp_client_address_is_valid(&address_struct->address.sa)) {
							if (address_struct->address.sa.sa_family == AF_INET) {
								address_struct->address.sin.sin_port = local_port;
							} else if (address_struct->address.sa.sa_family == AF_INET6) {
								address_struct->address.sin6.sin6_port = local_port;
							}
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: {
					if (length >= sizeof(struct necp_client_endpoint)) {
						struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
						if (necp_client_address_is_valid(&endpoint->u.sa)) {
							if (endpoint->u.sa.sa_family == AF_INET) {
								endpoint->u.sin.sin_port = local_port;
							} else if (endpoint->u.sa.sa_family == AF_INET6) {
								endpoint->u.sin6.sin6_port = local_port;
							}
						}
					}
					break;
				}
				default: {
					// All other parameter types are left untouched
					break;
				}
				}
			}
		}

		offset += sizeof(struct necp_tlv_header) + length;
	}
}
#endif /* SKYWALK */
2869
2870 #define NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH 253
2871
2872 static void
necp_client_trace_parameter_parsing(struct necp_client * client,u_int8_t type,u_int8_t * __sized_by (length)value,u_int32_t length)2873 necp_client_trace_parameter_parsing(struct necp_client *client, u_int8_t type, u_int8_t * __sized_by(length)value, u_int32_t length)
2874 {
2875 uint64_t num = 0;
2876 uint16_t shortBuf;
2877 uint32_t intBuf;
2878 char buffer[NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH + 1];
2879
2880 if (value != NULL && length > 0) {
2881 switch (length) {
2882 case 1:
2883 num = *value;
2884 break;
2885 case 2:
2886 memcpy(&shortBuf, value, sizeof(shortBuf));
2887 num = shortBuf;
2888 break;
2889 case 4:
2890 memcpy(&intBuf, value, sizeof(intBuf));
2891 num = intBuf;
2892 break;
2893 case 8:
2894 memcpy(&num, value, sizeof(num));
2895 break;
2896 default:
2897 num = 0;
2898 break;
2899 }
2900 int len = NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH < length ? NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH : length;
2901 memcpy(buffer, value, len);
2902 buffer[len] = 0;
2903 NECP_CLIENT_PARAMS_LOG(client, "Parsing param - type %d length %d value <%llu (%llX)> %s", type, length, num, num, buffer);
2904 } else {
2905 NECP_CLIENT_PARAMS_LOG(client, "Parsing param - type %d length %d", type, length);
2906 }
2907 }
2908
/*
 * Emit a multi-line debug trace of a fully parsed parameter set: scalar
 * fields, rendered local/remote addresses, tracker flags, and each packed
 * list (prohibited interfaces, agent types, agent UUIDs).  Logging only;
 * no state is modified.
 */
static void
necp_client_trace_parsed_parameters(struct necp_client *client, struct necp_client_parsed_parameters *parsed_parameters)
{
	int i;
	char local_buffer[64] = { };
	char remote_buffer[64] = { };
	uuid_string_t uuid_str = { };
	uuid_unparse_lower(parsed_parameters->effective_uuid, uuid_str);

	// Render the local address if it is a well-formed IPv4/IPv6 sockaddr
	switch (parsed_parameters->local_addr.sa.sa_family) {
	case AF_INET:
		if (parsed_parameters->local_addr.sa.sa_len == sizeof(struct sockaddr_in)) {
			struct sockaddr_in *addr = &parsed_parameters->local_addr.sin;
			inet_ntop(AF_INET, &(addr->sin_addr), local_buffer, sizeof(local_buffer));
		}
		break;
	case AF_INET6:
		if (parsed_parameters->local_addr.sa.sa_len == sizeof(struct sockaddr_in6)) {
			struct sockaddr_in6 *addr6 = &parsed_parameters->local_addr.sin6;
			inet_ntop(AF_INET6, &(addr6->sin6_addr), local_buffer, sizeof(local_buffer));
		}
		break;
	default:
		break;
	}

	// Render the remote address the same way
	switch (parsed_parameters->remote_addr.sa.sa_family) {
	case AF_INET:
		if (parsed_parameters->remote_addr.sa.sa_len == sizeof(struct sockaddr_in)) {
			struct sockaddr_in *addr = &parsed_parameters->remote_addr.sin;
			inet_ntop(AF_INET, &(addr->sin_addr), remote_buffer, sizeof(remote_buffer));
		}
		break;
	case AF_INET6:
		if (parsed_parameters->remote_addr.sa.sa_len == sizeof(struct sockaddr_in6)) {
			struct sockaddr_in6 *addr6 = &parsed_parameters->remote_addr.sin6;
			inet_ntop(AF_INET6, &(addr6->sin6_addr), remote_buffer, sizeof(remote_buffer));
		}
		break;
	default:
		break;
	}

	NECP_CLIENT_PARAMS_LOG(client, "Parsed params - valid_fields %X flags %X "
	    "extended flags %llX delegated_upid %llu local_addr %s remote_addr %s "
	    "required_interface_index %u required_interface_type %d local_address_preference %d "
	    "ip_protocol %d transport_protocol %d ethertype %d effective_pid %d "
	    "effective_uuid %s uid %d persona_id %d traffic_class %d",
	    parsed_parameters->valid_fields,
	    parsed_parameters->flags,
	    parsed_parameters->extended_flags,
	    parsed_parameters->delegated_upid,
	    local_buffer, remote_buffer,
	    parsed_parameters->required_interface_index,
	    parsed_parameters->required_interface_type,
	    parsed_parameters->local_address_preference,
	    parsed_parameters->ip_protocol,
	    parsed_parameters->transport_protocol,
	    parsed_parameters->ethertype,
	    parsed_parameters->effective_pid,
	    uuid_str,
	    parsed_parameters->uid,
	    parsed_parameters->persona_id,
	    parsed_parameters->traffic_class);

	NECP_CLIENT_PARAMS_LOG(client, "Parsed params - tracker flags <known-tracker %X> <non-app-initiated %X> <silent %X> <app-approved %X>",
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_KNOWN_TRACKER,
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_NON_APP_INITIATED,
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_SILENT,
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_APPROVED_APP_DOMAIN);

	// Packed lists below terminate at the first empty/null entry
	for (i = 0; i < NECP_MAX_INTERFACE_PARAMETERS && parsed_parameters->prohibited_interfaces[i][0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_interfaces[%d] <%s>", i, parsed_parameters->prohibited_interfaces[i]);
	}

	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->required_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed required_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->required_netagent_types[i].netagent_domain,
		    parsed_parameters->required_netagent_types[i].netagent_type);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->prohibited_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->prohibited_netagent_types[i].netagent_domain,
		    parsed_parameters->prohibited_netagent_types[i].netagent_type);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->preferred_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed preferred_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->preferred_netagent_types[i].netagent_domain,
		    parsed_parameters->preferred_netagent_types[i].netagent_type);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->avoided_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed avoided_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->avoided_netagent_types[i].netagent_domain,
		    parsed_parameters->avoided_netagent_types[i].netagent_type);
	}

	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->required_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->required_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed required_netagents[%d] <%s>", i, uuid_str);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->prohibited_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->prohibited_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_netagents[%d] <%s>", i, uuid_str);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->preferred_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->preferred_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed preferred_netagents[%d] <%s>", i, uuid_str);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->avoided_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->avoided_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed avoided_netagents[%d] <%s>", i, uuid_str);
	}
}
3022
3023 static bool
necp_client_strings_are_equal(const char * __sized_by (string1_length)string1,size_t string1_length,const char * __sized_by (string2_length)string2,size_t string2_length)3024 necp_client_strings_are_equal(const char * __sized_by(string1_length)string1, size_t string1_length,
3025 const char * __sized_by(string2_length)string2, size_t string2_length)
3026 {
3027 if (string1 == NULL || string2 == NULL) {
3028 return false;
3029 }
3030 const size_t string1_actual_length = strnlen(string1, string1_length);
3031 const size_t string2_actual_length = strnlen(string2, string2_length);
3032 if (string1_actual_length != string2_actual_length) {
3033 return false;
3034 }
3035 return strbufcmp(string1, string1_actual_length, string2, string2_actual_length) == 0;
3036 }
3037
3038 static int
necp_client_parse_parameters(struct necp_client * client,u_int8_t * __sized_by (parameters_size)parameters,u_int32_t parameters_size,struct necp_client_parsed_parameters * parsed_parameters)3039 necp_client_parse_parameters(struct necp_client *client, u_int8_t * __sized_by(parameters_size)parameters,
3040 u_int32_t parameters_size,
3041 struct necp_client_parsed_parameters *parsed_parameters)
3042 {
3043 int error = 0;
3044 size_t offset = 0;
3045
3046 u_int32_t num_prohibited_interfaces = 0;
3047 u_int32_t num_prohibited_interface_types = 0;
3048 u_int32_t num_required_agents = 0;
3049 u_int32_t num_prohibited_agents = 0;
3050 u_int32_t num_preferred_agents = 0;
3051 u_int32_t num_avoided_agents = 0;
3052 u_int32_t num_required_agent_types = 0;
3053 u_int32_t num_prohibited_agent_types = 0;
3054 u_int32_t num_preferred_agent_types = 0;
3055 u_int32_t num_avoided_agent_types = 0;
3056 u_int32_t resolver_tag_length = 0;
3057 u_int8_t * __sized_by(resolver_tag_length) resolver_tag = NULL;
3058 u_int32_t hostname_length = 0;
3059 u_int8_t * __sized_by(hostname_length) client_hostname = NULL;
3060 uuid_t parent_id = {};
3061
3062 if (parsed_parameters == NULL) {
3063 return EINVAL;
3064 }
3065
3066 memset(parsed_parameters, 0, sizeof(struct necp_client_parsed_parameters));
3067
3068 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
3069 u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
3070 u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);
3071
3072 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
3073 // If the length is larger than what can fit in the remaining parameters size, bail
3074 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
3075 break;
3076 }
3077
3078 if (length > 0) {
3079 u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
3080 if (value != NULL) {
3081 switch (type) {
3082 case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
3083 if (length <= IFXNAMSIZ && length > 0) {
3084 ifnet_t __single bound_interface = NULL;
3085 char interface_name[IFXNAMSIZ];
3086 memcpy(interface_name, value, length);
3087 interface_name[length - 1] = 0; // Make sure the string is NULL terminated
3088 if (ifnet_find_by_name(__unsafe_null_terminated_from_indexable(interface_name, &interface_name[length - 1]), &bound_interface) == 0) {
3089 parsed_parameters->required_interface_index = bound_interface->if_index;
3090 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF;
3091 ifnet_release(bound_interface);
3092 }
3093 }
3094 break;
3095 }
3096 case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
3097 if (length >= sizeof(struct necp_policy_condition_addr)) {
3098 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
3099 if (necp_client_address_is_valid(&address_struct->address.sa)) {
3100 parsed_parameters->local_addr.sin6 = address_struct->address.sin6;
3101 if (!necp_address_is_wildcard(&parsed_parameters->local_addr)) {
3102 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR;
3103 }
3104 if ((parsed_parameters->local_addr.sa.sa_family == AF_INET && parsed_parameters->local_addr.sin.sin_port) ||
3105 (parsed_parameters->local_addr.sa.sa_family == AF_INET6 && parsed_parameters->local_addr.sin6.sin6_port)) {
3106 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT;
3107 }
3108 }
3109 }
3110 break;
3111 }
3112 case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: {
3113 if (length >= sizeof(struct necp_client_endpoint)) {
3114 struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
3115 if (necp_client_address_is_valid(&endpoint->u.sa)) {
3116 parsed_parameters->local_addr.sin6 = endpoint->u.sin6;
3117 if (!necp_address_is_wildcard(&parsed_parameters->local_addr)) {
3118 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR;
3119 }
3120 if ((parsed_parameters->local_addr.sa.sa_family == AF_INET && parsed_parameters->local_addr.sin.sin_port) ||
3121 (parsed_parameters->local_addr.sa.sa_family == AF_INET6 && parsed_parameters->local_addr.sin6.sin6_port)) {
3122 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT;
3123 }
3124 }
3125 }
3126 break;
3127 }
3128 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
3129 if (length >= sizeof(struct necp_policy_condition_addr)) {
3130 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
3131 if (necp_client_address_is_valid(&address_struct->address.sa)) {
3132 parsed_parameters->remote_addr.sin6 = address_struct->address.sin6;
3133 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR;
3134 }
3135 }
3136 break;
3137 }
3138 case NECP_CLIENT_PARAMETER_REMOTE_ENDPOINT: {
3139 if (length >= sizeof(struct necp_client_endpoint)) {
3140 struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
3141 if (necp_client_address_is_valid(&endpoint->u.sa)) {
3142 parsed_parameters->remote_addr.sin6 = endpoint->u.sin6;
3143 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR;
3144 }
3145 }
3146 break;
3147 }
3148 case NECP_CLIENT_PARAMETER_PROHIBIT_INTERFACE: {
3149 if (num_prohibited_interfaces >= NECP_MAX_INTERFACE_PARAMETERS) {
3150 break;
3151 }
3152 if (length <= IFXNAMSIZ && length > 0) {
3153 memcpy(parsed_parameters->prohibited_interfaces[num_prohibited_interfaces], value, length);
3154 parsed_parameters->prohibited_interfaces[num_prohibited_interfaces][length - 1] = 0; // Make sure the string is NULL terminated
3155 num_prohibited_interfaces++;
3156 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF;
3157 }
3158 break;
3159 }
3160 case NECP_CLIENT_PARAMETER_REQUIRE_IF_TYPE: {
3161 if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) {
3162 break;
3163 }
3164 if (length >= sizeof(u_int8_t)) {
3165 memcpy(&parsed_parameters->required_interface_type, value, sizeof(u_int8_t));
3166 if (parsed_parameters->required_interface_type) {
3167 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE;
3168 }
3169 }
3170 break;
3171 }
3172 case NECP_CLIENT_PARAMETER_PROHIBIT_IF_TYPE: {
3173 if (num_prohibited_interface_types >= NECP_MAX_INTERFACE_PARAMETERS) {
3174 break;
3175 }
3176 if (length >= sizeof(u_int8_t)) {
3177 memcpy(&parsed_parameters->prohibited_interface_types[num_prohibited_interface_types], value, sizeof(u_int8_t));
3178 num_prohibited_interface_types++;
3179 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE;
3180 }
3181 break;
3182 }
3183 case NECP_CLIENT_PARAMETER_REQUIRE_AGENT: {
3184 if (num_required_agents >= NECP_MAX_AGENT_PARAMETERS) {
3185 break;
3186 }
3187 if (length >= sizeof(uuid_t)) {
3188 memcpy(&parsed_parameters->required_netagents[num_required_agents], value, sizeof(uuid_t));
3189 num_required_agents++;
3190 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT;
3191 }
3192 break;
3193 }
3194 case NECP_CLIENT_PARAMETER_PROHIBIT_AGENT: {
3195 if (num_prohibited_agents >= NECP_MAX_AGENT_PARAMETERS) {
3196 break;
3197 }
3198 if (length >= sizeof(uuid_t)) {
3199 memcpy(&parsed_parameters->prohibited_netagents[num_prohibited_agents], value, sizeof(uuid_t));
3200 num_prohibited_agents++;
3201 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT;
3202 }
3203 break;
3204 }
3205 case NECP_CLIENT_PARAMETER_PREFER_AGENT: {
3206 if (num_preferred_agents >= NECP_MAX_AGENT_PARAMETERS) {
3207 break;
3208 }
3209 if (length >= sizeof(uuid_t)) {
3210 memcpy(&parsed_parameters->preferred_netagents[num_preferred_agents], value, sizeof(uuid_t));
3211 num_preferred_agents++;
3212 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT;
3213 }
3214 break;
3215 }
3216 case NECP_CLIENT_PARAMETER_AVOID_AGENT: {
3217 if (num_avoided_agents >= NECP_MAX_AGENT_PARAMETERS) {
3218 break;
3219 }
3220 if (length >= sizeof(uuid_t)) {
3221 memcpy(&parsed_parameters->avoided_netagents[num_avoided_agents], value, sizeof(uuid_t));
3222 num_avoided_agents++;
3223 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT;
3224 }
3225 break;
3226 }
3227 case NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE: {
3228 if (num_required_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
3229 break;
3230 }
3231 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
3232 memcpy(&parsed_parameters->required_netagent_types[num_required_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
3233 num_required_agent_types++;
3234 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE;
3235 }
3236 break;
3237 }
3238 case NECP_CLIENT_PARAMETER_PROHIBIT_AGENT_TYPE: {
3239 if (num_prohibited_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
3240 break;
3241 }
3242 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
3243 memcpy(&parsed_parameters->prohibited_netagent_types[num_prohibited_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
3244 num_prohibited_agent_types++;
3245 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE;
3246 }
3247 break;
3248 }
3249 case NECP_CLIENT_PARAMETER_PREFER_AGENT_TYPE: {
3250 if (num_preferred_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
3251 break;
3252 }
3253 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
3254 memcpy(&parsed_parameters->preferred_netagent_types[num_preferred_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
3255 num_preferred_agent_types++;
3256 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE;
3257 }
3258 break;
3259 }
3260 case NECP_CLIENT_PARAMETER_AVOID_AGENT_TYPE: {
3261 if (num_avoided_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
3262 break;
3263 }
3264 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
3265 memcpy(&parsed_parameters->avoided_netagent_types[num_avoided_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
3266 num_avoided_agent_types++;
3267 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE;
3268 }
3269 break;
3270 }
3271 case NECP_CLIENT_PARAMETER_FLAGS: {
3272 if (length >= sizeof(u_int32_t)) {
3273 memcpy(&parsed_parameters->flags, value, sizeof(parsed_parameters->flags));
3274 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_FLAGS;
3275 }
3276 break;
3277 }
3278 case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
3279 if (length == sizeof(u_int16_t)) {
3280 u_int16_t large_ip_protocol = 0;
3281 memcpy(&large_ip_protocol, value, sizeof(large_ip_protocol));
3282 parsed_parameters->ip_protocol = (u_int8_t)large_ip_protocol;
3283 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL;
3284 } else if (length >= sizeof(parsed_parameters->ip_protocol)) {
3285 memcpy(&parsed_parameters->ip_protocol, value, sizeof(parsed_parameters->ip_protocol));
3286 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL;
3287 }
3288 break;
3289 }
3290 case NECP_CLIENT_PARAMETER_TRANSPORT_PROTOCOL: {
3291 if (length >= sizeof(parsed_parameters->transport_protocol)) {
3292 memcpy(&parsed_parameters->transport_protocol, value, sizeof(parsed_parameters->transport_protocol));
3293 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL;
3294 }
3295 break;
3296 }
3297 case NECP_CLIENT_PARAMETER_PID: {
3298 if (length >= sizeof(parsed_parameters->effective_pid)) {
3299 memcpy(&parsed_parameters->effective_pid, value, sizeof(parsed_parameters->effective_pid));
3300 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID;
3301 }
3302 break;
3303 }
3304 case NECP_CLIENT_PARAMETER_DELEGATED_UPID: {
3305 if (length >= sizeof(parsed_parameters->delegated_upid)) {
3306 memcpy(&parsed_parameters->delegated_upid, value, sizeof(parsed_parameters->delegated_upid));
3307 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID;
3308 }
3309 break;
3310 }
3311 case NECP_CLIENT_PARAMETER_ETHERTYPE: {
3312 if (length >= sizeof(parsed_parameters->ethertype)) {
3313 memcpy(&parsed_parameters->ethertype, value, sizeof(parsed_parameters->ethertype));
3314 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_ETHERTYPE;
3315 }
3316 break;
3317 }
3318 case NECP_CLIENT_PARAMETER_APPLICATION: {
3319 if (length >= sizeof(parsed_parameters->effective_uuid)) {
3320 memcpy(&parsed_parameters->effective_uuid, value, sizeof(parsed_parameters->effective_uuid));
3321 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID;
3322 }
3323 break;
3324 }
3325 case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
3326 if (length >= sizeof(parsed_parameters->traffic_class)) {
3327 memcpy(&parsed_parameters->traffic_class, value, sizeof(parsed_parameters->traffic_class));
3328 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS;
3329 }
3330 break;
3331 }
3332 case NECP_CLIENT_PARAMETER_RESOLVER_TAG: {
3333 if (length > 0) {
3334 if (resolver_tag != NULL) {
3335 // Multiple resolver tags is invalid
3336 NECPLOG0(LOG_ERR, "Multiple resolver tags are not supported");
3337 error = EINVAL;
3338 } else {
3339 resolver_tag = (u_int8_t *)value;
3340 resolver_tag_length = length;
3341 }
3342 }
3343 break;
3344 }
3345 case NECP_CLIENT_PARAMETER_DOMAIN: {
3346 if (length > 0) {
3347 client_hostname = (u_int8_t *)value;
3348 hostname_length = length;
3349 }
3350 break;
3351 }
3352 case NECP_CLIENT_PARAMETER_PARENT_ID: {
3353 if (length == sizeof(parent_id)) {
3354 uuid_copy(parent_id, value);
3355 memcpy(&parsed_parameters->parent_uuid, value, sizeof(parsed_parameters->parent_uuid));
3356 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID;
3357 }
3358 break;
3359 }
3360 case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE: {
3361 if (length >= sizeof(parsed_parameters->local_address_preference)) {
3362 memcpy(&parsed_parameters->local_address_preference, value, sizeof(parsed_parameters->local_address_preference));
3363 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE;
3364 }
3365 break;
3366 }
3367 case NECP_CLIENT_PARAMETER_ATTRIBUTED_BUNDLE_IDENTIFIER: {
3368 if (length > 0) {
3369 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER;
3370 }
3371 break;
3372 }
3373 case NECP_CLIENT_PARAMETER_FLOW_DEMUX_PATTERN: {
3374 if (parsed_parameters->demux_pattern_count >= NECP_MAX_DEMUX_PATTERNS) {
3375 break;
3376 }
3377 if (length >= sizeof(struct necp_demux_pattern)) {
3378 memcpy(&parsed_parameters->demux_patterns[parsed_parameters->demux_pattern_count], value, sizeof(struct necp_demux_pattern));
3379 parsed_parameters->demux_pattern_count++;
3380 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN;
3381 }
3382 break;
3383 }
3384 case NECP_CLIENT_PARAMETER_APPLICATION_ID: {
3385 if (length >= sizeof(necp_application_id_t)) {
3386 necp_application_id_t *application_id = (necp_application_id_t *)(void *)value;
3387 // UID
3388 parsed_parameters->uid = application_id->uid;
3389 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_UID;
3390 // EUUID
3391 uuid_copy(parsed_parameters->effective_uuid, application_id->effective_uuid);
3392 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID;
3393 // PERSONA
3394 parsed_parameters->persona_id = application_id->persona_id;
3395 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PERSONA_ID;
3396 }
3397 break;
3398 }
3399 case NECP_CLIENT_PARAMETER_EXTENDED_FLAGS: {
3400 if (length >= sizeof(u_int64_t)) {
3401 memcpy(&parsed_parameters->extended_flags, value, sizeof(parsed_parameters->extended_flags));
3402 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EXTENDED_FLAGS;
3403 }
3404 break;
3405 }
3406 default: {
3407 break;
3408 }
3409 }
3410 }
3411
3412 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) {
3413 necp_client_trace_parameter_parsing(client, type, value, length);
3414 }
3415 }
3416
3417 offset += sizeof(struct necp_tlv_header) + length;
3418 }
3419
3420 if (resolver_tag != NULL) {
3421 struct necp_client_validatable *validatable = (struct necp_client_validatable *)resolver_tag;
3422 if (resolver_tag_length <= sizeof(struct necp_client_validatable)) {
3423 error = EINVAL;
3424 NECPLOG(LOG_ERR, "Resolver tag length too short: %u", resolver_tag_length);
3425 } else {
3426 bool matches = true;
3427
3428 // Check the client UUID for client-specific results
3429 if (validatable->signable.sign_type == NECP_CLIENT_SIGN_TYPE_RESOLVER_ANSWER ||
3430 validatable->signable.sign_type == NECP_CLIENT_SIGN_TYPE_BROWSE_RESULT ||
3431 validatable->signable.sign_type == NECP_CLIENT_SIGN_TYPE_SERVICE_RESOLVER_ANSWER) {
3432 if (uuid_compare(parent_id, validatable->signable.client_id) != 0 &&
3433 uuid_compare(client->client_id, validatable->signable.client_id) != 0) {
3434 NECPLOG0(LOG_ERR, "Resolver tag invalid client ID");
3435 matches = false;
3436 }
3437 }
3438
3439 size_t data_length = resolver_tag_length - sizeof(struct necp_client_validatable);
3440 switch (validatable->signable.sign_type) {
3441 case NECP_CLIENT_SIGN_TYPE_RESOLVER_ANSWER:
3442 case NECP_CLIENT_SIGN_TYPE_SYSTEM_RESOLVER_ANSWER: {
3443 if (data_length < (sizeof(struct necp_client_host_resolver_answer) - sizeof(struct necp_client_signable))) {
3444 NECPLOG0(LOG_ERR, "Resolver tag invalid length for resolver answer");
3445 matches = false;
3446 } else {
3447 struct necp_client_host_resolver_answer * __single answer_struct = (struct necp_client_host_resolver_answer *)&validatable->signable;
3448 if (data_length != (sizeof(struct necp_client_host_resolver_answer) + answer_struct->hostname_length - sizeof(struct necp_client_signable))) {
3449 NECPLOG0(LOG_ERR, "Resolver tag invalid length for resolver answer");
3450 matches = false;
3451 } else {
3452 struct sockaddr_in6 sin6 = answer_struct->address_answer.sin6;
3453 if (answer_struct->hostname_length != 0 && // If the hostname on the signed answer is empty, ignore
3454 !necp_client_strings_are_equal((const char *)client_hostname, hostname_length,
3455 necp_answer_get_hostname(answer_struct, answer_struct->hostname_length), answer_struct->hostname_length)) {
3456 NECPLOG0(LOG_ERR, "Resolver tag hostname does not match");
3457 matches = false;
3458 } else if (answer_struct->address_answer.sa.sa_family != parsed_parameters->remote_addr.sa.sa_family ||
3459 answer_struct->address_answer.sa.sa_len != parsed_parameters->remote_addr.sa.sa_len) {
3460 NECPLOG0(LOG_ERR, "Resolver tag address type does not match");
3461 matches = false;
3462 } else if (answer_struct->address_answer.sin.sin_port != 0 && // If the port on the signed answer is empty, ignore
3463 answer_struct->address_answer.sin.sin_port != parsed_parameters->remote_addr.sin.sin_port) {
3464 NECPLOG0(LOG_ERR, "Resolver tag port does not match");
3465 matches = false;
3466 } else if ((answer_struct->address_answer.sa.sa_family == AF_INET &&
3467 answer_struct->address_answer.sin.sin_addr.s_addr != parsed_parameters->remote_addr.sin.sin_addr.s_addr) ||
3468 (answer_struct->address_answer.sa.sa_family == AF_INET6 &&
3469 memcmp(&sin6.sin6_addr, &parsed_parameters->remote_addr.sin6.sin6_addr, sizeof(struct in6_addr)) != 0)) {
3470 NECPLOG0(LOG_ERR, "Resolver tag address does not match");
3471 matches = false;
3472 }
3473 }
3474 }
3475 break;
3476 }
3477 case NECP_CLIENT_SIGN_TYPE_BROWSE_RESULT:
3478 case NECP_CLIENT_SIGN_TYPE_SYSTEM_BROWSE_RESULT: {
3479 if (data_length < (sizeof(struct necp_client_browse_result) - sizeof(struct necp_client_signable))) {
3480 NECPLOG0(LOG_ERR, "Resolver tag invalid length for browse result");
3481 matches = false;
3482 } else {
3483 struct necp_client_browse_result * __single answer_struct = (struct necp_client_browse_result *)&validatable->signable;
3484 if (data_length != (sizeof(struct necp_client_browse_result) + answer_struct->service_length - sizeof(struct necp_client_signable))) {
3485 NECPLOG0(LOG_ERR, "Resolver tag invalid length for browse result");
3486 matches = false;
3487 }
3488 }
3489 break;
3490 }
3491 case NECP_CLIENT_SIGN_TYPE_SERVICE_RESOLVER_ANSWER:
3492 case NECP_CLIENT_SIGN_TYPE_SYSTEM_SERVICE_RESOLVER_ANSWER: {
3493 if (data_length < (sizeof(struct necp_client_service_resolver_answer) - sizeof(struct necp_client_signable))) {
3494 NECPLOG0(LOG_ERR, "Resolver tag invalid length for service resolver answer");
3495 matches = false;
3496 } else {
3497 struct necp_client_service_resolver_answer * __single answer_struct = (struct necp_client_service_resolver_answer *)&validatable->signable;
3498 if (data_length != (sizeof(struct necp_client_service_resolver_answer) + answer_struct->service_length + answer_struct->hostname_length - sizeof(struct necp_client_signable))) {
3499 NECPLOG0(LOG_ERR, "Resolver tag invalid length for service resolver answer");
3500 matches = false;
3501 }
3502 }
3503 break;
3504 }
3505 default: {
3506 NECPLOG(LOG_ERR, "Resolver tag unknown sign type: %u", validatable->signable.sign_type);
3507 matches = false;
3508 break;
3509 }
3510 }
3511 if (!matches) {
3512 error = EAUTH;
3513 } else {
3514 const bool validated = necp_validate_resolver_answer(validatable->signable.client_id,
3515 validatable->signable.sign_type,
3516 signable_get_data(&validatable->signable, data_length), data_length,
3517 validatable->signature.signed_tag, sizeof(validatable->signature.signed_tag));
3518 if (!validated) {
3519 error = EAUTH;
3520 NECPLOG0(LOG_ERR, "Failed to validate resolve answer");
3521 }
3522 }
3523 }
3524 }
3525
3526 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) {
3527 necp_client_trace_parsed_parameters(client, parsed_parameters);
3528 }
3529
3530 return error;
3531 }
3532
/*
 * Parse a TLV buffer of NECP client results and populate the given flow.
 *
 * Walks the TLV stream in `result` (result_size bytes total), copying any
 * local/remote endpoint addresses into `flow`. Unknown TLV types are
 * skipped. TLVs whose declared length overruns the buffer are ignored.
 * Always returns 0 in the current implementation (`error` is never set).
 *
 * With SKYWALK, the NEXUS_FLOW_STATS TLV carries a refcounted pointer:
 * ownership of the reference is transferred out via *flow_stats and the
 * TLV value bytes are zeroed so the reference cannot be taken twice.
 */
static int
necp_client_parse_result(u_int8_t * __indexable result,
    u_int32_t result_size,
    struct necp_client_flow *flow,
    void **flow_stats)
{
#pragma unused(flow_stats)
	int error = 0;
	size_t offset = 0;

	// Iterate while a complete TLV header still fits in the buffer
	while ((offset + sizeof(struct necp_tlv_header)) <= result_size) {
		u_int8_t type = necp_buffer_get_tlv_type(result, result_size, offset);
		u_int32_t length = necp_buffer_get_tlv_length(result, result_size, offset);

		// Only process the value when it lies entirely within the buffer
		if (length > 0 && (offset + sizeof(struct necp_tlv_header) + length) <= result_size) {
			u_int8_t * __indexable value = necp_buffer_get_tlv_value(result, result_size, offset, NULL);
			if (value != NULL) {
				switch (type) {
				case NECP_CLIENT_RESULT_LOCAL_ENDPOINT: {
					if (length >= sizeof(struct necp_client_endpoint)) {
						struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
						// Copy as sockaddr_in6: large enough for both v4 and v6
						if (necp_client_address_is_valid(&endpoint->u.sa)) {
							flow->local_addr.sin6 = endpoint->u.sin6;
						}
					}
					break;
				}
				case NECP_CLIENT_RESULT_REMOTE_ENDPOINT: {
					if (length >= sizeof(struct necp_client_endpoint)) {
						struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
						if (necp_client_address_is_valid(&endpoint->u.sa)) {
							flow->remote_addr.sin6 = endpoint->u.sin6;
						}
					}
					break;
				}
#if SKYWALK
				case NECP_CLIENT_RESULT_NEXUS_FLOW_STATS: {
					// this TLV contains flow_stats pointer which is refcnt'ed.
					if (flow_stats != NULL && length >= sizeof(struct sk_stats_flow *)) {
						struct flow_stats * __single fs = *(void **)(void *)value;
						// transfer the refcnt to flow_stats pointer
						*flow_stats = fs;
					}
					memset(value, 0, length); // nullify TLV always
					break;
				}
				case NECP_CLIENT_RESULT_UNIQUE_FLOW_TAG: {
					if (length >= sizeof(uint32_t)) {
						flow->flow_tag = *(uint32_t *)(void *)value;
						break;
					}
					// NOTE(review): when length < sizeof(uint32_t) this case
					// falls through into default (which just breaks) — confirm
					// the fallthrough is intentional.
				}
#endif /* SKYWALK */
				default: {
					break;
				}
				}
			}
		}

		// Advance past this TLV's header and value
		offset += sizeof(struct necp_tlv_header) + length;
	}

	return error;
}
3599
/*
 * Allocate a new flow registration for `client` and link it into the
 * client's registration tree, the fd's flow tree, and the global flow
 * lookup tree.
 *
 * Caller must hold both the fd lock and the client lock (asserted below).
 * The allocation uses Z_NOFAIL, so this function never returns NULL.
 */
static struct necp_client_flow_registration *
necp_client_create_flow_registration(struct necp_fd_data *fd_data, struct necp_client *client)
{
	NECP_FD_ASSERT_LOCKED(fd_data);
	NECP_CLIENT_ASSERT_LOCKED(client);

	struct necp_client_flow_registration *new_registration = kalloc_type(struct necp_client_flow_registration, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	new_registration->last_interface_details = combine_interface_details(IFSCOPE_NONE, NSTAT_IFNET_IS_UNKNOWN_TYPE);

	// Generate a fresh registration ID (true => flow-style client ID)
	necp_generate_client_id(new_registration->registration_id, true);
	LIST_INIT(&new_registration->flow_list);

	// Add registration to client list
	RB_INSERT(_necp_client_flow_tree, &client->flow_registrations, new_registration);

	// Add registration to fd list
	RB_INSERT(_necp_fd_flow_tree, &fd_data->flows, new_registration);

	// Add registration to global tree for lookup
	NECP_FLOW_TREE_LOCK_EXCLUSIVE();
	RB_INSERT(_necp_client_flow_global_tree, &necp_client_flow_global_tree, new_registration);
	NECP_FLOW_TREE_UNLOCK();

	new_registration->client = client;

#if SKYWALK
	{
		// The uuid caching here is something of a hack, but saves a dynamic lookup with attendant lock hierarchy issues
		uint64_t stats_event_type = (uuid_is_null(client->latest_flow_registration_id)) ? NSTAT_EVENT_SRC_FLOW_UUID_ASSIGNED : NSTAT_EVENT_SRC_FLOW_UUID_CHANGED;
		uuid_copy(client->latest_flow_registration_id, new_registration->registration_id);

		// With the flow uuid known, push a new statistics update to ensure the uuid gets known by any clients before the flow can close
		if (client->nstat_context != NULL) {
			nstat_provider_stats_event(client->nstat_context, stats_event_type);
		}
	}
#endif /* SKYWALK */

	// Start out assuming there is nothing to read from the flow
	new_registration->flow_result_read = true;

	return new_registration;
}
3644
3645 static void
necp_client_add_socket_flow(struct necp_client_flow_registration * flow_registration,struct inpcb * inp)3646 necp_client_add_socket_flow(struct necp_client_flow_registration *flow_registration,
3647 struct inpcb *inp)
3648 {
3649 struct necp_client_flow *new_flow = kalloc_type(struct necp_client_flow, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3650
3651 new_flow->socket = TRUE;
3652 new_flow->u.socket_handle = inp;
3653 new_flow->u.cb = inp->necp_cb;
3654
3655 OSIncrementAtomic(&necp_socket_flow_count);
3656
3657 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
3658 }
3659
3660 static int
necp_client_register_socket_inner(pid_t pid,uuid_t client_id,struct inpcb * inp,bool is_listener)3661 necp_client_register_socket_inner(pid_t pid, uuid_t client_id, struct inpcb *inp, bool is_listener)
3662 {
3663 int error = 0;
3664 struct necp_fd_data *client_fd = NULL;
3665 bool found_client = FALSE;
3666
3667 NECP_FD_LIST_LOCK_SHARED();
3668 LIST_FOREACH(client_fd, &necp_fd_list, chain) {
3669 NECP_FD_LOCK(client_fd);
3670 struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
3671 if (client != NULL) {
3672 if (!pid || client->proc_pid == pid) {
3673 if (is_listener) {
3674 found_client = TRUE;
3675 #if SKYWALK
3676 // Check netns token for registration
3677 if (!NETNS_TOKEN_VALID(&client->port_reservation)) {
3678 error = EINVAL;
3679 }
3680 #endif /* !SKYWALK */
3681 } else {
3682 // Find client flow and assign from socket
3683 struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
3684 if (flow_registration != NULL) {
3685 // Found the right client and flow registration, add a new flow
3686 found_client = TRUE;
3687 necp_client_add_socket_flow(flow_registration, inp);
3688 } else if (RB_EMPTY(&client->flow_registrations) && !necp_client_id_is_flow(client_id)) {
3689 // No flows yet on this client, add a new registration
3690 flow_registration = necp_client_create_flow_registration(client_fd, client);
3691 if (flow_registration == NULL) {
3692 error = ENOMEM;
3693 } else {
3694 // Add a new flow
3695 found_client = TRUE;
3696 necp_client_add_socket_flow(flow_registration, inp);
3697 }
3698 }
3699 }
3700 }
3701
3702 NECP_CLIENT_UNLOCK(client);
3703 }
3704 NECP_FD_UNLOCK(client_fd);
3705
3706 if (found_client) {
3707 break;
3708 }
3709 }
3710 NECP_FD_LIST_UNLOCK();
3711
3712 if (!found_client) {
3713 error = ENOENT;
3714 } else {
3715 // Count the sockets that have the NECP client UUID set
3716 struct socket *so = inp->inp_socket;
3717 if (!(so->so_flags1 & SOF1_HAS_NECP_CLIENT_UUID)) {
3718 so->so_flags1 |= SOF1_HAS_NECP_CLIENT_UUID;
3719 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_necp_clientuuid_total);
3720 }
3721 }
3722
3723 return error;
3724 }
3725
/*
 * Register a socket (inpcb) as a data flow on the NECP client identified
 * by client_id. Thin wrapper around necp_client_register_socket_inner
 * with is_listener == false.
 */
int
necp_client_register_socket_flow(pid_t pid, uuid_t client_id, struct inpcb *inp)
{
	return necp_client_register_socket_inner(pid, client_id, inp, false);
}
3731
/*
 * Register a listening socket (inpcb) with the NECP client identified by
 * client_id. Thin wrapper around necp_client_register_socket_inner with
 * is_listener == true.
 */
int
necp_client_register_socket_listener(pid_t pid, uuid_t client_id, struct inpcb *inp)
{
	return necp_client_register_socket_inner(pid, client_id, inp, true);
}
3737
#if SKYWALK
/*
 * Look up the netns flow info for the client identified by client_id.
 *
 * Walks all NECP fds for the matching client. Returns 0 on success (with
 * *flow_info filled in by netns_get_flow_info), EINVAL if the client has
 * no valid port reservation, or ENOENT if no matching client exists.
 */
int
necp_client_get_netns_flow_info(uuid_t client_id, struct ns_flow_info *flow_info)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			found_client = TRUE;
			if (!NETNS_TOKEN_VALID(&client->port_reservation)) {
				error = EINVAL;
			} else {
				error = netns_get_flow_info(&client->port_reservation, flow_info);
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		// Stop scanning once the client was found, regardless of error
		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	}

	return error;
}
#endif /* SKYWALK */
3775
/*
 * Attach a multipath PCB to a flow registration and create per-interface
 * tracking flows for the client.
 *
 * NOTE(review): interface_handle/interface_cb are assigned before the
 * proc lookup, so they remain set even when proc_find fails and we
 * return early without adding interface flows — confirm this ordering
 * is intentional.
 */
static void
necp_client_add_multipath_interface_flows(struct necp_client_flow_registration *flow_registration,
    struct necp_client *client,
    struct mppcb *mpp)
{
	flow_registration->interface_handle = mpp;
	flow_registration->interface_cb = mpp->necp_cb;

	proc_t proc = proc_find(client->proc_pid);
	if (proc == PROC_NULL) {
		return;
	}

	// Traverse all interfaces and add a tracking flow if needed
	necp_flow_add_interface_flows(proc, client, flow_registration, true);

	proc_rele(proc);
	proc = PROC_NULL;
}
3795
/*
 * Register a multipath PCB with the NECP client identified by client_id.
 *
 * Walks all NECP fds for the matching client (restricted to `pid` when
 * nonzero) and attaches multipath interface flows to an existing flow
 * registration, creating a registration first if the client has none.
 *
 * Returns 0 on success, ENOENT if no matching client was found, or
 * ENOMEM if a flow registration could not be created.
 */
int
necp_client_register_multipath_cb(pid_t pid, uuid_t client_id, struct mppcb *mpp)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			if (!pid || client->proc_pid == pid) {
				struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
				if (flow_registration != NULL) {
					// Found the right client and flow registration, add a new flow
					found_client = TRUE;
					necp_client_add_multipath_interface_flows(flow_registration, client, mpp);
				} else if (RB_EMPTY(&client->flow_registrations) && !necp_client_id_is_flow(client_id)) {
					// No flows yet on this client, add a new registration
					flow_registration = necp_client_create_flow_registration(client_fd, client);
					if (flow_registration == NULL) {
						error = ENOMEM;
					} else {
						// Add a new flow
						found_client = TRUE;
						necp_client_add_multipath_interface_flows(flow_registration, client, mpp);
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	// Preserve any earlier error (e.g. ENOMEM) rather than clobbering it
	if (!found_client && error == 0) {
		error = ENOENT;
	}

	return error;
}
3843
3844 #define NETAGENT_DOMAIN_RADIO_MANAGER "WirelessRadioManager"
3845 #define NETAGENT_TYPE_RADIO_MANAGER "WirelessRadioManager:BB Manager"
3846
3847 static int
necp_client_lookup_bb_radio_manager(struct necp_client * client,uuid_t netagent_uuid)3848 necp_client_lookup_bb_radio_manager(struct necp_client *client,
3849 uuid_t netagent_uuid)
3850 {
3851 char netagent_domain[NETAGENT_DOMAINSIZE];
3852 char netagent_type[NETAGENT_TYPESIZE];
3853 struct necp_aggregate_result result;
3854 proc_t proc;
3855 int error;
3856
3857 proc = proc_find(client->proc_pid);
3858 if (proc == PROC_NULL) {
3859 return ESRCH;
3860 }
3861
3862 error = necp_application_find_policy_match_internal(proc, client->parameters, (u_int32_t)client->parameters_length,
3863 &result, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, true, true, NULL);
3864
3865 proc_rele(proc);
3866 proc = PROC_NULL;
3867
3868 if (error) {
3869 return error;
3870 }
3871
3872 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
3873 if (uuid_is_null(result.netagents[i])) {
3874 // Passed end of valid agents
3875 break;
3876 }
3877
3878 memset(&netagent_domain, 0, NETAGENT_DOMAINSIZE);
3879 memset(&netagent_type, 0, NETAGENT_TYPESIZE);
3880 if (netagent_get_agent_domain_and_type(result.netagents[i], netagent_domain, netagent_type) == FALSE) {
3881 continue;
3882 }
3883
3884 if (strlcmp(netagent_domain, NETAGENT_DOMAIN_RADIO_MANAGER, NETAGENT_DOMAINSIZE) != 0) {
3885 continue;
3886 }
3887
3888 if (strlcmp(netagent_type, NETAGENT_TYPE_RADIO_MANAGER, NETAGENT_TYPESIZE) != 0) {
3889 continue;
3890 }
3891
3892 uuid_copy(netagent_uuid, result.netagents[i]);
3893
3894 break;
3895 }
3896
3897 return 0;
3898 }
3899
3900 static int
necp_client_assert_bb_radio_manager_common(struct necp_client * client,bool assert)3901 necp_client_assert_bb_radio_manager_common(struct necp_client *client, bool assert)
3902 {
3903 uuid_t netagent_uuid;
3904 uint8_t assert_type;
3905 int error;
3906
3907 error = necp_client_lookup_bb_radio_manager(client, netagent_uuid);
3908 if (error) {
3909 NECPLOG0(LOG_ERR, "BB radio manager agent not found");
3910 return error;
3911 }
3912
3913 // Before unasserting, verify that the assertion was already taken
3914 if (assert == FALSE) {
3915 assert_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
3916
3917 if (!necp_client_remove_assertion(client, netagent_uuid)) {
3918 return EINVAL;
3919 }
3920 } else {
3921 assert_type = NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT;
3922 }
3923
3924 error = netagent_client_message(netagent_uuid, client->client_id, client->proc_pid, client->agent_handle, assert_type);
3925 if (error) {
3926 NECPLOG0(LOG_ERR, "netagent_client_message failed");
3927 return error;
3928 }
3929
3930 // Only save the assertion if the action succeeded
3931 if (assert == TRUE) {
3932 necp_client_add_assertion(client, netagent_uuid);
3933 }
3934
3935 return 0;
3936 }
3937
3938 int
necp_client_assert_bb_radio_manager(uuid_t client_id,bool assert)3939 necp_client_assert_bb_radio_manager(uuid_t client_id, bool assert)
3940 {
3941 struct necp_client *client;
3942 int error = 0;
3943
3944 NECP_CLIENT_TREE_LOCK_SHARED();
3945
3946 client = necp_find_client_and_lock(client_id);
3947
3948 if (client) {
3949 // Found the right client!
3950 error = necp_client_assert_bb_radio_manager_common(client, assert);
3951
3952 NECP_CLIENT_UNLOCK(client);
3953 } else {
3954 NECPLOG0(LOG_ERR, "Couldn't find client");
3955 error = ENOENT;
3956 }
3957
3958 NECP_CLIENT_TREE_UNLOCK();
3959
3960 return error;
3961 }
3962
/*
 * Remove any socket-backed flows whose socket handle matches `handle` from
 * the flow registration identified by client_id. Removed flow entries and
 * their previously assigned results are freed, and the owning fd is
 * notified if a flow was actually removed.
 *
 * Returns 0 on success, or ENOENT if no matching client/flow was found.
 */
static int
necp_client_unregister_socket_flow(uuid_t client_id, void *handle)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	// The client may live on any NECP fd; walk them all
	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);

		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;

				// Remove flow assignment
				struct necp_client_flow * __single search_flow = NULL;
				struct necp_client_flow *temp_flow = NULL;
				// _SAFE variant: entries are unlinked and freed mid-walk
				LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
					if (search_flow->socket && search_flow->u.socket_handle == handle) {
						// Free any results previously assigned to this flow
						if (search_flow->assigned_results != NULL) {
							kfree_data_counted_by(search_flow->assigned_results, search_flow->assigned_results_length);
						}
						client_updated = TRUE;
						// Force userspace to re-read results for this registration
						flow_registration->flow_result_read = FALSE;
						LIST_REMOVE(search_flow, flow_chain);
						OSDecrementAtomic(&necp_socket_flow_count);
						kfree_type(struct necp_client_flow, search_flow);
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}

		// Wake the fd so userspace observes the updated flow state
		if (client_updated) {
			necp_fd_notify(client_fd, true);
		}
		NECP_FD_UNLOCK(client_fd);

		// No need to check the remaining fds once the client is found
		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	}

	return error;
}
4019
/*
 * Detach a multipath callback registration: clear the handle and callback
 * pointers on any non-socket, non-nexus flows matching `handle`, and clear
 * the registration-level interface callback state.
 *
 * Returns 0 on success, or ENOENT if no matching client/flow was found.
 */
static int
necp_client_unregister_multipath_cb(uuid_t client_id, void *handle)
{
	int error = 0;
	bool found_client = FALSE;

	NECP_CLIENT_TREE_LOCK_SHARED();

	struct necp_client *client = necp_find_client_and_lock(client_id);
	if (client != NULL) {
		struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
		if (flow_registration != NULL) {
			// Found the right client and flow!
			found_client = TRUE;

			// Remove flow assignment
			struct necp_client_flow *search_flow = NULL;
			struct necp_client_flow *temp_flow = NULL;
			LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
				// Only callback-based (non-socket, non-nexus) flows carry u.cb
				if (!search_flow->socket && !search_flow->nexus &&
				    search_flow->u.socket_handle == handle) {
					search_flow->u.socket_handle = NULL;
					search_flow->u.cb = NULL;
				}
			}

			// Also drop the registration's interface callback
			flow_registration->interface_handle = NULL;
			flow_registration->interface_cb = NULL;
		}

		NECP_CLIENT_UNLOCK(client);
	}

	NECP_CLIENT_TREE_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	}

	return error;
}
4061
/*
 * Populate a socket-backed flow's addresses and interface directly from its
 * inpcb. If the client has no flow registrations yet (and client_id is not
 * itself a flow ID), a new registration is created on the fly. On success,
 * an assignment message is generated so userspace can read the results.
 *
 * pid == 0 searches all processes; otherwise only fds owned by that pid
 * are considered.
 *
 * Returns 0 on success, ENOENT if no matching client was found, EINVAL if
 * the client was found but no flow was updated, or ENOMEM if a new flow
 * registration could not be allocated.
 */
int
necp_client_assign_from_socket(pid_t pid, uuid_t client_id, struct inpcb *inp)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		// Restrict the search to the requested process, if one was given
		if (pid && client_fd->proc_pid != pid) {
			continue;
		}

		// Hold a proc reference; it is needed for the viability check below
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);

		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration == NULL && RB_EMPTY(&client->flow_registrations) && !necp_client_id_is_flow(client_id)) {
				// No flows yet on this client, add a new registration
				flow_registration = necp_client_create_flow_registration(client_fd, client);
				if (flow_registration == NULL) {
					error = ENOMEM;
				}
			}
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;

				struct necp_client_flow *flow = NULL;
				LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
					if (flow->socket && flow->u.socket_handle == inp) {
						// Release prior results and route
						if (flow->assigned_results != NULL) {
							kfree_data_counted_by(flow->assigned_results, flow->assigned_results_length);
						}

						// Prefer an explicitly bound interface over the last output interface
						ifnet_t ifp = NULL;
						if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
							ifp = inp->inp_boundifp;
						} else {
							ifp = inp->inp_last_outifp;
						}

						if (ifp != NULL) {
							flow->interface_index = ifp->if_index;
						} else {
							flow->interface_index = IFSCOPE_NONE;
						}

						// Copy the socket's local/remote endpoints into the flow
						if (inp->inp_vflag & INP_IPV4) {
							flow->local_addr.sin.sin_family = AF_INET;
							flow->local_addr.sin.sin_len = sizeof(struct sockaddr_in);
							flow->local_addr.sin.sin_port = inp->inp_lport;
							memcpy(&flow->local_addr.sin.sin_addr, &inp->inp_laddr, sizeof(struct in_addr));

							flow->remote_addr.sin.sin_family = AF_INET;
							flow->remote_addr.sin.sin_len = sizeof(struct sockaddr_in);
							flow->remote_addr.sin.sin_port = inp->inp_fport;
							memcpy(&flow->remote_addr.sin.sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
						} else if (inp->inp_vflag & INP_IPV6) {
							in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope, &flow->local_addr.sin6, sizeof(flow->local_addr));
							in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope, &flow->remote_addr.sin6, sizeof(flow->remote_addr));
						}

						flow->viable = necp_client_flow_is_viable(proc, client, flow);

						// Socket flows have no nexus agent; the message uses a null UUID
						uuid_t empty_uuid;
						uuid_clear(empty_uuid);
						flow->assigned = TRUE;

						size_t message_length;
						void *message = necp_create_nexus_assign_message(empty_uuid, 0, NULL, 0,
						    (struct necp_client_endpoint *)&flow->local_addr,
						    (struct necp_client_endpoint *)&flow->remote_addr,
						    NULL, 0, NULL, 0, &message_length);
						flow->assigned_results = message;
						flow->assigned_results_length = message_length;
						// Mark results unread so userspace re-fetches them
						flow_registration->flow_result_read = FALSE;
						client_updated = TRUE;
						break;
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		if (client_updated) {
			necp_fd_notify(client_fd, true);
		}
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	// ENOMEM (if set above) takes precedence over the lookup results
	if (error == 0) {
		if (!found_client) {
			error = ENOENT;
		} else if (!client_updated) {
			error = EINVAL;
		}
	}

	return error;
}
4179
/*
 * Decide whether a listener socket (external port, no remote address) may
 * receive traffic on the given interface, based on its NECP client
 * parameters. Connected sockets and sockets without a client pass
 * immediately, since outbound policy already validated them.
 */
bool
necp_socket_is_allowed_to_recv_on_interface(struct inpcb *inp, ifnet_t interface)
{
	if (interface == NULL ||
	    inp == NULL ||
	    !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
	    uuid_is_null(inp->necp_client_uuid)) {
		// If there's no interface or client ID to check,
		// or if this is not a listener, pass.
		// Outbound connections will have already been
		// validated for policy.
		return TRUE;
	}

	// Only filter out listener sockets (no remote address specified)
	if ((inp->inp_vflag & INP_IPV4) &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		return TRUE;
	}
	if ((inp->inp_vflag & INP_IPV6) &&
	    !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
		return TRUE;
	}

	// Default to allowed if the client cannot be found or parsed
	bool allowed = TRUE;

	NECP_CLIENT_TREE_LOCK_SHARED();

	struct necp_client *client = necp_find_client_and_lock(inp->necp_client_uuid);
	if (client != NULL) {
		struct necp_client_parsed_parameters * __single parsed_parameters = NULL;

		// Z_NOFAIL: allocation cannot return NULL, so no check is needed
		parsed_parameters = kalloc_type(struct necp_client_parsed_parameters,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		int error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, parsed_parameters);
		if (error == 0) {
			// Deny reception only if the interface fails the client's requirements
			if (!necp_ifnet_matches_parameters(interface, parsed_parameters, 0, NULL, true, false)) {
				allowed = FALSE;
			}
		}
		kfree_type(struct necp_client_parsed_parameters, parsed_parameters);

		NECP_CLIENT_UNLOCK(client);
	}

	NECP_CLIENT_TREE_UNLOCK();

	return allowed;
}
4229
/*
 * Record a protocol control event on the flow identified by client_id. The
 * event is stored on the first matching flow (a nexus flow whose agent
 * matches netagent_uuid, or any socket flow), and the owning fd is
 * notified so userspace can read the event.
 *
 * Returns 0 on success, ENOENT if no client/flow matched, or EINVAL if the
 * client was found but no flow accepted the event.
 */
int
necp_update_flow_protoctl_event(uuid_t netagent_uuid, uuid_t client_id,
    uint32_t protoctl_event_code, uint32_t protoctl_event_val,
    uint32_t protoctl_event_tcp_seq_number)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		// Take a reference on the owning process; skip fds whose process is gone
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);

		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;

				struct necp_client_flow *flow = NULL;
				LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
					// Verify that the client nexus agent matches
					if ((flow->nexus && uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) ||
					    flow->socket) {
						flow->has_protoctl_event = TRUE;
						flow->protoctl_event.protoctl_event_code = protoctl_event_code;
						flow->protoctl_event.protoctl_event_val = protoctl_event_val;
						flow->protoctl_event.protoctl_event_tcp_seq_num = protoctl_event_tcp_seq_number;
						// Mark results unread so userspace re-fetches them
						flow_registration->flow_result_read = FALSE;
						client_updated = TRUE;
						break;
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}

		// Wake the fd so userspace observes the pending event
		if (client_updated) {
			necp_fd_notify(client_fd, true);
		}

		NECP_FD_UNLOCK(client_fd);
		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	} else if (!client_updated) {
		error = EINVAL;
	}
	return error;
}
4296
/*
 * Assign results from a netagent to the matching nexus flow on the given
 * flow registration. Must be called with both the fd lock and the client
 * lock held (asserted below).
 *
 * Ownership: if this returns true, the flow takes ownership of
 * assigned_results. If it returns false, the caller keeps ownership and
 * must free assigned_results.
 */
static bool
necp_assign_client_result_locked(struct proc *proc,
    struct necp_fd_data *client_fd,
    struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    uuid_t netagent_uuid,
    u_int8_t * __indexable assigned_results,
    size_t assigned_results_length,
    bool notify_fd,
    bool assigned_from_userspace_agent)
{
	bool client_updated = FALSE;

	NECP_FD_ASSERT_LOCKED(client_fd);
	NECP_CLIENT_ASSERT_LOCKED(client);

	struct necp_client_flow *flow = NULL;
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		// Verify that the client nexus agent matches
		if (flow->nexus &&
		    uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) {
			// Release prior results and route
			if (flow->assigned_results != NULL) {
				kfree_data_counted_by(flow->assigned_results, flow->assigned_results_length);
			}

			void * __single nexus_stats = NULL;
			if (assigned_results != NULL && assigned_results_length > 0) {
				int error = necp_client_parse_result(assigned_results, (u_int32_t)assigned_results_length,
				    flow, assigned_from_userspace_agent ? NULL : &nexus_stats); // Only assign stats from kernel agents
				VERIFY(error == 0);
			}

			// Re-evaluate viability now that the results have changed
			flow->viable = necp_client_flow_is_viable(proc, client, flow);

			flow->assigned = TRUE;
			flow->assigned_results = assigned_results;
			flow->assigned_results_length = assigned_results_length;
			// Mark results unread so userspace re-fetches them
			flow_registration->flow_result_read = FALSE;
#if SKYWALK
			// Swap in the new nexus stats, dropping any previous reference
			if (nexus_stats != NULL) {
				if (flow_registration->nexus_stats != NULL) {
					flow_stats_release(flow_registration->nexus_stats);
				}
				flow_registration->nexus_stats = nexus_stats;
			}
#endif /* SKYWALK */
			client_updated = TRUE;
			break;
		}
	}

	if (client_updated && notify_fd) {
		necp_fd_notify(client_fd, true);
	}

	// if not updated, client must free assigned_results
	return client_updated;
}
4356
/*
 * Entry point for a userspace netagent to assign results to the flow
 * identified by client_id. On success, ownership of assigned_results
 * transfers to the flow; on error the caller must free it.
 *
 * Returns 0 on success, ENOENT if no client/flow matched, or EINVAL if the
 * flow was found but not updated.
 */
int
necp_assign_client_result(uuid_t netagent_uuid, uuid_t client_id,
    u_int8_t * __sized_by(assigned_results_length)assigned_results, size_t assigned_results_length)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		// Take a reference on the owning process; skip fds whose process is gone
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;
				// notify_fd=true, assigned_from_userspace_agent=true
				if (necp_assign_client_result_locked(proc, client_fd, client, flow_registration, netagent_uuid,
				    assigned_results, assigned_results_length, true, true)) {
					client_updated = TRUE;
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();

	// upon error, client must free assigned_results
	if (!found_client) {
		error = ENOENT;
	} else if (!client_updated) {
		error = EINVAL;
	}

	return error;
}
4410
/*
 * Assign the group-member result blob to the client identified by
 * client_id, replacing (and freeing) any previously assigned blob.
 * Ownership of assigned_group_members transfers to the client on success;
 * on error the caller must free it.
 *
 * Returns 0 on success, ENOENT if the client was not found, or EINVAL if it
 * was found but not updated.
 */
int
necp_assign_client_group_members(uuid_t netagent_uuid, uuid_t client_id,
    u_int8_t *__counted_by(assigned_group_members_length) assigned_group_members,
    size_t assigned_group_members_length)
{
#pragma unused(netagent_uuid)
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = false;
	bool client_updated = false;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		// Take a reference on the owning process; skip fds whose process is gone
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			found_client = true;
			// Release prior results
			if (client->assigned_group_members != NULL) {
				kfree_data_counted_by(client->assigned_group_members, client->assigned_group_members_length);
			}

			// Save new results
			client->assigned_group_members = assigned_group_members;
			client->assigned_group_members_length = assigned_group_members_length;
			// Mark unread so userspace re-fetches the group members
			client->group_members_read = false;

			client_updated = true;
			necp_fd_notify(client_fd, true);

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();

	// upon error, client must free assigned_results
	if (!found_client) {
		error = ENOENT;
	} else if (!client_updated) {
		error = EINVAL;
	}

	return error;
}
4470
4471 /// Client updating
4472
4473 static bool
necp_update_parsed_parameters(struct necp_client_parsed_parameters * parsed_parameters,struct necp_aggregate_result * result)4474 necp_update_parsed_parameters(struct necp_client_parsed_parameters *parsed_parameters,
4475 struct necp_aggregate_result *result)
4476 {
4477 if (parsed_parameters == NULL ||
4478 result == NULL) {
4479 return false;
4480 }
4481
4482 bool updated = false;
4483 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
4484 if (uuid_is_null(result->netagents[i])) {
4485 // Passed end of valid agents
4486 break;
4487 }
4488
4489 if (!(result->netagent_use_flags[i] & NECP_AGENT_USE_FLAG_SCOPE)) {
4490 // Not a scoped agent, ignore
4491 continue;
4492 }
4493
4494 // This is a scoped agent. Add it to the required agents.
4495 if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT) {
4496 // Already some required agents, add this at the end
4497 for (int j = 0; j < NECP_MAX_AGENT_PARAMETERS; j++) {
4498 if (uuid_compare(parsed_parameters->required_netagents[j], result->netagents[i]) == 0) {
4499 // Already required, break
4500 break;
4501 }
4502 if (uuid_is_null(parsed_parameters->required_netagents[j])) {
4503 // Add here
4504 memcpy(&parsed_parameters->required_netagents[j], result->netagents[i], sizeof(uuid_t));
4505 updated = true;
4506 break;
4507 }
4508 }
4509 } else {
4510 // No required agents yet, add this one
4511 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT;
4512 memcpy(&parsed_parameters->required_netagents[0], result->netagents[i], sizeof(uuid_t));
4513 updated = true;
4514 }
4515
4516 // Remove requirements for agents of the same type
4517 if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
4518 char remove_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
4519 char remove_agent_type[NETAGENT_TYPESIZE] = { 0 };
4520 if (netagent_get_agent_domain_and_type(result->netagents[i], remove_agent_domain, remove_agent_type)) {
4521 for (int j = 0; j < NECP_MAX_AGENT_PARAMETERS; j++) {
4522 if (strbuflen(parsed_parameters->required_netagent_types[j].netagent_domain, sizeof(parsed_parameters->required_netagent_types[j].netagent_domain)) == 0 &&
4523 strbuflen(parsed_parameters->required_netagent_types[j].netagent_type, sizeof(parsed_parameters->required_netagent_types[j].netagent_type)) == 0) {
4524 break;
4525 }
4526
4527 if (strbufcmp(parsed_parameters->required_netagent_types[j].netagent_domain, sizeof(parsed_parameters->required_netagent_types[j].netagent_domain), remove_agent_domain, NETAGENT_DOMAINSIZE) == 0 &&
4528 strbufcmp(parsed_parameters->required_netagent_types[j].netagent_type, sizeof(parsed_parameters->required_netagent_types[j].netagent_type), remove_agent_type, NETAGENT_TYPESIZE) == 0) {
4529 updated = true;
4530
4531 if (j == NECP_MAX_AGENT_PARAMETERS - 1) {
4532 // Last field, just clear and break
4533 memset(&parsed_parameters->required_netagent_types[NECP_MAX_AGENT_PARAMETERS - 1], 0, sizeof(struct necp_client_parameter_netagent_type));
4534 break;
4535 } else {
4536 // Move the parameters down, clear the last entry
4537 memmove(&parsed_parameters->required_netagent_types[j],
4538 &parsed_parameters->required_netagent_types[j + 1],
4539 sizeof(struct necp_client_parameter_netagent_type) * (NECP_MAX_AGENT_PARAMETERS - (j + 1)));
4540 memset(&parsed_parameters->required_netagent_types[NECP_MAX_AGENT_PARAMETERS - 1], 0, sizeof(struct necp_client_parameter_netagent_type));
4541 // Continue, don't increment but look at the new shifted item instead
4542 continue;
4543 }
4544 }
4545
4546 // Increment j to look at the next agent type parameter
4547 j++;
4548 }
4549 }
4550 }
4551 }
4552
4553 if (updated &&
4554 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4555 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) == 0) {
4556 // A required interface index was added after the fact. Clear it.
4557 parsed_parameters->required_interface_index = IFSCOPE_NONE;
4558 }
4559
4560
4561 return updated;
4562 }
4563
4564 static inline bool
necp_agent_types_match(const char * __sized_by (NETAGENT_DOMAINSIZE)agent_domain1,const char * __sized_by (NETAGENT_TYPESIZE)agent_type1,const char * __sized_by (NETAGENT_DOMAINSIZE)agent_domain2,const char * __sized_by (NETAGENT_TYPESIZE)agent_type2)4565 necp_agent_types_match(const char * __sized_by(NETAGENT_DOMAINSIZE)agent_domain1, const char * __sized_by(NETAGENT_TYPESIZE)agent_type1,
4566 const char * __sized_by(NETAGENT_DOMAINSIZE)agent_domain2, const char * __sized_by(NETAGENT_TYPESIZE)agent_type2)
4567 {
4568 return (strbuflen(agent_domain1, NETAGENT_DOMAINSIZE) == 0 ||
4569 strbufcmp(agent_domain2, NETAGENT_DOMAINSIZE, agent_domain1, NETAGENT_DOMAINSIZE) == 0) &&
4570 (strbuflen(agent_type1, NETAGENT_TYPESIZE) == 0 ||
4571 strbufcmp(agent_type2, NETAGENT_TYPESIZE, agent_type1, NETAGENT_TYPESIZE) == 0);
4572 }
4573
/*
 * Run the policy match for a client against its parsed parameters and fill
 * in the aggregate result, flags, reason, gateways, and override euuid.
 * When the matched interface requires agent validation, verify that every
 * required agent (by UUID) and required agent type is present in the
 * result; if any requirement fails, the result is zeroed so everything
 * fails downstream.
 *
 * Returns FALSE only if the internal policy match itself errors; a zeroed
 * result still returns TRUE.
 */
static inline bool
necp_calculate_client_result(proc_t proc,
    struct necp_client *client,
    struct necp_client_parsed_parameters *parsed_parameters,
    struct necp_aggregate_result *result,
    u_int32_t *flags,
    u_int32_t *reason,
    struct necp_client_endpoint *v4_gateway,
    struct necp_client_endpoint *v6_gateway,
    uuid_t *override_euuid)
{
	struct rtentry * __single route = NULL;

	// Check parameters to find best interface
	bool validate_agents = false;
	u_int matching_if_index = 0;
	if (necp_find_matching_interface_index(parsed_parameters, &matching_if_index, &validate_agents)) {
		if (matching_if_index != 0) {
			parsed_parameters->required_interface_index = matching_if_index;
		}
		// Interface found or not needed, match policy.
		memset(result, 0, sizeof(*result));
		int error = necp_application_find_policy_match_internal(proc, client->parameters,
		    (u_int32_t)client->parameters_length,
		    result, flags, reason, matching_if_index,
		    NULL, NULL,
		    v4_gateway, v6_gateway,
		    &route, false, true,
		    override_euuid);
		if (error != 0) {
			// Match failed: drop the route reference before returning
			if (route != NULL) {
				rtfree(route);
			}
			return FALSE;
		}

		if (validate_agents) {
			bool requirement_failed = FALSE;
			// First pass: every required agent UUID must appear in the result
			if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT) {
				for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
					if (uuid_is_null(parsed_parameters->required_netagents[i])) {
						// End of the required-agent list
						break;
					}

					bool requirement_found = FALSE;
					for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
						if (uuid_is_null(result->netagents[j])) {
							break;
						}

						if (result->netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) {
							// A removed agent, ignore
							continue;
						}

						if (uuid_compare(parsed_parameters->required_netagents[i], result->netagents[j]) == 0) {
							requirement_found = TRUE;
							break;
						}
					}

					if (!requirement_found) {
						requirement_failed = TRUE;
						break;
					}
				}
			}

			// Second pass: every required agent (domain, type) must match some result agent
			if (!requirement_failed && parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
				for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
					if (strbuflen(parsed_parameters->required_netagent_types[i].netagent_domain, sizeof(parsed_parameters->required_netagent_types[i].netagent_domain)) == 0 &&
					    strbuflen(parsed_parameters->required_netagent_types[i].netagent_type, sizeof(parsed_parameters->required_netagent_types[i].netagent_type)) == 0) {
						// Empty entry marks the end of the list
						break;
					}

					bool requirement_found = FALSE;
					for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
						if (uuid_is_null(result->netagents[j])) {
							break;
						}

						if (result->netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) {
							// A removed agent, ignore
							continue;
						}

						char policy_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
						char policy_agent_type[NETAGENT_TYPESIZE] = { 0 };

						if (netagent_get_agent_domain_and_type(result->netagents[j], policy_agent_domain, policy_agent_type)) {
							if (necp_agent_types_match(parsed_parameters->required_netagent_types[i].netagent_domain,
							    parsed_parameters->required_netagent_types[i].netagent_type,
							    policy_agent_domain, policy_agent_type)) {
								requirement_found = TRUE;
								break;
							}
						}
					}

					if (!requirement_found) {
						requirement_failed = TRUE;
						break;
					}
				}
			}

			if (requirement_failed) {
				// Agent requirement failed. Clear out the whole result, make everything fail.
				memset(result, 0, sizeof(*result));
				if (route != NULL) {
					rtfree(route);
				}
				return TRUE;
			}
		}

		// Reset current route
		NECP_CLIENT_ROUTE_LOCK(client);
		if (client->current_route != NULL) {
			rtfree(client->current_route);
		}
		// Ownership of the route reference transfers to the client here
		client->current_route = route;
		NECP_CLIENT_ROUTE_UNLOCK(client);
	} else {
		// Interface not found. Clear out the whole result, make everything fail.
		memset(result, 0, sizeof(*result));
	}

	return TRUE;
}
4704
4705 static bool
necp_agent_is_removed_by_type(struct necp_aggregate_result * result,uuid_t agent_uuid)4706 necp_agent_is_removed_by_type(struct necp_aggregate_result *result,
4707 uuid_t agent_uuid)
4708 {
4709 for (int i = 0; i < NECP_MAX_REMOVE_NETAGENT_TYPES; i++) {
4710 if (result->remove_netagent_types[i].agent_domain[0] == 0 &&
4711 result->remove_netagent_types[i].agent_type[0] == 0) {
4712 // Empty type, hit the end of the list
4713 return false;
4714 }
4715
4716 char compare_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
4717 char compare_agent_type[NETAGENT_TYPESIZE] = { 0 };
4718 if (netagent_get_agent_domain_and_type(agent_uuid, compare_agent_domain, compare_agent_type)) {
4719 if (necp_agent_types_match(result->remove_netagent_types[i].agent_domain,
4720 result->remove_netagent_types[i].agent_type,
4721 compare_agent_domain, compare_agent_type)) {
4722 return true;
4723 }
4724 }
4725 }
4726 return false;
4727 }
4728
4729 #define NECP_PARSED_PARAMETERS_REQUIRED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF | \
4730 NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
4731 NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
4732 NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE)
4733
4734 static bool
necp_update_client_result(proc_t proc,struct necp_fd_data * client_fd,struct necp_client * client,struct _necp_flow_defunct_list * defunct_list)4735 necp_update_client_result(proc_t proc,
4736 struct necp_fd_data *client_fd,
4737 struct necp_client *client,
4738 struct _necp_flow_defunct_list *defunct_list)
4739 {
4740 struct necp_client_result_netagent netagent;
4741 struct necp_aggregate_result result;
4742 struct necp_client_parsed_parameters * __single parsed_parameters = NULL;
4743 u_int32_t flags = 0;
4744 u_int32_t reason = 0;
4745
4746 NECP_CLIENT_ASSERT_LOCKED(client);
4747
4748 parsed_parameters = kalloc_type(struct necp_client_parsed_parameters,
4749 Z_WAITOK | Z_ZERO | Z_NOFAIL);
4750
4751 // Nexus flows will be brought back if they are still valid
4752 necp_client_mark_all_nonsocket_flows_as_invalid(client);
4753
4754 int error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, parsed_parameters);
4755 if (error != 0) {
4756 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4757 return FALSE;
4758 }
4759 bool originally_scoped = (parsed_parameters->required_interface_index != IFSCOPE_NONE);
4760
4761 // Update saved IP protocol
4762 client->ip_protocol = parsed_parameters->ip_protocol;
4763
4764 // Calculate the policy result
4765 struct necp_client_endpoint v4_gateway = {};
4766 struct necp_client_endpoint v6_gateway = {};
4767 uuid_t override_euuid;
4768 uuid_clear(override_euuid);
4769 if (!necp_calculate_client_result(proc, client, parsed_parameters, &result, &flags, &reason, &v4_gateway, &v6_gateway, &override_euuid)) {
4770 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4771 return FALSE;
4772 }
4773
4774 if (necp_update_parsed_parameters(parsed_parameters, &result)) {
4775 // Changed the parameters based on result, try again (only once)
4776 if (!necp_calculate_client_result(proc, client, parsed_parameters, &result, &flags, &reason, &v4_gateway, &v6_gateway, &override_euuid)) {
4777 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4778 return FALSE;
4779 }
4780 }
4781
4782 if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) &&
4783 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4784 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) == 0) {
4785 // Listener should not apply required interface index if
4786 parsed_parameters->required_interface_index = IFSCOPE_NONE;
4787 }
4788
4789 // Save the last policy id on the client
4790 client->policy_id = result.policy_id;
4791 client->skip_policy_id = result.skip_policy_id;
4792 uuid_copy(client->override_euuid, override_euuid);
4793
4794 if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_MULTIPATH) ||
4795 (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) ||
4796 ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) &&
4797 result.routing_result != NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED)) {
4798 client->allow_multiple_flows = TRUE;
4799 } else {
4800 client->allow_multiple_flows = FALSE;
4801 }
4802
4803 // If the original request was scoped, and the policy result matches, make sure the result is scoped
4804 if ((result.routing_result == NECP_KERNEL_POLICY_RESULT_NONE ||
4805 result.routing_result == NECP_KERNEL_POLICY_RESULT_PASS) &&
4806 result.routed_interface_index != IFSCOPE_NONE &&
4807 parsed_parameters->required_interface_index == result.routed_interface_index) {
4808 result.routing_result = NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED;
4809 result.routing_result_parameter.scoped_interface_index = result.routed_interface_index;
4810 }
4811
4812 if (defunct_list != NULL &&
4813 result.routing_result == NECP_KERNEL_POLICY_RESULT_DROP) {
4814 // If we are forced to drop the client, defunct it if it has flows
4815 necp_defunct_client_for_policy(client, defunct_list);
4816 }
4817
4818 // Recalculate flags
4819 if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) {
4820 // Listeners are valid as long as they aren't dropped
4821 if (result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP) {
4822 flags |= NECP_CLIENT_RESULT_FLAG_SATISFIED;
4823 }
4824 } else if (result.routed_interface_index != 0) {
4825 // Clients without flows determine viability based on having some routable interface
4826 flags |= NECP_CLIENT_RESULT_FLAG_SATISFIED;
4827 }
4828
4829 bool updated = FALSE;
4830 u_int8_t * __indexable cursor = client->result;
4831 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FLAGS, sizeof(flags), &flags, &updated, client->result, sizeof(client->result));
4832 if (reason != 0) {
4833 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_REASON, sizeof(reason), &reason, &updated, client->result, sizeof(client->result));
4834 }
4835 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_CLIENT_ID, sizeof(uuid_t), client->client_id, &updated,
4836 client->result, sizeof(client->result));
4837 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_POLICY_RESULT, sizeof(result.routing_result), &result.routing_result, &updated,
4838 client->result, sizeof(client->result));
4839
4840 client->policy_result = result.routing_result;
4841 client->policy_result_parameter = result.routing_result_parameter;
4842 client->flow_divert_control_unit = result.flow_divert_aggregate_unit;
4843 client->filter_control_unit = result.filter_control_unit;
4844
4845 if (result.routing_result_parameter.tunnel_interface_index != 0) {
4846 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_POLICY_RESULT_PARAMETER,
4847 sizeof(result.routing_result_parameter), &result.routing_result_parameter, &updated,
4848 client->result, sizeof(client->result));
4849 }
4850 if (result.filter_control_unit != 0) {
4851 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FILTER_CONTROL_UNIT,
4852 sizeof(result.filter_control_unit), &result.filter_control_unit, &updated,
4853 client->result, sizeof(client->result));
4854 }
4855 if (result.flow_divert_aggregate_unit != 0) {
4856 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FLOW_DIVERT_AGGREGATE_UNIT,
4857 sizeof(result.flow_divert_aggregate_unit), &result.flow_divert_aggregate_unit, &updated,
4858 client->result, sizeof(client->result));
4859 }
4860 if (result.routed_interface_index != 0) {
4861 u_int routed_interface_index = result.routed_interface_index;
4862 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
4863 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_REQUIRED_FIELDS) &&
4864 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4865 parsed_parameters->required_interface_index != result.routed_interface_index) {
4866 routed_interface_index = parsed_parameters->required_interface_index;
4867 }
4868
4869 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_INDEX,
4870 sizeof(routed_interface_index), &routed_interface_index, &updated,
4871 client->result, sizeof(client->result));
4872 }
4873 if (client_fd && client_fd->flags & NECP_OPEN_FLAG_BACKGROUND) {
4874 u_int32_t effective_traffic_class = SO_TC_BK_SYS;
4875 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_EFFECTIVE_TRAFFIC_CLASS,
4876 sizeof(effective_traffic_class), &effective_traffic_class, &updated,
4877 client->result, sizeof(client->result));
4878 }
4879
4880 if (client_fd->background) {
4881 bool has_assigned_flow = FALSE;
4882 struct necp_client_flow_registration *flow_registration = NULL;
4883 struct necp_client_flow *search_flow = NULL;
4884 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
4885 LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
4886 if (search_flow->assigned) {
4887 has_assigned_flow = TRUE;
4888 break;
4889 }
4890 }
4891 }
4892
4893 if (has_assigned_flow) {
4894 u_int32_t background = client_fd->background;
4895 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_TRAFFIC_MGMT_BG,
4896 sizeof(background), &background, &updated,
4897 client->result, sizeof(client->result));
4898 }
4899 }
4900
4901 bool write_v4_gateway = !necp_client_endpoint_is_unspecified(&v4_gateway);
4902 bool write_v6_gateway = !necp_client_endpoint_is_unspecified(&v6_gateway);
4903
4904 NECP_CLIENT_ROUTE_LOCK(client);
4905 if (client->current_route != NULL) {
4906 const u_int32_t route_mtu = get_maxmtu(client->current_route);
4907 if (route_mtu != 0) {
4908 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_EFFECTIVE_MTU,
4909 sizeof(route_mtu), &route_mtu, &updated,
4910 client->result, sizeof(client->result));
4911 }
4912 bool has_remote_addr = parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR;
4913 if (has_remote_addr && client->current_route->rt_gateway != NULL) {
4914 if (client->current_route->rt_gateway->sa_family == AF_INET) {
4915 write_v6_gateway = false;
4916 } else if (client->current_route->rt_gateway->sa_family == AF_INET6) {
4917 write_v4_gateway = false;
4918 }
4919 }
4920
4921 if (client->current_route->rt_ifp != NULL) {
4922 int8_t if_lqm = client->current_route->rt_ifp->if_interface_state.lqm_state;
4923
4924 // Upgrade to enhancedLQM for cellular interfaces that support it
4925 if (client->current_route->rt_ifp->if_type == IFT_CELLULAR && client->current_route->rt_ifp->if_link_status != NULL) {
4926 struct if_cellular_status_v1 *cell_link_status = &client->current_route->rt_ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
4927
4928 if (cell_link_status->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
4929 if_lqm = ifnet_lqm_normalize(cell_link_status->link_quality_metric);
4930 }
4931 }
4932
4933 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_LINK_QUALITY,
4934 sizeof(if_lqm), &if_lqm, &updated,
4935 client->result, sizeof(client->result));
4936 }
4937 }
4938 NECP_CLIENT_ROUTE_UNLOCK(client);
4939
4940 if (write_v4_gateway) {
4941 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_GATEWAY,
4942 sizeof(struct necp_client_endpoint), (uint8_t *)(struct necp_client_endpoint * __bidi_indexable)&v4_gateway, &updated,
4943 client->result, sizeof(client->result));
4944 }
4945
4946 if (write_v6_gateway) {
4947 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_GATEWAY,
4948 sizeof(struct necp_client_endpoint), (uint8_t *)(struct necp_client_endpoint * __bidi_indexable)&v6_gateway, &updated,
4949 client->result, sizeof(client->result));
4950 }
4951
4952 for (int i = 0; i < NAT64_MAX_NUM_PREFIXES; i++) {
4953 if (result.nat64_prefixes[i].prefix_len != 0) {
4954 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NAT64,
4955 sizeof(result.nat64_prefixes), result.nat64_prefixes, &updated,
4956 client->result, sizeof(client->result));
4957 break;
4958 }
4959 }
4960
4961 if (result.mss_recommended != 0) {
4962 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_RECOMMENDED_MSS,
4963 sizeof(result.mss_recommended), &result.mss_recommended, &updated,
4964 client->result, sizeof(client->result));
4965 }
4966
4967 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
4968 if (uuid_is_null(result.netagents[i])) {
4969 break;
4970 }
4971 if (result.netagent_use_flags[i] & NECP_AGENT_USE_FLAG_REMOVE) {
4972 // A removed agent, ignore
4973 continue;
4974 }
4975
4976 if (necp_agent_is_removed_by_type(&result, result.netagents[i])) {
4977 // A removed agent, ignore
4978 continue;
4979 }
4980
4981 uuid_copy(netagent.netagent_uuid, result.netagents[i]);
4982 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
4983 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, TRUE, 0, 0)) {
4984 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
4985 client->result, sizeof(client->result));
4986 }
4987 }
4988
4989 ifnet_head_lock_shared();
4990 ifnet_t direct_interface = NULL;
4991 ifnet_t delegate_interface = NULL;
4992 ifnet_t original_scoped_interface = NULL;
4993
4994 if (result.routed_interface_index != IFSCOPE_NONE && result.routed_interface_index <= (u_int32_t)if_index) {
4995 direct_interface = ifindex2ifnet[result.routed_interface_index];
4996 } else if (parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4997 parsed_parameters->required_interface_index <= (u_int32_t)if_index) {
4998 // If the request was scoped, but the route didn't match, still grab the agents
4999 direct_interface = ifindex2ifnet[parsed_parameters->required_interface_index];
5000 } else if (result.routed_interface_index == IFSCOPE_NONE &&
5001 result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED &&
5002 result.routing_result_parameter.scoped_interface_index != IFSCOPE_NONE) {
5003 direct_interface = ifindex2ifnet[result.routing_result_parameter.scoped_interface_index];
5004 }
5005 if (direct_interface != NULL) {
5006 delegate_interface = direct_interface->if_delegated.ifp;
5007 }
5008 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
5009 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_REQUIRED_FIELDS) &&
5010 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
5011 parsed_parameters->required_interface_index != result.routing_result_parameter.tunnel_interface_index &&
5012 parsed_parameters->required_interface_index <= (u_int32_t)if_index) {
5013 original_scoped_interface = ifindex2ifnet[parsed_parameters->required_interface_index];
5014 }
5015 // Add interfaces
5016 if (original_scoped_interface != NULL) {
5017 struct necp_client_result_interface interface_struct;
5018 interface_struct.index = original_scoped_interface->if_index;
5019 interface_struct.generation = ifnet_get_generation(original_scoped_interface);
5020 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
5021 client->result, sizeof(client->result));
5022 }
5023 if (direct_interface != NULL) {
5024 struct necp_client_result_interface interface_struct;
5025 interface_struct.index = direct_interface->if_index;
5026 interface_struct.generation = ifnet_get_generation(direct_interface);
5027 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
5028 client->result, sizeof(client->result));
5029
5030 // Set the delta time since interface up/down
5031 struct timeval updown_delta = {};
5032 if (ifnet_updown_delta(direct_interface, &updown_delta) == 0) {
5033 u_int32_t delta = updown_delta.tv_sec;
5034 bool ignore_updated = FALSE;
5035 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_TIME_DELTA,
5036 sizeof(delta), &delta, &ignore_updated,
5037 client->result, sizeof(client->result));
5038 }
5039 }
5040 if (delegate_interface != NULL) {
5041 struct necp_client_result_interface interface_struct;
5042 interface_struct.index = delegate_interface->if_index;
5043 interface_struct.generation = ifnet_get_generation(delegate_interface);
5044 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
5045 client->result, sizeof(client->result));
5046 }
5047
5048 // Update multipath/listener interface flows
5049 if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_MULTIPATH && !(parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE)) {
5050 // Add the interface option for the routed interface first
5051 if (direct_interface != NULL) {
5052 // Add nexus agent
5053 necp_client_add_agent_interface_options(client, parsed_parameters, direct_interface);
5054
5055 // Add interface option in case it is not a nexus
5056 necp_client_add_interface_option_if_needed(client, direct_interface->if_index,
5057 ifnet_get_generation(direct_interface), NULL, false);
5058 }
5059 if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INBOUND) {
5060 // For inbound multipath, add from the global list (like a listener)
5061 struct ifnet *multi_interface = NULL;
5062 TAILQ_FOREACH(multi_interface, &ifnet_head, if_link) {
5063 if ((multi_interface->if_flags & (IFF_UP | IFF_RUNNING)) &&
5064 necp_ifnet_matches_parameters(multi_interface, parsed_parameters, 0, NULL, true, false)) {
5065 // Add nexus agents for inbound multipath
5066 necp_client_add_agent_interface_options(client, parsed_parameters, multi_interface);
5067 }
5068 }
5069 } else {
5070 // Get other multipath interface options from ordered list
5071 struct ifnet *multi_interface = NULL;
5072 TAILQ_FOREACH(multi_interface, &ifnet_ordered_head, if_ordered_link) {
5073 if (multi_interface != direct_interface &&
5074 necp_ifnet_matches_parameters(multi_interface, parsed_parameters, 0, NULL, true, false)) {
5075 // Add nexus agents for multipath
5076 necp_client_add_agent_interface_options(client, parsed_parameters, multi_interface);
5077
5078 // Add multipath interface flows for kernel MPTCP
5079 necp_client_add_interface_option_if_needed(client, multi_interface->if_index,
5080 ifnet_get_generation(multi_interface), NULL, false);
5081 }
5082 }
5083 }
5084 } else if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) {
5085 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
5086 if (direct_interface != NULL) {
5087 // If scoped, only listen on that interface
5088 // Add nexus agents for listeners
5089 necp_client_add_agent_interface_options(client, parsed_parameters, direct_interface);
5090
5091 // Add interface option in case it is not a nexus
5092 necp_client_add_interface_option_if_needed(client, direct_interface->if_index,
5093 ifnet_get_generation(direct_interface), NULL, false);
5094 }
5095 } else {
5096 // Get listener interface options from global list
5097 struct ifnet *listen_interface = NULL;
5098 TAILQ_FOREACH(listen_interface, &ifnet_head, if_link) {
5099 if ((listen_interface->if_flags & (IFF_UP | IFF_RUNNING)) &&
5100 necp_ifnet_matches_parameters(listen_interface, parsed_parameters, 0, NULL, true, false)) {
5101 // Add nexus agents for listeners
5102 necp_client_add_agent_interface_options(client, parsed_parameters, listen_interface);
5103 }
5104 }
5105 }
5106 } else if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) {
5107 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED && originally_scoped) {
5108 if (direct_interface != NULL) {
5109 // Add browse option if it has an agent
5110 necp_client_add_browse_interface_options(client, parsed_parameters, direct_interface);
5111 }
5112 } else {
5113 // Get browse interface options from global list
5114 struct ifnet *browse_interface = NULL;
5115 TAILQ_FOREACH(browse_interface, &ifnet_head, if_link) {
5116 if (necp_ifnet_matches_parameters(browse_interface, parsed_parameters, 0, NULL, true, false)) {
5117 necp_client_add_browse_interface_options(client, parsed_parameters, browse_interface);
5118 }
5119 }
5120 }
5121 }
5122
5123 struct necp_client_result_estimated_throughput throughput = {
5124 .up = 0,
5125 .down = 0,
5126 };
5127
5128 // Add agents
5129 if (original_scoped_interface != NULL) {
5130 ifnet_lock_shared(original_scoped_interface);
5131 if (original_scoped_interface->if_agentids != NULL) {
5132 for (u_int32_t i = 0; i < original_scoped_interface->if_agentcount; i++) {
5133 if (uuid_is_null(original_scoped_interface->if_agentids[i])) {
5134 continue;
5135 }
5136 bool skip_agent = false;
5137 for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
5138 if (uuid_is_null(result.netagents[j])) {
5139 break;
5140 }
5141 if ((result.netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) &&
5142 uuid_compare(original_scoped_interface->if_agentids[i], result.netagents[j]) == 0) {
5143 skip_agent = true;
5144 break;
5145 }
5146 }
5147
5148 if (!skip_agent && necp_agent_is_removed_by_type(&result, original_scoped_interface->if_agentids[i])) {
5149 skip_agent = true;
5150 }
5151
5152 if (skip_agent) {
5153 continue;
5154 }
5155
5156 uuid_copy(netagent.netagent_uuid, original_scoped_interface->if_agentids[i]);
5157 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
5158 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, FALSE,
5159 original_scoped_interface->if_index, ifnet_get_generation(original_scoped_interface))) {
5160 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
5161 client->result, sizeof(client->result));
5162 }
5163 }
5164 }
5165 ifnet_lock_done(original_scoped_interface);
5166 }
5167 if (direct_interface != NULL) {
5168 ifnet_lock_shared(direct_interface);
5169 throughput.up = direct_interface->if_estimated_up_bucket;
5170 throughput.down = direct_interface->if_estimated_down_bucket;
5171 if (direct_interface->if_agentids != NULL) {
5172 for (u_int32_t i = 0; i < direct_interface->if_agentcount; i++) {
5173 if (uuid_is_null(direct_interface->if_agentids[i])) {
5174 continue;
5175 }
5176 bool skip_agent = false;
5177 for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
5178 if (uuid_is_null(result.netagents[j])) {
5179 break;
5180 }
5181 if ((result.netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) &&
5182 uuid_compare(direct_interface->if_agentids[i], result.netagents[j]) == 0) {
5183 skip_agent = true;
5184 break;
5185 }
5186 }
5187
5188 if (!skip_agent && necp_agent_is_removed_by_type(&result, direct_interface->if_agentids[i])) {
5189 skip_agent = true;
5190 }
5191
5192 if (skip_agent) {
5193 continue;
5194 }
5195 uuid_copy(netagent.netagent_uuid, direct_interface->if_agentids[i]);
5196 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
5197 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, TRUE,
5198 direct_interface->if_index, ifnet_get_generation(direct_interface))) {
5199 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
5200 client->result, sizeof(client->result));
5201 }
5202 }
5203 }
5204 ifnet_lock_done(direct_interface);
5205 }
5206 if (delegate_interface != NULL) {
5207 ifnet_lock_shared(delegate_interface);
5208 if (throughput.up == 0 && throughput.down == 0) {
5209 throughput.up = delegate_interface->if_estimated_up_bucket;
5210 throughput.down = delegate_interface->if_estimated_down_bucket;
5211 }
5212 if (delegate_interface->if_agentids != NULL) {
5213 for (u_int32_t i = 0; i < delegate_interface->if_agentcount; i++) {
5214 if (uuid_is_null(delegate_interface->if_agentids[i])) {
5215 continue;
5216 }
5217 bool skip_agent = false;
5218 for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
5219 if (uuid_is_null(result.netagents[j])) {
5220 break;
5221 }
5222 if ((result.netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) &&
5223 uuid_compare(delegate_interface->if_agentids[i], result.netagents[j]) == 0) {
5224 skip_agent = true;
5225 break;
5226 }
5227 }
5228
5229 if (!skip_agent && necp_agent_is_removed_by_type(&result, delegate_interface->if_agentids[i])) {
5230 skip_agent = true;
5231 }
5232
5233 if (skip_agent) {
5234 continue;
5235 }
5236 uuid_copy(netagent.netagent_uuid, delegate_interface->if_agentids[i]);
5237 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
5238 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, FALSE,
5239 delegate_interface->if_index, ifnet_get_generation(delegate_interface))) {
5240 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
5241 client->result, sizeof(client->result));
5242 }
5243 }
5244 }
5245 ifnet_lock_done(delegate_interface);
5246 }
5247 ifnet_head_done();
5248
5249 if (throughput.up != 0 || throughput.down != 0) {
5250 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_ESTIMATED_THROUGHPUT,
5251 sizeof(throughput), &throughput, &updated, client->result, sizeof(client->result));
5252 }
5253
5254 // Add interface options
5255 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
5256 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
5257 struct necp_client_interface_option *option = &client->interface_options[option_i];
5258 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_OPTION, sizeof(*option), option, &updated,
5259 client->result, sizeof(client->result));
5260 } else {
5261 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
5262 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_OPTION, sizeof(*option), option, &updated,
5263 client->result, sizeof(client->result));
5264 }
5265 }
5266
5267 size_t new_result_length = (cursor - client->result);
5268 if (new_result_length != client->result_length) {
5269 client->result_length = new_result_length;
5270 updated = TRUE;
5271 }
5272
5273 // Update flow viability/flags
5274 if (necp_client_update_flows(proc, client, defunct_list)) {
5275 updated = TRUE;
5276 }
5277
5278 if (updated) {
5279 client->result_read = FALSE;
5280 necp_client_update_observer_update(client);
5281 }
5282
5283 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
5284 return updated;
5285 }
5286
static bool
necp_defunct_client_fd_locked_inner(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, bool destroy_stats)
{
	bool updated_result = FALSE;
	struct necp_client *client = NULL;

	// Caller must already hold the fd lock; we take each client lock in turn.
	NECP_FD_ASSERT_LOCKED(client_fd);

	// Walk every client on this fd and mark each nexus-backed flow
	// registration as defunct, queueing a close record onto the
	// caller-provided defunct_list so the nexus agents can be notified
	// after all locks are dropped. Returns TRUE if any flow state changed
	// (so the caller knows to notify userspace).
	RB_FOREACH(client, _necp_client_tree, &client_fd->clients) {
		struct necp_client_flow_registration *flow_registration = NULL;

		NECP_CLIENT_LOCK(client);

		// Prepare close events to be sent to the nexus to effectively remove the flows
		struct necp_client_flow *search_flow = NULL;
		RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
			LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
				// Only nexus flows with a non-null agent need a close event.
				if (search_flow->nexus &&
				    !uuid_is_null(search_flow->u.nexus_agent)) {
					// Sleeping alloc won't fail; copy only what's necessary
					struct necp_flow_defunct *flow_defunct = kalloc_type(struct necp_flow_defunct, Z_WAITOK | Z_ZERO);
					uuid_copy(flow_defunct->nexus_agent, search_flow->u.nexus_agent);
					// The flow is identified either by the client ID or by the
					// per-registration ID, depending on the registration flags.
					uuid_copy(flow_defunct->flow_id, ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
					    client->client_id :
					    flow_registration->registration_id));
					flow_defunct->proc_pid = client->proc_pid;
					flow_defunct->agent_handle = client->agent_handle;
					flow_defunct->flags = flow_registration->flags;
#if SKYWALK
					// For QUIC flows, capture the stateless-reset token from the
					// userland stats region so the close can carry it as a parameter.
					if (flow_registration->kstats_kaddr != NULL) {
						struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
						struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
						if (quicstats != NULL &&
						    quicstats->necp_quic_udp_stats.necp_udp_hdr.necp_stats_type == NECP_CLIENT_STATISTICS_TYPE_QUIC) {
							memcpy(flow_defunct->close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(flow_defunct->close_parameters.u.close_token));
							flow_defunct->has_close_parameters = true;
						}
					}
#endif /* SKYWALK */
					// Add to the list provided by caller
					LIST_INSERT_HEAD(defunct_list, flow_defunct, chain);

					flow_registration->defunct = true;
					flow_registration->flow_result_read = false;
					updated_result = true;
				}
			}
		}
		if (destroy_stats) {
#if SKYWALK
			// Free any remaining stats objects back to the arena where they came from;
			// do this independent of the above defunct check, as the client may have
			// been marked as defunct separately via necp_defunct_client_for_policy().
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
				necp_destroy_flow_stats(client_fd, flow_registration, NULL, FALSE);
			}
#endif /* SKYWALK */
		}
		NECP_CLIENT_UNLOCK(client);
	}

	return updated_result;
}
5350
/*
 * Defunct all clients on an fd: redirect the fd's active stats arena
 * (SKYWALK), collect per-flow defunct records via
 * necp_defunct_client_fd_locked_inner(), tear down the redirected arena's
 * regions, and notify the fd if anything changed. The ordering —
 * redirect first, defunct the arena only after the inner pass has freed
 * stats — is deliberate; see inline comments.
 */
static inline void
necp_defunct_client_fd_locked(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, struct proc *proc)
{
#pragma unused(proc)
	bool updated_result = FALSE;

	NECP_FD_ASSERT_LOCKED(client_fd);
#if SKYWALK
	// redirect regions of currently-active stats arena to zero-filled pages
	struct necp_arena_info *nai = necp_fd_mredirect_stats_arena(client_fd, proc);
#endif /* SKYWALK */

	// destroy_stats == true: also release any remaining per-flow stats objects.
	updated_result = necp_defunct_client_fd_locked_inner(client_fd, defunct_list, true);

#if SKYWALK
	// and tear down the currently-active arena's regions now that the redirection and freeing are done
	if (nai != NULL) {
		// The arena must be redirected but not yet defunct, with a live mapping.
		ASSERT((nai->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT)) == NAIF_REDIRECT);
		ASSERT(nai->nai_arena != NULL);
		ASSERT(nai->nai_mmap.ami_mapref != NULL);

		int err = skmem_arena_defunct(nai->nai_arena);
		VERIFY(err == 0);

		nai->nai_flags |= NAIF_DEFUNCT;
	}
#endif /* SKYWALK */

	if (updated_result) {
		// Wake any waiters/kevents on this fd so userspace re-reads results.
		necp_fd_notify(client_fd, true);
	}
}
5383
/*
 * Re-evaluate policy results for every client on one fd, collecting any
 * newly-defunct flows into defunct_list (processed by the caller after
 * locks are dropped). Also checks whether this fd's pid is on the global
 * list of pids that need in-process flow divert, and notifies the fd if
 * either condition produced a change userspace should see.
 *
 * Caller holds the fd lock and (per the assert below) the fd-list lock.
 */
static inline void
necp_update_client_fd_locked(struct necp_fd_data *client_fd,
    proc_t proc,
    struct _necp_flow_defunct_list *defunct_list)
{
	struct necp_client *client = NULL;
	bool updated_result = FALSE;
	NECP_FD_ASSERT_LOCKED(client_fd);
	RB_FOREACH(client, _necp_client_tree, &client_fd->clients) {
		NECP_CLIENT_LOCK(client);
		if (necp_update_client_result(proc, client_fd, client, defunct_list)) {
			updated_result = TRUE;
		}
		NECP_CLIENT_UNLOCK(client);
	}

	// Check if this PID needs to request in-process flow divert
	NECP_FD_LIST_ASSERT_LOCKED();
	for (int i = 0; i < NECP_MAX_FLOW_DIVERT_NEEDED_PIDS; i++) {
		// The list is packed from the front; a zero entry ends it.
		if (necp_flow_divert_needed_pids[i] == 0) {
			break;
		}
		if (necp_flow_divert_needed_pids[i] == client_fd->proc_pid) {
			client_fd->request_in_process_flow_divert = true;
			break;
		}
	}

	if (updated_result || client_fd->request_in_process_flow_divert) {
		necp_fd_notify(client_fd, true);
	}
}
5416
#if SKYWALK
/*
 * Thread-call handler: sweep every NECP fd and ask each one to destroy
 * any stats arenas it no longer needs (non-forced destroy).
 */
static void
necp_close_empty_arenas_callout(__unused thread_call_param_t dummy,
    __unused thread_call_param_t arg)
{
	struct necp_fd_data *fd_entry = NULL;

	// Shared hold on the fd list is enough; each fd is locked individually.
	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(fd_entry, &necp_fd_list, chain) {
		NECP_FD_LOCK(fd_entry);
		necp_stats_arenas_destroy(fd_entry, FALSE);
		NECP_FD_UNLOCK(fd_entry);
	}

	NECP_FD_LIST_UNLOCK();
}
#endif /* SKYWALK */
5435
/*
 * Thread-call handler for the coalesced "update all clients" timer.
 * Snapshots and resets the pending-update counter/deadline under the
 * update lock, then walks every NECP fd: takes a proc reference for the
 * fd's owning pid, updates all of its clients, and releases the proc.
 * Finally clears the flow-divert-needed pid list and processes any flows
 * that became defunct during the sweep (outside all locks).
 */
static void
necp_update_all_clients_callout(__unused thread_call_param_t dummy,
    __unused thread_call_param_t arg)
{
	struct necp_fd_data *client_fd = NULL;

	// Consume the scheduled state so a new update can be scheduled while
	// this sweep runs.
	NECP_UPDATE_ALL_CLIENTS_LOCK_EXCLUSIVE();
	uint32_t count = necp_update_all_clients_sched_cnt;
	necp_update_all_clients_sched_cnt = 0;
	necp_update_all_clients_sched_abstime = 0;
	NECP_UPDATE_ALL_CLIENTS_UNLOCK();

	if (necp_debug > 0) {
		NECPLOG(LOG_DEBUG,
		    "necp_update_all_clients_callout running for coalesced %u updates",
		    count);
	}

	// Local list of flows that become defunct during this sweep;
	// processed after all locks are dropped.
	struct _necp_flow_defunct_list defunct_list;
	LIST_INIT(&defunct_list);

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		// Skip fds whose owning process is already gone.
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		// Update all clients on one fd
		NECP_FD_LOCK(client_fd);
		necp_update_client_fd_locked(client_fd, proc, &defunct_list);
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;
	}

	// Reset the necp_flow_divert_needed_pids list
	for (int i = 0; i < NECP_MAX_FLOW_DIVERT_NEEDED_PIDS; i++) {
		necp_flow_divert_needed_pids[i] = 0;
	}

	NECP_FD_LIST_UNLOCK();

	// Handle the case in which some clients became newly defunct
	necp_process_defunct_list(&defunct_list);
}
5484
/*
 * Request a (coalesced, delayed) re-evaluation of all NECP clients.
 * Thin wrapper over necp_update_all_clients_immediately_if_needed()
 * with the default (non-immediate) timeout.
 */
void
necp_update_all_clients(void)
{
	necp_update_all_clients_immediately_if_needed(false);
}
5490
/*
 * Schedule the thread call that re-evaluates all NECP clients.
 * Multiple callers are coalesced: if the timer is already pending, it is
 * normally left alone; when should_update_immediately is set and the
 * pending deadline is further out than the short (10ms) one, the pending
 * call is cancelled (waiting for any in-flight run) and rescheduled.
 */
void
necp_update_all_clients_immediately_if_needed(bool should_update_immediately)
{
	if (necp_client_update_tcall == NULL) {
		// Don't try to update clients if the module is not initialized
		return;
	}

	uint64_t deadline = 0;
	uint64_t leeway = 0;

	// Default coalescing window, overridden by a short 10ms window for
	// immediate updates.
	uint32_t timeout_to_use = necp_timeout_microseconds;
	uint32_t leeway_to_use = necp_timeout_leeway_microseconds;
	if (should_update_immediately) {
		timeout_to_use = 1000 * 10; // 10ms
		leeway_to_use = 1000 * 10; // 10ms;
	}

	clock_interval_to_deadline(timeout_to_use, NSEC_PER_USEC, &deadline);
	clock_interval_to_absolutetime_interval(leeway_to_use, NSEC_PER_USEC, &leeway);

	NECP_UPDATE_ALL_CLIENTS_LOCK_EXCLUSIVE();
	bool need_cancel = false;
	bool need_schedule = true;
	uint64_t sched_abstime;

	// NOTE(review): `deadline` is already an absolute time (from
	// clock_interval_to_deadline above), yet it is passed here as an
	// interval, which makes sched_abstime a conservative upper bound on
	// when the call fires — confirm this is intentional.
	clock_absolutetime_interval_to_deadline(deadline + leeway, &sched_abstime);

	/*
	 * Do not push the timer if it is already scheduled
	 */
	if (necp_update_all_clients_sched_abstime != 0) {
		need_schedule = false;

		if (should_update_immediately) {
			/*
			 * To update immediately we may have to cancel the current timer
			 * if it's scheduled too far out.
			 */
			if (necp_update_all_clients_sched_abstime > sched_abstime) {
				need_cancel = true;
				need_schedule = true;
			}
		}
	}

	/*
	 * Record the time of the deadline with leeway
	 */
	if (need_schedule) {
		necp_update_all_clients_sched_abstime = sched_abstime;
	}

	// Count coalesced requests for the debug log in the callout.
	necp_update_all_clients_sched_cnt += 1;
	uint32_t count = necp_update_all_clients_sched_cnt;
	NECP_UPDATE_ALL_CLIENTS_UNLOCK();

	if (need_schedule) {
		/*
		 * Wait if the thread call is currently executing to make sure the
		 * next update will be delivered to all clients
		 */
		if (need_cancel) {
			(void) thread_call_cancel_wait(necp_client_update_tcall);
		}

		(void) thread_call_enter_delayed_with_leeway(necp_client_update_tcall, NULL,
		    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
	}
	if (necp_debug > 0) {
		NECPLOG(LOG_DEBUG,
		    "necp_update_all_clients immediate %s update %u",
		    should_update_immediately ? "true" : "false", count);
	}
}
5566
5567 bool
necp_set_client_as_background(proc_t proc,struct fileproc * fp,bool background)5568 necp_set_client_as_background(proc_t proc,
5569 struct fileproc *fp,
5570 bool background)
5571 {
5572 if (proc == PROC_NULL) {
5573 NECPLOG0(LOG_ERR, "NULL proc");
5574 return FALSE;
5575 }
5576
5577 if (fp == NULL) {
5578 NECPLOG0(LOG_ERR, "NULL fp");
5579 return FALSE;
5580 }
5581
5582 struct necp_fd_data *client_fd = (struct necp_fd_data *)fp_get_data(fp);
5583 if (client_fd == NULL) {
5584 NECPLOG0(LOG_ERR, "Could not find client structure for backgrounded client");
5585 return FALSE;
5586 }
5587
5588 if (client_fd->necp_fd_type != necp_fd_type_client) {
5589 // Not a client fd, ignore
5590 NECPLOG0(LOG_ERR, "Not a client fd, ignore");
5591 return FALSE;
5592 }
5593
5594 client_fd->background = background;
5595
5596 return TRUE;
5597 }
5598
5599 void
necp_fd_memstatus(proc_t proc,uint32_t status,struct necp_fd_data * client_fd)5600 necp_fd_memstatus(proc_t proc, uint32_t status,
5601 struct necp_fd_data *client_fd)
5602 {
5603 #pragma unused(proc, status, client_fd)
5604 ASSERT(proc != PROC_NULL);
5605 ASSERT(client_fd != NULL);
5606
5607 // Nothing to reap for the process or client for now,
5608 // but this is where we would trigger that in future.
5609 }
5610
/*
 * Defunct every flow on a NECP client fd, typically as part of tearing
 * down the owning process.
 *
 * Flows to defunct are collected onto a local list while the fd lock is
 * held, then processed after the lock is dropped via
 * necp_process_defunct_list(). On SKYWALK builds, userland statistics
 * for the process are shut down under the same fd lock so that ntstat
 * defuncts the same set of clients.
 */
void
necp_fd_defunct(proc_t proc, struct necp_fd_data *client_fd)
{
	struct _necp_flow_defunct_list defunct_list;

	ASSERT(proc != PROC_NULL);
	ASSERT(client_fd != NULL);

	if (client_fd->necp_fd_type != necp_fd_type_client) {
		// Not a client fd, ignore
		return;
	}

	// Our local temporary list
	LIST_INIT(&defunct_list);

	// Need to hold lock so ntstats defunct the same set of clients
	NECP_FD_LOCK(client_fd);
#if SKYWALK
	// Shut down statistics
	nstats_userland_stats_defunct_for_process(proc_getpid(proc));
#endif /* SKYWALK */
	necp_defunct_client_fd_locked(client_fd, &defunct_list, proc);
	NECP_FD_UNLOCK(client_fd);

	// Process the collected flows outside the fd lock.
	necp_process_defunct_list(&defunct_list);
}
5638
/*
 * Record a request for in-process flow divert for the given pid.
 *
 * The pid is stored in the first free slot of a fixed-size array; the
 * next thread call that updates client paths consumes the entries. If
 * all NECP_MAX_FLOW_DIVERT_NEEDED_PIDS slots are occupied the request
 * is silently dropped.
 *
 * NOTE(review): the slot array is written while holding the fd list
 * lock in SHARED mode — confirm that concurrent writers are serialized
 * by some other mechanism, otherwise two threads could race for the
 * same slot. Also note a pid already present can be added twice.
 */
void
necp_client_request_in_process_flow_divert(pid_t pid)
{
	if (pid == 0) {
		return;
	}

	// Add to the list of pids that should get an update. These will
	// get picked up on the next thread call to update client paths.
	NECP_FD_LIST_LOCK_SHARED();
	for (int i = 0; i < NECP_MAX_FLOW_DIVERT_NEEDED_PIDS; i++) {
		if (necp_flow_divert_needed_pids[i] == 0) {
			necp_flow_divert_needed_pids[i] = pid;
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();
}
5657
/*
 * Remove every NECP_CLIENT_RESULT_NETAGENT TLV naming netagent_uuid
 * from a client's cached result buffer, compacting the buffer in place.
 *
 * When a matching TLV is found, the tail of the buffer is shifted down
 * over it, result_length is reduced, and the vacated bytes are zeroed.
 * The offset is deliberately NOT advanced after a removal so that the
 * TLV that slid into the current position is examined next.
 *
 * Called with the client locked by its caller (see
 * necp_force_update_client), since client->result is mutated in place.
 */
static void
necp_client_remove_agent_from_result(struct necp_client *client, uuid_t netagent_uuid)
{
	size_t offset = 0;

	u_int8_t *result_buffer = client->result;
	while ((offset + sizeof(struct necp_tlv_header)) <= client->result_length) {
		u_int8_t type = necp_buffer_get_tlv_type(result_buffer, client->result_length, offset);
		u_int32_t length = necp_buffer_get_tlv_length(result_buffer, client->result_length, offset);

		size_t tlv_total_length = (sizeof(struct necp_tlv_header) + length);
		// Only a well-formed netagent TLV that fits entirely within the
		// result buffer is eligible for removal.
		if (type == NECP_CLIENT_RESULT_NETAGENT &&
		    length == sizeof(struct necp_client_result_netagent) &&
		    (offset + tlv_total_length) <= client->result_length) {
			struct necp_client_result_netagent *value = ((struct necp_client_result_netagent *)(void *)
			    necp_buffer_get_tlv_value(result_buffer, client->result_length, offset, NULL));
			if (uuid_compare(value->netagent_uuid, netagent_uuid) == 0) {
				// Found a netagent to remove
				// Shift bytes down to remove the tlv, and adjust total length
				// Don't adjust the current offset
				memmove(result_buffer + offset,
				    result_buffer + offset + tlv_total_length,
				    client->result_length - (offset + tlv_total_length));
				client->result_length -= tlv_total_length;
				memset(result_buffer + client->result_length, 0, sizeof(client->result) - client->result_length);
				continue;
			}
		}

		offset += tlv_total_length;
	}
}
5690
/*
 * Force the client identified by client_id to re-evaluate its result.
 *
 * Records the failed-trigger agent generation and UUID on the client,
 * optionally strips the named netagent from the client's cached result,
 * marks the result unread, and notifies the owning fd. The search stops
 * at the first fd that owns the client.
 *
 * Lock order: fd list (shared) -> fd -> client; the fd notification is
 * issued after the client lock is dropped but while the fd lock is held.
 */
void
necp_force_update_client(uuid_t client_id, uuid_t remove_netagent_uuid, u_int32_t agent_generation)
{
	struct necp_fd_data *client_fd = NULL;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		bool updated_result = FALSE;
		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			client->failed_trigger_agent.generation = agent_generation;
			uuid_copy(client->failed_trigger_agent.netagent_uuid, remove_netagent_uuid);
			if (!uuid_is_null(remove_netagent_uuid)) {
				necp_client_remove_agent_from_result(client, remove_netagent_uuid);
			}
			client->result_read = FALSE;
			// Found the client, break
			updated_result = TRUE;
			NECP_CLIENT_UNLOCK(client);
		}
		if (updated_result) {
			necp_fd_notify(client_fd, true);
		}
		NECP_FD_UNLOCK(client_fd);
		if (updated_result) {
			// Found the client, break
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();
}
5725
#if SKYWALK
/*
 * Signal an early close (TIME_WAIT) for the flow registered under
 * client_id, so its userland statistics are marked as over before the
 * client itself is destroyed. No-op if the client or flow is not found,
 * or if the flow has no stats handler context.
 */
void
necp_client_early_close(uuid_t client_id)
{
	NECP_CLIENT_TREE_LOCK_SHARED();

	struct necp_client *client = necp_find_client_and_lock(client_id);
	if (client != NULL) {
		struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
		if (flow_registration != NULL) {
			// Found the right client and flow, mark the stats as over
			if (flow_registration->stats_handler_context != NULL) {
				ntstat_userland_stats_event(flow_registration->stats_handler_context,
				    NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT);
			}
		}
		NECP_CLIENT_UNLOCK(client);
	}

	NECP_CLIENT_TREE_UNLOCK();
}
#endif /* SKYWALK */
5748
/// Interface matching

// Union of all parameter fields that require inspecting specific
// interfaces: local address, prohibited interfaces, interface-type
// constraints, and every class of netagent constraint.
#define NECP_PARSED_PARAMETERS_INTERESTING_IFNET_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
	NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
	NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE | \
	NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE | \
	NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE | \
	NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)

// Fields that, when present, can force a client to be scoped to a
// particular interface (required/preferred positive constraints).
#define NECP_PARSED_PARAMETERS_SCOPED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE | \
	NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE)

// Subset of the scoped fields that name a concrete interface property
// (address or type) rather than an agent.
#define NECP_PARSED_PARAMETERS_SCOPED_IFNET_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
	NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE)

// Soft-preference fields: these bias interface selection (via a
// preferred count) without disqualifying non-matching interfaces.
#define NECP_PARSED_PARAMETERS_PREFERRED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT | \
	NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE | \
	NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)
5778
5779 static bool
necp_ifnet_matches_type(struct ifnet * ifp,u_int8_t interface_type,bool check_delegates)5780 necp_ifnet_matches_type(struct ifnet *ifp, u_int8_t interface_type, bool check_delegates)
5781 {
5782 struct ifnet *check_ifp = ifp;
5783 while (check_ifp) {
5784 if (if_functional_type(check_ifp, TRUE) == interface_type) {
5785 return TRUE;
5786 }
5787 if (!check_delegates) {
5788 break;
5789 }
5790 check_ifp = check_ifp->if_delegated.ifp;
5791 }
5792 return FALSE;
5793 }
5794
5795 static bool
necp_ifnet_matches_name(struct ifnet * ifp,const char * __sized_by (IFXNAMSIZ)interface_name,bool check_delegates)5796 necp_ifnet_matches_name(struct ifnet *ifp, const char * __sized_by(IFXNAMSIZ)interface_name, bool check_delegates)
5797 {
5798 struct ifnet *check_ifp = ifp;
5799 while (check_ifp) {
5800 if (strlcmp(interface_name, check_ifp->if_xname, IFXNAMSIZ) == 0) {
5801 return TRUE;
5802 }
5803 if (!check_delegates) {
5804 break;
5805 }
5806 check_ifp = check_ifp->if_delegated.ifp;
5807 }
5808 return FALSE;
5809 }
5810
5811 static bool
necp_ifnet_matches_agent(struct ifnet * ifp,uuid_t * agent_uuid,bool check_delegates)5812 necp_ifnet_matches_agent(struct ifnet *ifp, uuid_t *agent_uuid, bool check_delegates)
5813 {
5814 struct ifnet *check_ifp = ifp;
5815
5816 while (check_ifp != NULL) {
5817 ifnet_lock_shared(check_ifp);
5818 if (check_ifp->if_agentids != NULL) {
5819 for (u_int32_t index = 0; index < check_ifp->if_agentcount; index++) {
5820 if (uuid_compare(check_ifp->if_agentids[index], *agent_uuid) == 0) {
5821 ifnet_lock_done(check_ifp);
5822 return TRUE;
5823 }
5824 }
5825 }
5826 ifnet_lock_done(check_ifp);
5827
5828 if (!check_delegates) {
5829 break;
5830 }
5831 check_ifp = check_ifp->if_delegated.ifp;
5832 }
5833 return FALSE;
5834 }
5835
/*
 * Return TRUE if the interface (or, when check_delegates is set, any
 * interface on its delegate chain) has a registered netagent whose
 * domain and type match the given strings per necp_agent_types_match().
 *
 * Each interface's agent UUID table is walked under its shared lock;
 * for every non-null UUID the agent registry is consulted for the
 * domain/type pair before comparing.
 */
static bool
necp_ifnet_matches_agent_type(struct ifnet *ifp, const char * __sized_by(NETAGENT_DOMAINSIZE)agent_domain, const char * __sized_by(NETAGENT_TYPESIZE)agent_type, bool check_delegates)
{
	struct ifnet *check_ifp = ifp;

	while (check_ifp != NULL) {
		ifnet_lock_shared(check_ifp);
		if (check_ifp->if_agentids != NULL) {
			for (u_int32_t index = 0; index < check_ifp->if_agentcount; index++) {
				if (uuid_is_null(check_ifp->if_agentids[index])) {
					continue;
				}

				char if_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
				char if_agent_type[NETAGENT_TYPESIZE] = { 0 };

				// Resolve the agent's domain/type, then compare; unlock
				// before returning on a match.
				if (netagent_get_agent_domain_and_type(check_ifp->if_agentids[index], if_agent_domain, if_agent_type)) {
					if (necp_agent_types_match(agent_domain, agent_type, if_agent_domain, if_agent_type)) {
						ifnet_lock_done(check_ifp);
						return TRUE;
					}
				}
			}
		}
		ifnet_lock_done(check_ifp);

		if (!check_delegates) {
			break;
		}
		check_ifp = check_ifp->if_delegated.ifp;
	}
	return FALSE;
}
5869
/*
 * Return TRUE if sa is a valid local address on the given interface.
 *
 * The address is first normalized into the form used by the ifaddr
 * list: the port is cleared, and for IPv6 the scope ID is cleared when
 * scopes are embedded in the address (or the address is not
 * scope-embeddable), since the ifaddr list always carries embedded
 * scope IDs.
 */
static bool
necp_ifnet_matches_local_address(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifaddr *ifa = NULL;
	bool matched_local_address = FALSE;

	// Transform sa into the ifaddr form
	// IPv6 Scope IDs are always embedded in the ifaddr list
	struct sockaddr_storage address;
	u_int ifscope = IFSCOPE_NONE;
	(void)sa_copy(sa, &address, &ifscope);
	// Port is irrelevant for interface-address matching (sin_port and
	// sin6_port overlap in the sockaddr layout).
	SIN(&address)->sin_port = 0;
	if (address.ss_family == AF_INET6) {
		if (in6_embedded_scope ||
		    !IN6_IS_SCOPE_EMBED(&SIN6(&address)->sin6_addr)) {
			SIN6(&address)->sin6_scope_id = 0;
		}
	}

	ifa = ifa_ifwithaddr_scoped_locked(SA(&address), ifp->if_index);
	matched_local_address = (ifa != NULL);

	if (ifa) {
		ifaddr_release(ifa);
	}

	return matched_local_address;
}
5898
5899 static bool
necp_interface_type_should_match_unranked_interfaces(u_int8_t interface_type)5900 necp_interface_type_should_match_unranked_interfaces(u_int8_t interface_type)
5901 {
5902 switch (interface_type) {
5903 // These are the interface types we allow a client to request even if the matching
5904 // interface isn't currently eligible to be primary (has default route, dns, etc)
5905 case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
5906 case IFRTYPE_FUNCTIONAL_INTCOPROC:
5907 case IFRTYPE_FUNCTIONAL_COMPANIONLINK:
5908 return true;
5909 default:
5910 break;
5911 }
5912 return false;
5913 }
5914
// True when the interface is linked onto the ordered interface list:
// either TAILQ link pointer being non-NULL indicates membership.
#define NECP_IFP_IS_ON_ORDERED_LIST(_ifp) ((_ifp)->if_ordered_link.tqe_next != NULL || (_ifp)->if_ordered_link.tqe_prev != NULL)
5916
// Secondary interface flag indicates that the interface is being
// used for multipath or a listener as an extra path
/*
 * Check whether an interface satisfies a client's parsed parameters.
 *
 * ifp                 - interface to evaluate
 * parsed_parameters   - the client's requirements and preferences
 * override_flags      - if non-zero, used instead of the client's own
 *                       flags for the expensive/constrained checks
 * preferred_count     - out (may be NULL): number of soft preferences
 *                       (preferred/avoided agents) this interface meets
 * secondary_interface - see comment above; relaxes the required
 *                       interface-type check when the flags allow it
 * require_scoped_field - when TRUE, the interface must match at least
 *                       one scoped field (local address, required
 *                       iftype, required/preferred agent or agent type)
 *                       for the function to return TRUE
 */
static bool
necp_ifnet_matches_parameters(struct ifnet *ifp,
    struct necp_client_parsed_parameters *parsed_parameters,
    u_int32_t override_flags,
    u_int32_t *preferred_count,
    bool secondary_interface,
    bool require_scoped_field)
{
	bool matched_some_scoped_field = FALSE;

	if (preferred_count) {
		*preferred_count = 0;
	}

	// Hard requirement: a specific interface index must match exactly.
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) {
		if (parsed_parameters->required_interface_index != ifp->if_index) {
			return FALSE;
		}
	}
#if SKYWALK
	else {
		// Low-latency interfaces are only usable when explicitly required.
		if (ifnet_is_low_latency(ifp)) {
			return FALSE;
		}
	}
#endif /* SKYWALK */

	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR) {
		if (!necp_ifnet_matches_local_address(ifp, SA(&parsed_parameters->local_addr.sa))) {
			return FALSE;
		}
		if (require_scoped_field) {
			matched_some_scoped_field = TRUE;
		}
	}

	// Expensive/constrained/ultra-constrained prohibitions, taken from
	// override_flags when supplied, otherwise from the client's flags.
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) {
		if (override_flags != 0) {
			if ((override_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE) &&
			    IFNET_IS_EXPENSIVE(ifp)) {
				return FALSE;
			}
			if ((override_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED) &&
			    IFNET_IS_CONSTRAINED(ifp)) {
				return FALSE;
			}
			// Ultra-constrained interfaces are opt-in only.
			if (!(override_flags & NECP_CLIENT_PARAMETER_FLAG_ALLOW_ULTRA_CONSTRAINED) &&
			    IFNET_IS_ULTRA_CONSTRAINED(ifp)) {
				return FALSE;
			}
		} else {
			if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE) &&
			    IFNET_IS_EXPENSIVE(ifp)) {
				return FALSE;
			}
			if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED) &&
			    IFNET_IS_CONSTRAINED(ifp)) {
				return FALSE;
			}
			if (!(parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_ALLOW_ULTRA_CONSTRAINED) &&
			    IFNET_IS_ULTRA_CONSTRAINED(ifp)) {
				return FALSE;
			}
		}
	}

	if ((!secondary_interface || // Enforce interface type if this is the primary interface
	    !(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) || // or if there are no flags
	    !(parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_ONLY_PRIMARY_REQUIRES_TYPE)) && // or if the flags don't give an exception
	    (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) &&
	    !necp_ifnet_matches_type(ifp, parsed_parameters->required_interface_type, FALSE)) {
		return FALSE;
	}

	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) {
		if (require_scoped_field) {
			matched_some_scoped_field = TRUE;
		}
	}

	// Prohibited interface types: delegates are also checked.
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE) {
		for (int i = 0; i < NECP_MAX_INTERFACE_PARAMETERS; i++) {
			if (parsed_parameters->prohibited_interface_types[i] == 0) {
				break;
			}

			if (necp_ifnet_matches_type(ifp, parsed_parameters->prohibited_interface_types[i], TRUE)) {
				return FALSE;
			}
		}
	}

	// Prohibited interface names: delegates are also checked.
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF) {
		for (int i = 0; i < NECP_MAX_INTERFACE_PARAMETERS; i++) {
			if (strbuflen(parsed_parameters->prohibited_interfaces[i], sizeof(parsed_parameters->prohibited_interfaces[i])) == 0) {
				break;
			}

			if (necp_ifnet_matches_name(ifp, parsed_parameters->prohibited_interfaces[i], TRUE)) {
				return FALSE;
			}
		}
	}

	// Required agents must be on the interface itself (no delegates).
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parsed_parameters->required_netagents[i])) {
				break;
			}

			if (!necp_ifnet_matches_agent(ifp, &parsed_parameters->required_netagents[i], FALSE)) {
				return FALSE;
			}

			if (require_scoped_field) {
				matched_some_scoped_field = TRUE;
			}
		}
	}

	// Prohibited agents disqualify even if found on a delegate.
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parsed_parameters->prohibited_netagents[i])) {
				break;
			}

			if (necp_ifnet_matches_agent(ifp, &parsed_parameters->prohibited_netagents[i], TRUE)) {
				return FALSE;
			}
		}
	}

	// Required agent domain/type pairs (no delegates).
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (strbuflen(parsed_parameters->required_netagent_types[i].netagent_domain, sizeof(parsed_parameters->required_netagent_types[i].netagent_domain)) == 0 &&
			    strbuflen(parsed_parameters->required_netagent_types[i].netagent_type, sizeof(parsed_parameters->required_netagent_types[i].netagent_type)) == 0) {
				break;
			}

			if (!necp_ifnet_matches_agent_type(ifp, parsed_parameters->required_netagent_types[i].netagent_domain, parsed_parameters->required_netagent_types[i].netagent_type, FALSE)) {
				return FALSE;
			}

			if (require_scoped_field) {
				matched_some_scoped_field = TRUE;
			}
		}
	}

	// Prohibited agent domain/type pairs (delegates included).
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (strbuflen(parsed_parameters->prohibited_netagent_types[i].netagent_domain, sizeof(parsed_parameters->prohibited_netagent_types[i].netagent_domain)) == 0 &&
			    strbuflen(parsed_parameters->prohibited_netagent_types[i].netagent_type, sizeof(parsed_parameters->prohibited_netagent_types[i].netagent_type)) == 0) {
				break;
			}

			if (necp_ifnet_matches_agent_type(ifp, parsed_parameters->prohibited_netagent_types[i].netagent_domain, parsed_parameters->prohibited_netagent_types[i].netagent_type, TRUE)) {
				return FALSE;
			}
		}
	}

	// Checked preferred properties
	// Soft preferences never disqualify; they increment *preferred_count
	// so callers can rank otherwise-matching interfaces.
	if (preferred_count) {
		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (uuid_is_null(parsed_parameters->preferred_netagents[i])) {
					break;
				}

				if (necp_ifnet_matches_agent(ifp, &parsed_parameters->preferred_netagents[i], TRUE)) {
					(*preferred_count)++;
					if (require_scoped_field) {
						matched_some_scoped_field = TRUE;
					}
				}
			}
		}

		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strbuflen(parsed_parameters->preferred_netagent_types[i].netagent_domain, sizeof(parsed_parameters->preferred_netagent_types[i].netagent_domain)) == 0 &&
				    strbuflen(parsed_parameters->preferred_netagent_types[i].netagent_type, sizeof(parsed_parameters->preferred_netagent_types[i].netagent_type)) == 0) {
					break;
				}

				if (necp_ifnet_matches_agent_type(ifp, parsed_parameters->preferred_netagent_types[i].netagent_domain, parsed_parameters->preferred_netagent_types[i].netagent_type, TRUE)) {
					(*preferred_count)++;
					if (require_scoped_field) {
						matched_some_scoped_field = TRUE;
					}
				}
			}
		}

		// Avoided agents count as "preferred" when ABSENT.
		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (uuid_is_null(parsed_parameters->avoided_netagents[i])) {
					break;
				}

				if (!necp_ifnet_matches_agent(ifp, &parsed_parameters->avoided_netagents[i], TRUE)) {
					(*preferred_count)++;
				}
			}
		}

		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strbuflen(parsed_parameters->avoided_netagent_types[i].netagent_domain, sizeof(parsed_parameters->avoided_netagent_types[i].netagent_domain)) == 0 &&
				    strbuflen(parsed_parameters->avoided_netagent_types[i].netagent_type, sizeof(parsed_parameters->avoided_netagent_types[i].netagent_type)) == 0) {
					break;
				}

				if (!necp_ifnet_matches_agent_type(ifp, parsed_parameters->avoided_netagent_types[i].netagent_domain,
				    parsed_parameters->avoided_netagent_types[i].netagent_type, TRUE)) {
					(*preferred_count)++;
				}
			}
		}
	}

	if (require_scoped_field) {
		return matched_some_scoped_field;
	}

	return TRUE;
}
6147
/*
 * Find the interface index that best satisfies the parsed client
 * parameters.
 *
 * Returns TRUE if the parameters can be satisfied. On success
 * *return_ifindex is either the interface index the client should be
 * scoped to, or 0 when no scoping is required. *validate_agents is set
 * to TRUE when no interface was selected but required agent fields
 * remain to be validated against policy by the caller.
 *
 * Selection order: ordered (ranked) interface list first, then the
 * full interface list for scoped/unranked candidates. Interfaces with
 * a higher preferred-field match count win.
 */
static bool
necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_parameters,
    u_int *return_ifindex, bool *validate_agents)
{
	struct ifnet *ifp = NULL;
	u_int32_t best_preferred_count = 0;
	bool has_preferred_fields = FALSE;
	*return_ifindex = 0;

	// An explicitly required interface index short-circuits matching.
	if (parsed_parameters->required_interface_index != 0) {
		*return_ifindex = parsed_parameters->required_interface_index;
		return TRUE;
	}

	// Check and save off flags
	u_int32_t flags = 0;
	bool has_prohibit_flags = FALSE;
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) {
		flags = parsed_parameters->flags;
		has_prohibit_flags = (parsed_parameters->flags &
		    (NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE |
		    NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED));
	}

	// Nothing interface-related to match: succeed without scoping.
	if (!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_INTERESTING_IFNET_FIELDS) &&
	    !has_prohibit_flags) {
		return TRUE;
	}

	has_preferred_fields = (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_PREFERRED_FIELDS);

	// We have interesting parameters to parse and find a matching interface
	ifnet_head_lock_shared();

	if (!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_FIELDS) &&
	    !has_preferred_fields) {
		// We do have fields to match, but they are only prohibitory
		// If the first interface in the list matches, or there are no ordered interfaces, we don't need to scope
		ifp = TAILQ_FIRST(&ifnet_ordered_head);
		if (ifp == NULL || necp_ifnet_matches_parameters(ifp, parsed_parameters, 0, NULL, false, false)) {
			// Don't set return_ifindex, so the client doesn't need to scope
			ifnet_head_done();
			return TRUE;
		}

		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR &&
		    parsed_parameters->remote_addr.sin6.sin6_family == AF_INET6 &&
		    parsed_parameters->remote_addr.sin6.sin6_scope_id != IFSCOPE_NONE &&
		    parsed_parameters->remote_addr.sin6.sin6_scope_id <= (u_int32_t)if_index) {
			ifp = ifindex2ifnet[parsed_parameters->remote_addr.sin6.sin6_scope_id];
			if (ifp != NULL && necp_ifnet_matches_parameters(ifp, parsed_parameters, 0, NULL, false, false)) {
				// Don't set return_ifindex, so the client doesn't need to scope since the v6 scope ID will
				// already route to the correct interface
				ifnet_head_done();
				return TRUE;
			}
		}
	}

	// First check the ordered interface list
	TAILQ_FOREACH(ifp, &ifnet_ordered_head, if_ordered_link) {
		u_int32_t preferred_count = 0;
		if (necp_ifnet_matches_parameters(ifp, parsed_parameters, flags, &preferred_count, false, false)) {
			if (preferred_count > best_preferred_count ||
			    *return_ifindex == 0) {
				// Everything matched, and is most preferred. Return this interface.
				*return_ifindex = ifp->if_index;
				best_preferred_count = preferred_count;

				// Without preference fields, the first match is final.
				if (!has_preferred_fields) {
					break;
				}
			}
		}

		if (has_prohibit_flags &&
		    ifp == TAILQ_FIRST(&ifnet_ordered_head)) {
			// This was the first interface. From here on, if the
			// client prohibited either expensive or constrained,
			// don't allow either as a secondary interface option.
			flags |= (NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE |
			    NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED);
		}
	}

	bool is_listener = ((parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
	    (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER));

	// Then check the remaining interfaces
	if ((parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_FIELDS) &&
	    ((!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE)) ||
	    necp_interface_type_should_match_unranked_interfaces(parsed_parameters->required_interface_type) ||
	    (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR) ||
	    is_listener) &&
	    (*return_ifindex == 0 || has_preferred_fields)) {
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			u_int32_t preferred_count = 0;
			if (NECP_IFP_IS_ON_ORDERED_LIST(ifp)) {
				// This interface was in the ordered list, skip
				continue;
			}
			// require_scoped_field is TRUE here: the unranked interface
			// must positively match at least one scoped field.
			if (necp_ifnet_matches_parameters(ifp, parsed_parameters, flags, &preferred_count, false, true)) {
				if (preferred_count > best_preferred_count ||
				    *return_ifindex == 0) {
					// Everything matched, and is most preferred. Return this interface.
					*return_ifindex = ifp->if_index;
					best_preferred_count = preferred_count;

					if (!has_preferred_fields) {
						break;
					}
				}
			}
		}
	}

	ifnet_head_done();

	if (has_preferred_fields && best_preferred_count == 0 &&
	    ((parsed_parameters->valid_fields & (NECP_PARSED_PARAMETERS_SCOPED_FIELDS | NECP_PARSED_PARAMETERS_PREFERRED_FIELDS)) ==
	    (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_PREFERRED_FIELDS))) {
		// If only has preferred ifnet fields, and nothing was found, clear the interface index and return TRUE
		*return_ifindex = 0;
		return TRUE;
	}

	if (*return_ifindex == 0 &&
	    !(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_IFNET_FIELDS)) {
		// Has required fields, but not including specific interface fields. Pass for now, and check
		// to see if agents are satisfied by policy.
		*validate_agents = TRUE;
		return TRUE;
	}

	return *return_ifindex != 0;
}
6284
/*
 * Copy domain-related NECP attributes from a socket/inpcb pair into a
 * nstat_domain_info record for network statistics reporting.
 *
 * When the socket is marked silent (SOF1_DOMAIN_INFO_SILENT), only
 * is_silent is set and no attribute strings are copied. Otherwise the
 * tracker domain takes precedence over the plain domain for
 * domain_name. All attribute reads happen under the NECP socket
 * attribute lock so the strings cannot change or be freed mid-copy.
 */
void
necp_copy_inp_domain_info(struct inpcb *inp, struct socket *so, nstat_domain_info *domain_info)
{
	if (inp == NULL || so == NULL || domain_info == NULL) {
		return;
	}

	necp_lock_socket_attributes();

	domain_info->is_silent = !!(so->so_flags1 & SOF1_DOMAIN_INFO_SILENT);
	if (!domain_info->is_silent) {
		domain_info->is_tracker = !!(so->so_flags1 & SOF1_KNOWN_TRACKER);
		domain_info->is_non_app_initiated = !!(so->so_flags1 & SOF1_TRACKER_NON_APP_INITIATED);
		// Tracker domain takes precedence over the plain domain.
		if (domain_info->is_tracker &&
		    inp->inp_necp_attributes.inp_tracker_domain != NULL) {
			strlcpy(domain_info->domain_name, inp->inp_necp_attributes.inp_tracker_domain,
			    sizeof(domain_info->domain_name));
		} else if (inp->inp_necp_attributes.inp_domain != NULL) {
			strlcpy(domain_info->domain_name, inp->inp_necp_attributes.inp_domain,
			    sizeof(domain_info->domain_name));
		}
		if (inp->inp_necp_attributes.inp_domain_owner != NULL) {
			strlcpy(domain_info->domain_owner, inp->inp_necp_attributes.inp_domain_owner,
			    sizeof(domain_info->domain_owner));
		}
		if (inp->inp_necp_attributes.inp_domain_context != NULL) {
			strlcpy(domain_info->domain_tracker_ctxt, inp->inp_necp_attributes.inp_domain_context,
			    sizeof(domain_info->domain_tracker_ctxt));
		}
	}

	necp_unlock_socket_attributes();
}
6318
6319 void
necp_with_inp_domain_name(struct socket * so,void * ctx,void (* with_func)(char * domain_name __null_terminated,void * ctx))6320 necp_with_inp_domain_name(struct socket *so, void *ctx, void (*with_func)(char *domain_name __null_terminated, void *ctx))
6321 {
6322 struct inpcb *inp = NULL;
6323
6324 if (so == NULL || with_func == NULL) {
6325 return;
6326 }
6327
6328 inp = (struct inpcb *)so->so_pcb;
6329 if (inp == NULL) {
6330 return;
6331 }
6332
6333 necp_lock_socket_attributes();
6334 with_func(inp->inp_necp_attributes.inp_domain, ctx);
6335 necp_unlock_socket_attributes();
6336 }
6337
/*
 * Extract domain-related information from a client's parameter TLVs
 * into a nstat_domain_info record.
 *
 * Sizing convention: when domain_info is NULL the function only
 * reports the buffer size needed (sizeof(nstat_domain_info)); when
 * client is NULL it returns 0. Otherwise it parses the TLV stream in
 * `parameters`, fills in *domain_info, and returns the record size.
 *
 * Domain selection: a silent client gets an all-zero record with only
 * is_silent set; otherwise the tracker domain (when the client is a
 * tracker) takes precedence over the plain domain for domain_name.
 */
static size_t
necp_find_domain_info_common(struct necp_client *client,
    u_int8_t * __sized_by(parameters_size)parameters,
    size_t parameters_size,
    struct necp_client_flow_registration *flow_registration, /* For logging purposes only */
    nstat_domain_info *domain_info)
{
	if (client == NULL) {
		return 0;
	}
	if (domain_info == NULL) {
		return sizeof(nstat_domain_info);
	}

	size_t offset = 0;
	u_int32_t flags = 0;
	// Domain pointers are captured during the scan and resolved into
	// domain_name afterwards, once the flags (tracker/silent) are known.
	u_int8_t *tracker_domain = NULL;
	u_int8_t *domain = NULL;
	size_t tracker_domain_length = 0;
	size_t domain_length = 0;

	NECP_CLIENT_FLOW_LOG(client, flow_registration, "Collecting stats");

	while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);

		if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
			// If the length is larger than what can fit in the remaining parameters size, bail
			NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
			break;
		}

		if (length > 0) {
			u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
			if (value != NULL) {
				switch (type) {
				case NECP_CLIENT_PARAMETER_FLAGS: {
					if (length >= sizeof(u_int32_t)) {
						memcpy(&flags, value, sizeof(u_int32_t));
					}

					domain_info->is_tracker =
					    !!(flags & NECP_CLIENT_PARAMETER_FLAG_KNOWN_TRACKER);
					domain_info->is_non_app_initiated =
					    !!(flags & NECP_CLIENT_PARAMETER_FLAG_NON_APP_INITIATED);
					domain_info->is_silent =
					    !!(flags & NECP_CLIENT_PARAMETER_FLAG_SILENT);
					break;
				}
				case NECP_CLIENT_PARAMETER_TRACKER_DOMAIN: {
					// Defer the copy; resolved after the scan.
					tracker_domain_length = length;
					tracker_domain = value;
					break;
				}
				case NECP_CLIENT_PARAMETER_DOMAIN: {
					// Defer the copy; resolved after the scan.
					domain_length = length;
					domain = value;
					break;
				}
				case NECP_CLIENT_PARAMETER_DOMAIN_OWNER: {
					size_t length_to_copy = MIN(length, sizeof(domain_info->domain_owner));
					strbufcpy(domain_info->domain_owner, sizeof(domain_info->domain_owner), (const char *)value, length_to_copy);
					break;
				}
				case NECP_CLIENT_PARAMETER_DOMAIN_CONTEXT: {
					size_t length_to_copy = MIN(length, sizeof(domain_info->domain_tracker_ctxt));
					strbufcpy(domain_info->domain_tracker_ctxt, sizeof(domain_info->domain_tracker_ctxt), (const char *)value, length_to_copy);
					break;
				}
				case NECP_CLIENT_PARAMETER_ATTRIBUTED_BUNDLE_IDENTIFIER: {
					size_t length_to_copy = MIN(length, sizeof(domain_info->domain_attributed_bundle_id));
					strbufcpy(domain_info->domain_attributed_bundle_id, sizeof(domain_info->domain_attributed_bundle_id), (const char *)value, length_to_copy);
					break;
				}
				case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						if (necp_client_address_is_valid(&address_struct->address.sa)) {
							domain_info->remote.v6 = address_struct->address.sin6;
						}
					}
					break;
				}
				default: {
					break;
				}
				}
			}
		}
		offset += sizeof(struct necp_tlv_header) + length;
	}

	if (domain_info->is_silent) {
		// Silent clients report nothing except the silent bit itself.
		memset(domain_info, 0, sizeof(*domain_info));
		domain_info->is_silent = true;
	} else if (domain_info->is_tracker && tracker_domain != NULL && tracker_domain_length > 0) {
		size_t length_to_copy = MIN(tracker_domain_length, sizeof(domain_info->domain_name));
		strbufcpy(domain_info->domain_name, sizeof(domain_info->domain_name), (const char *)tracker_domain, length_to_copy);
	} else if (domain != NULL && domain_length > 0) {
		size_t length_to_copy = MIN(domain_length, sizeof(domain_info->domain_name));
		strbufcpy(domain_info->domain_name, sizeof(domain_info->domain_name), (const char *)domain, length_to_copy);
	}

	NECP_CLIENT_FLOW_LOG(client, flow_registration,
	    "Collected stats - domain <%s> owner <%s> ctxt <%s> bundle id <%s> "
	    "is_tracker %d is_non_app_initiated %d is_silent %d",
	    domain_info->domain_name,
	    domain_info->domain_owner,
	    domain_info->domain_tracker_ctxt,
	    domain_info->domain_attributed_bundle_id,
	    domain_info->is_tracker,
	    domain_info->is_non_app_initiated,
	    domain_info->is_silent);

	return sizeof(nstat_domain_info);
}
6455
6456 static size_t
necp_find_conn_extension_info(nstat_provider_context ctx,int requested_extension,void * __sized_by (buf_size)buf,size_t buf_size)6457 necp_find_conn_extension_info(nstat_provider_context ctx,
6458 int requested_extension, /* The extension to be returned */
6459 void * __sized_by(buf_size)buf, /* If not NULL, the address for extensions to be returned in */
6460 size_t buf_size) /* The size of the buffer space, typically matching the return from a previous call with a NULL buf pointer */
6461 {
6462 // Note, the caller has guaranteed that any buffer has been zeroed, there is no need to clear it again
6463
6464 if (ctx == NULL) {
6465 return 0;
6466 }
6467 struct necp_client *client = (struct necp_client *)ctx;
6468 switch (requested_extension) {
6469 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
6470 // This is for completeness. The intent is that domain information can be extracted at user level from the TLV parameters
6471 if (buf == NULL) {
6472 return sizeof(nstat_domain_info);
6473 }
6474 if (buf_size < sizeof(nstat_domain_info)) {
6475 return 0;
6476 }
6477 return necp_find_domain_info_common(client, client->parameters, client->parameters_length, NULL, (nstat_domain_info *)buf);
6478
6479 case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV: {
6480 size_t parameters_length = client->parameters_length;
6481 if (buf == NULL) {
6482 return parameters_length;
6483 }
6484 if (buf_size < parameters_length) {
6485 return 0;
6486 }
6487 memcpy(buf, client->parameters, parameters_length);
6488 return parameters_length;
6489 }
6490 case NSTAT_EXTENDED_UPDATE_TYPE_ORIGINAL_NECP_TLV:
6491 if (buf == NULL) {
6492 return (client->original_parameters_source != NULL) ? client->original_parameters_source->parameters_length : 0;
6493 }
6494 if ((client->original_parameters_source == NULL) || (buf_size < client->original_parameters_source->parameters_length)) {
6495 return 0;
6496 }
6497 memcpy(buf, client->original_parameters_source->parameters, client->original_parameters_source->parameters_length);
6498 return client->original_parameters_source->parameters_length;
6499
6500 case NSTAT_EXTENDED_UPDATE_TYPE_ORIGINAL_DOMAIN:
6501 if (buf == NULL) {
6502 return (client->original_parameters_source != NULL) ? sizeof(nstat_domain_info) : 0;
6503 }
6504 if ((buf_size < sizeof(nstat_domain_info)) || (client->original_parameters_source == NULL)) {
6505 return 0;
6506 }
6507 return necp_find_domain_info_common(client, client->original_parameters_source->parameters, client->original_parameters_source->parameters_length,
6508 NULL, (nstat_domain_info *)buf);
6509
6510 default:
6511 return 0;
6512 }
6513 }
6514
6515 #if SKYWALK
6516
6517 static struct traffic_stats*
media_stats_embedded_ts(struct media_stats * media_stats,uint32_t ifflags)6518 media_stats_embedded_ts(struct media_stats *media_stats, uint32_t ifflags)
6519 {
6520 struct traffic_stats *ts = NULL;
6521 if (media_stats) {
6522 if (ifflags & NSTAT_IFNET_IS_WIFI) {
6523 if (ifflags & NSTAT_IFNET_IS_WIFI_INFRA) {
6524 ts = &media_stats->ms_wifi_infra;
6525 } else {
6526 ts = &media_stats->ms_wifi_non_infra;
6527 }
6528 } else if (ifflags & NSTAT_IFNET_IS_CELLULAR) {
6529 ts = &media_stats->ms_cellular;
6530 } else if (ifflags & NSTAT_IFNET_IS_WIRED) {
6531 ts = &media_stats->ms_wired;
6532 } else if (ifflags & NSTAT_IFNET_IS_COMPANIONLINK_BT) {
6533 ts = &media_stats->ms_bluetooth;
6534 } else if (!(ifflags & NSTAT_IFNET_IS_LOOPBACK)) {
6535 ts = &media_stats->ms_alternate;
6536 }
6537 }
6538 return ts;
6539 }
6540
6541 static size_t
necp_find_extension_info(userland_stats_provider_context * ctx,int requested_extension,void * __sized_by (buf_size)buf,size_t buf_size)6542 necp_find_extension_info(userland_stats_provider_context *ctx,
6543 int requested_extension, /* The extension to be returned */
6544 void * __sized_by(buf_size)buf, /* If not NULL, the address for extensions to be returned in */
6545 size_t buf_size) /* The size of the buffer space, typically matching the return from a previous call with a NULL buf pointer */
6546 {
6547 if (ctx == NULL) {
6548 return 0;
6549 }
6550 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
6551 struct necp_client *client = flow_registration->client;
6552
6553 switch (requested_extension) {
6554 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
6555 if (buf == NULL) {
6556 return sizeof(nstat_domain_info);
6557 }
6558 if (buf_size < sizeof(nstat_domain_info)) {
6559 return 0;
6560 }
6561 return necp_find_domain_info_common(client, client->parameters, client->parameters_length, flow_registration, (nstat_domain_info *)buf);
6562
6563 case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV:
6564 if (buf == NULL) {
6565 return client->parameters_length;
6566 }
6567 if (buf_size < client->parameters_length) {
6568 return 0;
6569 }
6570 memcpy(buf, client->parameters, client->parameters_length);
6571 return client->parameters_length;
6572
6573 case NSTAT_EXTENDED_UPDATE_TYPE_FUUID:
6574 if (buf == NULL) {
6575 return sizeof(uuid_t);
6576 }
6577 if (buf_size < sizeof(uuid_t)) {
6578 return 0;
6579 }
6580 uuid_copy(buf, flow_registration->registration_id);
6581 return sizeof(uuid_t);
6582
6583 case NSTAT_EXTENDED_UPDATE_TYPE_BLUETOOTH_COUNTS: {
6584 // Retrieve details from the last time the assigned flows were updated
6585 u_int32_t route_ifindex = IFSCOPE_NONE;
6586 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6587 u_int64_t combined_interface_details = 0;
6588
6589 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
6590 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
6591 bool is_companionlink_bluetooth = (route_ifflags & NSTAT_IFNET_IS_COMPANIONLINK_BT);
6592
6593 if (buf == NULL) {
6594 return (is_companionlink_bluetooth ||
6595 (route_ifflags & NSTAT_IFNET_PEEREGRESSINTERFACE_IS_CELLULAR)) ? sizeof(nstat_interface_counts):0;
6596 }
6597 if (buf_size < sizeof(nstat_interface_counts)) {
6598 return 0;
6599 }
6600
6601 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
6602 if ((sf != NULL) &&
6603 (is_companionlink_bluetooth || (route_ifflags & NSTAT_IFNET_PEEREGRESSINTERFACE_IS_CELLULAR))) {
6604 nstat_interface_counts *bt_counts = (nstat_interface_counts *)buf;
6605 bt_counts->nstat_rxbytes = sf->sf_ibytes;
6606 bt_counts->nstat_txbytes = sf->sf_obytes;
6607 return sizeof(nstat_interface_counts);
6608 } else {
6609 return 0;
6610 }
6611 }
6612
6613 default:
6614 return 0;
6615 }
6616 }
6617
/*
 * Walk the client's TLV-encoded parameters and extract the metadata used to
 * populate nstat descriptors: remote address, effective pid, uid, effective
 * uuid, persona id, traffic class and fallback mode.
 *
 * Each output is written only when a matching TLV of sufficient length is
 * present, so callers must pre-initialize all outputs with defaults.  If no
 * TLV supplied an effective uuid, the responsible process uuid (if any) of
 * the owning process is used as a fallback.
 */
static void
necp_find_netstat_data(struct necp_client *client,
    union necp_sockaddr_union *remote,
    pid_t *effective_pid,
    uid_t *uid,
    uuid_t euuid,
    uid_t *persona_id,
    u_int32_t *traffic_class,
    u_int8_t *fallback_mode)
{
	bool have_set_euuid = false;
	size_t offset = 0;
	u_int8_t *parameters;
	u_int32_t parameters_size;

	parameters = client->parameters;
	parameters_size = (u_int32_t)client->parameters_length;

	// Iterate the TLVs; each iteration advances by header + payload length.
	while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);

		if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
			// If the length is larger than what can fit in the remaining parameters size, bail
			NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
			break;
		}

		if (length > 0) {
			u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
			if (value != NULL) {
				switch (type) {
				case NECP_CLIENT_PARAMETER_APPLICATION: {
					// Effective uuid; may be overwritten by APPLICATION_ID below.
					if (length >= sizeof(uuid_t)) {
						uuid_copy(euuid, value);
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_PID: {
					if (length >= sizeof(pid_t)) {
						memcpy(effective_pid, value, sizeof(pid_t));
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
					if (length >= sizeof(u_int32_t)) {
						memcpy(traffic_class, value, sizeof(u_int32_t));
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_FALLBACK_MODE: {
					if (length >= sizeof(u_int8_t)) {
						memcpy(fallback_mode, value, sizeof(u_int8_t));
					}
					break;
				}
				// It is an implementation quirk that the remote address can be found in the necp parameters
				// while the local address must be retrieved from the flowswitch
				case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						if (necp_client_address_is_valid(&address_struct->address.sa)) {
							remote->sin6 = address_struct->address.sin6;
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_APPLICATION_ID: {
					// Composite id: uid + effective uuid + persona id.  Only
					// applied when the caller asked for uid and persona_id.
					if (length >= sizeof(necp_application_id_t) && uid && persona_id) {
						necp_application_id_t *application_id = (necp_application_id_t *)(void *)value;
						memcpy(uid, &application_id->uid, sizeof(uid_t));
						uuid_copy(euuid, application_id->effective_uuid);
						memcpy(persona_id, &application_id->persona_id, sizeof(uid_t));
						have_set_euuid = true;
					}
					break;
				}
				default: {
					break;
				}
				}
			}
		}
		offset += sizeof(struct necp_tlv_header) + length;
	}

	// No APPLICATION_ID TLV: fall back to the responsible process uuid of
	// the owning process, when one is set.
	if (!have_set_euuid) {
		proc_t proc = proc_find(client->proc_pid);
		if (proc != PROC_NULL) {
			uuid_t responsible_uuid = { 0 };
			proc_getresponsibleuuid(proc, responsible_uuid, sizeof(responsible_uuid));
			proc_rele(proc);
			if (!uuid_is_null(responsible_uuid)) {
				uuid_copy(euuid, responsible_uuid);
			}
		}
	}
}
6716
6717 static u_int64_t
necp_find_netstat_initial_properties(struct necp_client * client)6718 necp_find_netstat_initial_properties(struct necp_client *client)
6719 {
6720 size_t offset = 0;
6721 u_int64_t retval = 0;
6722 u_int8_t *parameters;
6723 u_int32_t parameters_size;
6724
6725 parameters = client->parameters;
6726 parameters_size = (u_int32_t)client->parameters_length;
6727
6728 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
6729 u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
6730 u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);
6731
6732 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
6733 // If the length is larger than what can fit in the remaining parameters size, bail
6734 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
6735 break;
6736 }
6737
6738 if (type == NECP_CLIENT_PARAMETER_FLAGS) {
6739 u_int32_t policy_condition_client_flags;
6740 u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
6741 if ((value != NULL) && (length >= sizeof(policy_condition_client_flags))) {
6742 memcpy(&policy_condition_client_flags, value, sizeof(policy_condition_client_flags));
6743 if (policy_condition_client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) {
6744 retval |= NSTAT_SOURCE_IS_LISTENER;
6745 }
6746 if (policy_condition_client_flags & NECP_CLIENT_PARAMETER_FLAG_INBOUND) {
6747 retval |= NSTAT_SOURCE_IS_INBOUND;
6748 }
6749 }
6750 break;
6751 }
6752 offset += sizeof(struct necp_tlv_header) + length;
6753 }
6754 if (retval == 0) {
6755 retval = NSTAT_SOURCE_IS_OUTBOUND;
6756 }
6757 return retval;
6758 }
6759
6760 static bool
necp_request_nexus_tcp_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)6761 necp_request_nexus_tcp_netstats(userland_stats_provider_context *ctx,
6762 u_int32_t *ifflagsp,
6763 nstat_progress_digest *digestp,
6764 nstat_counts *countsp,
6765 nstat_detailed_counts *detailed_countsp,
6766 void *metadatap)
6767 {
6768 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
6769 struct necp_client *client = flow_registration->client;
6770 struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
6771 struct necp_tcp_stats *tcpstats = (struct necp_tcp_stats *)ustats_kaddr;
6772 ASSERT(tcpstats != NULL);
6773 ASSERT(!flow_registration->aop_offload);
6774
6775 u_int32_t nstat_diagnostic_flags = 0;
6776
6777 // Retrieve details from the last time the assigned flows were updated
6778 u_int32_t route_ifindex = IFSCOPE_NONE;
6779 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6780 u_int64_t combined_interface_details = 0;
6781
6782 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
6783 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
6784
6785 if (route_ifindex == IFSCOPE_NONE) {
6786 // Mark no interface
6787 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
6788 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6789 NECPLOG(LOG_INFO, "req tcp stats, failed to get route details for pid %d curproc %d %s\n",
6790 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
6791 }
6792
6793 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
6794 if (sf == NULL) {
6795 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
6796 char namebuf[MAXCOMLEN + 1];
6797 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
6798 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
6799 NECPLOG(LOG_ERR, "req tcp stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
6800 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
6801 sf = &ntstat_sk_stats_zero;
6802 }
6803
6804 if (ifflagsp) {
6805 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
6806 *ifflagsp |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
6807 if (tcpstats->necp_tcp_extra.flags1 & SOF1_CELLFALLBACK) {
6808 *ifflagsp |= NSTAT_IFNET_VIA_CELLFALLBACK;
6809 }
6810 if ((digestp == NULL) && (countsp == NULL) && (detailed_countsp == NULL) && (metadatap == NULL)) {
6811 return true;
6812 }
6813 }
6814
6815 if (digestp) {
6816 // The digest is intended to give information that may help give insight into the state of the link
6817 digestp->rxbytes = tcpstats->necp_tcp_counts.necp_stat_rxbytes;
6818 digestp->txbytes = tcpstats->necp_tcp_counts.necp_stat_txbytes;
6819 digestp->rxduplicatebytes = tcpstats->necp_tcp_counts.necp_stat_rxduplicatebytes;
6820 digestp->rxoutoforderbytes = tcpstats->necp_tcp_counts.necp_stat_rxoutoforderbytes;
6821 digestp->txretransmit = tcpstats->necp_tcp_counts.necp_stat_txretransmit;
6822 digestp->ifindex = route_ifindex;
6823 digestp->state = tcpstats->necp_tcp_extra.state;
6824 digestp->txunacked = tcpstats->necp_tcp_extra.txunacked;
6825 digestp->txwindow = tcpstats->necp_tcp_extra.txwindow;
6826 digestp->connstatus.probe_activated = tcpstats->necp_tcp_extra.probestatus.probe_activated;
6827 digestp->connstatus.write_probe_failed = tcpstats->necp_tcp_extra.probestatus.write_probe_failed;
6828 digestp->connstatus.read_probe_failed = tcpstats->necp_tcp_extra.probestatus.read_probe_failed;
6829 digestp->connstatus.conn_probe_failed = tcpstats->necp_tcp_extra.probestatus.conn_probe_failed;
6830
6831 if ((countsp == NULL) && (metadatap == NULL)) {
6832 return true;
6833 }
6834 }
6835
6836 if (countsp) {
6837 countsp->nstat_rxbytes = tcpstats->necp_tcp_counts.necp_stat_rxbytes;
6838 countsp->nstat_txbytes = tcpstats->necp_tcp_counts.necp_stat_txbytes;
6839
6840 countsp->nstat_rxduplicatebytes = tcpstats->necp_tcp_counts.necp_stat_rxduplicatebytes;
6841 countsp->nstat_rxoutoforderbytes = tcpstats->necp_tcp_counts.necp_stat_rxoutoforderbytes;
6842 countsp->nstat_txretransmit = tcpstats->necp_tcp_counts.necp_stat_txretransmit;
6843
6844 countsp->nstat_min_rtt = tcpstats->necp_tcp_counts.necp_stat_min_rtt;
6845 countsp->nstat_avg_rtt = tcpstats->necp_tcp_counts.necp_stat_avg_rtt;
6846 countsp->nstat_var_rtt = tcpstats->necp_tcp_counts.necp_stat_var_rtt;
6847
6848 countsp->nstat_connectattempts = tcpstats->necp_tcp_extra.state >= TCPS_SYN_SENT ? 1 : 0;
6849 countsp->nstat_connectsuccesses = tcpstats->necp_tcp_extra.state >= TCPS_ESTABLISHED ? 1 : 0;
6850
6851 // Supplement what the user level has told us with what we know from the flowswitch
6852 // The nstat_counts structure has only one set of packet counts so set them from the
6853 // trusted flowswitch as clients may use them to calculate header overhead for cell/wifi/wired counts
6854 countsp->nstat_rxpackets = sf->sf_ipackets;
6855 countsp->nstat_txpackets = sf->sf_opackets;
6856 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
6857 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
6858 countsp->nstat_cell_txbytes = sf->sf_obytes;
6859 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
6860 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
6861 countsp->nstat_wifi_txbytes = sf->sf_obytes;
6862 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
6863 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
6864 countsp->nstat_wired_txbytes = sf->sf_obytes;
6865 }
6866 }
6867
6868 if (detailed_countsp) {
6869 detailed_countsp->nstat_media_stats.ms_total.ts_rxbytes = tcpstats->necp_tcp_counts.necp_stat_rxbytes;
6870 detailed_countsp->nstat_media_stats.ms_total.ts_txbytes = tcpstats->necp_tcp_counts.necp_stat_txbytes;
6871 detailed_countsp->nstat_media_stats.ms_total.ts_rxpackets = tcpstats->necp_tcp_counts.necp_stat_rxpackets;
6872 detailed_countsp->nstat_media_stats.ms_total.ts_txpackets = tcpstats->necp_tcp_counts.necp_stat_txpackets;
6873
6874 detailed_countsp->nstat_rxduplicatebytes = tcpstats->necp_tcp_counts.necp_stat_rxduplicatebytes;
6875 detailed_countsp->nstat_rxoutoforderbytes = tcpstats->necp_tcp_counts.necp_stat_rxoutoforderbytes;
6876 detailed_countsp->nstat_txretransmit = tcpstats->necp_tcp_counts.necp_stat_txretransmit;
6877
6878 detailed_countsp->nstat_min_rtt = tcpstats->necp_tcp_counts.necp_stat_min_rtt;
6879 detailed_countsp->nstat_avg_rtt = tcpstats->necp_tcp_counts.necp_stat_avg_rtt;
6880 detailed_countsp->nstat_var_rtt = tcpstats->necp_tcp_counts.necp_stat_var_rtt;
6881
6882 // Supplement what the user level has told us with what we know from the flowswitch
6883 // The user level statistics don't include a bitmap so use the one within the kernel,
6884 memcpy(&detailed_countsp->nstat_media_stats.ms_total.ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
6885
6886 struct traffic_stats *ts = media_stats_embedded_ts(&detailed_countsp->nstat_media_stats, route_ifflags);
6887 if (ts) {
6888 ts->ts_rxpackets = sf->sf_ipackets;
6889 ts->ts_txpackets = sf->sf_opackets;
6890 ts->ts_rxbytes = sf->sf_ibytes;
6891 ts->ts_txbytes = sf->sf_obytes;
6892 memcpy(&ts->ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
6893 }
6894 }
6895
6896 if (metadatap) {
6897 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)metadatap;
6898 memset(desc, 0, sizeof(*desc));
6899
6900 // Metadata from the flow registration
6901 uuid_copy(desc->fuuid, flow_registration->registration_id);
6902
6903 // Metadata that the necp client should have in TLV format.
6904 pid_t effective_pid = client->proc_pid;
6905 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, &desc->uid, desc->euuid, &desc->persona_id, &desc->traffic_class, &desc->fallback_mode);
6906 desc->epid = (u_int32_t)effective_pid;
6907
6908 // Metadata from the flow registration
6909 // This needs to revisited if multiple flows are created from one flow registration
6910 struct necp_client_flow *flow = NULL;
6911 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
6912 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
6913 break;
6914 }
6915
6916 // Metadata from the route
6917 desc->ifindex = route_ifindex;
6918 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
6919 desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
6920 if (tcpstats->necp_tcp_extra.flags1 & SOF1_CELLFALLBACK) {
6921 desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
6922 }
6923
6924 // Basic metadata from userland
6925 desc->rcvbufsize = tcpstats->necp_tcp_basic.rcvbufsize;
6926 desc->rcvbufused = tcpstats->necp_tcp_basic.rcvbufused;
6927
6928 // Additional TCP specific data
6929 desc->sndbufsize = tcpstats->necp_tcp_extra.sndbufsize;
6930 desc->sndbufused = tcpstats->necp_tcp_extra.sndbufused;
6931 desc->txunacked = tcpstats->necp_tcp_extra.txunacked;
6932 desc->txwindow = tcpstats->necp_tcp_extra.txwindow;
6933 desc->txcwindow = tcpstats->necp_tcp_extra.txcwindow;
6934 desc->traffic_mgt_flags = tcpstats->necp_tcp_extra.traffic_mgt_flags;
6935 desc->state = tcpstats->necp_tcp_extra.state;
6936
6937 u_int32_t cc_alg_index = tcpstats->necp_tcp_extra.cc_alg_index;
6938 if (cc_alg_index < TCP_CC_ALGO_COUNT) {
6939 strbufcpy(desc->cc_algo, sizeof(desc->cc_algo), tcp_cc_algo_list[cc_alg_index]->name, sizeof(tcp_cc_algo_list[cc_alg_index]->name));
6940 } else {
6941 strlcpy(desc->cc_algo, "unknown", sizeof(desc->cc_algo));
6942 }
6943
6944 desc->connstatus.probe_activated = tcpstats->necp_tcp_extra.probestatus.probe_activated;
6945 desc->connstatus.write_probe_failed = tcpstats->necp_tcp_extra.probestatus.write_probe_failed;
6946 desc->connstatus.read_probe_failed = tcpstats->necp_tcp_extra.probestatus.read_probe_failed;
6947 desc->connstatus.conn_probe_failed = tcpstats->necp_tcp_extra.probestatus.conn_probe_failed;
6948
6949 memcpy(&desc->activity_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
6950
6951 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) {
6952 uuid_string_t euuid_str = { 0 };
6953 uuid_unparse(desc->euuid, euuid_str);
6954 NECPLOG(LOG_NOTICE, "Collected stats - TCP - epid %d uid %d euuid %s persona id %d", desc->epid, desc->uid, euuid_str, desc->persona_id);
6955 }
6956 }
6957
6958 return true;
6959 }
6960
6961 static bool
necp_request_aop_tcp_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)6962 necp_request_aop_tcp_netstats(userland_stats_provider_context *ctx,
6963 u_int32_t *ifflagsp,
6964 nstat_progress_digest *digestp,
6965 nstat_counts *countsp,
6966 nstat_detailed_counts *detailed_countsp,
6967 void *metadatap)
6968 {
6969 struct aop_flow_stats flow_stats = {};
6970 struct tcp_info *tcpi = &flow_stats.transport.tcp_stats.tcp_info;
6971 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
6972 struct necp_client *client = flow_registration->client;
6973 int err = 0;
6974
6975 ASSERT(flow_registration->aop_offload);
6976
6977 u_int32_t nstat_diagnostic_flags = 0;
6978
6979 // Retrieve details from the last time the assigned flows were updated
6980 u_int32_t route_ifindex = IFSCOPE_NONE;
6981 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6982 u_int64_t combined_interface_details = 0;
6983
6984 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
6985 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
6986
6987 if (route_ifindex == IFSCOPE_NONE) {
6988 // Mark no interface
6989 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
6990 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6991 NECPLOG(LOG_INFO, "req tcp stats, failed to get route details for pid %d curproc %d %s\n",
6992 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
6993 }
6994
6995 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
6996 if (sf == NULL) {
6997 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
6998 char namebuf[MAXCOMLEN + 1];
6999 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
7000 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
7001 NECPLOG(LOG_ERR, "req tcp stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
7002 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
7003 sf = &ntstat_sk_stats_zero;
7004 }
7005
7006 if (ifflagsp) {
7007 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
7008 *ifflagsp |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7009 if ((digestp == NULL) && (countsp == NULL) && (detailed_countsp == NULL) && (metadatap == NULL)) {
7010 return true;
7011 }
7012 }
7013
7014 // This needs to revisited if multiple flows are created from one flow registration
7015 struct necp_client_flow *flow = LIST_FIRST(&flow_registration->flow_list);
7016 if (flow == NULL) {
7017 return false;
7018 }
7019
7020 ASSERT(flow->aop_offload && flow->flow_tag > 0);
7021 if (!flow->aop_stat_index_valid) {
7022 return false;
7023 }
7024 err = net_aop_get_flow_stats(flow->stats_index, &flow_stats);
7025 if (err != 0) {
7026 NECPLOG(LOG_ERR, "failed to get aop flow stats "
7027 "for flow id %u with error %d", flow->flow_tag, err);
7028 return false;
7029 }
7030
7031 if (__improbable(flow->flow_tag != flow_stats.flow_id)) {
7032 NECPLOG(LOG_ERR, "aop flow stats, flow tag 0x%x != 0x%x",
7033 flow->flow_tag, flow_stats.flow_id);
7034 return false;
7035 }
7036
7037 if (digestp) {
7038 // The digest is intended to give information that may help give insight into the state of the link
7039 digestp->rxbytes = tcpi->tcpi_rxbytes;
7040 digestp->txbytes = tcpi->tcpi_txbytes;
7041 digestp->rxduplicatebytes = tcpi->tcpi_rxduplicatebytes;
7042 digestp->rxoutoforderbytes = tcpi->tcpi_rxoutoforderbytes;
7043 digestp->txretransmit = tcpi->tcpi_txretransmitbytes;
7044 digestp->ifindex = route_ifindex;
7045 digestp->state = tcpi->tcpi_state;
7046 digestp->txunacked = tcpi->tcpi_txunacked;
7047 digestp->txwindow = tcpi->tcpi_snd_wnd;
7048
7049 if ((countsp == NULL) && (metadatap == NULL)) {
7050 return true;
7051 }
7052 }
7053
7054 if (countsp) {
7055 countsp->nstat_rxbytes = tcpi->tcpi_rxbytes;
7056 countsp->nstat_txbytes = tcpi->tcpi_txbytes;
7057
7058 countsp->nstat_rxduplicatebytes = tcpi->tcpi_rxduplicatebytes;
7059 countsp->nstat_rxoutoforderbytes = tcpi->tcpi_rxoutoforderbytes;
7060 countsp->nstat_txretransmit = tcpi->tcpi_txretransmitbytes;
7061
7062 countsp->nstat_min_rtt = tcpi->tcpi_rttbest;
7063 countsp->nstat_avg_rtt = tcpi->tcpi_srtt;
7064 countsp->nstat_var_rtt = tcpi->tcpi_rttvar;
7065
7066 countsp->nstat_connectattempts = tcpi->tcpi_state >= TCPS_SYN_SENT ? 1 : 0;
7067 countsp->nstat_connectsuccesses = tcpi->tcpi_state >= TCPS_ESTABLISHED ? 1 : 0;
7068
7069 // Supplement what the user level has told us with what we know from the flowswitch
7070 // The nstat_counts structure has only one set of packet counts so set them from the
7071 // trusted flowswitch as clients may use them to calculate header overhead for cell/wifi/wired counts
7072 countsp->nstat_rxpackets = sf->sf_ipackets;
7073 countsp->nstat_txpackets = sf->sf_opackets;
7074 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
7075 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
7076 countsp->nstat_cell_txbytes = sf->sf_obytes;
7077 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
7078 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
7079 countsp->nstat_wifi_txbytes = sf->sf_obytes;
7080 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
7081 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
7082 countsp->nstat_wired_txbytes = sf->sf_obytes;
7083 }
7084 }
7085
7086 if (detailed_countsp) {
7087 detailed_countsp->nstat_media_stats.ms_total.ts_rxbytes = tcpi->tcpi_rxbytes;
7088 detailed_countsp->nstat_media_stats.ms_total.ts_txbytes = tcpi->tcpi_txbytes;
7089 detailed_countsp->nstat_media_stats.ms_total.ts_rxpackets = tcpi->tcpi_rxpackets;
7090 detailed_countsp->nstat_media_stats.ms_total.ts_txpackets = tcpi->tcpi_txpackets;
7091
7092 detailed_countsp->nstat_rxduplicatebytes = tcpi->tcpi_rxduplicatebytes;
7093 detailed_countsp->nstat_rxoutoforderbytes = tcpi->tcpi_rxoutoforderbytes;
7094 detailed_countsp->nstat_txretransmit = tcpi->tcpi_txretransmitbytes;
7095
7096 detailed_countsp->nstat_min_rtt = tcpi->tcpi_rttbest;
7097 detailed_countsp->nstat_avg_rtt = tcpi->tcpi_srtt;
7098 detailed_countsp->nstat_var_rtt = tcpi->tcpi_rttvar;
7099
7100 struct traffic_stats *ts = media_stats_embedded_ts(&detailed_countsp->nstat_media_stats, route_ifflags);
7101 if (ts) {
7102 ts->ts_rxpackets = sf->sf_ipackets;
7103 ts->ts_txpackets = sf->sf_opackets;
7104 ts->ts_rxbytes = sf->sf_ibytes;
7105 ts->ts_txbytes = sf->sf_obytes;
7106 }
7107 }
7108
7109 if (metadatap) {
7110 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)metadatap;
7111 memset(desc, 0, sizeof(*desc));
7112
7113 // Metadata from the flow registration
7114 uuid_copy(desc->fuuid, flow_registration->registration_id);
7115
7116 // Metadata that the necp client should have in TLV format.
7117 pid_t effective_pid = client->proc_pid;
7118 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, &desc->uid, desc->euuid, &desc->persona_id, &desc->traffic_class, &desc->fallback_mode);
7119 desc->epid = (u_int32_t)effective_pid;
7120
7121 // Metadata from the flow registration
7122 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
7123
7124 // Metadata from the route
7125 desc->ifindex = route_ifindex;
7126 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
7127 desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7128
7129 // Basic metadata from userland
7130 desc->rcvbufsize = flow_stats.rx_buffer_stats.bufsize;
7131 desc->rcvbufused = flow_stats.rx_buffer_stats.bufused;
7132
7133 // Additional TCP specific data
7134 desc->sndbufsize = flow_stats.tx_buffer_stats.bufsize;
7135 desc->sndbufused = flow_stats.tx_buffer_stats.bufused;
7136 desc->txunacked = tcpi->tcpi_txunacked;
7137 desc->txwindow = tcpi->tcpi_snd_wnd;
7138 desc->txcwindow = tcpi->tcpi_snd_cwnd;
7139 desc->traffic_mgt_flags = 0;
7140 desc->state = tcpi->tcpi_state;
7141
7142 u_int32_t cc_alg_index = flow_stats.transport.tcp_stats.tcp_cc_algo;
7143 if (cc_alg_index < TCP_CC_ALGO_COUNT) {
7144 strbufcpy(desc->cc_algo, sizeof(desc->cc_algo), tcp_cc_algo_list[cc_alg_index]->name, sizeof(tcp_cc_algo_list[cc_alg_index]->name));
7145 } else {
7146 strlcpy(desc->cc_algo, "unknown", sizeof(desc->cc_algo));
7147 }
7148
7149 desc->connstatus.probe_activated = 0;
7150 desc->connstatus.write_probe_failed = 0;
7151 desc->connstatus.read_probe_failed = 0;
7152 desc->connstatus.conn_probe_failed = 0;
7153
7154 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) {
7155 uuid_string_t euuid_str = { 0 };
7156 uuid_unparse(desc->euuid, euuid_str);
7157 NECPLOG(LOG_NOTICE, "Collected stats - TCP - epid %d uid %d euuid %s persona id %d", desc->epid, desc->uid, euuid_str, desc->persona_id);
7158 }
7159 }
7160
7161 return true;
7162 }
7163
7164 // Called from NetworkStatistics when it wishes to collect latest information for a TCP flow.
7165 // It is a responsibility of NetworkStatistics to have previously zeroed any supplied memory.
7166 static bool
necp_request_tcp_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)7167 necp_request_tcp_netstats(userland_stats_provider_context *ctx,
7168 u_int32_t *ifflagsp,
7169 nstat_progress_digest *digestp,
7170 nstat_counts *countsp,
7171 nstat_detailed_counts *detailed_countsp,
7172 void *metadatap)
7173 {
7174 if (ctx == NULL) {
7175 return false;
7176 }
7177
7178 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
7179 if (__probable(!flow_registration->aop_offload)) {
7180 return necp_request_nexus_tcp_netstats(ctx, ifflagsp, digestp, countsp, detailed_countsp, metadatap);
7181 } else {
7182 return necp_request_aop_tcp_netstats(ctx, ifflagsp, digestp, countsp, detailed_countsp, metadatap);
7183 }
7184 }
7185
7186 // Called from NetworkStatistics when it wishes to collect latest information for a UDP flow.
7187 static bool
necp_request_udp_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)7188 necp_request_udp_netstats(userland_stats_provider_context *ctx,
7189 u_int32_t *ifflagsp,
7190 nstat_progress_digest *digestp,
7191 nstat_counts *countsp,
7192 nstat_detailed_counts *detailed_countsp,
7193 void *metadatap)
7194 {
7195 #pragma unused(digestp)
7196
7197 if (ctx == NULL) {
7198 return false;
7199 }
7200
7201 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
7202 struct necp_client *client = flow_registration->client;
7203 struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
7204 struct necp_udp_stats *udpstats = (struct necp_udp_stats *)ustats_kaddr;
7205 ASSERT(udpstats != NULL);
7206
7207 u_int32_t nstat_diagnostic_flags = 0;
7208
7209 // Retrieve details from the last time the assigned flows were updated
7210 u_int32_t route_ifindex = IFSCOPE_NONE;
7211 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
7212 u_int64_t combined_interface_details = 0;
7213
7214 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
7215 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
7216
7217 if (route_ifindex == IFSCOPE_NONE) {
7218 // Mark no interface
7219 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
7220 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
7221 NECPLOG(LOG_INFO, "req udp stats, failed to get route details for pid %d curproc %d %s\n",
7222 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
7223 }
7224
7225 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
7226 if (sf == NULL) {
7227 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
7228 char namebuf[MAXCOMLEN + 1];
7229 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
7230 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
7231 NECPLOG(LOG_ERR, "req udp stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
7232 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
7233 sf = &ntstat_sk_stats_zero;
7234 }
7235
7236 if (ifflagsp) {
7237 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
7238 *ifflagsp |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7239 if ((digestp == NULL) && (countsp == NULL) && (detailed_countsp == NULL) && (metadatap == NULL)) {
7240 return true;
7241 }
7242 }
7243
7244 if (countsp) {
7245 countsp->nstat_rxbytes = udpstats->necp_udp_counts.necp_stat_rxbytes;
7246 countsp->nstat_txbytes = udpstats->necp_udp_counts.necp_stat_txbytes;
7247
7248 countsp->nstat_rxduplicatebytes = udpstats->necp_udp_counts.necp_stat_rxduplicatebytes;
7249 countsp->nstat_rxoutoforderbytes = udpstats->necp_udp_counts.necp_stat_rxoutoforderbytes;
7250 countsp->nstat_txretransmit = udpstats->necp_udp_counts.necp_stat_txretransmit;
7251
7252 countsp->nstat_min_rtt = udpstats->necp_udp_counts.necp_stat_min_rtt;
7253 countsp->nstat_avg_rtt = udpstats->necp_udp_counts.necp_stat_avg_rtt;
7254 countsp->nstat_var_rtt = udpstats->necp_udp_counts.necp_stat_var_rtt;
7255
7256 // Supplement what the user level has told us with what we know from the flowswitch
7257 // The nstat_counts structure has only one set of packet counts so set them from the
7258 // trusted flowswitch as clients may use them to calculate header overhead for cell/wifi/wired counts
7259 countsp->nstat_rxpackets = sf->sf_ipackets;
7260 countsp->nstat_txpackets = sf->sf_opackets;
7261 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
7262 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
7263 countsp->nstat_cell_txbytes = sf->sf_obytes;
7264 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
7265 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
7266 countsp->nstat_wifi_txbytes = sf->sf_obytes;
7267 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
7268 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
7269 countsp->nstat_wired_txbytes = sf->sf_obytes;
7270 }
7271 }
7272
7273 if (detailed_countsp) {
7274 detailed_countsp->nstat_media_stats.ms_total.ts_rxbytes = udpstats->necp_udp_counts.necp_stat_rxbytes;
7275 detailed_countsp->nstat_media_stats.ms_total.ts_txbytes = udpstats->necp_udp_counts.necp_stat_txbytes;
7276 detailed_countsp->nstat_media_stats.ms_total.ts_rxpackets = udpstats->necp_udp_counts.necp_stat_rxpackets;
7277 detailed_countsp->nstat_media_stats.ms_total.ts_txpackets = udpstats->necp_udp_counts.necp_stat_txpackets;
7278
7279 detailed_countsp->nstat_rxduplicatebytes = udpstats->necp_udp_counts.necp_stat_rxduplicatebytes;
7280 detailed_countsp->nstat_rxoutoforderbytes = udpstats->necp_udp_counts.necp_stat_rxoutoforderbytes;
7281 detailed_countsp->nstat_txretransmit = udpstats->necp_udp_counts.necp_stat_txretransmit;
7282
7283 detailed_countsp->nstat_min_rtt = udpstats->necp_udp_counts.necp_stat_min_rtt;
7284 detailed_countsp->nstat_avg_rtt = udpstats->necp_udp_counts.necp_stat_avg_rtt;
7285 detailed_countsp->nstat_var_rtt = udpstats->necp_udp_counts.necp_stat_var_rtt;
7286
7287 // Supplement what the user level has told us with what we know from the flowswitch
7288 // The user level statistics don't include a bitmap so use the one within the kernel,
7289 memcpy(&detailed_countsp->nstat_media_stats.ms_total.ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
7290
7291 struct traffic_stats *ts = media_stats_embedded_ts(&detailed_countsp->nstat_media_stats, route_ifflags);
7292 if (ts) {
7293 ts->ts_rxpackets = sf->sf_ipackets;
7294 ts->ts_txpackets = sf->sf_opackets;
7295 ts->ts_rxbytes = sf->sf_ibytes;
7296 ts->ts_txbytes = sf->sf_obytes;
7297 memcpy(&ts->ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
7298 }
7299 }
7300
7301 if (metadatap) {
7302 nstat_udp_descriptor *desc = (nstat_udp_descriptor *)metadatap;
7303 memset(desc, 0, sizeof(*desc));
7304
7305 // Metadata from the flow registration
7306 uuid_copy(desc->fuuid, flow_registration->registration_id);
7307
7308 // Metadata that the necp client should have in TLV format.
7309 pid_t effective_pid = client->proc_pid;
7310 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, &desc->uid, desc->euuid, &desc->persona_id, &desc->traffic_class, &desc->fallback_mode);
7311 desc->epid = (u_int32_t)effective_pid;
7312
7313 // Metadata from the flow registration
7314 // This needs to revisited if multiple flows are created from one flow registration
7315 struct necp_client_flow *flow = NULL;
7316 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
7317 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
7318 break;
7319 }
7320
7321 // Metadata from the route
7322 desc->ifindex = route_ifindex;
7323 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
7324 desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7325
7326 // Basic metadata is all that is required for UDP
7327 desc->rcvbufsize = udpstats->necp_udp_basic.rcvbufsize;
7328 desc->rcvbufused = udpstats->necp_udp_basic.rcvbufused;
7329
7330 memcpy(&desc->activity_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
7331
7332 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) {
7333 uuid_string_t euuid_str = { 0 };
7334 uuid_unparse(desc->euuid, euuid_str);
7335 NECPLOG(LOG_NOTICE, "Collected stats - UDP - epid %d uid %d euuid %s persona id %d", desc->epid, desc->uid, euuid_str, desc->persona_id);
7336 }
7337 }
7338
7339 return true;
7340 }
7341
7342 // Called from NetworkStatistics when it wishes to collect latest information for a QUIC flow.
7343 //
7344 // TODO: For now it is an exact implementation as that of TCP.
7345 // Still to keep the logic separate for future divergence, keeping the routines separate.
7346 // It also seems there are lots of common code between existing implementations and
7347 // it would be good to refactor this logic at some point.
7348 static bool
necp_request_quic_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)7349 necp_request_quic_netstats(userland_stats_provider_context *ctx,
7350 u_int32_t *ifflagsp,
7351 nstat_progress_digest *digestp,
7352 nstat_counts *countsp,
7353 nstat_detailed_counts *detailed_countsp,
7354 void *metadatap)
7355 {
7356 if (ctx == NULL) {
7357 return false;
7358 }
7359
7360 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
7361 struct necp_client *client = flow_registration->client;
7362 struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
7363 struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
7364 ASSERT(quicstats != NULL);
7365
7366 u_int32_t nstat_diagnostic_flags = 0;
7367
7368 // Retrieve details from the last time the assigned flows were updated
7369 u_int32_t route_ifindex = IFSCOPE_NONE;
7370 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
7371 u_int64_t combined_interface_details = 0;
7372
7373 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
7374 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
7375
7376 if (route_ifindex == IFSCOPE_NONE) {
7377 // Mark no interface
7378 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
7379 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
7380 NECPLOG(LOG_INFO, "req quic stats, failed to get route details for pid %d curproc %d %s\n",
7381 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
7382 }
7383
7384 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
7385 if (sf == NULL) {
7386 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
7387 char namebuf[MAXCOMLEN + 1];
7388 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
7389 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
7390 NECPLOG(LOG_ERR, "req quic stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
7391 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
7392 sf = &ntstat_sk_stats_zero;
7393 }
7394
7395 if (ifflagsp) {
7396 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
7397 *ifflagsp |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7398 if (quicstats->necp_quic_extra.fallback) {
7399 *ifflagsp |= NSTAT_IFNET_VIA_CELLFALLBACK;
7400 }
7401 if ((digestp == NULL) && (countsp == NULL) && (detailed_countsp == NULL) && (metadatap == NULL)) {
7402 return true;
7403 }
7404 }
7405
7406 if (digestp) {
7407 // The digest is intended to give information that may help give insight into the state of the link
7408 digestp->rxbytes = quicstats->necp_quic_counts.necp_stat_rxbytes;
7409 digestp->txbytes = quicstats->necp_quic_counts.necp_stat_txbytes;
7410 digestp->rxduplicatebytes = quicstats->necp_quic_counts.necp_stat_rxduplicatebytes;
7411 digestp->rxoutoforderbytes = quicstats->necp_quic_counts.necp_stat_rxoutoforderbytes;
7412 digestp->txretransmit = quicstats->necp_quic_counts.necp_stat_txretransmit;
7413 digestp->ifindex = route_ifindex;
7414 digestp->state = quicstats->necp_quic_extra.state;
7415 digestp->txunacked = quicstats->necp_quic_extra.txunacked;
7416 digestp->txwindow = quicstats->necp_quic_extra.txwindow;
7417 digestp->connstatus.probe_activated = quicstats->necp_quic_extra.probestatus.probe_activated;
7418 digestp->connstatus.write_probe_failed = quicstats->necp_quic_extra.probestatus.write_probe_failed;
7419 digestp->connstatus.read_probe_failed = quicstats->necp_quic_extra.probestatus.read_probe_failed;
7420 digestp->connstatus.conn_probe_failed = quicstats->necp_quic_extra.probestatus.conn_probe_failed;
7421
7422 if ((countsp == NULL) && (metadatap == NULL)) {
7423 return true;
7424 }
7425 }
7426
7427 if (countsp) {
7428 countsp->nstat_rxbytes = quicstats->necp_quic_counts.necp_stat_rxbytes;
7429 countsp->nstat_txbytes = quicstats->necp_quic_counts.necp_stat_txbytes;
7430
7431 countsp->nstat_rxduplicatebytes = quicstats->necp_quic_counts.necp_stat_rxduplicatebytes;
7432 countsp->nstat_rxoutoforderbytes = quicstats->necp_quic_counts.necp_stat_rxoutoforderbytes;
7433 countsp->nstat_txretransmit = quicstats->necp_quic_counts.necp_stat_txretransmit;
7434
7435 countsp->nstat_min_rtt = quicstats->necp_quic_counts.necp_stat_min_rtt;
7436 countsp->nstat_avg_rtt = quicstats->necp_quic_counts.necp_stat_avg_rtt;
7437 countsp->nstat_var_rtt = quicstats->necp_quic_counts.necp_stat_var_rtt;
7438
7439 // TODO: It would be good to expose QUIC stats for CH/SH retransmission and connection state
7440 // Supplement what the user level has told us with what we know from the flowswitch
7441 // The nstat_counts structure has only one set of packet counts so set them from the
7442 // trusted flowswitch as clients may use them to calculate header overhead for cell/wifi/wired counts
7443 countsp->nstat_rxpackets = sf->sf_ipackets;
7444 countsp->nstat_txpackets = sf->sf_opackets;
7445 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
7446 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
7447 countsp->nstat_cell_txbytes = sf->sf_obytes;
7448 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
7449 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
7450 countsp->nstat_wifi_txbytes = sf->sf_obytes;
7451 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
7452 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
7453 countsp->nstat_wired_txbytes = sf->sf_obytes;
7454 }
7455 }
7456
7457 if (detailed_countsp) {
7458 detailed_countsp->nstat_media_stats.ms_total.ts_rxbytes = quicstats->necp_quic_counts.necp_stat_rxbytes;
7459 detailed_countsp->nstat_media_stats.ms_total.ts_txbytes = quicstats->necp_quic_counts.necp_stat_txbytes;
7460 detailed_countsp->nstat_media_stats.ms_total.ts_rxpackets = quicstats->necp_quic_counts.necp_stat_rxpackets;
7461 detailed_countsp->nstat_media_stats.ms_total.ts_txpackets = quicstats->necp_quic_counts.necp_stat_txpackets;
7462
7463 detailed_countsp->nstat_rxduplicatebytes = quicstats->necp_quic_counts.necp_stat_rxduplicatebytes;
7464 detailed_countsp->nstat_rxoutoforderbytes = quicstats->necp_quic_counts.necp_stat_rxoutoforderbytes;
7465 detailed_countsp->nstat_txretransmit = quicstats->necp_quic_counts.necp_stat_txretransmit;
7466
7467 detailed_countsp->nstat_min_rtt = quicstats->necp_quic_counts.necp_stat_min_rtt;
7468 detailed_countsp->nstat_avg_rtt = quicstats->necp_quic_counts.necp_stat_avg_rtt;
7469 detailed_countsp->nstat_var_rtt = quicstats->necp_quic_counts.necp_stat_var_rtt;
7470
7471 // Supplement what the user level has told us with what we know from the flowswitch
7472 // The user level statistics don't include a bitmap so use the one within the kernel,
7473 memcpy(&detailed_countsp->nstat_media_stats.ms_total.ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
7474
7475 struct traffic_stats *ts = media_stats_embedded_ts(&detailed_countsp->nstat_media_stats, route_ifflags);
7476 if (ts) {
7477 ts->ts_rxpackets = sf->sf_ipackets;
7478 ts->ts_txpackets = sf->sf_opackets;
7479 ts->ts_rxbytes = sf->sf_ibytes;
7480 ts->ts_txbytes = sf->sf_obytes;
7481 memcpy(&ts->ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
7482 }
7483 }
7484
7485 if (metadatap) {
7486 nstat_quic_descriptor *desc = (nstat_quic_descriptor *)metadatap;
7487 memset(desc, 0, sizeof(*desc));
7488
7489 // Metadata from the flow registration
7490 uuid_copy(desc->fuuid, flow_registration->registration_id);
7491
7492 // Metadata, that the necp client should have, in TLV format.
7493 pid_t effective_pid = client->proc_pid;
7494 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, &desc->uid, desc->euuid, &desc->persona_id, &desc->traffic_class, &desc->fallback_mode);
7495 desc->epid = (u_int32_t)effective_pid;
7496
7497 // Metadata from the flow registration
7498 // This needs to revisited if multiple flows are created from one flow registration
7499 struct necp_client_flow *flow = NULL;
7500 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
7501 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
7502 break;
7503 }
7504
7505 // Metadata from the route
7506 desc->ifindex = route_ifindex;
7507 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
7508 desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7509 if (quicstats->necp_quic_extra.fallback) {
7510 desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
7511 desc->fallback_mode = SO_FALLBACK_MODE_FAST;
7512 }
7513
7514 // Basic metadata from userland
7515 desc->rcvbufsize = quicstats->necp_quic_basic.rcvbufsize;
7516 desc->rcvbufused = quicstats->necp_quic_basic.rcvbufused;
7517
7518 // Additional QUIC specific data
7519 desc->sndbufsize = quicstats->necp_quic_extra.sndbufsize;
7520 desc->sndbufused = quicstats->necp_quic_extra.sndbufused;
7521 desc->txunacked = quicstats->necp_quic_extra.txunacked;
7522 desc->txwindow = quicstats->necp_quic_extra.txwindow;
7523 desc->txcwindow = quicstats->necp_quic_extra.txcwindow;
7524 desc->traffic_mgt_flags = quicstats->necp_quic_extra.traffic_mgt_flags;
7525 desc->state = quicstats->necp_quic_extra.state;
7526
7527 // TODO: CC algo defines should be named agnostic of the protocol
7528 u_int32_t cc_alg_index = quicstats->necp_quic_extra.cc_alg_index;
7529 if (cc_alg_index < TCP_CC_ALGO_COUNT) {
7530 strbufcpy(desc->cc_algo, sizeof(desc->cc_algo), tcp_cc_algo_list[cc_alg_index]->name, sizeof(tcp_cc_algo_list[cc_alg_index]->name));
7531 } else {
7532 strlcpy(desc->cc_algo, "unknown", sizeof(desc->cc_algo));
7533 }
7534
7535 memcpy(&desc->activity_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
7536
7537 desc->connstatus.probe_activated = quicstats->necp_quic_extra.probestatus.probe_activated;
7538 desc->connstatus.write_probe_failed = quicstats->necp_quic_extra.probestatus.write_probe_failed;
7539 desc->connstatus.read_probe_failed = quicstats->necp_quic_extra.probestatus.read_probe_failed;
7540 desc->connstatus.conn_probe_failed = quicstats->necp_quic_extra.probestatus.conn_probe_failed;
7541
7542 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) {
7543 uuid_string_t euuid_str = { 0 };
7544 uuid_unparse(desc->euuid, euuid_str);
7545 NECPLOG(LOG_NOTICE, "Collected stats - QUIC - epid %d uid %d euuid %s persona id %d", desc->epid, desc->uid, euuid_str, desc->persona_id);
7546 }
7547 }
7548 return true;
7549 }
7550
7551 #endif /* SKYWALK */
7552
7553 // Support functions for NetworkStatistics support for necp_client connections
7554
7555 static void
necp_client_inherit_from_parent(struct necp_client * client,struct necp_client * parent)7556 necp_client_inherit_from_parent(
7557 struct necp_client *client,
7558 struct necp_client *parent)
7559 {
7560 assert(client->original_parameters_source == NULL);
7561
7562 if (parent->original_parameters_source != NULL) {
7563 client->original_parameters_source = parent->original_parameters_source;
7564 } else {
7565 client->original_parameters_source = parent;
7566 }
7567 necp_client_retain(client->original_parameters_source);
7568 }
7569
7570 static void
necp_find_conn_netstat_data(struct necp_client * client,u_int32_t * ntstat_flags,pid_t * effective_pid,uuid_t * puuid,uid_t * uid,uuid_t * euuid,uid_t * persona_id)7571 necp_find_conn_netstat_data(struct necp_client *client,
7572 u_int32_t *ntstat_flags,
7573 pid_t *effective_pid,
7574 uuid_t *puuid,
7575 uid_t *uid,
7576 uuid_t *euuid,
7577 uid_t *persona_id)
7578 {
7579 bool has_remote_address = false;
7580 bool has_ip_protocol = false;
7581 bool has_transport_protocol = false;
7582 size_t offset = 0;
7583 u_int8_t *parameters;
7584 u_int32_t parameters_size;
7585
7586
7587 parameters = client->parameters;
7588 parameters_size = (u_int32_t)client->parameters_length;
7589
7590 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
7591 u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
7592 u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);
7593
7594 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
7595 // If the length is larger than what can fit in the remaining parameters size, bail
7596 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
7597 break;
7598 }
7599
7600 if (length > 0) {
7601 u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
7602 if (value != NULL) {
7603 switch (type) {
7604 case NECP_CLIENT_PARAMETER_APPLICATION: {
7605 if ((euuid) && (length >= sizeof(uuid_t))) {
7606 uuid_copy(*euuid, value);
7607 }
7608 break;
7609 }
7610 case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
7611 if (length >= 1) {
7612 has_ip_protocol = true;
7613 }
7614 break;
7615 }
7616 case NECP_CLIENT_PARAMETER_PID: {
7617 if ((effective_pid) && length >= sizeof(pid_t)) {
7618 memcpy(effective_pid, value, sizeof(pid_t));
7619 }
7620 break;
7621 }
7622 case NECP_CLIENT_PARAMETER_PARENT_ID: {
7623 if ((puuid) && (length == sizeof(uuid_t))) {
7624 uuid_copy(*puuid, value);
7625 }
7626 break;
7627 }
7628 // It is an implementation quirk that the remote address can be found in the necp parameters
7629 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
7630 if (length >= sizeof(struct necp_policy_condition_addr)) {
7631 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
7632 if (necp_client_address_is_valid(&address_struct->address.sa)) {
7633 has_remote_address = true;
7634 }
7635 }
7636 break;
7637 }
7638 case NECP_CLIENT_PARAMETER_TRANSPORT_PROTOCOL: {
7639 if (length >= 1) {
7640 has_transport_protocol = true;
7641 }
7642 break;
7643 }
7644 case NECP_CLIENT_PARAMETER_APPLICATION_ID: {
7645 if (length >= sizeof(necp_application_id_t) && uid && persona_id) {
7646 necp_application_id_t *application_id = (necp_application_id_t *)(void *)value;
7647 memcpy(uid, &application_id->uid, sizeof(uid_t));
7648 uuid_copy(*euuid, application_id->effective_uuid);
7649 memcpy(persona_id, &application_id->persona_id, sizeof(uid_t));
7650 }
7651 break;
7652 }
7653 default: {
7654 break;
7655 }
7656 }
7657 }
7658 }
7659 offset += sizeof(struct necp_tlv_header) + length;
7660 }
7661 if (ntstat_flags) {
7662 *ntstat_flags = (has_remote_address && has_ip_protocol && has_transport_protocol)? NSTAT_NECP_CONN_HAS_NET_ACCESS: 0;
7663 }
7664 }
7665
7666 static bool
necp_request_conn_netstats(nstat_provider_context ctx,u_int32_t * ifflagsp,nstat_counts * countsp,nstat_detailed_counts * detailsp,void * metadatap)7667 necp_request_conn_netstats(nstat_provider_context ctx,
7668 u_int32_t *ifflagsp,
7669 nstat_counts *countsp,
7670 nstat_detailed_counts *detailsp,
7671 void *metadatap)
7672 {
7673 if (ctx == NULL) {
7674 return false;
7675 }
7676 struct necp_client * __single client = (struct necp_client *)(void *)ctx;
7677 nstat_connection_descriptor *desc = (nstat_connection_descriptor *)metadatap;
7678
7679 if (ifflagsp) {
7680 necp_find_conn_netstat_data(client, ifflagsp, NULL, NULL, NULL, NULL, NULL);
7681 }
7682 if (countsp) {
7683 memset(countsp, 0, sizeof(*countsp));
7684 }
7685 if (detailsp) {
7686 memset(detailsp, 0, sizeof(*detailsp));
7687 }
7688 if (desc) {
7689 memset(desc, 0, sizeof(*desc));
7690 // Metadata, that the necp client should have, in TLV format.
7691 pid_t effective_pid = client->proc_pid;
7692 necp_find_conn_netstat_data(client, &desc->ifnet_properties, &effective_pid, &desc->puuid, &desc->uid, &desc->euuid, &desc->persona_id);
7693 desc->epid = (u_int32_t)effective_pid;
7694
7695 // User level should obtain almost all connection information from an extension
7696 // leaving little to do here
7697 uuid_copy(desc->fuuid, client->latest_flow_registration_id);
7698 uuid_copy(desc->cuuid, client->client_id);
7699 }
7700 return true;
7701 }
7702
// Check whether the given process/credential may act as a Skywalk nexus
// controller or observer. Returns 0 on success, an errno on failure.
// On non-SKYWALK builds there is nothing to protect, so always succeed.
static int
necp_skywalk_priv_check_cred(proc_t p, kauth_cred_t cred)
{
#pragma unused(p, cred)
#if SKYWALK
	/* This includes Nexus controller and Skywalk observer privs */
	return skywalk_nxctl_check_privileges(p, cred);
#else /* !SKYWALK */
	return 0;
#endif /* !SKYWALK */
}
7714
7715 /// System calls
7716
// necp_open system call: create a NECP client file descriptor for the calling
// process. Observer flags require either Skywalk observer privileges or the
// privileged-network-statistics entitlement. On success the new fd is returned
// through *retval; on failure an errno is returned and all partially-created
// state is torn down.
int
necp_open(struct proc *p, struct necp_open_args *uap, int *retval)
{
#pragma unused(retval)
	int error = 0;
	struct necp_fd_data * __single fd_data = NULL;
	struct fileproc * __single fp = NULL;
	int fd = -1;

	// Observing other processes' clients is a privileged operation.
	if (uap->flags & NECP_OPEN_FLAG_OBSERVER ||
	    uap->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
		if (necp_skywalk_priv_check_cred(p, kauth_cred_get()) != 0 &&
		    priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
			NECPLOG0(LOG_ERR, "Client does not hold necessary entitlement to observe other NECP clients");
			error = EACCES;
			goto done;
		}
	}

#if CONFIG_MACF
	// Give the MAC framework a chance to veto the open.
	error = mac_necp_check_open(p, uap->flags);
	if (error) {
		goto done;
	}
#endif /* MACF */

	error = falloc(p, &fp, &fd);
	if (error != 0) {
		goto done;
	}

	// Z_NOFAIL: allocation cannot fail, so no NULL check is needed.
	fd_data = kalloc_type(struct necp_fd_data, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	fd_data->necp_fd_type = necp_fd_type_client;
	fd_data->flags = uap->flags;
	RB_INIT(&fd_data->clients);
	RB_INIT(&fd_data->flows);
	TAILQ_INIT(&fd_data->update_list);
	lck_mtx_init(&fd_data->fd_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
	klist_init(&fd_data->si.si_note);
	// Save the pid so per-fd state can outlive the opening thread's context.
	fd_data->proc_pid = proc_pid(p);
#if SKYWALK
	LIST_INIT(&fd_data->stats_arena_list);
#endif /* SKYWALK */

	// NECP fds are never inherited across exec or fork.
	fp->fp_flags |= FP_CLOEXEC | FP_CLOFORK;
	fp->fp_glob->fg_flag = FREAD;
	fp->fp_glob->fg_ops = &necp_fd_ops;
	fp_set_data(fp, fd_data);

	proc_fdlock(p);

	procfdtbl_releasefd(p, fd, NULL);
	fp_drop(p, fd, fp, 1);

	*retval = fd;

	if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
		// Push observers go on their own list and are immediately brought
		// up to date with every existing client.
		NECP_OBSERVER_LIST_LOCK_EXCLUSIVE();
		LIST_INSERT_HEAD(&necp_fd_observer_list, fd_data, chain);
		OSIncrementAtomic(&necp_observer_fd_count);
		NECP_OBSERVER_LIST_UNLOCK();

		// Walk all existing clients and add them
		NECP_CLIENT_TREE_LOCK_SHARED();
		struct necp_client *existing_client = NULL;
		RB_FOREACH(existing_client, _necp_client_global_tree, &necp_client_global_tree) {
			NECP_CLIENT_LOCK(existing_client);
			necp_client_update_observer_add_internal(fd_data, existing_client);
			necp_client_update_observer_update_internal(fd_data, existing_client);
			NECP_CLIENT_UNLOCK(existing_client);
		}
		NECP_CLIENT_TREE_UNLOCK();
	} else {
		NECP_FD_LIST_LOCK_EXCLUSIVE();
		LIST_INSERT_HEAD(&necp_fd_list, fd_data, chain);
		OSIncrementAtomic(&necp_client_fd_count);
		NECP_FD_LIST_UNLOCK();
	}

	proc_fdunlock(p);

done:
	// Unified error cleanup: release the fileproc (if allocated) and the fd state.
	if (error != 0) {
		if (fp != NULL) {
			fp_free(p, fd, fp);
			fp = NULL;
		}
		if (fd_data != NULL) {
			kfree_type(struct necp_fd_data, fd_data);
		}
	}

	return error;
}
7812
7813 // All functions called directly from necp_client_action() to handle one of the
7814 // types should be marked with NECP_CLIENT_ACTION_FUNCTION. This ensures that
7815 // necp_client_action() does not inline all the actions into a single function.
7816 #define NECP_CLIENT_ACTION_FUNCTION __attribute__((noinline))
7817
7818 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_add(struct proc * p,struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)7819 necp_client_add(struct proc *p, struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
7820 {
7821 int error = 0;
7822 struct necp_client * __single client = NULL;
7823 const size_t buffer_size = uap->buffer_size;
7824 const task_t __single task = proc_task(p);
7825
7826 if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
7827 NECPLOG0(LOG_ERR, "NECP client observers with push enabled may not add their own clients");
7828 return EINVAL;
7829 }
7830
7831 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
7832 buffer_size == 0 || buffer_size > NECP_MAX_CLIENT_PARAMETERS_SIZE || uap->buffer == 0) {
7833 return EINVAL;
7834 }
7835
7836 client = kalloc_type(struct necp_client, Z_WAITOK | Z_ZERO | Z_NOFAIL);
7837 client->parameters = kalloc_data(buffer_size, Z_WAITOK | Z_NOFAIL);
7838 client->parameters_length = buffer_size;
7839 lck_mtx_init(&client->lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
7840 lck_mtx_init(&client->route_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
7841
7842 error = copyin(uap->buffer, client->parameters, buffer_size);
7843 if (error) {
7844 NECPLOG(LOG_ERR, "necp_client_add parameters copyin error (%d)", error);
7845 goto done;
7846 }
7847
7848 os_ref_init(&client->reference_count, &necp_client_refgrp); // Hold our reference until close
7849
7850 client->proc_pid = fd_data->proc_pid; // Save off proc pid in case the client will persist past fd
7851 client->agent_handle = (void *)fd_data;
7852 client->platform_binary = ((csproc_get_platform_binary(p) == 0) ? 0 : 1);
7853
7854 necp_generate_client_id(client->client_id, false);
7855 LIST_INIT(&client->assertion_list);
7856 RB_INIT(&client->flow_registrations);
7857
7858 NECP_CLIENT_LOG(client, "Adding client");
7859
7860 error = copyout(client->client_id, uap->client_id, sizeof(uuid_t));
7861 if (error) {
7862 NECPLOG(LOG_ERR, "necp_client_add client_id copyout error (%d)", error);
7863 goto done;
7864 }
7865
7866 #if SKYWALK
7867 struct necp_client_parsed_parameters parsed_parameters = {};
7868 int parse_error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, &parsed_parameters);
7869
7870 if (parse_error == 0 &&
7871 ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID) ||
7872 (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER))) {
7873 bool has_delegation_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0);
7874 if (!has_delegation_entitlement) {
7875 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID) {
7876 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by upid",
7877 proc_name_address(p), proc_pid(p));
7878 }
7879 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER) {
7880 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to set attributed bundle identifier",
7881 proc_name_address(p), proc_pid(p));
7882 }
7883 error = EPERM;
7884 goto done;
7885 }
7886
7887 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID) {
7888 // Save off delegated unique PID
7889 client->delegated_upid = parsed_parameters.delegated_upid;
7890 }
7891 }
7892
7893 if (parse_error == 0 && parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) {
7894 bool has_nexus_entitlement = (necp_skywalk_priv_check_cred(p, kauth_cred_get()) == 0);
7895 if (!has_nexus_entitlement) {
7896 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to open a custom nexus client",
7897 proc_name_address(p), proc_pid(p));
7898 error = EPERM;
7899 goto done;
7900 }
7901 }
7902
7903 if (parse_error == 0 && (parsed_parameters.flags &
7904 (NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER | NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP))) {
7905 bool has_custom_protocol_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_CUSTOM_PROTOCOL, 0) == 0);
7906 if (!has_custom_protocol_entitlement) {
7907 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement for custom protocol APIs",
7908 proc_name_address(p), proc_pid(p));
7909 error = EPERM;
7910 goto done;
7911 }
7912 }
7913
7914 if (parse_error == 0 && (parsed_parameters.extended_flags & NECP_CLIENT_PARAMETER_EXTENDED_FLAG_AOP2_OFFLOAD)) {
7915 bool has_aop_offload_entitlement = IOTaskHasEntitlement(task, "com.apple.private.network.aop2_offload");
7916 if (!has_aop_offload_entitlement) {
7917 NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement for aop offload",
7918 proc_name_address(p), proc_pid(p));
7919 error = EPERM;
7920 goto done;
7921 }
7922
7923 if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_MULTIPATH) ||
7924 (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) ||
7925 (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER)) {
7926 NECPLOG0(LOG_INFO, "necp_client_add, aop_offload not supported for multipath/listener");
7927 error = EINVAL;
7928 goto done;
7929 }
7930 }
7931
7932 if (parse_error == 0 && parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER &&
7933 (parsed_parameters.ip_protocol == IPPROTO_TCP || parsed_parameters.ip_protocol == IPPROTO_UDP)) {
7934 uint32_t *netns_addr = NULL;
7935 uint8_t netns_addr_len = 0;
7936 struct ns_flow_info flow_info = {};
7937 uint32_t netns_flags = NETNS_LISTENER;
7938 uuid_copy(flow_info.nfi_flow_uuid, client->client_id);
7939 flow_info.nfi_protocol = parsed_parameters.ip_protocol;
7940 flow_info.nfi_owner_pid = client->proc_pid;
7941 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID) {
7942 flow_info.nfi_effective_pid = parsed_parameters.effective_pid;
7943 } else {
7944 flow_info.nfi_effective_pid = flow_info.nfi_owner_pid;
7945 }
7946 proc_name(flow_info.nfi_owner_pid, flow_info.nfi_owner_name, MAXCOMLEN);
7947 proc_name(flow_info.nfi_effective_pid, flow_info.nfi_effective_name, MAXCOMLEN);
7948
7949 if (parsed_parameters.local_addr.sa.sa_family == AF_UNSPEC) {
7950 // Treat no local address as a wildcard IPv6
7951 // parsed_parameters is already initialized to all zeros
7952 parsed_parameters.local_addr.sin6.sin6_family = AF_INET6;
7953 parsed_parameters.local_addr.sin6.sin6_len = sizeof(struct sockaddr_in6);
7954 }
7955
7956 switch (parsed_parameters.local_addr.sa.sa_family) {
7957 case AF_INET: {
7958 memcpy(&flow_info.nfi_laddr, &parsed_parameters.local_addr.sa, parsed_parameters.local_addr.sa.sa_len);
7959 netns_addr = (uint32_t *)&parsed_parameters.local_addr.sin.sin_addr;
7960 netns_addr_len = 4;
7961 break;
7962 }
7963 case AF_INET6: {
7964 memcpy(&flow_info.nfi_laddr.sin6, &parsed_parameters.local_addr.sin6, parsed_parameters.local_addr.sa.sa_len);
7965 netns_addr = (uint32_t *)&parsed_parameters.local_addr.sin6.sin6_addr;
7966 netns_addr_len = 16;
7967 break;
7968 }
7969
7970 default: {
7971 NECPLOG(LOG_ERR, "necp_client_add listener invalid address family (%d)", parsed_parameters.local_addr.sa.sa_family);
7972 error = EINVAL;
7973 goto done;
7974 }
7975 }
7976 if ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
7977 (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_REUSE_LOCAL)) {
7978 netns_flags |= NETNS_REUSEPORT;
7979 }
7980 if (parsed_parameters.local_addr.sin.sin_port == 0) {
7981 error = netns_reserve_ephemeral(&client->port_reservation, netns_addr, netns_addr_len, parsed_parameters.ip_protocol,
7982 &parsed_parameters.local_addr.sin.sin_port, netns_flags, &flow_info);
7983 if (error) {
7984 NECPLOG(LOG_ERR, "necp_client_add netns_reserve_ephemeral error (%d)", error);
7985 goto done;
7986 }
7987
7988 // Update the parameter TLVs with the assigned port
7989 necp_client_update_local_port_parameters(client->parameters, (u_int32_t)client->parameters_length, parsed_parameters.local_addr.sin.sin_port);
7990 } else {
7991 error = netns_reserve(&client->port_reservation, netns_addr, netns_addr_len, parsed_parameters.ip_protocol,
7992 parsed_parameters.local_addr.sin.sin_port, netns_flags, &flow_info);
7993 if (error) {
7994 NECPLOG(LOG_ERR, "necp_client_add netns_reserve error (%d)", error);
7995 goto done;
7996 }
7997 }
7998 }
7999
8000 struct necp_client *parent = NULL;
8001 uuid_t parent_client_id;
8002 uuid_clear(parent_client_id);
8003 struct necp_client_nexus_parameters parent_parameters = {};
8004 uint16_t num_flow_regs = 0;
8005 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID) {
8006 // The parent "should" be found on fd_data without having to search across the whole necp_fd_list
8007 // It would be nice to do this a little further down where there's another instance of NECP_FD_LOCK
8008 // but the logic here depends on the parse paramters
8009 NECP_FD_LOCK(fd_data);
8010 parent = necp_client_fd_find_client_unlocked(fd_data, parsed_parameters.parent_uuid);
8011 if (parent != NULL) {
8012 necp_client_inherit_from_parent(client, parent);
8013 necp_client_copy_parameters_locked(client, &parent_parameters);
8014 uuid_copy(parent_client_id, parsed_parameters.parent_uuid);
8015 struct necp_client_flow_registration *flow_registration = NULL;
8016 RB_FOREACH(flow_registration, _necp_client_flow_tree, &parent->flow_registrations) {
8017 num_flow_regs++;
8018 }
8019 }
8020 NECP_FD_UNLOCK(fd_data);
8021 if (parent == NULL) {
8022 NECPLOG0(LOG_ERR, "necp_client_add, no necp_client_inherit_from_parent as can't find parent on fd_data");
8023 }
8024 }
8025 if (parse_error == 0 && parent != NULL && parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN) {
8026 do {
8027 if (parsed_parameters.demux_patterns[0].len == 0) {
8028 NECPLOG0(LOG_INFO, "necp_client_add, child does not have a demux pattern");
8029 break;
8030 }
8031
8032 if (uuid_is_null(parent_client_id)) {
8033 NECPLOG0(LOG_INFO, "necp_client_add, parent ID is null");
8034 break;
8035 }
8036
8037 if (num_flow_regs > 1) {
8038 NECPLOG0(LOG_INFO, "necp_client_add, multiple parent flows not supported");
8039 break;
8040 }
8041 if (parsed_parameters.ip_protocol != IPPROTO_UDP) {
8042 NECPLOG(LOG_INFO, "necp_client_add, flow demux pattern not supported for %d protocol",
8043 parsed_parameters.ip_protocol);
8044 break;
8045 }
8046 if (parsed_parameters.ip_protocol != parent_parameters.ip_protocol) {
8047 NECPLOG0(LOG_INFO, "necp_client_add, parent/child ip protocol mismatch");
8048 break;
8049 }
8050 if (parsed_parameters.local_addr.sa.sa_family != AF_INET && parsed_parameters.local_addr.sa.sa_family != AF_INET6) {
8051 NECPLOG(LOG_INFO, "necp_client_add, flow demux pattern not supported for %d family",
8052 parsed_parameters.local_addr.sa.sa_family);
8053 break;
8054 }
8055 if (parsed_parameters.local_addr.sa.sa_family != parsed_parameters.remote_addr.sa.sa_family) {
8056 NECPLOG0(LOG_INFO, "necp_client_add, local/remote address family mismatch");
8057 break;
8058 }
8059 if (parsed_parameters.local_addr.sa.sa_family != parent_parameters.local_addr.sa.sa_family) {
8060 NECPLOG0(LOG_INFO, "necp_client_add, parent/child address family mismatch");
8061 break;
8062 }
8063 if (SOCKADDR_CMP(&parsed_parameters.local_addr.sa, &parent_parameters.local_addr.sa, parsed_parameters.local_addr.sa.sa_len)) {
8064 NECPLOG0(LOG_INFO, "necp_client_add, parent/child local address mismatch");
8065 break;
8066 }
8067 if (SOCKADDR_CMP(&parsed_parameters.remote_addr.sa, &parent_parameters.remote_addr.sa, parsed_parameters.remote_addr.sa.sa_len)) {
8068 NECPLOG0(LOG_INFO, "necp_client_add, parent/child remote address mismatch");
8069 break;
8070 }
8071 if (parsed_parameters.local_addr.sin.sin_port != parent_parameters.local_addr.sin.sin_port) {
8072 NECPLOG0(LOG_INFO, "necp_client_add, parent/child local port mismatch");
8073 break;
8074 }
8075 if (parsed_parameters.remote_addr.sin.sin_port != parent_parameters.remote_addr.sin.sin_port) {
8076 NECPLOG0(LOG_INFO, "necp_client_add, parent/child remote port mismatch");
8077 break;
8078 }
8079 client->validated_parent = 1;
8080 uuid_copy(client->parent_client_id, parent_client_id);
8081 } while (false);
8082 }
8083
8084 #endif /* !SKYWALK */
8085
8086 necp_client_update_observer_add(client);
8087
8088 NECP_FD_LOCK(fd_data);
8089 RB_INSERT(_necp_client_tree, &fd_data->clients, client);
8090 OSIncrementAtomic(&necp_client_count);
8091 NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
8092 RB_INSERT(_necp_client_global_tree, &necp_client_global_tree, client);
8093 NECP_CLIENT_TREE_UNLOCK();
8094
8095 // Prime the client result
8096 NECP_CLIENT_LOCK(client);
8097 (void)necp_update_client_result(current_proc(), fd_data, client, NULL);
8098 necp_client_retain_locked(client);
8099 NECP_CLIENT_UNLOCK(client);
8100 NECP_FD_UNLOCK(fd_data);
8101 // Now everything is set, it's safe to plumb this in to NetworkStatistics
8102 uint32_t ntstat_properties = 0;
8103 necp_find_conn_netstat_data(client, &ntstat_properties, NULL, NULL, NULL, NULL, NULL);
8104
8105 client->nstat_context = nstat_provider_stats_open((nstat_provider_context)client,
8106 NSTAT_PROVIDER_CONN_USERLAND, (u_int64_t)ntstat_properties, necp_request_conn_netstats, necp_find_conn_extension_info);
8107 necp_client_release(client);
8108 done:
8109 if (error != 0 && client != NULL) {
8110 necp_client_free(client);
8111 client = NULL;
8112 }
8113 *retval = error;
8114
8115 return error;
8116 }
8117
// Claim ownership of a client that another process created and delegated to
// the calling process. The client is located by UUID across all NECP fds,
// moved from its original fd to fd_data, re-attributed to the claiming pid,
// and its result is recomputed. Only clients whose delegated_upid matches
// the caller's unique PID and that have no flow registrations yet may be
// claimed. Returns 0 on success, EINVAL for bad arguments or a flow UUID,
// ENOENT when no eligible client is found; *retval mirrors the return value.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_claim(struct proc *p, struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	uuid_t client_id = {};
	struct necp_client *client = NULL;

	// Validate the user pointer and length before touching user memory
	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_claim copyin client_id error (%d)", error);
		goto done;
	}

	// Only whole clients can be claimed, not individual flow registrations
	if (necp_client_id_is_flow(client_id)) {
		NECPLOG0(LOG_ERR, "necp_client_claim cannot claim from flow UUID");
		error = EINVAL;
		goto done;
	}

	u_int64_t upid = proc_uniqueid(p);

	NECP_FD_LIST_LOCK_SHARED();

	// Walk every NECP fd looking for a client with this UUID that was
	// delegated to us and has no flows registered yet
	struct necp_fd_data *find_fd = NULL;
	LIST_FOREACH(find_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(find_fd);
		struct necp_client *find_client = necp_client_fd_find_client_and_lock(find_fd, client_id);
		if (find_client != NULL) {
			if (find_client->delegated_upid == upid &&
			    RB_EMPTY(&find_client->flow_registrations)) {
				// Matched the client to claim; remove from the old fd
				client = find_client;
				RB_REMOVE(_necp_client_tree, &find_fd->clients, client);
				// Hold a reference across the transfer to the new fd
				necp_client_retain_locked(client);
			}
			NECP_CLIENT_UNLOCK(find_client);
		}
		NECP_FD_UNLOCK(find_fd);

		if (client != NULL) {
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();

	if (client == NULL) {
		error = ENOENT;
		goto done;
	}

	client->proc_pid = fd_data->proc_pid; // Transfer client to claiming pid
	client->agent_handle = (void *)fd_data;
	client->platform_binary = ((csproc_get_platform_binary(p) == 0) ? 0 : 1);

	NECP_CLIENT_LOG(client, "Claiming client");

	// Add matched client to our fd and re-run result
	NECP_FD_LOCK(fd_data);
	RB_INSERT(_necp_client_tree, &fd_data->clients, client);
	NECP_CLIENT_LOCK(client);
	(void)necp_update_client_result(current_proc(), fd_data, client, NULL);
	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);

	// Drop the reference taken during the search above
	necp_client_release(client);

done:
	*retval = error;

	return error;
}
8195
// Remove a client (looked up by a UUID copied in from userspace) from this
// fd: tears down any flow registrations belonging to it, closes its ntstat
// context, unlinks it from the per-fd and global client trees, and finally
// destroys it outside the fd lock. An optional user buffer containing a
// struct ifnet_stats_per_flow may be supplied for final flow stats; a bad
// or mis-sized buffer is logged but not fatal. Returns 0 on success, EINVAL
// for bad arguments, ENOENT for an unknown client_id; *retval mirrors it.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_remove(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	uuid_t client_id = {};
	struct ifnet_stats_per_flow flow_ifnet_stats = {};
	const size_t buffer_size = uap->buffer_size;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_remove copyin client_id error (%d)", error);
		goto done;
	}

	// Optional stats buffer must be exactly the expected size to be used
	if (uap->buffer != 0 && buffer_size == sizeof(flow_ifnet_stats)) {
		error = copyin(uap->buffer, &flow_ifnet_stats, buffer_size);
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_remove flow_ifnet_stats copyin error (%d)", error);
			// Not fatal; make sure to zero-out stats in case of partial copy
			memset(&flow_ifnet_stats, 0, sizeof(flow_ifnet_stats));
			error = 0;
		}
	} else if (uap->buffer != 0) {
		NECPLOG(LOG_ERR, "necp_client_remove unexpected parameters length (%zu)", buffer_size);
	}

	NECP_FD_LOCK(fd_data);

	pid_t pid = fd_data->proc_pid;
	struct necp_client *client = necp_client_fd_find_client_unlocked(fd_data, client_id);

	NECP_CLIENT_LOG(client, "Removing client");

	if (client != NULL) {
		// Remove any flow registrations that match
		struct necp_client_flow_registration *flow_registration = NULL;
		struct necp_client_flow_registration *temp_flow_registration = NULL;
		RB_FOREACH_SAFE(flow_registration, _necp_fd_flow_tree, &fd_data->flows, temp_flow_registration) {
			if (flow_registration->client == client) {
#if SKYWALK
				necp_destroy_flow_stats(fd_data, flow_registration, NULL, TRUE);
#endif /* SKYWALK */
				NECP_FLOW_TREE_LOCK_EXCLUSIVE();
				RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
				NECP_FLOW_TREE_UNLOCK();
				RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
			}
		}
#if SKYWALK
		if (client->nstat_context != NULL) {
			// Main path, we expect stats to be in existence at this point
			nstat_provider_stats_close(client->nstat_context);
			client->nstat_context = NULL;
		} else {
			NECPLOG0(LOG_ERR, "necp_client_remove ntstat shutdown finds nstat_context NULL");
		}
#endif /* SKYWALK */
		// Remove client from lists
		NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
		RB_REMOVE(_necp_client_global_tree, &necp_client_global_tree, client);
		NECP_CLIENT_TREE_UNLOCK();
		RB_REMOVE(_necp_client_tree, &fd_data->clients, client);
	}

#if SKYWALK
	// If the currently-active arena is idle (has no more flows referring to it), or if there are defunct
	// arenas lingering in the list, schedule a threadcall to do the clean up. The idle check is done
	// by checking if the reference count is 3: one held by this client (will be released below when we
	// destroy it) when it's non-NULL; the rest held by stats_arena_{active,list}.
	if ((fd_data->stats_arena_active != NULL && fd_data->stats_arena_active->nai_use_count == 3) ||
	    (fd_data->stats_arena_active == NULL && !LIST_EMPTY(&fd_data->stats_arena_list))) {
		uint64_t deadline = 0;
		uint64_t leeway = 0;
		clock_interval_to_deadline(necp_close_arenas_timeout_microseconds, NSEC_PER_USEC, &deadline);
		clock_interval_to_absolutetime_interval(necp_close_arenas_timeout_leeway_microseconds, NSEC_PER_USEC, &leeway);

		thread_call_enter_delayed_with_leeway(necp_close_empty_arenas_tcall, NULL,
		    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
	}
#endif /* SKYWALK */

	NECP_FD_UNLOCK(fd_data);

	if (client != NULL) {
		ASSERT(error == 0);
		// Final teardown is done after dropping the fd lock
		necp_destroy_client(client, pid, true);
	} else {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_remove invalid client_id (%d)", error);
	}
done:
	*retval = error;

	return error;
}
8296
8297 static struct necp_client_flow_registration *
necp_client_fd_find_flow(struct necp_fd_data * client_fd,uuid_t flow_id)8298 necp_client_fd_find_flow(struct necp_fd_data *client_fd, uuid_t flow_id)
8299 {
8300 NECP_FD_ASSERT_LOCKED(client_fd);
8301 struct necp_client_flow_registration *flow = NULL;
8302
8303 if (necp_client_id_is_flow(flow_id)) {
8304 struct necp_client_flow_registration find;
8305 uuid_copy(find.registration_id, flow_id);
8306 flow = RB_FIND(_necp_fd_flow_tree, &client_fd->flows, &find);
8307 }
8308
8309 return flow;
8310 }
8311
8312 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_remove_flow(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)8313 necp_client_remove_flow(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
8314 {
8315 int error = 0;
8316 uuid_t flow_id = {};
8317 struct ifnet_stats_per_flow flow_ifnet_stats = {};
8318 const size_t buffer_size = uap->buffer_size;
8319
8320 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
8321 error = EINVAL;
8322 NECPLOG(LOG_ERR, "necp_client_remove_flow invalid client_id (length %zu)", (size_t)uap->client_id_len);
8323 goto done;
8324 }
8325
8326 error = copyin(uap->client_id, flow_id, sizeof(uuid_t));
8327 if (error) {
8328 NECPLOG(LOG_ERR, "necp_client_remove_flow copyin client_id error (%d)", error);
8329 goto done;
8330 }
8331
8332 if (uap->buffer != 0 && buffer_size != 0) {
8333 error = copyin(uap->buffer, &flow_ifnet_stats, MIN(buffer_size, sizeof(flow_ifnet_stats)));
8334 if (error) {
8335 NECPLOG(LOG_ERR, "necp_client_remove flow_ifnet_stats copyin error (%d)", error);
8336 // Not fatal
8337 }
8338 } else if (uap->buffer != 0) {
8339 NECPLOG(LOG_ERR, "necp_client_remove unexpected parameters length (%zu)", buffer_size);
8340 }
8341
8342 NECP_FD_LOCK(fd_data);
8343 struct necp_client *client = NULL;
8344 struct necp_client_flow_registration *flow_registration = necp_client_fd_find_flow(fd_data, flow_id);
8345 if (flow_registration != NULL) {
8346 #if SKYWALK
8347 // Cleanup stats per flow
8348 necp_destroy_flow_stats(fd_data, flow_registration, &flow_ifnet_stats, TRUE);
8349 #endif /* SKYWALK */
8350 NECP_FLOW_TREE_LOCK_EXCLUSIVE();
8351 RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
8352 NECP_FLOW_TREE_UNLOCK();
8353 RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
8354
8355 client = flow_registration->client;
8356 if (client != NULL) {
8357 necp_client_retain(client);
8358 }
8359 }
8360 NECP_FD_UNLOCK(fd_data);
8361
8362 NECP_CLIENT_FLOW_LOG(client, flow_registration, "removing flow");
8363
8364 if (flow_registration != NULL && client != NULL) {
8365 NECP_CLIENT_LOCK(client);
8366 if (flow_registration->client == client) {
8367 bool abort = (flow_registration->aop_offload) ? true : false;
8368 necp_destroy_client_flow_registration(client, flow_registration, fd_data->proc_pid, abort);
8369 }
8370 necp_client_release_locked(client);
8371 NECP_CLIENT_UNLOCK(client);
8372 }
8373
8374 done:
8375 *retval = error;
8376 if (error != 0) {
8377 NECPLOG(LOG_ERR, "Remove flow error (%d)", error);
8378 }
8379
8380 return error;
8381 }
8382
// Don't inline the function since it includes necp_client_parsed_parameters on the stack
//
// Consult the TCP heuristics cache for this flow's route and addresses and
// report the results through *flags: sets NECP_CLIENT_RESULT_FLAG_ECN_ENABLED
// when ECN should be attempted, and NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED
// (with *tfo_cookie_len zeroed) when TFO must not be used. When TFO is
// allowed and a cached cookie exists, it is written into tfo_cookie (at most
// tfo_cookie_maxlen bytes) and its size into *tfo_cookie_len.
// Returns 0 on success, an errno from parameter parsing, EINVAL for
// non-IP addresses, or ENOENT when the client has no current route.
static __attribute__((noinline)) int
necp_client_check_tcp_heuristics(struct necp_client *client, struct necp_client_flow *flow,
    u_int32_t *flags, u_int8_t *__counted_by(tfo_cookie_maxlen) tfo_cookie, u_int8_t tfo_cookie_maxlen,
    u_int8_t *tfo_cookie_len)
{
	struct necp_client_parsed_parameters parsed_parameters;
	int error = 0;

	error = necp_client_parse_parameters(client, client->parameters,
	    (u_int32_t)client->parameters_length,
	    &parsed_parameters);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_parse_parameters error (%d)", error);
		return error;
	}

	// Heuristics only apply to IPv4/IPv6 flows on both ends
	if ((flow->remote_addr.sa.sa_family != AF_INET &&
	    flow->remote_addr.sa.sa_family != AF_INET6) ||
	    (flow->local_addr.sa.sa_family != AF_INET &&
	    flow->local_addr.sa.sa_family != AF_INET6)) {
		return EINVAL;
	}

	NECP_CLIENT_ROUTE_LOCK(client);

	if (client->current_route == NULL) {
		error = ENOENT;
		goto do_unlock;
	}

	// Decide whether to consult the ECN heuristics: an explicit per-client
	// enable/disable flag wins; otherwise fall back to the global tcp_ecn
	// sysctl (1 == attempt ECN by default).
	bool check_ecn = false;
	do {
		if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE) ==
		    NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE) {
			check_ecn = true;
			break;
		}

		if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE) ==
		    NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE) {
			break;
		}

		if (tcp_ecn == 1) {
			check_ecn = true;
		}
	} while (false);

	if (check_ecn) {
		if (tcp_heuristic_do_ecn_with_address(client->current_route->rt_ifp,
		    (union sockaddr_in_4_6 *)&flow->local_addr)) {
			*flags |= NECP_CLIENT_RESULT_FLAG_ECN_ENABLED;
		}
	}

	// TFO is only attempted when the client explicitly asked for it AND
	// the heuristics for this route/address pair do not veto it.
	if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE) ==
	    NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE) {
		if (!tcp_heuristic_do_tfo_with_address(client->current_route->rt_ifp,
		    (union sockaddr_in_4_6 *)&flow->local_addr,
		    (union sockaddr_in_4_6 *)&flow->remote_addr,
		    tfo_cookie, tfo_cookie_maxlen, tfo_cookie_len)) {
			*flags |= NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED;
			*tfo_cookie_len = 0;
		}
	} else {
		*flags |= NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED;
		*tfo_cookie_len = 0;
	}
do_unlock:
	NECP_CLIENT_ROUTE_UNLOCK(client);

	return error;
}
8457
8458 static size_t
necp_client_calculate_flow_tlv_size(struct necp_client_flow_registration * flow_registration)8459 necp_client_calculate_flow_tlv_size(struct necp_client_flow_registration *flow_registration)
8460 {
8461 size_t assigned_results_size = 0;
8462 struct necp_client_flow *flow = NULL;
8463 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
8464 if (flow->assigned || flow_registration->defunct || !necp_client_endpoint_is_unspecified((struct necp_client_endpoint *)&flow->remote_addr)) {
8465 size_t header_length = 0;
8466 if (flow->nexus) {
8467 header_length = sizeof(struct necp_client_nexus_flow_header);
8468 } else {
8469 header_length = sizeof(struct necp_client_flow_header);
8470 }
8471 assigned_results_size += (header_length + flow->assigned_results_length);
8472
8473 if (flow->has_protoctl_event) {
8474 assigned_results_size += sizeof(struct necp_client_flow_protoctl_event_header);
8475 }
8476 }
8477 }
8478 return assigned_results_size;
8479 }
8480
// Resolve the link-layer (Ethernet) address of the next hop toward 'remote'
// on the interface scope 'index'. A direct host route is used as-is;
// otherwise the gateway route is looked up via route_to_gwroute (which
// returns the gateway route locked). On success copies the MAC into
// *remote_mac and returns 0; returns ENOENT when no route exists or the
// target route carries no usable AF_LINK address, or whatever error
// route_to_gwroute reports.
static errno_t
necp_client_destination_mac_address(struct sockaddr *remote, uint32_t index,
    struct ether_addr *remote_mac)
{
	struct rtentry *rt = NULL;
	struct rtentry *tgt_rt = NULL;
	struct rtentry *__single gwrt = NULL;
	errno_t err = 0;

	ASSERT(remote_mac != NULL);
	ASSERT(remote != NULL);

	rt = rtalloc1_scoped(remote, 0, 0, index);
	if (rt == NULL) {
		return ENOENT;
	}

	if (IS_DIRECT_HOSTROUTE(rt)) {
		tgt_rt = rt;
	} else {
		// Indirect route: chase the gateway; on success gwrt is
		// returned with its RT lock held
		err = route_to_gwroute(remote, rt, &gwrt);
		if (err != 0) {
			goto done;
		}

		ASSERT(gwrt != NULL);
		RT_LOCK_ASSERT_HELD(gwrt);
		tgt_rt = gwrt;
	}

	// Only a host route with link-layer info and a full-length Ethernet
	// address in its AF_LINK gateway can supply the MAC
	if ((tgt_rt->rt_flags & RTF_HOST) &&
	    (tgt_rt->rt_flags & RTF_LLINFO) &&
	    (tgt_rt->rt_gateway->sa_family == AF_LINK) &&
	    (SDL(tgt_rt->rt_gateway)->sdl_alen == ETHER_ADDR_LEN)) {
		struct sockaddr_dl *__bidi_indexable sdl =
		    (struct sockaddr_dl *__bidi_indexable)SDL(tgt_rt->rt_gateway);
		bcopy(LLADDR(sdl), remote_mac->octet, ETHER_ADDR_LEN);
	} else {
		err = ENOENT;
	}
done:
	// Unlock and release the gateway route if one was taken
	if (gwrt != NULL) {
		RT_UNLOCK(gwrt);
		rtfree(gwrt);
		gwrt = NULL;
	}

	// Release the original route reference from rtalloc1_scoped
	if (rt != NULL) {
		rtfree(rt);
		rt = NULL;
	}

	return err;
}
8535
8536 static uint8_t *
8537 __sized_by(*buflen)
necp_client_flow_mac_and_gateway(struct necp_client_flow * flow,size_t * buflen)8538 necp_client_flow_mac_and_gateway(struct necp_client_flow *flow, size_t *buflen)
8539 {
8540 u_int8_t * __indexable buffer = NULL;
8541 u_int8_t * __indexable cursor = NULL;
8542 size_t valsize = 0;
8543
8544 ASSERT(flow != NULL);
8545 ASSERT(buflen != NULL);
8546
8547 *buflen = 0;
8548
8549 ifnet_t ifp = NULL;
8550 ifnet_head_lock_shared();
8551 if (flow->interface_index != IFSCOPE_NONE && flow->interface_index <= if_index) {
8552 ifp = ifindex2ifnet[flow->interface_index];
8553 }
8554 ifnet_head_done();
8555
8556 if (ifp == NULL) {
8557 NECPLOG0(LOG_ERR, "necp_client_flow_mac_and_gateway: ifp is NULL");
8558 return NULL;
8559 }
8560
8561 if (!IFNET_IS_ETHERNET(ifp)) {
8562 return NULL;
8563 }
8564
8565 /* local MAC */
8566 struct ether_addr local_ether = {};
8567 bool local_ether_set = false;
8568 if (ifnet_lladdr_copy_bytes(ifp, local_ether.octet, ETHER_ADDR_LEN) == 0) {
8569 local_ether_set = true;
8570 valsize += sizeof(struct necp_tlv_header) + sizeof(struct ether_addr);
8571 }
8572
8573 /*remote MAC */
8574 struct ether_addr remote_ether = {};
8575 bool remote_ether_set = false;
8576 if (necp_client_destination_mac_address(SA(&flow->remote_addr),
8577 flow->interface_index, &remote_ether) == 0) {
8578 remote_ether_set = true;
8579 valsize += sizeof(struct necp_tlv_header) + sizeof(struct ether_addr);
8580 }
8581
8582 if (valsize == 0) {
8583 return NULL;
8584 }
8585
8586 buffer = kalloc_data(valsize, Z_WAITOK | Z_ZERO);
8587 if (buffer == NULL) {
8588 return NULL;
8589 }
8590
8591 cursor = buffer;
8592 if (local_ether_set) {
8593 cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_LOCAL_ETHER_ADDR,
8594 sizeof(struct ether_addr), (uint8_t *)(struct ether_addr * __bidi_indexable)&local_ether,
8595 buffer, valsize);
8596 }
8597 if (remote_ether_set) {
8598 cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_REMOTE_ETHER_ADDR,
8599 sizeof(struct ether_addr), (uint8_t *)(struct ether_addr * __bidi_indexable)&remote_ether,
8600 buffer, valsize);
8601 }
8602 *buflen = valsize;
8603 return buffer;
8604 }
8605
8606 static int
necp_client_fillout_flow_tlvs(struct necp_client * client,bool client_is_observed,struct necp_client_flow_registration * flow_registration,struct necp_client_action_args * uap,size_t * assigned_results_cursor)8607 necp_client_fillout_flow_tlvs(struct necp_client *client,
8608 bool client_is_observed,
8609 struct necp_client_flow_registration *flow_registration,
8610 struct necp_client_action_args *uap,
8611 size_t *assigned_results_cursor)
8612 {
8613 int error = 0;
8614 struct necp_client_flow *flow = NULL;
8615 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
8616 if (flow->assigned || flow_registration->defunct || !necp_client_endpoint_is_unspecified((struct necp_client_endpoint *)&flow->remote_addr)) {
8617 // Write TLV headers
8618 struct necp_client_nexus_flow_header header = {};
8619 u_int32_t length = 0;
8620 u_int32_t flags = 0;
8621 u_int8_t tfo_cookie_len = 0;
8622 u_int8_t type = 0;
8623 size_t buflen = 0;
8624 uint8_t *buffer = NULL;
8625
8626 type = NECP_CLIENT_RESULT_FLOW_ID;
8627 length = sizeof(header.flow_header.flow_id);
8628 header.flow_header.flow_id_tlv_header.type = type;
8629 header.flow_header.flow_id_tlv_header.length = length;
8630 uuid_copy(header.flow_header.flow_id, flow_registration->registration_id);
8631
8632 if (flow->nexus) {
8633 if (flow->check_tcp_heuristics) {
8634 u_int8_t tfo_cookie[NECP_TFO_COOKIE_LEN_MAX];
8635 tfo_cookie_len = NECP_TFO_COOKIE_LEN_MAX;
8636
8637 if (necp_client_check_tcp_heuristics(client, flow, &flags,
8638 tfo_cookie, tfo_cookie_len, &tfo_cookie_len) != 0) {
8639 tfo_cookie_len = 0;
8640 } else {
8641 flow->check_tcp_heuristics = FALSE;
8642
8643 if (tfo_cookie_len != 0) {
8644 type = NECP_CLIENT_RESULT_TFO_COOKIE;
8645 length = tfo_cookie_len;
8646 header.tfo_cookie_tlv_header.type = type;
8647 header.tfo_cookie_tlv_header.length = length;
8648 memcpy(&header.tfo_cookie_value, tfo_cookie, tfo_cookie_len);
8649 }
8650 }
8651 }
8652 }
8653
8654 size_t header_length = 0;
8655 if (flow->nexus) {
8656 if (tfo_cookie_len != 0) {
8657 header_length = sizeof(struct necp_client_nexus_flow_header) - (NECP_TFO_COOKIE_LEN_MAX - tfo_cookie_len);
8658 } else {
8659 header_length = sizeof(struct necp_client_nexus_flow_header) - sizeof(struct necp_tlv_header) - NECP_TFO_COOKIE_LEN_MAX;
8660 }
8661 } else {
8662 header_length = sizeof(struct necp_client_flow_header);
8663 }
8664
8665 type = NECP_CLIENT_RESULT_FLAGS;
8666 length = sizeof(header.flow_header.flags_value);
8667 header.flow_header.flags_tlv_header.type = type;
8668 header.flow_header.flags_tlv_header.length = length;
8669 if (flow->assigned) {
8670 flags |= NECP_CLIENT_RESULT_FLAG_FLOW_ASSIGNED;
8671 }
8672 if (flow->viable) {
8673 flags |= NECP_CLIENT_RESULT_FLAG_FLOW_VIABLE;
8674 }
8675 if (flow_registration->defunct) {
8676 flags |= NECP_CLIENT_RESULT_FLAG_DEFUNCT;
8677 }
8678 flags |= flow->necp_flow_flags;
8679 header.flow_header.flags_value = flags;
8680
8681 type = NECP_CLIENT_RESULT_INTERFACE;
8682 length = sizeof(header.flow_header.interface_value);
8683 header.flow_header.interface_tlv_header.type = type;
8684 header.flow_header.interface_tlv_header.length = length;
8685
8686 struct necp_client_result_interface interface_struct;
8687 interface_struct.generation = 0;
8688 interface_struct.index = flow->interface_index;
8689
8690 header.flow_header.interface_value = interface_struct;
8691 if (flow->nexus) {
8692 type = NECP_CLIENT_RESULT_NETAGENT;
8693 length = sizeof(header.agent_value);
8694 header.agent_tlv_header.type = type;
8695 header.agent_tlv_header.length = length;
8696
8697 struct necp_client_result_netagent agent_struct;
8698 uuid_copy(agent_struct.netagent_uuid, flow->u.nexus_agent);
8699 agent_struct.generation = netagent_get_generation(agent_struct.netagent_uuid);
8700
8701 header.agent_value = agent_struct;
8702 }
8703
8704 // Don't include outer TLV header in length field
8705 type = NECP_CLIENT_RESULT_FLOW;
8706 length = (header_length - sizeof(struct necp_tlv_header) + flow->assigned_results_length);
8707 if (flow->has_protoctl_event) {
8708 length += sizeof(struct necp_client_flow_protoctl_event_header);
8709 }
8710 if (flow->nexus && flow->aop_offload) {
8711 buffer = necp_client_flow_mac_and_gateway(flow, &buflen);
8712 length += buflen;
8713
8714 if (flow->aop_stat_index_valid) {
8715 length += sizeof(struct necp_client_flow_stats_index_header);
8716 }
8717 }
8718 header.flow_header.outer_header.type = type;
8719 header.flow_header.outer_header.length = length;
8720
8721 error = copyout(&header, uap->buffer + client->result_length + *assigned_results_cursor, header_length);
8722 if (error) {
8723 NECPLOG(LOG_ERR, "necp_client_copy assigned results tlv_header copyout error (%d)", error);
8724 return error;
8725 }
8726 *assigned_results_cursor += header_length;
8727
8728 if (flow->assigned_results && flow->assigned_results_length) {
8729 // Write inner TLVs
8730 error = copyout(flow->assigned_results, uap->buffer + client->result_length + *assigned_results_cursor,
8731 flow->assigned_results_length);
8732 if (error) {
8733 NECPLOG(LOG_ERR, "necp_client_copy assigned results copyout error (%d)", error);
8734 return error;
8735 }
8736 }
8737 *assigned_results_cursor += flow->assigned_results_length;
8738
8739 /* Read the protocol event and reset it */
8740 if (flow->has_protoctl_event) {
8741 struct necp_client_flow_protoctl_event_header protoctl_event_header = {};
8742
8743 type = NECP_CLIENT_RESULT_PROTO_CTL_EVENT;
8744 length = sizeof(protoctl_event_header.protoctl_event);
8745
8746 protoctl_event_header.protoctl_tlv_header.type = type;
8747 protoctl_event_header.protoctl_tlv_header.length = length;
8748 protoctl_event_header.protoctl_event = flow->protoctl_event;
8749
8750 error = copyout(&protoctl_event_header, uap->buffer + client->result_length + *assigned_results_cursor,
8751 sizeof(protoctl_event_header));
8752
8753 if (error) {
8754 NECPLOG(LOG_ERR, "necp_client_copy protocol control event results"
8755 " tlv_header copyout error (%d)", error);
8756 return error;
8757 }
8758 *assigned_results_cursor += sizeof(protoctl_event_header);
8759 flow->has_protoctl_event = FALSE;
8760 flow->protoctl_event.protoctl_event_code = 0;
8761 flow->protoctl_event.protoctl_event_val = 0;
8762 flow->protoctl_event.protoctl_event_tcp_seq_num = 0;
8763 }
8764
8765 if (flow->nexus && flow->aop_offload) {
8766 if (buffer != NULL) {
8767 ASSERT(buflen > 0);
8768 error = copyout(buffer, uap->buffer + client->result_length + *assigned_results_cursor,
8769 buflen);
8770 *assigned_results_cursor += buflen;
8771 kfree_data_counted_by(buffer, buflen);
8772 if (error) {
8773 NECPLOG(LOG_ERR, "necp_client_copy mac address results"
8774 " tlv_header copyout error (%d)", error);
8775 return error;
8776 }
8777 }
8778
8779 if (flow->aop_stat_index_valid) {
8780 struct necp_client_flow_stats_index_header flow_stats_header = {};
8781
8782 type = NECP_CLIENT_RESULT_FLOW_STATS_INDEX;
8783 length = sizeof(flow_stats_header.stats_index);
8784
8785 flow_stats_header.stats_index_tlv_header.type = type;
8786 flow_stats_header.stats_index_tlv_header.length = length;
8787 flow_stats_header.stats_index = flow->stats_index;
8788
8789 error = copyout(&flow_stats_header, uap->buffer +
8790 client->result_length + *assigned_results_cursor, sizeof(flow_stats_header));
8791 if (error) {
8792 NECPLOG(LOG_ERR, "necp_client_copy flow stats index "
8793 "tlv header copyout error (%d)", error);
8794 return error;
8795 }
8796 *assigned_results_cursor += sizeof(flow_stats_header);
8797 }
8798 }
8799 }
8800 }
8801 if (!client_is_observed) {
8802 flow_registration->flow_result_read = TRUE;
8803 }
8804 return 0;
8805 }
8806
/*
 * Copy a client's stored parameters or results (including per-flow TLVs and
 * group members) out to the userspace buffer described by uap.
 *
 * The client lock must be held by the caller (asserted below). When
 * client_is_observed is true the caller is an observer fd, so the
 * result_read/group_members_read markers are left untouched and the owning
 * fd will still see the data as unread.
 *
 * Returns 0 on success with *retval set to the number of bytes copied, or
 * an errno value (EINVAL if the buffer is too small, or a copyout error).
 */
static int
necp_client_copy_internal(struct necp_client *client, uuid_t client_id, bool client_is_observed, struct necp_client_action_args *uap, int *retval)
{
	NECP_CLIENT_ASSERT_LOCKED(client);
	int error = 0;
	// Copy results out
	if (uap->action == NECP_CLIENT_ACTION_COPY_PARAMETERS) {
		if (uap->buffer_size < client->parameters_length) {
			return EINVAL;
		}
		error = copyout(client->parameters, uap->buffer, client->parameters_length);
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_copy parameters copyout error (%d)", error);
			return error;
		}
		*retval = client->parameters_length;
	} else if ((uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT || uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) &&
	    client->result_read && client->group_members_read && !necp_client_has_unread_flows(client)) {
		// Copy updates only, but nothing to read
		// Just return 0 for bytes read
		*retval = 0;
	} else if (uap->action == NECP_CLIENT_ACTION_COPY_RESULT ||
	    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT ||
	    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) {
		// Start sizing with the group-member TLVs, then add the flow TLVs
		size_t assigned_results_size = client->assigned_group_members_length;

		bool some_flow_is_defunct = false;
		struct necp_client_flow_registration *single_flow_registration = NULL;
		if (necp_client_id_is_flow(client_id)) {
			// The caller passed a flow registration ID: only size that one flow
			single_flow_registration = necp_client_find_flow(client, client_id);
			if (single_flow_registration != NULL) {
				assigned_results_size += necp_client_calculate_flow_tlv_size(single_flow_registration);
			}
		} else {
			// This request is for the client, so copy everything
			struct necp_client_flow_registration *flow_registration = NULL;
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
				if (flow_registration->defunct) {
					some_flow_is_defunct = true;
				}
				assigned_results_size += necp_client_calculate_flow_tlv_size(flow_registration);
			}
		}
		if (uap->buffer_size < (client->result_length + assigned_results_size)) {
			if (uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) {
				// Mark the client and all flows as read to prevent looping
				client->result_read = true;
				struct necp_client_flow_registration *flow_registration = NULL;
				RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
					flow_registration->flow_result_read = true;
				}
			}
			return EINVAL;
		}

		// Legacy clients expect a defunct flow to be reported via the client's
		// own result flags. Temporarily OR the defunct bit into the stored
		// result TLV before copying out, then revert it afterwards.
		u_int32_t original_flags = 0;
		bool flags_updated = false;
		if (some_flow_is_defunct && client->legacy_client_is_flow) {
			// If our client expects the defunct flag in the client, add it now
			u_int32_t client_flags = 0;
			u_int32_t value_size = 0;
			u_int8_t *flags_pointer = necp_buffer_get_tlv_value(client->result, client->result_length, 0, &value_size);
			if (flags_pointer != NULL && value_size == sizeof(client_flags)) {
				memcpy(&client_flags, flags_pointer, value_size);
				original_flags = client_flags;
				client_flags |= NECP_CLIENT_RESULT_FLAG_DEFUNCT;
				(void)necp_buffer_write_tlv_if_different(client->result, NECP_CLIENT_RESULT_FLAGS,
				    sizeof(client_flags), &client_flags, &flags_updated,
				    client->result, sizeof(client->result));
			}
		}

		error = copyout(client->result, uap->buffer, client->result_length);

		if (flags_updated) {
			// Revert stored flags
			(void)necp_buffer_write_tlv_if_different(client->result, NECP_CLIENT_RESULT_FLAGS,
			    sizeof(original_flags), &original_flags, &flags_updated,
			    client->result, sizeof(client->result));
		}

		if (error != 0) {
			NECPLOG(LOG_ERR, "necp_client_copy result copyout error (%d)", error);
			return error;
		}

		// Group member TLVs are written immediately after the client result
		if (client->assigned_group_members != NULL && client->assigned_group_members_length > 0) {
			error = copyout(client->assigned_group_members, uap->buffer + client->result_length, client->assigned_group_members_length);
			if (error != 0) {
				NECPLOG(LOG_ERR, "necp_client_copy group members copyout error (%d)", error);
				return error;
			}
		}

		size_t assigned_results_cursor = client->assigned_group_members_length; // Start with an offset based on the group members
		if (necp_client_id_is_flow(client_id)) {
			if (single_flow_registration != NULL) {
				error = necp_client_fillout_flow_tlvs(client, client_is_observed, single_flow_registration, uap, &assigned_results_cursor);
				if (error != 0) {
					return error;
				}
			}
		} else {
			// This request is for the client, so copy everything
			struct necp_client_flow_registration *flow_registration = NULL;
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
				error = necp_client_fillout_flow_tlvs(client, client_is_observed, flow_registration, uap, &assigned_results_cursor);
				if (error != 0) {
					return error;
				}
			}
		}

		// Total bytes copied: result TLVs plus everything appended after them
		*retval = client->result_length + assigned_results_cursor;

		if (!client_is_observed) {
			client->result_read = TRUE;
			client->group_members_read = TRUE;
		}
	}

	return 0;
}
8930
/*
 * NECP_CLIENT_ACTION_COPY_* handler: locate the target client and copy its
 * parameters or results to userspace via necp_client_copy_internal().
 *
 * A null (wildcard) client_id with a COPY_RESULT/COPY_UPDATED_RESULT action
 * means "find any client on this fd with unread state". If none is found
 * and an in-process flow divert was requested, a single request TLV is
 * returned instead. Observer fds may additionally look up clients owned by
 * other fds through the global client tree.
 *
 * Returns 0 on success (bytes copied in *retval), or an errno value.
 */
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	uuid_clear(client_id);

	*retval = 0;

	if (uap->buffer_size == 0 || uap->buffer == 0) {
		return EINVAL;
	}

	if (uap->action != NECP_CLIENT_ACTION_COPY_PARAMETERS &&
	    uap->action != NECP_CLIENT_ACTION_COPY_RESULT &&
	    uap->action != NECP_CLIENT_ACTION_COPY_UPDATED_RESULT &&
	    uap->action != NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) {
		return EINVAL;
	}

	if (uap->client_id) {
		if (uap->client_id_len != sizeof(uuid_t)) {
			NECPLOG(LOG_ERR, "Incorrect length (got %zu, expected %zu)", (size_t)uap->client_id_len, sizeof(uuid_t));
			return ERANGE;
		}

		error = copyin(uap->client_id, client_id, sizeof(uuid_t));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_copy client_id copyin error (%d)", error);
			return error;
		}
	}

	// A null UUID (either no client_id passed or an all-zero one) selects
	// "any client with pending updates" rather than a specific client
	const bool is_wildcard = (bool)uuid_is_null(client_id);

	NECP_FD_LOCK(fd_data);

	bool send_in_process_flow_divert_message = false;
	if (is_wildcard) {
		if (uap->action == NECP_CLIENT_ACTION_COPY_RESULT ||
		    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT ||
		    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) {
			struct necp_client *find_client = NULL;
			RB_FOREACH(find_client, _necp_client_tree, &fd_data->clients) {
				NECP_CLIENT_LOCK(find_client);
				if (!find_client->result_read || !find_client->group_members_read || necp_client_has_unread_flows(find_client)) {
					client = find_client;
					// Leave the client locked, and break
					break;
				}
				NECP_CLIENT_UNLOCK(find_client);
			}

			if (client == NULL && fd_data->request_in_process_flow_divert) {
				// No client found that needs update. Check for an event requesting in-process flow divert.
				send_in_process_flow_divert_message = true;
			}
		}
	} else {
		client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	}

	if (client != NULL) {
		if (!send_in_process_flow_divert_message) {
			// If client is set, it is locked
			error = necp_client_copy_internal(client, client_id, FALSE, uap, retval);
		}
		NECP_CLIENT_UNLOCK(client);
	}

	if (send_in_process_flow_divert_message) {
		// Consume the request and emit a single zero-length TLV telling
		// userspace to set up in-process flow divert
		fd_data->request_in_process_flow_divert = false;

		struct necp_tlv_header request_tlv = {
			.type = NECP_CLIENT_RESULT_REQUEST_IN_PROCESS_FLOW_DIVERT,
			.length = 0,
		};
		if (uap->buffer_size < sizeof(request_tlv)) {
			error = EINVAL;
		} else {
			error = copyout(&request_tlv, uap->buffer, sizeof(request_tlv));
			if (error) {
				NECPLOG(LOG_ERR, "necp_client_copy request flow divert TLV copyout error (%d)", error);
			} else {
				*retval = sizeof(request_tlv);
			}
		}
	}

	// Unlock our own fd before moving on or returning
	NECP_FD_UNLOCK(fd_data);

	if (client == NULL && !send_in_process_flow_divert_message) {
		if (fd_data->flags & NECP_OPEN_FLAG_OBSERVER) {
			// Observers are allowed to lookup clients on other fds

			// Lock tree
			NECP_CLIENT_TREE_LOCK_SHARED();

			bool found_client = FALSE;

			client = necp_find_client_and_lock(client_id);
			if (client != NULL) {
				// Matched, copy out data (client_is_observed == TRUE keeps
				// the owner's unread markers intact)
				found_client = TRUE;
				error = necp_client_copy_internal(client, client_id, TRUE, uap, retval);
				NECP_CLIENT_UNLOCK(client);
			}

			// Unlock tree
			NECP_CLIENT_TREE_UNLOCK();

			// No client found, fail
			if (!found_client) {
				return ENOENT;
			}
		} else {
			// No client found, and not allowed to search other fds, fail
			return ENOENT;
		}
	}

	return error;
}
9056
9057 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_client_update(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)9058 necp_client_copy_client_update(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
9059 {
9060 int error = 0;
9061
9062 *retval = 0;
9063
9064 if (!(fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER)) {
9065 NECPLOG0(LOG_ERR, "NECP fd is not observer, cannot copy client update");
9066 return EINVAL;
9067 }
9068
9069 if (uap->client_id_len != sizeof(uuid_t) || uap->client_id == 0) {
9070 NECPLOG0(LOG_ERR, "Client id invalid, cannot copy client update");
9071 return EINVAL;
9072 }
9073
9074 if (uap->buffer_size == 0 || uap->buffer == 0) {
9075 NECPLOG0(LOG_ERR, "Buffer invalid, cannot copy client update");
9076 return EINVAL;
9077 }
9078
9079 NECP_FD_LOCK(fd_data);
9080 struct necp_client_update *client_update = TAILQ_FIRST(&fd_data->update_list);
9081 if (client_update != NULL) {
9082 TAILQ_REMOVE(&fd_data->update_list, client_update, chain);
9083 VERIFY(fd_data->update_count > 0);
9084 fd_data->update_count--;
9085 }
9086 NECP_FD_UNLOCK(fd_data);
9087
9088 if (client_update != NULL) {
9089 error = copyout(client_update->client_id, uap->client_id, sizeof(uuid_t));
9090 if (error) {
9091 NECPLOG(LOG_ERR, "Copy client update copyout client id error (%d)", error);
9092 } else {
9093 if (uap->buffer_size < client_update->update_length) {
9094 NECPLOG(LOG_ERR, "Buffer size cannot hold update (%zu < %zu)", (size_t)uap->buffer_size, client_update->update_length);
9095 error = EINVAL;
9096 } else {
9097 error = copyout(client_update->update, uap->buffer, client_update->update_length);
9098 if (error) {
9099 NECPLOG(LOG_ERR, "Copy client update copyout error (%d)", error);
9100 } else {
9101 *retval = client_update->update_length;
9102 }
9103 }
9104 }
9105
9106 necp_client_update_free(client_update);
9107 client_update = NULL;
9108 } else {
9109 error = ENOENT;
9110 }
9111
9112 return error;
9113 }
9114
9115 static int
necp_client_copy_parameters_locked(struct necp_client * client,struct necp_client_nexus_parameters * parameters)9116 necp_client_copy_parameters_locked(struct necp_client *client,
9117 struct necp_client_nexus_parameters *parameters)
9118 {
9119 VERIFY(parameters != NULL);
9120
9121 struct necp_client_parsed_parameters parsed_parameters = {};
9122 int error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, &parsed_parameters);
9123
9124 parameters->pid = client->proc_pid;
9125 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID) {
9126 parameters->epid = parsed_parameters.effective_pid;
9127 } else {
9128 parameters->epid = parameters->pid;
9129 }
9130 #if SKYWALK
9131 parameters->port_reservation = client->port_reservation;
9132 #endif /* !SKYWALK */
9133 memcpy(¶meters->local_addr, &parsed_parameters.local_addr, sizeof(parameters->local_addr));
9134 memcpy(¶meters->remote_addr, &parsed_parameters.remote_addr, sizeof(parameters->remote_addr));
9135 parameters->ip_protocol = parsed_parameters.ip_protocol;
9136 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL) {
9137 parameters->transport_protocol = parsed_parameters.transport_protocol;
9138 } else {
9139 parameters->transport_protocol = parsed_parameters.ip_protocol;
9140 }
9141 parameters->ethertype = parsed_parameters.ethertype;
9142 parameters->traffic_class = parsed_parameters.traffic_class;
9143 if (uuid_is_null(client->override_euuid)) {
9144 uuid_copy(parameters->euuid, parsed_parameters.effective_uuid);
9145 } else {
9146 uuid_copy(parameters->euuid, client->override_euuid);
9147 }
9148 parameters->is_listener = (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) ? 1 : 0;
9149 parameters->is_interpose = (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) ? 1 : 0;
9150 parameters->is_custom_ether = (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) ? 1 : 0;
9151 parameters->policy_id = client->policy_id;
9152 parameters->skip_policy_id = client->skip_policy_id;
9153
9154 // parse client result flag
9155 u_int32_t client_result_flags = 0;
9156 u_int32_t value_size = 0;
9157 u_int8_t *flags_pointer = NULL;
9158 flags_pointer = necp_buffer_get_tlv_value(client->result, client->result_length, 0, &value_size);
9159 if (flags_pointer && value_size == sizeof(client_result_flags)) {
9160 memcpy(&client_result_flags, flags_pointer, value_size);
9161 }
9162 parameters->allow_qos_marking = (client_result_flags & NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING) ? 1 : 0;
9163
9164 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE) {
9165 if (parsed_parameters.local_address_preference == NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE_DEFAULT) {
9166 parameters->override_address_selection = false;
9167 } else if (parsed_parameters.local_address_preference == NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE_TEMPORARY) {
9168 parameters->override_address_selection = true;
9169 parameters->use_stable_address = false;
9170 } else if (parsed_parameters.local_address_preference == NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE_STABLE) {
9171 parameters->override_address_selection = true;
9172 parameters->use_stable_address = true;
9173 }
9174 } else {
9175 parameters->override_address_selection = false;
9176 }
9177
9178 if ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
9179 (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_NO_WAKE_FROM_SLEEP)) {
9180 parameters->no_wake_from_sleep = true;
9181 }
9182
9183 if ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
9184 (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_REUSE_LOCAL)) {
9185 parameters->reuse_port = true;
9186 }
9187
9188 #if SKYWALK
9189 if (!parameters->is_listener) {
9190 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN) {
9191 if (parsed_parameters.demux_patterns[0].len == 0) {
9192 parameters->is_demuxable_parent = 1;
9193 } else {
9194 if (client->validated_parent) {
9195 ASSERT(!uuid_is_null(client->parent_client_id));
9196
9197 NECP_CLIENT_TREE_LOCK_SHARED();
9198 struct necp_client *parent = necp_find_client_and_lock(client->parent_client_id);
9199 if (parent != NULL) {
9200 struct necp_client_flow_registration *parent_flow_registration = NULL;
9201 RB_FOREACH(parent_flow_registration, _necp_client_flow_tree, &parent->flow_registrations) {
9202 uuid_copy(parameters->parent_flow_uuid, parent_flow_registration->registration_id);
9203 break;
9204 }
9205
9206 NECP_CLIENT_UNLOCK(parent);
9207 }
9208 NECP_CLIENT_TREE_UNLOCK();
9209
9210 if (parsed_parameters.demux_pattern_count > 0) {
9211 for (int i = 0; i < parsed_parameters.demux_pattern_count; i++) {
9212 memcpy(¶meters->demux_patterns[i], &parsed_parameters.demux_patterns[i], sizeof(struct necp_demux_pattern));
9213 }
9214 parameters->demux_pattern_count = parsed_parameters.demux_pattern_count;
9215 }
9216 }
9217 }
9218 }
9219
9220 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_EXTENDED_FLAGS) {
9221 if (parsed_parameters.extended_flags & NECP_CLIENT_PARAMETER_EXTENDED_FLAG_AOP2_OFFLOAD) {
9222 parameters->use_aop_offload = true;
9223 }
9224 }
9225 }
9226 #endif // SKYWALK
9227
9228 return error;
9229 }
9230
9231 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_list(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)9232 necp_client_list(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
9233 {
9234 int error = 0;
9235 struct necp_client *find_client = NULL;
9236 size_t copy_buffer_size = 0;
9237 uuid_t *list = NULL;
9238 u_int32_t requested_client_count = 0;
9239 u_int32_t client_count = 0;
9240
9241 if (uap->buffer_size < sizeof(requested_client_count) || uap->buffer == 0) {
9242 error = EINVAL;
9243 goto done;
9244 }
9245
9246 if (!(fd_data->flags & NECP_OPEN_FLAG_OBSERVER)) {
9247 NECPLOG0(LOG_ERR, "Client does not hold necessary entitlement to list other NECP clients");
9248 error = EACCES;
9249 goto done;
9250 }
9251
9252 error = copyin(uap->buffer, &requested_client_count, sizeof(requested_client_count));
9253 if (error) {
9254 goto done;
9255 }
9256
9257 if (os_mul_overflow(sizeof(uuid_t), requested_client_count, ©_buffer_size)) {
9258 error = ERANGE;
9259 goto done;
9260 }
9261
9262 if (uap->buffer_size - sizeof(requested_client_count) != copy_buffer_size) {
9263 error = EINVAL;
9264 goto done;
9265 }
9266
9267 if (copy_buffer_size > NECP_MAX_CLIENT_LIST_SIZE) {
9268 error = EINVAL;
9269 goto done;
9270 }
9271
9272 if (requested_client_count > 0) {
9273 list = (uuid_t*)kalloc_data(copy_buffer_size, Z_WAITOK | Z_ZERO);
9274 if (list == NULL) {
9275 error = ENOMEM;
9276 goto done;
9277 }
9278 }
9279
9280 // Lock tree
9281 NECP_CLIENT_TREE_LOCK_SHARED();
9282
9283 find_client = NULL;
9284 RB_FOREACH(find_client, _necp_client_global_tree, &necp_client_global_tree) {
9285 NECP_CLIENT_LOCK(find_client);
9286 if (!uuid_is_null(find_client->client_id)) {
9287 if (client_count < requested_client_count) {
9288 uuid_copy(list[client_count], find_client->client_id);
9289 }
9290 client_count++;
9291 }
9292 NECP_CLIENT_UNLOCK(find_client);
9293 }
9294
9295 // Unlock tree
9296 NECP_CLIENT_TREE_UNLOCK();
9297
9298 error = copyout(&client_count, uap->buffer, sizeof(client_count));
9299 if (error) {
9300 NECPLOG(LOG_ERR, "necp_client_list buffer copyout error (%d)", error);
9301 goto done;
9302 }
9303
9304 if (requested_client_count > 0 &&
9305 client_count > 0 &&
9306 list != NULL) {
9307 error = copyout(list, uap->buffer + sizeof(client_count), copy_buffer_size);
9308 if (error) {
9309 NECPLOG(LOG_ERR, "necp_client_list client count copyout error (%d)", error);
9310 goto done;
9311 }
9312 }
9313 done:
9314 if (list != NULL) {
9315 kfree_data(list, copy_buffer_size);
9316 }
9317 *retval = error;
9318
9319 return error;
9320 }
9321
9322 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_add_flow(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)9323 necp_client_add_flow(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
9324 {
9325 int error = 0;
9326 struct necp_client *client = NULL;
9327 uuid_t client_id;
9328 struct necp_client_nexus_parameters parameters = {};
9329 struct proc *proc = PROC_NULL;
9330 struct necp_client_add_flow * __indexable add_request = NULL;
9331 struct necp_client_add_flow * __indexable allocated_add_request = NULL;
9332 struct necp_client_add_flow_default default_add_request = {};
9333 const size_t buffer_size = uap->buffer_size;
9334
9335 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
9336 error = EINVAL;
9337 NECPLOG(LOG_ERR, "necp_client_add_flow invalid client_id (length %zu)", (size_t)uap->client_id_len);
9338 goto done;
9339 }
9340
9341 if (uap->buffer == 0 || buffer_size < sizeof(struct necp_client_add_flow) ||
9342 buffer_size > sizeof(struct necp_client_add_flow_default) * 4) {
9343 error = EINVAL;
9344 NECPLOG(LOG_ERR, "necp_client_add_flow invalid buffer (length %zu)", buffer_size);
9345 goto done;
9346 }
9347
9348 error = copyin(uap->client_id, client_id, sizeof(uuid_t));
9349 if (error) {
9350 NECPLOG(LOG_ERR, "necp_client_add_flow copyin client_id error (%d)", error);
9351 goto done;
9352 }
9353
9354 if (buffer_size <= sizeof(struct necp_client_add_flow_default)) {
9355 // Fits in default size
9356 error = copyin(uap->buffer, &default_add_request, buffer_size);
9357 if (error) {
9358 NECPLOG(LOG_ERR, "necp_client_add_flow copyin default_add_request error (%d)", error);
9359 goto done;
9360 }
9361
9362 add_request = (struct necp_client_add_flow *)&default_add_request;
9363 } else {
9364 allocated_add_request = (struct necp_client_add_flow *)kalloc_data(buffer_size, Z_WAITOK | Z_ZERO);
9365 if (allocated_add_request == NULL) {
9366 error = ENOMEM;
9367 goto done;
9368 }
9369
9370 error = copyin(uap->buffer, allocated_add_request, buffer_size);
9371 if (error) {
9372 NECPLOG(LOG_ERR, "necp_client_add_flow copyin default_add_request error (%d)", error);
9373 goto done;
9374 }
9375
9376 add_request = allocated_add_request;
9377 }
9378
9379 NECP_FD_LOCK(fd_data);
9380 pid_t pid = fd_data->proc_pid;
9381 proc = proc_find(pid);
9382 if (proc == PROC_NULL) {
9383 NECP_FD_UNLOCK(fd_data);
9384 NECPLOG(LOG_ERR, "necp_client_add_flow process not found for pid %d error (%d)", pid, error);
9385 error = ESRCH;
9386 goto done;
9387 }
9388
9389 client = necp_client_fd_find_client_and_lock(fd_data, client_id);
9390 if (client == NULL) {
9391 error = ENOENT;
9392 NECP_FD_UNLOCK(fd_data);
9393 goto done;
9394 }
9395
9396 // Using ADD_FLOW indicates that the client supports multiple flows per client
9397 client->legacy_client_is_flow = false;
9398
9399 necp_client_retain_locked(client);
9400 necp_client_copy_parameters_locked(client, ¶meters);
9401
9402 struct necp_client_flow_registration *new_registration = necp_client_create_flow_registration(fd_data, client);
9403 if (new_registration == NULL) {
9404 error = ENOMEM;
9405 NECP_CLIENT_UNLOCK(client);
9406 NECP_FD_UNLOCK(fd_data);
9407 NECPLOG0(LOG_ERR, "Failed to allocate flow registration");
9408 goto done;
9409 }
9410
9411 new_registration->flags = add_request->flags;
9412
9413 // If NECP_CLIENT_FLOW_FLAGS_OPEN_FLOW_ON_BEHALF_OF_CLIENT is set, then set registration_id_to_add to the old
9414 // value in add_request->registration_id, otherwise use the new value in new_registration->registration_id.
9415 bool open_flow_on_behalf_of_client = (add_request->flags & NECP_CLIENT_FLOW_FLAGS_OPEN_FLOW_ON_BEHALF_OF_CLIENT);
9416 uuid_t registration_id_to_add = {};
9417 if (open_flow_on_behalf_of_client && !uuid_is_null(add_request->registration_id)) {
9418 uuid_copy(registration_id_to_add, add_request->registration_id);
9419 } else {
9420 uuid_copy(registration_id_to_add, new_registration->registration_id);
9421 }
9422
9423 // Copy new ID out to caller
9424 uuid_copy(add_request->registration_id, new_registration->registration_id);
9425 new_registration->aop_offload = parameters.use_aop_offload;
9426
9427 NECP_CLIENT_FLOW_LOG(client, new_registration, "adding flow");
9428
9429 size_t trailer_offset = (sizeof(struct necp_client_add_flow) +
9430 add_request->stats_request_count * sizeof(struct necp_client_flow_stats));
9431
9432 // Copy override address
9433 struct sockaddr * __single override_address = NULL;
9434 if (add_request->flags & NECP_CLIENT_FLOW_FLAGS_OVERRIDE_ADDRESS) {
9435 size_t offset_of_address = trailer_offset;
9436 if (buffer_size >= offset_of_address + sizeof(struct sockaddr_in)) {
9437 override_address = flow_req_get_address(add_request, offset_of_address);
9438 if (buffer_size >= offset_of_address + override_address->sa_len &&
9439 override_address->sa_len <= sizeof(parameters.remote_addr)) {
9440 SOCKADDR_COPY(override_address, ¶meters.remote_addr, override_address->sa_len);
9441 trailer_offset += override_address->sa_len;
9442
9443 // Clear out any local address if the remote address is overridden
9444 if (parameters.remote_addr.sa.sa_family == AF_INET) {
9445 parameters.local_addr.sin.sin_family = AF_INET;
9446 parameters.local_addr.sin.sin_len = sizeof(struct sockaddr_in);
9447 parameters.local_addr.sin.sin_addr.s_addr = 0;
9448 } else if (parameters.remote_addr.sa.sa_family == AF_INET6) {
9449 parameters.local_addr.sin6.sin6_family = AF_INET6;
9450 parameters.local_addr.sin6.sin6_len = sizeof(struct sockaddr_in6);
9451 memset((uint8_t *)¶meters.local_addr.sin6.sin6_addr, 0, sizeof(struct in6_addr));
9452 parameters.local_addr.sin6.sin6_scope_id = 0;
9453 }
9454 } else {
9455 override_address = NULL;
9456 }
9457 }
9458 }
9459
9460 // Copy override IP protocol
9461 if (add_request->flags & NECP_CLIENT_FLOW_FLAGS_OVERRIDE_IP_PROTOCOL) {
9462 size_t offset_of_ip_protocol = trailer_offset;
9463 if (buffer_size >= offset_of_ip_protocol + sizeof(uint8_t)) {
9464 uint8_t * __single ip_protocol_p = flow_req_get_proto(add_request, offset_of_ip_protocol);
9465 memcpy(¶meters.ip_protocol, ip_protocol_p, sizeof(uint8_t));
9466 }
9467 }
9468
9469 // If opening the flow on behalf of the client, then replace the pid and parameters.pid with the effective PID
9470 // so that the client's PID is used for this flow instead of the PID of the process making the requests.
9471 if (open_flow_on_behalf_of_client) {
9472 parameters.pid = parameters.epid;
9473 pid = parameters.epid;
9474 }
9475
9476 #if SKYWALK
9477 if (add_request->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS) {
9478 size_t assigned_results_length = 0;
9479 void * __sized_by(assigned_results_length) assigned_results = NULL;
9480 uint32_t interface_index = 0;
9481
9482 // Validate that the nexus UUID is assigned
9483 bool found_nexus = false;
9484 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
9485 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
9486 struct necp_client_interface_option *option = &client->interface_options[option_i];
9487 if (uuid_compare(option->nexus_agent, add_request->agent_uuid) == 0) {
9488 interface_index = option->interface_index;
9489 found_nexus = true;
9490 break;
9491 }
9492 } else {
9493 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
9494 if (uuid_compare(option->nexus_agent, add_request->agent_uuid) == 0) {
9495 interface_index = option->interface_index;
9496 found_nexus = true;
9497 break;
9498 }
9499 }
9500 }
9501
9502 if (!found_nexus) {
9503 error = EINVAL;
9504 NECPLOG(LOG_ERR, "<pid %d> Requested nexus not found", client->proc_pid);
9505 } else if (client->flow_divert_control_unit != 0) {
9506 // If policy result indicates flow divert, no nexus flow is allowed.
9507 // All flow divert flows must be using sockets.
9508 error = EPERM;
9509 NECPLOG(LOG_ERR, "<pid %d> Disallow flow add with flow divert result", client->proc_pid);
9510 } else {
9511 necp_client_add_nexus_flow_if_needed(new_registration, add_request->agent_uuid, interface_index, parameters.use_aop_offload);
9512
9513 error = netagent_client_message_with_params(add_request->agent_uuid,
9514 ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
9515 client->client_id :
9516 registration_id_to_add),
9517 pid, client->agent_handle,
9518 NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS,
9519 (struct necp_client_agent_parameters *)¶meters,
9520 &assigned_results, &assigned_results_length);
9521 if (error != 0) {
9522 VERIFY(assigned_results == NULL);
9523 VERIFY(assigned_results_length == 0);
9524 NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error);
9525 } else if (assigned_results != NULL) {
9526 if (!necp_assign_client_result_locked(proc, fd_data, client, new_registration, add_request->agent_uuid,
9527 assigned_results, assigned_results_length, false, false)) {
9528 kfree_data_sized_by(assigned_results, assigned_results_length);
9529 }
9530 } else if (override_address != NULL) {
9531 // Save the overridden address in the flow. Find the correct flow,
9532 // and assign just the address TLV. Don't set the assigned flag.
9533 struct necp_client_flow *flow = NULL;
9534 LIST_FOREACH(flow, &new_registration->flow_list, flow_chain) {
9535 if (flow->nexus &&
9536 uuid_compare(flow->u.nexus_agent, add_request->agent_uuid) == 0) {
9537 if (flow->assigned_results == NULL) {
9538 SOCKADDR_COPY(override_address, &flow->remote_addr, override_address->sa_len);
9539 uuid_t empty_uuid;
9540 uuid_clear(empty_uuid);
9541 size_t message_length;
9542 void *message = necp_create_nexus_assign_message(empty_uuid, 0, NULL, 0,
9543 (struct necp_client_endpoint *)&flow->local_addr,
9544 (struct necp_client_endpoint *)&flow->remote_addr,
9545 NULL, 0, NULL, 0, &message_length);
9546 flow->assigned_results = message;
9547 flow->assigned_results_length = message_length;
9548 }
9549 break;
9550 }
9551 }
9552 }
9553 }
9554 }
9555
9556 // Don't request stats if nexus creation fails
9557 if (error == 0 && add_request->stats_request_count > 0 && necp_arena_initialize(fd_data, true) == 0) {
9558 struct necp_client_flow_stats * __single stats_request = &(necp_client_get_flow_stats(add_request))[0];
9559 struct necp_stats_bufreq bufreq = {};
9560
9561 NECP_CLIENT_FLOW_LOG(client, new_registration, "Initializing stats");
9562
9563 bufreq.necp_stats_bufreq_id = NECP_CLIENT_STATISTICS_BUFREQ_ID;
9564 bufreq.necp_stats_bufreq_type = stats_request->stats_type;
9565 bufreq.necp_stats_bufreq_ver = stats_request->stats_version;
9566 bufreq.necp_stats_bufreq_size = stats_request->stats_size;
9567 bufreq.necp_stats_bufreq_uaddr = stats_request->stats_addr;
9568 (void)necp_stats_initialize(fd_data, client, new_registration, &bufreq);
9569 stats_request->stats_type = bufreq.necp_stats_bufreq_type;
9570 stats_request->stats_version = bufreq.necp_stats_bufreq_ver;
9571 stats_request->stats_size = bufreq.necp_stats_bufreq_size;
9572 stats_request->stats_addr = bufreq.necp_stats_bufreq_uaddr;
9573 }
9574
9575 if (error == 0 && parameters.use_aop_offload) {
9576 error = necp_aop_offload_stats_initialize(
9577 new_registration, add_request->agent_uuid);
9578 }
9579 #endif /* !SKYWALK */
9580
9581 if (error == 0 &&
9582 (add_request->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE ||
9583 add_request->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) {
9584 uint32_t interface_index = IFSCOPE_NONE;
9585 ifnet_head_lock_shared();
9586 struct ifnet *interface = NULL;
9587 TAILQ_FOREACH(interface, &ifnet_head, if_link) {
9588 ifnet_lock_shared(interface);
9589 if (interface->if_agentids != NULL) {
9590 for (u_int32_t i = 0; i < interface->if_agentcount; i++) {
9591 if (uuid_compare(interface->if_agentids[i], add_request->agent_uuid) == 0) {
9592 interface_index = interface->if_index;
9593 break;
9594 }
9595 }
9596 }
9597 ifnet_lock_done(interface);
9598 if (interface_index != IFSCOPE_NONE) {
9599 break;
9600 }
9601 }
9602 ifnet_head_done();
9603
9604 necp_client_add_nexus_flow_if_needed(new_registration, add_request->agent_uuid, interface_index, parameters.use_aop_offload);
9605
9606 size_t dummy_length = 0;
9607 void * __sized_by(dummy_length) dummy_results = NULL;
9608 error = netagent_client_message_with_params(add_request->agent_uuid,
9609 ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
9610 client->client_id :
9611 new_registration->registration_id),
9612 pid, client->agent_handle,
9613 NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT,
9614 (struct necp_client_agent_parameters *)¶meters,
9615 &dummy_results, &dummy_length);
9616 if (error != 0) {
9617 NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error);
9618 }
9619 }
9620
9621 if (error != 0) {
9622 // Encountered an error in adding the flow, destroy the flow registration
9623 #if SKYWALK
9624 necp_destroy_flow_stats(fd_data, new_registration, NULL, false);
9625 #endif /* SKYWALK */
9626 NECP_FLOW_TREE_LOCK_EXCLUSIVE();
9627 RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, new_registration);
9628 NECP_FLOW_TREE_UNLOCK();
9629 RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, new_registration);
9630 necp_destroy_client_flow_registration(client, new_registration, fd_data->proc_pid, true);
9631 new_registration = NULL;
9632 }
9633
9634 NECP_CLIENT_UNLOCK(client);
9635 NECP_FD_UNLOCK(fd_data);
9636
9637 necp_client_release(client);
9638
9639 if (error != 0) {
9640 goto done;
9641 }
9642
9643 // Copy the request back out to the caller with assigned fields
9644 error = copyout(add_request, uap->buffer, buffer_size);
9645 if (error != 0) {
9646 NECPLOG(LOG_ERR, "necp_client_add_flow copyout add_request error (%d)", error);
9647 }
9648
9649 done:
9650 *retval = error;
9651 if (error != 0) {
9652 NECPLOG(LOG_ERR, "Add flow error (%d)", error);
9653 }
9654
9655 if (allocated_add_request != NULL) {
9656 kfree_data(allocated_add_request, buffer_size);
9657 }
9658
9659 if (proc != PROC_NULL) {
9660 proc_rele(proc);
9661 }
9662 return error;
9663 }
9664
9665 #if SKYWALK
9666
/*
 * Legacy action: request a nexus for a client that supports only one flow.
 * Finds the first non-null nexus agent among the client's interface options,
 * sends a REQUEST_NEXUS message to that agent registered under the client ID,
 * and optionally initializes statistics if the caller passed a
 * necp_stats_bufreq in uap->buffer.
 *
 * Locking: takes the fd lock, then the client lock; every exit path must
 * drop both and release the client reference taken below.
 */
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_request_nexus(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	struct necp_client_nexus_parameters parameters = {};
	struct proc *proc = PROC_NULL;
	const size_t buffer_size = uap->buffer_size;

	// The caller must pass exactly one client UUID.
	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_request_nexus copyin client_id error (%d)", error);
		goto done;
	}

	NECP_FD_LOCK(fd_data);
	pid_t pid = fd_data->proc_pid;
	proc = proc_find(pid);
	if (proc == PROC_NULL) {
		NECP_FD_UNLOCK(fd_data);
		NECPLOG(LOG_ERR, "necp_client_request_nexus process not found for pid %d error (%d)", pid, error);
		error = ESRCH;
		goto done;
	}

	// Returns with the client lock held on success.
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client == NULL) {
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
		goto done;
	}

	// Using REQUEST_NEXUS indicates that the client only supports one flow per client
	client->legacy_client_is_flow = true;

	// Hold an extra reference across the nexus request; released after the
	// do/while below (or on each early-exit path inside it).
	necp_client_retain_locked(client);
	necp_client_copy_parameters_locked(client, &parameters);

	// Single-iteration loop used purely as a structured early-exit scope.
	do {
		size_t assigned_results_length = 0;
		void * __sized_by(assigned_results_length) assigned_results = NULL;
		uuid_t nexus_uuid;
		uint32_t interface_index = 0;

		// Validate that the nexus UUID is assigned
		bool found_nexus = false;
		for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
			// The first NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT options
			// live inline in the client; the rest are in an overflow array.
			if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
				struct necp_client_interface_option *option = &client->interface_options[option_i];
				if (!uuid_is_null(option->nexus_agent)) {
					uuid_copy(nexus_uuid, option->nexus_agent);
					interface_index = option->interface_index;
					found_nexus = true;
					break;
				}
			} else {
				struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
				if (!uuid_is_null(option->nexus_agent)) {
					uuid_copy(nexus_uuid, option->nexus_agent);
					interface_index = option->interface_index;
					found_nexus = true;
					break;
				}
			}
		}

		if (!found_nexus) {
			// No interface option carries a nexus agent; unlock, drop the
			// extra reference, and bail out.
			NECP_CLIENT_UNLOCK(client);
			NECP_FD_UNLOCK(fd_data);
			necp_client_release(client);
			// Break the loop
			error = ENETDOWN;
			goto done;
		}

		struct necp_client_flow_registration *new_registration = necp_client_create_flow_registration(fd_data, client);
		if (new_registration == NULL) {
			error = ENOMEM;
			NECP_CLIENT_UNLOCK(client);
			NECP_FD_UNLOCK(fd_data);
			necp_client_release(client);
			NECPLOG0(LOG_ERR, "Failed to allocate flow registration");
			goto done;
		}

		new_registration->flags = (NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS | NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID);

		necp_client_add_nexus_flow_if_needed(new_registration, nexus_uuid, interface_index, parameters.use_aop_offload);

		// Note: Any clients using "request_nexus" are not flow-registration aware.
		// Register the Client ID rather than the Registration ID with the nexus, since
		// the client will send traffic based on the client ID.
		error = netagent_client_message_with_params(nexus_uuid,
		    ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
		    client->client_id :
		    new_registration->registration_id),
		    pid, client->agent_handle,
		    NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS,
		    (struct necp_client_agent_parameters *)&parameters,
		    &assigned_results, &assigned_results_length);
		if (error) {
			// NOTE(review): the freshly created flow registration is left
			// attached on this error path (cleaned up with the client) —
			// verify against necp_client_add_flow's teardown behavior.
			NECP_CLIENT_UNLOCK(client);
			NECP_FD_UNLOCK(fd_data);
			necp_client_release(client);
			VERIFY(assigned_results == NULL);
			VERIFY(assigned_results_length == 0);
			NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error);
			goto done;
		}

		if (assigned_results != NULL) {
			// On success, ownership of assigned_results transfers to the
			// client; free it ourselves only if the assignment is rejected.
			if (!necp_assign_client_result_locked(proc, fd_data, client, new_registration, nexus_uuid,
			    assigned_results, assigned_results_length, false, false)) {
				kfree_data_sized_by(assigned_results, assigned_results_length);
			}
		}

		// Optional statistics setup: only when the caller passed exactly a
		// struct necp_stats_bufreq and the stats arena can be initialized.
		if (uap->buffer != 0 && buffer_size == sizeof(struct necp_stats_bufreq) &&
		    necp_arena_initialize(fd_data, true) == 0) {
			struct necp_stats_bufreq bufreq = {};
			int copy_error = copyin(uap->buffer, &bufreq, buffer_size);
			if (copy_error) {
				NECPLOG(LOG_ERR, "necp_client_request_nexus copyin bufreq error (%d)", copy_error);
			} else {
				(void)necp_stats_initialize(fd_data, client, new_registration, &bufreq);
				// Copy the (possibly updated) request back out to userspace.
				copy_error = copyout(&bufreq, uap->buffer, buffer_size);
				if (copy_error != 0) {
					NECPLOG(LOG_ERR, "necp_client_request_nexus copyout bufreq error (%d)", copy_error);
				}
			}
		}
	} while (false);

	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);

	necp_client_release(client);

done:
	*retval = error;
	if (error != 0) {
		NECPLOG(LOG_ERR, "Request nexus error (%d)", error);
	}

	if (proc != PROC_NULL) {
		proc_rele(proc);
	}
	return error;
}
#endif /* SKYWALK */
9823
9824 static void
necp_client_add_assertion(struct necp_client * client,uuid_t netagent_uuid)9825 necp_client_add_assertion(struct necp_client *client, uuid_t netagent_uuid)
9826 {
9827 struct necp_client_assertion *new_assertion = NULL;
9828
9829 new_assertion = kalloc_type(struct necp_client_assertion,
9830 Z_WAITOK | Z_NOFAIL);
9831
9832 uuid_copy(new_assertion->asserted_netagent, netagent_uuid);
9833
9834 LIST_INSERT_HEAD(&client->assertion_list, new_assertion, assertion_chain);
9835 }
9836
9837 static bool
necp_client_remove_assertion(struct necp_client * client,uuid_t netagent_uuid)9838 necp_client_remove_assertion(struct necp_client *client, uuid_t netagent_uuid)
9839 {
9840 struct necp_client_assertion * __single found_assertion = NULL;
9841 struct necp_client_assertion *search_assertion = NULL;
9842 LIST_FOREACH(search_assertion, &client->assertion_list, assertion_chain) {
9843 if (uuid_compare(search_assertion->asserted_netagent, netagent_uuid) == 0) {
9844 found_assertion = search_assertion;
9845 break;
9846 }
9847 }
9848
9849 if (found_assertion == NULL) {
9850 NECPLOG0(LOG_ERR, "Netagent uuid not previously asserted");
9851 return false;
9852 }
9853
9854 LIST_REMOVE(found_assertion, assertion_chain);
9855 kfree_type(struct necp_client_assertion, found_assertion);
9856 return true;
9857 }
9858
/*
 * Perform one or more agent actions on behalf of a client. uap->buffer
 * holds a sequence of NECP TLVs; each TLV's value begins with an agent UUID
 * and selects an action (trigger/assert/unassert, group membership change,
 * or error report). Stops at the first TLV whose action fails.
 *
 * Returns 0 if at least one agent was acted on; ENOENT if none were.
 */
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_agent_action(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	bool acted_on_agent = FALSE;
	u_int8_t *parameters = NULL;
	const size_t buffer_size = uap->buffer_size;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
	    buffer_size == 0 || uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_agent_action invalid parameters");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_agent_action copyin client_id error (%d)", error);
		goto done;
	}

	// Bound the user-controlled buffer before allocating a kernel copy.
	if (buffer_size > NECP_MAX_AGENT_ACTION_SIZE) {
		NECPLOG(LOG_ERR, "necp_client_agent_action invalid buffer size (>%u)", NECP_MAX_AGENT_ACTION_SIZE);
		error = EINVAL;
		goto done;
	}

	parameters = (u_int8_t *)kalloc_data(buffer_size, Z_WAITOK | Z_ZERO);
	if (parameters == NULL) {
		error = ENOMEM;
		goto done;
	}

	error = copyin(uap->buffer, parameters, buffer_size);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_agent_action parameters copyin error (%d)", error);
		goto done;
	}

	NECP_FD_LOCK(fd_data);
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client != NULL) {
		// Walk the TLV stream; stop when a full header no longer fits.
		size_t offset = 0;
		while ((offset + sizeof(struct necp_tlv_header)) <= buffer_size) {
			u_int8_t type = necp_buffer_get_tlv_type(parameters, buffer_size, offset);
			u_int32_t length = necp_buffer_get_tlv_length(parameters, buffer_size, offset);

			if (length > (buffer_size - (offset + sizeof(struct necp_tlv_header)))) {
				// If the length is larger than what can fit in the remaining parameters size, bail
				NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
				break;
			}

			// Every recognized action's value starts with an agent UUID.
			if (length >= sizeof(uuid_t)) {
				u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, buffer_size, offset, NULL);
				if (value == NULL) {
					NECPLOG0(LOG_ERR, "Invalid TLV value");
					break;
				}
				if (type == NECP_CLIENT_PARAMETER_TRIGGER_AGENT ||
				    type == NECP_CLIENT_PARAMETER_ASSERT_AGENT ||
				    type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) {
					uuid_t agent_uuid;
					uuid_copy(agent_uuid, value);
					u_int8_t netagent_message_type = 0;
					if (type == NECP_CLIENT_PARAMETER_TRIGGER_AGENT) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_TRIGGER;
					} else if (type == NECP_CLIENT_PARAMETER_ASSERT_AGENT) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT;
					} else if (type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
					}

					// Before unasserting, verify that the assertion was already taken
					if (type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) {
						if (!necp_client_remove_assertion(client, agent_uuid)) {
							error = ENOENT;
							break;
						}
					}

					struct necp_client_nexus_parameters parsed_parameters = {};
					necp_client_copy_parameters_locked(client, &parsed_parameters);
					// Results are not used for these message types.
					size_t dummy_length = 0;
					void * __sized_by(dummy_length) dummy_results = NULL;

					error = netagent_client_message_with_params(agent_uuid,
					    client_id,
					    fd_data->proc_pid,
					    client->agent_handle,
					    netagent_message_type,
					    (struct necp_client_agent_parameters *)&parsed_parameters,
					    &dummy_results, &dummy_length);
					if (error == 0) {
						acted_on_agent = TRUE;
					} else {
						break;
					}

					// Only save the assertion if the action succeeded
					if (type == NECP_CLIENT_PARAMETER_ASSERT_AGENT) {
						necp_client_add_assertion(client, agent_uuid);
					}
				} else if (type == NECP_CLIENT_PARAMETER_AGENT_ADD_GROUP_MEMBERS ||
				    type == NECP_CLIENT_PARAMETER_AGENT_REMOVE_GROUP_MEMBERS) {
					uuid_t agent_uuid;
					uuid_copy(agent_uuid, value);
					u_int8_t netagent_message_type = 0;
					if (type == NECP_CLIENT_PARAMETER_AGENT_ADD_GROUP_MEMBERS) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_ADD_GROUP_MEMBERS;
					} else if (type == NECP_CLIENT_PARAMETER_AGENT_REMOVE_GROUP_MEMBERS) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_REMOVE_GROUP_MEMBERS;
					}

					// The member list follows the agent UUID in the TLV value.
					struct necp_client_group_members group_members = {};
					group_members.group_members_length = (length - sizeof(uuid_t));
					group_members.group_members = (value + sizeof(uuid_t));
					size_t dummy_length = 0;
					void * __sized_by(dummy_length) dummy_results = NULL;
					error = netagent_client_message_with_params(agent_uuid,
					    client_id,
					    fd_data->proc_pid,
					    client->agent_handle,
					    netagent_message_type,
					    (struct necp_client_agent_parameters *)&group_members,
					    &dummy_results, &dummy_length);
					if (error == 0) {
						acted_on_agent = TRUE;
					} else {
						break;
					}
				} else if (type == NECP_CLIENT_PARAMETER_REPORT_AGENT_ERROR) {
					uuid_t agent_uuid;
					uuid_copy(agent_uuid, value);
					struct necp_client_agent_parameters agent_params = {};
					// Copy the caller-supplied error payload if it is present.
					if ((length - sizeof(uuid_t)) >= sizeof(agent_params.u.error.error)) {
						memcpy(&agent_params.u.error.error,
						    (value + sizeof(uuid_t)),
						    sizeof(agent_params.u.error.error));
					}
					// Force a report only the first time this fd reports a
					// given agent; reported UUIDs are remembered per-fd.
					bool agent_reported = false;
					for (int agent_i = 0; agent_i < NECP_FD_REPORTED_AGENT_COUNT; agent_i++) {
						if (uuid_compare(agent_uuid, fd_data->reported_agents.agent_uuid[agent_i]) == 0) {
							// Found a match, already reported
							agent_reported = true;
							break;
						}
					}
					agent_params.u.error.force_report = !agent_reported;
					if (!agent_reported) {
						// Save this agent as having been reported
						bool saved_agent_uuid = false;
						for (int agent_i = 0; agent_i < NECP_FD_REPORTED_AGENT_COUNT; agent_i++) {
							if (uuid_is_null(fd_data->reported_agents.agent_uuid[agent_i])) {
								uuid_copy(fd_data->reported_agents.agent_uuid[agent_i], agent_uuid);
								saved_agent_uuid = true;
								break;
							}
						}
						if (!saved_agent_uuid) {
							// Reported agent UUIDs full, move over and insert at the end
							for (int agent_i = 0; agent_i < NECP_FD_REPORTED_AGENT_COUNT; agent_i++) {
								if (agent_i + 1 < NECP_FD_REPORTED_AGENT_COUNT) {
									uuid_copy(fd_data->reported_agents.agent_uuid[agent_i], fd_data->reported_agents.agent_uuid[agent_i + 1]);
								} else {
									uuid_copy(fd_data->reported_agents.agent_uuid[agent_i], agent_uuid);
								}
							}
						}
					}
					size_t dummy_length = 0;
					void * __sized_by(dummy_length) dummy_results = NULL;
					error = netagent_client_message_with_params(agent_uuid,
					    client_id,
					    fd_data->proc_pid,
					    client->agent_handle,
					    NETAGENT_MESSAGE_TYPE_CLIENT_ERROR,
					    &agent_params,
					    &dummy_results, &dummy_length);
					if (error == 0) {
						acted_on_agent = TRUE;
					} else {
						break;
					}
				}
			}

			// Advance past this TLV (header plus its declared length).
			offset += sizeof(struct necp_tlv_header) + length;
		}

		NECP_CLIENT_UNLOCK(client);
	}
	NECP_FD_UNLOCK(fd_data);

	// No TLV resulted in a successful agent action and no other error
	// was recorded: report "nothing found to act on".
	if (!acted_on_agent &&
	    error == 0) {
		error = ENOENT;
	}
done:
	*retval = error;
	if (parameters != NULL) {
		kfree_data(parameters, buffer_size);
		parameters = NULL;
	}

	return error;
}
10068
10069 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_agent(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)10070 necp_client_copy_agent(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
10071 {
10072 int error = 0;
10073 uuid_t agent_uuid;
10074 const size_t buffer_size = uap->buffer_size;
10075
10076 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
10077 buffer_size == 0 || uap->buffer == 0) {
10078 NECPLOG0(LOG_ERR, "necp_client_copy_agent bad input");
10079 error = EINVAL;
10080 goto done;
10081 }
10082
10083 error = copyin(uap->client_id, agent_uuid, sizeof(uuid_t));
10084 if (error) {
10085 NECPLOG(LOG_ERR, "necp_client_copy_agent copyin agent_uuid error (%d)", error);
10086 goto done;
10087 }
10088
10089 error = netagent_copyout(agent_uuid, uap->buffer, buffer_size);
10090 if (error) {
10091 // netagent_copyout already logs appropriate errors
10092 goto done;
10093 }
10094 done:
10095 *retval = error;
10096
10097 return error;
10098 }
10099
10100 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_agent_use(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)10101 necp_client_agent_use(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
10102 {
10103 int error = 0;
10104 struct necp_client *client = NULL;
10105 uuid_t client_id;
10106 struct necp_agent_use_parameters parameters = {};
10107 const size_t buffer_size = uap->buffer_size;
10108
10109 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
10110 buffer_size != sizeof(parameters) || uap->buffer == 0) {
10111 error = EINVAL;
10112 goto done;
10113 }
10114
10115 error = copyin(uap->client_id, client_id, sizeof(uuid_t));
10116 if (error) {
10117 NECPLOG(LOG_ERR, "Copyin client_id error (%d)", error);
10118 goto done;
10119 }
10120
10121 error = copyin(uap->buffer, ¶meters, buffer_size);
10122 if (error) {
10123 NECPLOG(LOG_ERR, "Parameters copyin error (%d)", error);
10124 goto done;
10125 }
10126
10127 NECP_FD_LOCK(fd_data);
10128 client = necp_client_fd_find_client_and_lock(fd_data, client_id);
10129 if (client != NULL) {
10130 error = netagent_use(parameters.agent_uuid, ¶meters.out_use_count);
10131 NECP_CLIENT_UNLOCK(client);
10132 } else {
10133 error = ENOENT;
10134 }
10135
10136 NECP_FD_UNLOCK(fd_data);
10137
10138 if (error == 0) {
10139 error = copyout(¶meters, uap->buffer, buffer_size);
10140 if (error) {
10141 NECPLOG(LOG_ERR, "Parameters copyout error (%d)", error);
10142 goto done;
10143 }
10144 }
10145
10146 done:
10147 *retval = error;
10148
10149 return error;
10150 }
10151
10152 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_acquire_agent_token(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)10153 necp_client_acquire_agent_token(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
10154 {
10155 int error = 0;
10156 uuid_t agent_uuid = {};
10157 const size_t buffer_size = uap->buffer_size;
10158
10159 *retval = 0;
10160
10161 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
10162 buffer_size == 0 || uap->buffer == 0) {
10163 NECPLOG0(LOG_ERR, "necp_client_copy_agent bad input");
10164 error = EINVAL;
10165 goto done;
10166 }
10167
10168 error = copyin(uap->client_id, agent_uuid, sizeof(uuid_t));
10169 if (error) {
10170 NECPLOG(LOG_ERR, "necp_client_copy_agent copyin agent_uuid error (%d)", error);
10171 goto done;
10172 }
10173
10174 error = netagent_acquire_token(agent_uuid, uap->buffer, buffer_size, retval);
10175 done:
10176 return error;
10177 }
10178
/*
 * Copy a filled-in struct necp_interface_details for the interface index
 * passed in uap->client_id out to uap->buffer. Gathers name, flags,
 * signatures, netmask/broadcast, radio type, and per-family default-route
 * reachability for the interface.
 *
 * Locking: rnh_lock is held across the whole lookup because the scoped
 * route lookups below require it; the ifnet head lock and per-interface
 * lock are taken within it.
 */
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	u_int32_t interface_index = 0;
	struct necp_interface_details interface_details = {};

	// client_id doubles as the interface index for this action.
	if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
	    uap->buffer_size < sizeof(interface_details) ||
	    uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_copy_interface bad input");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, &interface_index, sizeof(u_int32_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_copy_interface copyin interface_index error (%d)", error);
		goto done;
	}

	if (interface_index == 0) {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_copy_interface bad interface_index (%d)", interface_index);
		goto done;
	}

	lck_mtx_lock(rnh_lock);
	ifnet_head_lock_shared();
	ifnet_t interface = NULL;
	// Bounds-check the index before dereferencing the ifindex table.
	if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
		interface = ifindex2ifnet[interface_index];
	}

	if (interface != NULL) {
		if (interface->if_xname != NULL) {
			strlcpy((char *)&interface_details.name, interface->if_xname, sizeof(interface_details.name));
		}
		interface_details.index = interface->if_index;
		interface_details.generation = ifnet_get_generation(interface);
		if (interface->if_delegated.ifp != NULL) {
			interface_details.delegate_index = interface->if_delegated.ifp->if_index;
		}
		interface_details.functional_type = if_functional_type(interface, TRUE);
		// Translate interface properties into NECP_INTERFACE_FLAG_* bits.
		if (IFNET_IS_EXPENSIVE(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_EXPENSIVE;
		}
		if (IFNET_IS_CONSTRAINED(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_CONSTRAINED;
		}
		if (IFNET_IS_ULTRA_CONSTRAINED(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_ULTRA_CONSTRAINED;
		}
		if ((interface->if_eflags & IFEF_TXSTART) == IFEF_TXSTART) {
			interface_details.flags |= NECP_INTERFACE_FLAG_TXSTART;
		}
		if ((interface->if_eflags & IFEF_NOACKPRI) == IFEF_NOACKPRI) {
			interface_details.flags |= NECP_INTERFACE_FLAG_NOACKPRI;
		}
		if ((interface->if_eflags & IFEF_3CA) == IFEF_3CA) {
			interface_details.flags |= NECP_INTERFACE_FLAG_3CARRIERAGG;
		}
		if (IFNET_IS_LOW_POWER(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_IS_LOW_POWER;
		}
		if (interface->if_xflags & IFXF_MPK_LOG) {
			interface_details.flags |= NECP_INTERFACE_FLAG_MPK_LOG;
		}
		if (interface->if_flags & IFF_MULTICAST) {
			interface_details.flags |= NECP_INTERFACE_FLAG_SUPPORTS_MULTICAST;
		}
		if (IS_INTF_CLAT46(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_HAS_NAT64;
		}
		if (interface->if_xflags & IFXF_LOW_POWER_WAKE) {
			interface_details.flags |= NECP_INTERFACE_FLAG_LOW_POWER_WAKE;
		}
		interface_details.l4s_mode = interface->if_l4s_mode;
		interface_details.mtu = interface->if_mtu;
#if SKYWALK
		fsw_get_tso_capabilities(interface, &interface_details.tso_max_segment_size_v4,
		    &interface_details.tso_max_segment_size_v6);

		interface_details.hwcsum_flags = interface->if_hwassist & IFNET_CHECKSUMF;
#endif /* SKYWALK */

		// Network signatures: a failed fetch is reported as length 0.
		u_int8_t ipv4_signature_len = sizeof(interface_details.ipv4_signature.signature);
		u_int16_t ipv4_signature_flags;
		if (ifnet_get_netsignature(interface, AF_INET, &ipv4_signature_len, &ipv4_signature_flags,
		    (u_int8_t *)&interface_details.ipv4_signature) != 0) {
			ipv4_signature_len = 0;
		}
		interface_details.ipv4_signature.signature_len = ipv4_signature_len;

		// Check for default scoped routes for IPv4 and IPv6
		union necp_sockaddr_union default_address;
		struct rtentry *v4Route = NULL;
		memset(&default_address, 0, sizeof(default_address));
		default_address.sa.sa_family = AF_INET;
		default_address.sa.sa_len = sizeof(struct sockaddr_in);
		v4Route = rtalloc1_scoped_locked(SA(&default_address), 0, 0,
		    interface->if_index);
		if (v4Route != NULL) {
			// A CLAT46 egress interface doesn't count as native IPv4.
			if (v4Route->rt_ifp != NULL && !IS_INTF_CLAT46(v4Route->rt_ifp)) {
				interface_details.flags |= NECP_INTERFACE_FLAG_IPV4_ROUTABLE;
			}
			rtfree_locked(v4Route);
			v4Route = NULL;
		}

		struct rtentry *v6Route = NULL;
		memset(&default_address, 0, sizeof(default_address));
		default_address.sa.sa_family = AF_INET6;
		default_address.sa.sa_len = sizeof(struct sockaddr_in6);
		v6Route = rtalloc1_scoped_locked(SA(&default_address), 0, 0,
		    interface->if_index);
		if (v6Route != NULL) {
			if (v6Route->rt_ifp != NULL) {
				interface_details.flags |= NECP_INTERFACE_FLAG_IPV6_ROUTABLE;
			}
			rtfree_locked(v6Route);
			v6Route = NULL;
		}

		u_int8_t ipv6_signature_len = sizeof(interface_details.ipv6_signature.signature);
		u_int16_t ipv6_signature_flags;
		if (ifnet_get_netsignature(interface, AF_INET6, &ipv6_signature_len, &ipv6_signature_flags,
		    (u_int8_t *)&interface_details.ipv6_signature) != 0) {
			ipv6_signature_len = 0;
		}
		interface_details.ipv6_signature.signature_len = ipv6_signature_len;

		// Scan interface addresses for IPv4 netmask/broadcast.
		// Note: if multiple IPv4 addresses exist, the last one wins.
		ifnet_lock_shared(interface);
		struct ifaddr * __single ifa = NULL;
		TAILQ_FOREACH(ifa, &interface->if_addrhead, ifa_link) {
			IFA_LOCK(ifa);
			if (ifa->ifa_addr->sa_family == AF_INET) {
				interface_details.flags |= NECP_INTERFACE_FLAG_HAS_NETMASK;
				interface_details.ipv4_netmask = (ifatoia(ifa))->ia_sockmask.sin_addr.s_addr;
				if (interface->if_flags & IFF_BROADCAST) {
					interface_details.flags |= NECP_INTERFACE_FLAG_HAS_BROADCAST;
					interface_details.ipv4_broadcast = (ifatoia(ifa))->ia_broadaddr.sin_addr.s_addr;
				}
			}
			IFA_UNLOCK(ifa);
		}

		// Fall back to the delegate's radio type when this interface
		// doesn't report one itself.
		interface_details.radio_type = interface->if_radio_type;
		if (interface_details.radio_type == 0 && interface->if_delegated.ifp) {
			interface_details.radio_type = interface->if_delegated.ifp->if_radio_type;
		}
		ifnet_lock_done(interface);
	}

	ifnet_head_done();
	lck_mtx_unlock(rnh_lock);

	// If the client is using an older version of the struct, copy that length
	error = copyout(&interface_details, uap->buffer, sizeof(interface_details));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_copy_interface copyout error (%d)", error);
		goto done;
	}
done:
	*retval = error;

	return error;
}
10347
10348 #if SKYWALK
10349
/*
 * Given an interface index (in uap->client_id) and a remote sockaddr
 * (in uap->buffer), select the local address that would be used to reach
 * that destination over the interface and copy it back into uap->buffer.
 */
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_get_interface_address(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	u_int32_t interface_index = IFSCOPE_NONE;
	struct sockaddr_storage address = {};
	const size_t buffer_size = uap->buffer_size;

	// The buffer must hold at least a sockaddr_in and at most a full
	// sockaddr_storage; client_id carries the interface index.
	if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
	    buffer_size < sizeof(struct sockaddr_in) ||
	    buffer_size > sizeof(struct sockaddr_storage) ||
	    uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_get_interface_address bad input");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, &interface_index, sizeof(u_int32_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_get_interface_address copyin interface_index error (%d)", error);
		goto done;
	}

	if (interface_index == IFSCOPE_NONE) {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_get_interface_address bad interface_index (%d)", interface_index);
		goto done;
	}

	error = copyin(uap->buffer, &address, buffer_size);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_get_interface_address copyin address error (%d)", error);
		goto done;
	}

	// Only IPv4/IPv6 destinations are supported.
	if (address.ss_family != AF_INET && address.ss_family != AF_INET6) {
		error = EINVAL;
		NECPLOG(LOG_ERR, "necp_client_get_interface_address invalid address family (%u)", address.ss_family);
		goto done;
	}

	// The declared sockaddr length must match exactly what was copied in.
	if (address.ss_len != buffer_size) {
		error = EINVAL;
		NECPLOG(LOG_ERR, "necp_client_get_interface_address invalid address length (%u)", address.ss_len);
		goto done;
	}

	ifnet_head_lock_shared();
	ifnet_t ifp = NULL;
	if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
		ifp = ifindex2ifnet[interface_index];
	}
	ifnet_head_done();
	// NOTE(review): ifp is used below after dropping the ifnet head lock
	// without taking an explicit reference — verify that detach cannot
	// race with this lookup path.
	if (ifp == NULL) {
		error = ENOENT;
		NECPLOG0(LOG_ERR, "necp_client_get_interface_address no matching interface found");
		goto done;
	}

	// Scoped route lookup toward the destination; holds a route reference
	// that must be released after local-address selection.
	struct rtentry *rt = rtalloc1_scoped(SA(&address), 0, 0, interface_index);
	if (rt == NULL) {
		error = EINVAL;
		NECPLOG0(LOG_ERR, "necp_client_get_interface_address route lookup failed");
		goto done;
	}

	uint32_t gencount = 0;
	struct sockaddr_storage local_address = {};
	error = flow_route_select_laddr((union sockaddr_in_4_6 *)&local_address,
	    (union sockaddr_in_4_6 *)&address, ifp, rt, &gencount, 1);
	rtfree(rt);
	rt = NULL;

	if (error) {
		NECPLOG(LOG_ERR, "necp_client_get_interface_address local address selection failed (%d)", error);
		goto done;
	}

	// Never copy out more than the caller's buffer can hold.
	if (local_address.ss_len > buffer_size) {
		error = EMSGSIZE;
		NECPLOG(LOG_ERR, "necp_client_get_interface_address local address too long for buffer (%u)",
		    local_address.ss_len);
		goto done;
	}

	error = copyout(&local_address, uap->buffer, local_address.ss_len);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_get_interface_address copyout error (%d)", error);
		goto done;
	}
done:
	*retval = error;

	return error;
}
10445
10446 extern const char *proc_name_address(void *p);
10447
10448 int
necp_stats_ctor(struct skmem_obj_info * oi,struct skmem_obj_info * oim,void * arg,uint32_t skmflag)10449 necp_stats_ctor(struct skmem_obj_info *oi, struct skmem_obj_info *oim,
10450 void *arg, uint32_t skmflag)
10451 {
10452 #pragma unused(arg, skmflag)
10453 struct necp_all_kstats * __single kstats = SKMEM_OBJ_ADDR(oi);
10454
10455 ASSERT(oim != NULL && SKMEM_OBJ_ADDR(oim) != NULL);
10456 ASSERT(SKMEM_OBJ_SIZE(oi) == SKMEM_OBJ_SIZE(oim));
10457
10458 kstats->necp_stats_ustats = SKMEM_OBJ_ADDR(oim);
10459
10460 return 0;
10461 }
10462
10463 int
necp_stats_dtor(void * addr,void * arg)10464 necp_stats_dtor(void *addr, void *arg)
10465 {
10466 #pragma unused(addr, arg)
10467 struct necp_all_kstats * __single kstats = addr;
10468
10469 kstats->necp_stats_ustats = NULL;
10470
10471 return 0;
10472 }
10473
// Link a stats arena onto the fd's arena list, mark it attached, and
// take a list reference on it.  Caller must hold the fd lock; the
// arena must not already be on a list.
static void
necp_fd_insert_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai)
{
	NECP_FD_ASSERT_LOCKED(fd_data);
	VERIFY(!(nai->nai_flags & NAIF_ATTACHED));
	VERIFY(nai->nai_chain.le_next == NULL && nai->nai_chain.le_prev == NULL);

	LIST_INSERT_HEAD(&fd_data->stats_arena_list, nai, nai_chain);
	nai->nai_flags |= NAIF_ATTACHED;
	necp_arena_info_retain(nai); // for the list
}
10485
// Unlink a stats arena from the fd's arena list, clear its attached
// state, and drop the list reference (which may free the arena).
// Caller must hold the fd lock.
static void
necp_fd_remove_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai)
{
#pragma unused(fd_data)
	NECP_FD_ASSERT_LOCKED(fd_data);
	VERIFY(nai->nai_flags & NAIF_ATTACHED);
	VERIFY(nai->nai_use_count >= 1);

	LIST_REMOVE(nai, nai_chain);
	nai->nai_flags &= ~NAIF_ATTACHED;
	nai->nai_chain.le_next = NULL;
	nai->nai_chain.le_prev = NULL;
	necp_arena_info_release(nai); // for the list
}
10500
// Redirect the fd's currently-active stats arena (if any) so the
// process's mapping of it is made defunct, and detach it from the
// active slot.  Returns the arena that was redirected, or NULL when
// there was no active arena.  Caller must hold the fd lock.
static struct necp_arena_info *
necp_fd_mredirect_stats_arena(struct necp_fd_data *fd_data, struct proc *proc)
{
	struct necp_arena_info *nai, *nai_ret = NULL;

	NECP_FD_ASSERT_LOCKED(fd_data);

	// Redirect currently-active stats arena and remove it from the active state;
	// upon process resumption, new flow request would trigger the creation of
	// another active arena.
	if ((nai = fd_data->stats_arena_active) != NULL) {
		boolean_t need_defunct = FALSE;

		// The active arena cannot already be redirected/defunct; it is
		// held by both fd_data and the arena list, hence use count >= 2.
		ASSERT(!(nai->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT)));
		VERIFY(nai->nai_use_count >= 2);
		ASSERT(nai->nai_arena != NULL);
		ASSERT(nai->nai_mmap.ami_mapref != NULL);

		int err = skmem_arena_mredirect(nai->nai_arena, &nai->nai_mmap, proc, &need_defunct);
		VERIFY(err == 0);
		// must be TRUE since we don't mmap the arena more than once
		VERIFY(need_defunct == TRUE);

		nai->nai_flags |= NAIF_REDIRECT;
		nai_ret = nai; // return to caller

		necp_arena_info_release(nai); // for fd_data
		fd_data->stats_arena_active = nai = NULL;
	}

#if (DEVELOPMENT || DEBUG)
	// make sure this list now contains nothing but redirected/defunct arenas
	LIST_FOREACH(nai, &fd_data->stats_arena_list, nai_chain) {
		ASSERT(nai->nai_use_count >= 1);
		ASSERT(nai->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT));
	}
#endif /* (DEVELOPMENT || DEBUG) */

	return nai_ret;
}
10541
10542 static void
necp_arena_info_retain(struct necp_arena_info * nai)10543 necp_arena_info_retain(struct necp_arena_info *nai)
10544 {
10545 nai->nai_use_count++;
10546 VERIFY(nai->nai_use_count != 0);
10547 }
10548
10549 static void
necp_arena_info_release(struct necp_arena_info * nai)10550 necp_arena_info_release(struct necp_arena_info *nai)
10551 {
10552 VERIFY(nai->nai_use_count > 0);
10553 if (--nai->nai_use_count == 0) {
10554 necp_arena_info_free(nai);
10555 }
10556 }
10557
10558 static struct necp_arena_info *
necp_arena_info_alloc(void)10559 necp_arena_info_alloc(void)
10560 {
10561 return zalloc_flags(necp_arena_info_zone, Z_WAITOK | Z_ZERO);
10562 }
10563
// Final teardown once the last reference is dropped: unmap and release
// the underlying skmem arena (if one was created), then free the
// tracking structure.  The arena info must be fully quiesced: off any
// list and with a zero use count.
static void
necp_arena_info_free(struct necp_arena_info *nai)
{
	VERIFY(nai->nai_chain.le_next == NULL && nai->nai_chain.le_prev == NULL);
	VERIFY(nai->nai_use_count == 0);

	// NOTE: destroying the arena requires that all outstanding objects
	// that were allocated have been freed, else it will assert.
	if (nai->nai_arena != NULL) {
		skmem_arena_munmap(nai->nai_arena, &nai->nai_mmap);
		skmem_arena_release(nai->nai_arena);
		OSDecrementAtomic(&necp_arena_count);
		nai->nai_arena = NULL;
		nai->nai_roff = 0;
	}

	// the unmap above must have cleared all mapping state
	ASSERT(nai->nai_arena == NULL);
	ASSERT(nai->nai_mmap.ami_mapref == NULL);
	ASSERT(nai->nai_mmap.ami_arena == NULL);
	ASSERT(nai->nai_mmap.ami_maptask == TASK_NULL);

	zfree(necp_arena_info_zone, nai);
}
10587
// Create a new stats arena for the process owning fd_data: a pair of
// mirrored ustats (user-visible) and kstats (kernel-only) regions,
// sized for obj_cnt objects of obj_size bytes, mapped into the task of
// p.  On success the new arena becomes fd_data->stats_arena_active and
// is also linked into the fd's arena list.  Caller must hold the fd
// lock and have no active arena.
static int
necp_arena_create(struct necp_fd_data *fd_data, size_t obj_size, size_t obj_cnt, struct proc *p)
{
	struct skmem_region_params srp_ustats = {};
	struct skmem_region_params srp_kstats = {};
	struct necp_arena_info *nai;
	char name[32];
	const char *__null_terminated name_ptr = NULL;
	int error = 0;

	NECP_FD_ASSERT_LOCKED(fd_data);
	ASSERT(fd_data->stats_arena_active == NULL);
	ASSERT(p != PROC_NULL);
	ASSERT(proc_pid(p) == fd_data->proc_pid);

	// inherit the default parameters for the stats region
	srp_ustats = *skmem_get_default(SKMEM_REGION_USTATS);
	srp_kstats = *skmem_get_default(SKMEM_REGION_KSTATS);

	// enable multi-segment mode
	srp_ustats.srp_cflags &= ~SKMEM_REGION_CR_MONOLITHIC;
	srp_kstats.srp_cflags &= ~SKMEM_REGION_CR_MONOLITHIC;

	// configure and adjust the region parameters; both regions use the
	// same object count and size so ustats/kstats objects mirror 1:1
	srp_ustats.srp_r_obj_cnt = srp_kstats.srp_r_obj_cnt = obj_cnt;
	srp_ustats.srp_r_obj_size = srp_kstats.srp_r_obj_size = obj_size;
	skmem_region_params_config(&srp_ustats);
	skmem_region_params_config(&srp_kstats);

	nai = necp_arena_info_alloc();

	nai->nai_proc_pid = fd_data->proc_pid;
	// arena name is "stats-<generation>.<procname>.<pid>" for debugging
	name_ptr = tsnprintf(name, sizeof(name), "stats-%u.%s.%d", fd_data->stats_arena_gencnt, proc_name_address(p), fd_data->proc_pid);
	nai->nai_arena = skmem_arena_create_for_necp(name_ptr, &srp_ustats, &srp_kstats, &error);
	ASSERT(nai->nai_arena != NULL || error != 0);
	if (error != 0) {
		NECPLOG(LOG_ERR, "failed to create stats arena for pid %d\n", fd_data->proc_pid);
	} else {
		OSIncrementAtomic(&necp_arena_count);

		// Get region offsets from base of mmap span; the arena
		// doesn't need to be mmap'd at this point, since we simply
		// compute the relative offset.
		nai->nai_roff = skmem_arena_get_region_offset(nai->nai_arena, SKMEM_REGION_USTATS);

		// map to the task/process; upon success, the base address of the region
		// will be returned in nai_mmap.ami_mapaddr; this can be communicated to
		// the process.
		error = skmem_arena_mmap(nai->nai_arena, p, &nai->nai_mmap);
		if (error != 0) {
			NECPLOG(LOG_ERR, "failed to map stats arena for pid %d\n", fd_data->proc_pid);
		}
	}

	if (error == 0) {
		fd_data->stats_arena_active = nai;
		necp_arena_info_retain(nai); // for fd_data
		necp_fd_insert_stats_arena(fd_data, nai);
		++fd_data->stats_arena_gencnt;
	} else {
		// tears down the partially-created arena (including any mmap)
		necp_arena_info_free(nai);
	}

	return error;
}
10653
// Allocate one kstats object (and implicitly its mirrored ustats
// object) from the fd's currently-active stats arena.  On success the
// out parameters receive: the arena used, the kernel address of the
// kstats object, and the object's offset relative to the arena's mmap
// base (the ustats mirror lives at the same offset in the process
// mapping).  Caller must hold the fd lock and have an active arena.
// Returns ENOMEM if the cache allocation fails.
static int
necp_arena_stats_obj_alloc(struct necp_fd_data *fd_data,
    mach_vm_offset_t *off,
    struct necp_arena_info **stats_arena,
    void **kstats_kaddr,
    boolean_t cansleep)
{
	struct skmem_cache *kstats_cp = NULL;
	struct skmem_obj_info kstats_oi = {};
	uint32_t ustats_obj_sz = 0;
	void *__sized_by(ustats_obj_sz) ustats_obj = NULL;
	uint32_t kstats_obj_sz = 0;
	void *__sized_by(kstats_obj_sz) kstats_obj = NULL;
	void * __indexable kstats_obj_tmp = NULL;
	struct necp_all_kstats * __single kstats = NULL;

	ASSERT(off != NULL);
	ASSERT(stats_arena != NULL && *stats_arena == NULL);
	ASSERT(kstats_kaddr != NULL && *kstats_kaddr == NULL);

	NECP_FD_ASSERT_LOCKED(fd_data);
	ASSERT(fd_data->stats_arena_active != NULL);
	ASSERT(fd_data->stats_arena_active->nai_arena != NULL);

	kstats_cp = skmem_arena_necp(fd_data->stats_arena_active->nai_arena)->arc_kstats_cache;
	if ((kstats_obj_tmp = skmem_cache_alloc(kstats_cp, (cansleep ? SKMEM_SLEEP : SKMEM_NOSLEEP))) == NULL) {
		return ENOMEM;
	}
	skmem_cache_get_obj_info(kstats_cp, kstats_obj_tmp, &kstats_oi, NULL);
	ASSERT(SKMEM_OBJ_SIZE(&kstats_oi) >= sizeof(struct necp_all_stats));
	kstats_obj = kstats_obj_tmp;
	kstats_obj_sz = SKMEM_OBJ_SIZE(&kstats_oi);

	// necp_stats_ctor stashed the mirrored ustats object's address in
	// the kstats object; the two regions use equal object sizes (see
	// the ctor's assertions), so the kstats size bounds the ustats view.
	kstats = (struct necp_all_kstats*)kstats_obj;
	ustats_obj = __unsafe_forge_bidi_indexable(uint8_t *, kstats->necp_stats_ustats, kstats_obj_sz);
	ustats_obj_sz = kstats_obj_sz;

	// start both the user-visible stats and the kernel's common copy
	// from a clean slate
	bzero(ustats_obj, ustats_obj_sz);
	bzero(&kstats->necp_stats_comm, sizeof(struct necp_all_stats));
	*stats_arena = fd_data->stats_arena_active;
	*kstats_kaddr = kstats_obj;
	// kstats and ustats are mirrored and have the same offset
	*off = fd_data->stats_arena_active->nai_roff + SKMEM_OBJ_ROFF(&kstats_oi);

	return 0;
}
10700
// Return a flow's kstats object to its arena's kstats cache and clear
// the caller's kernel-address and user-address bookkeeping.  Caller
// must hold the fd lock.
static void
necp_arena_stats_obj_free(struct necp_fd_data *fd_data, struct necp_arena_info *stats_arena, void **kstats_kaddr, mach_vm_address_t *ustats_uaddr)
{
#pragma unused(fd_data)
	NECP_FD_ASSERT_LOCKED(fd_data);

	ASSERT(stats_arena != NULL);
	ASSERT(stats_arena->nai_arena != NULL);
	ASSERT(kstats_kaddr != NULL && *kstats_kaddr != NULL);
	ASSERT(ustats_uaddr != NULL);

	skmem_cache_free(skmem_arena_necp(stats_arena->nai_arena)->arc_kstats_cache, *kstats_kaddr);
	*kstats_kaddr = NULL;
	*ustats_uaddr = 0;
}
10716
10717 // This routine returns the KVA of the sysctls object, as well as the
10718 // offset of that object relative to the mmap base address for the
10719 // task/process.
10720 static void *
necp_arena_sysctls_obj(struct necp_fd_data * fd_data,mach_vm_offset_t * off,size_t * size)10721 necp_arena_sysctls_obj(struct necp_fd_data *fd_data, mach_vm_offset_t *off, size_t *size)
10722 {
10723 void * __single objaddr;
10724
10725 NECP_FD_ASSERT_LOCKED(fd_data);
10726 ASSERT(fd_data->sysctl_arena != NULL);
10727
10728 // kernel virtual address of the sysctls object
10729 objaddr = skmem_arena_system_sysctls_obj_addr(fd_data->sysctl_arena);
10730 ASSERT(objaddr != NULL);
10731
10732 // Return the relative offset of the sysctls object; there is
10733 // only 1 object in the entire sysctls region, and therefore the
10734 // object's offset is simply the region's offset in the arena.
10735 // (sysctl_mmap.ami_mapaddr + offset) is the address of this object
10736 // in the task/process.
10737 if (off != NULL) {
10738 *off = fd_data->system_sysctls_roff;
10739 }
10740
10741 if (size != NULL) {
10742 *size = skmem_arena_system_sysctls_obj_size(fd_data->sysctl_arena);
10743 ASSERT(*size != 0);
10744 }
10745
10746 return objaddr;
10747 }
10748
// Tear down the fd's stats arenas.  When closing, references are
// dropped unconditionally; when reaping (closing == FALSE) only idle
// arenas — ones no longer referenced by any clients/flows — are torn
// down.  Caller must hold the fd lock.
static void
necp_stats_arenas_destroy(struct necp_fd_data *fd_data, boolean_t closing)
{
	struct necp_arena_info *nai, *nai_tmp;

	NECP_FD_ASSERT_LOCKED(fd_data);

	// If reaping (not closing), release reference only for idle active arena; the reference
	// count must be 2 by now, when it's not being referred to by any clients/flows.
	if ((nai = fd_data->stats_arena_active) != NULL && (closing || nai->nai_use_count == 2)) {
		VERIFY(nai->nai_use_count >= 2);
		necp_arena_info_release(nai); // for fd_data
		fd_data->stats_arena_active = NULL;
	}

	// clean up any defunct arenas left in the list
	LIST_FOREACH_SAFE(nai, &fd_data->stats_arena_list, nai_chain, nai_tmp) {
		// If reaping, release reference if the list holds the last one
		if (closing || nai->nai_use_count == 1) {
			VERIFY(nai->nai_use_count >= 1);
			// callee unchains nai (and may free it)
			necp_fd_remove_stats_arena(fd_data, nai);
		}
	}
}
10774
// Unmap and release the fd's sysctls arena, if it exists, and reset the
// associated offset bookkeeping.  Caller must hold the fd lock.
static void
necp_sysctl_arena_destroy(struct necp_fd_data *fd_data)
{
	NECP_FD_ASSERT_LOCKED(fd_data);

	// NOTE: destroying the arena requires that all outstanding objects
	// that were allocated have been freed, else it will assert.
	if (fd_data->sysctl_arena != NULL) {
		skmem_arena_munmap(fd_data->sysctl_arena, &fd_data->sysctl_mmap);
		skmem_arena_release(fd_data->sysctl_arena);
		OSDecrementAtomic(&necp_sysctl_arena_count);
		fd_data->sysctl_arena = NULL;
		fd_data->system_sysctls_roff = 0;
	}
}
10790
10791 static int
necp_arena_initialize(struct necp_fd_data * fd_data,bool locked)10792 necp_arena_initialize(struct necp_fd_data *fd_data, bool locked)
10793 {
10794 int error = 0;
10795 size_t stats_obj_size = MAX(sizeof(struct necp_all_stats), sizeof(struct necp_all_kstats));
10796
10797 if (!locked) {
10798 NECP_FD_LOCK(fd_data);
10799 }
10800 if (fd_data->stats_arena_active == NULL) {
10801 error = necp_arena_create(fd_data, stats_obj_size,
10802 NECP_MAX_PER_PROCESS_CLIENT_STATISTICS_STRUCTS,
10803 current_proc());
10804 }
10805 if (!locked) {
10806 NECP_FD_UNLOCK(fd_data);
10807 }
10808
10809 return error;
10810 }
10811
// Lazily create the per-process sysctls arena and map it into the
// calling process; a no-op if the arena already exists.  "locked"
// indicates whether the caller already holds the fd lock.
static int
necp_sysctl_arena_initialize(struct necp_fd_data *fd_data, bool locked)
{
	int error = 0;

	if (!locked) {
		NECP_FD_LOCK(fd_data);
	}

	NECP_FD_ASSERT_LOCKED(fd_data);

	if (fd_data->sysctl_arena == NULL) {
		char name[32];
		const char *__null_terminated name_ptr = NULL;
		struct proc *p = current_proc();

		ASSERT(p != PROC_NULL);
		ASSERT(proc_pid(p) == fd_data->proc_pid);

		// arena name is "sysctl.<procname>.<pid>" for debugging
		name_ptr = tsnprintf(name, sizeof(name), "sysctl.%s.%d", proc_name_address(p), fd_data->proc_pid);
		fd_data->sysctl_arena = skmem_arena_create_for_system(name_ptr, &error);
		ASSERT(fd_data->sysctl_arena != NULL || error != 0);
		if (error != 0) {
			NECPLOG(LOG_ERR, "failed to create arena for pid %d\n", fd_data->proc_pid);
		} else {
			OSIncrementAtomic(&necp_sysctl_arena_count);

			// Get region offsets from base of mmap span; the arena
			// doesn't need to be mmap'd at this point, since we simply
			// compute the relative offset.
			fd_data->system_sysctls_roff = skmem_arena_get_region_offset(fd_data->sysctl_arena, SKMEM_REGION_SYSCTLS);

			// map to the task/process; upon success, the base address of the region
			// will be returned in nai_mmap.ami_mapaddr; this can be communicated to
			// the process.
			error = skmem_arena_mmap(fd_data->sysctl_arena, p, &fd_data->sysctl_mmap);
			if (error != 0) {
				NECPLOG(LOG_ERR, "failed to map sysctl arena for pid %d\n", fd_data->proc_pid);
				// undo the partial creation so a later call can retry
				necp_sysctl_arena_destroy(fd_data);
			}
		}
	}

	if (!locked) {
		NECP_FD_UNLOCK(fd_data);
	}

	return error;
}
10861
// Handle a NECP_CLIENT_STATISTICS_BUFREQ_ID request: allocate (at most
// once per flow registration) a shared stats object for the flow and
// hand its user-space address back via bufreq->necp_stats_bufreq_uaddr.
// The requested type/version are recorded in the shared stats header
// and, when out_header is non-NULL, echoed back to the caller.
// Returns EINVAL for a malformed request.  Caller must hold both the
// client and fd locks.
static int
necp_client_stats_bufreq(struct necp_fd_data *fd_data,
    struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    struct necp_stats_bufreq *bufreq,
    struct necp_stats_hdr *out_header)
{
	int error = 0;
	NECP_CLIENT_ASSERT_LOCKED(client);
	NECP_FD_ASSERT_LOCKED(fd_data);

	// Accept only a well-formed request: the bufreq id, a known
	// (type, version) pairing, and the expected buffer size.
	if ((bufreq->necp_stats_bufreq_id == NECP_CLIENT_STATISTICS_BUFREQ_ID) &&
	    ((bufreq->necp_stats_bufreq_type == NECP_CLIENT_STATISTICS_TYPE_TCP &&
	    bufreq->necp_stats_bufreq_ver == NECP_CLIENT_STATISTICS_TYPE_TCP_CURRENT_VER) ||
	    (bufreq->necp_stats_bufreq_type == NECP_CLIENT_STATISTICS_TYPE_UDP &&
	    bufreq->necp_stats_bufreq_ver == NECP_CLIENT_STATISTICS_TYPE_UDP_CURRENT_VER) ||
	    (bufreq->necp_stats_bufreq_type == NECP_CLIENT_STATISTICS_TYPE_QUIC &&
	    bufreq->necp_stats_bufreq_ver == NECP_CLIENT_STATISTICS_TYPE_QUIC_CURRENT_VER)) &&
	    (bufreq->necp_stats_bufreq_size == sizeof(struct necp_all_stats))) {
		// There should be one and only one stats allocation per client.
		// If asked more than once, we just repeat ourselves.
		if (flow_registration->ustats_uaddr == 0) {
			mach_vm_offset_t off;
			ASSERT(flow_registration->stats_arena == NULL);
			ASSERT(flow_registration->kstats_kaddr == NULL);
			ASSERT(flow_registration->ustats_uaddr == 0);
			error = necp_arena_stats_obj_alloc(fd_data, &off, &flow_registration->stats_arena, &flow_registration->kstats_kaddr, FALSE);
			if (error == 0) {
				// upon success, hold a reference for the client; this is released when the client is removed/closed
				ASSERT(flow_registration->stats_arena != NULL);
				necp_arena_info_retain(flow_registration->stats_arena);

				// compute user address based on mapping info and object offset
				flow_registration->ustats_uaddr = flow_registration->stats_arena->nai_mmap.ami_mapaddr + off;

				// add to collect_stats list
				NECP_STATS_LIST_LOCK_EXCLUSIVE();
				necp_client_retain_locked(client); // Add a reference to the client
				LIST_INSERT_HEAD(&necp_collect_stats_flow_list, flow_registration, collect_stats_chain);
				NECP_STATS_LIST_UNLOCK();
				necp_schedule_collect_stats_clients(FALSE);
			} else {
				// allocation failed; nothing should have been recorded
				ASSERT(flow_registration->stats_arena == NULL);
				ASSERT(flow_registration->kstats_kaddr == NULL);
			}
		}
		if (flow_registration->ustats_uaddr != 0) {
			ASSERT(error == 0);
			ASSERT(flow_registration->stats_arena != NULL);
			ASSERT(flow_registration->kstats_kaddr != NULL);

			// record the requested type/version in the shared header
			// (the tcp_stats header overlays the common stats header)
			struct necp_all_kstats *kstats = (struct necp_all_kstats *)flow_registration->kstats_kaddr;
			kstats->necp_stats_ustats->all_stats_u.tcp_stats.necp_tcp_hdr.necp_stats_type = bufreq->necp_stats_bufreq_type;
			kstats->necp_stats_ustats->all_stats_u.tcp_stats.necp_tcp_hdr.necp_stats_ver = bufreq->necp_stats_bufreq_ver;

			if (out_header) {
				out_header->necp_stats_type = bufreq->necp_stats_bufreq_type;
				out_header->necp_stats_ver = bufreq->necp_stats_bufreq_ver;
			}

			bufreq->necp_stats_bufreq_uaddr = flow_registration->ustats_uaddr;
		}
	} else {
		error = EINVAL;
	}

	return error;
}
10930
10931 static int
necp_client_stats_initial(struct necp_client_flow_registration * flow_registration,uint32_t stats_type,uint32_t stats_ver)10932 necp_client_stats_initial(struct necp_client_flow_registration *flow_registration, uint32_t stats_type, uint32_t stats_ver)
10933 {
10934 // An attempted create
10935 assert(flow_registration->stats_handler_context == NULL);
10936 assert(flow_registration->stats_arena);
10937 assert(flow_registration->ustats_uaddr);
10938 assert(flow_registration->kstats_kaddr);
10939
10940 int error = 0;
10941 uint64_t ntstat_properties = necp_find_netstat_initial_properties(flow_registration->client);
10942
10943 switch (stats_type) {
10944 case NECP_CLIENT_STATISTICS_TYPE_TCP: {
10945 if (stats_ver == NECP_CLIENT_STATISTICS_TYPE_TCP_VER_1) {
10946 flow_registration->stats_handler_context = ntstat_userland_stats_open((userland_stats_provider_context *)flow_registration,
10947 NSTAT_PROVIDER_TCP_USERLAND, ntstat_properties, necp_request_tcp_netstats, necp_find_extension_info);
10948 if (flow_registration->stats_handler_context == NULL) {
10949 error = EIO;
10950 }
10951 } else {
10952 error = ENOTSUP;
10953 }
10954 break;
10955 }
10956 case NECP_CLIENT_STATISTICS_TYPE_UDP: {
10957 if (stats_ver == NECP_CLIENT_STATISTICS_TYPE_UDP_VER_1) {
10958 flow_registration->stats_handler_context = ntstat_userland_stats_open((userland_stats_provider_context *)flow_registration,
10959 NSTAT_PROVIDER_UDP_USERLAND, ntstat_properties, necp_request_udp_netstats, necp_find_extension_info);
10960 if (flow_registration->stats_handler_context == NULL) {
10961 error = EIO;
10962 }
10963 } else {
10964 error = ENOTSUP;
10965 }
10966 break;
10967 }
10968 case NECP_CLIENT_STATISTICS_TYPE_QUIC: {
10969 if (stats_ver == NECP_CLIENT_STATISTICS_TYPE_QUIC_VER_1 && flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS) {
10970 flow_registration->stats_handler_context = ntstat_userland_stats_open((userland_stats_provider_context *)flow_registration,
10971 NSTAT_PROVIDER_QUIC_USERLAND, ntstat_properties, necp_request_quic_netstats, necp_find_extension_info);
10972 if (flow_registration->stats_handler_context == NULL) {
10973 error = EIO;
10974 }
10975 } else {
10976 error = ENOTSUP;
10977 }
10978 break;
10979 }
10980 default: {
10981 error = ENOTSUP;
10982 break;
10983 }
10984 }
10985 return error;
10986 }
10987
// Set up statistics for a flow: allocate/locate the shared stats buffer
// via necp_client_stats_bufreq(), then (if not already done) register
// the flow with ntstat and post an INIT event.  Caller must hold the
// client and fd locks with a usable (non-redirected, non-defunct)
// active stats arena.
static int
necp_stats_initialize(struct necp_fd_data *fd_data,
    struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    struct necp_stats_bufreq *bufreq)
{
	int error = 0;
	struct necp_stats_hdr stats_hdr = {};

	NECP_CLIENT_ASSERT_LOCKED(client);
	NECP_FD_ASSERT_LOCKED(fd_data);
	VERIFY(fd_data->stats_arena_active != NULL);
	VERIFY(fd_data->stats_arena_active->nai_arena != NULL);
	VERIFY(!(fd_data->stats_arena_active->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT)));

	if (bufreq == NULL) {
		return EINVAL;
	}

	// Setup stats region
	error = necp_client_stats_bufreq(fd_data, client, flow_registration, bufreq, &stats_hdr);
	if (error) {
		return error;
	}
	// Notify ntstat about new flow
	if (flow_registration->stats_handler_context == NULL) {
		error = necp_client_stats_initial(flow_registration, stats_hdr.necp_stats_type, stats_hdr.necp_stats_ver);
		if (flow_registration->stats_handler_context != NULL) {
			ntstat_userland_stats_event(flow_registration->stats_handler_context, NECP_CLIENT_STATISTICS_EVENT_INIT);
		}
		NECP_CLIENT_FLOW_LOG(client, flow_registration, "Initialized stats <error %d>", error);
	}

	return error;
}
11023
11024 static int
necp_aop_offload_stats_initialize(struct necp_client_flow_registration * flow_registration,uuid_t netagent_uuid)11025 necp_aop_offload_stats_initialize(struct necp_client_flow_registration *flow_registration,
11026 uuid_t netagent_uuid)
11027 {
11028 int error = 0;
11029
11030 struct necp_client_flow *flow = NULL;
11031 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
11032 // Verify that the client nexus agent matches
11033 if (flow->nexus &&
11034 uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) {
11035 ASSERT(flow->flow_tag != 0);
11036 ASSERT(flow->aop_offload);
11037
11038 error = net_aop_setup_flow(flow->flow_tag,
11039 true, &flow->stats_index);
11040 if (error != 0) {
11041 NECPLOG(LOG_ERR, "failed to setup aop flow "
11042 "stats area, error %d", error);
11043 } else {
11044 flow->aop_stat_index_valid = true;
11045 }
11046 break;
11047 }
11048 }
11049
11050 return error;
11051 }
11052
11053 static void
necp_aop_offload_stats_destroy(struct necp_client_flow * flow)11054 necp_aop_offload_stats_destroy(struct necp_client_flow *flow)
11055 {
11056 int error = 0;
11057
11058 if (flow->flow_tag != 0 && flow->aop_stat_index_valid) {
11059 error = net_aop_setup_flow(flow->flow_tag,
11060 false, &flow->stats_index);
11061 if (error != 0) {
11062 NECPLOG(LOG_ERR, "failed to cleanup aop offload stats with error %d", error);
11063 }
11064 flow->aop_stat_index_valid = false;
11065 }
11066 return;
11067 }
11068
11069 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_map_sysctls(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11070 necp_client_map_sysctls(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11071 {
11072 int result = 0;
11073 if (!retval) {
11074 retval = &result;
11075 }
11076
11077 do {
11078 mach_vm_address_t uaddr = 0;
11079 if (uap->buffer_size != sizeof(uaddr)) {
11080 *retval = EINVAL;
11081 break;
11082 }
11083
11084 *retval = necp_sysctl_arena_initialize(fd_data, false);
11085 if (*retval != 0) {
11086 break;
11087 }
11088
11089 mach_vm_offset_t off = 0;
11090 void * __single location = NULL;
11091 NECP_FD_LOCK(fd_data);
11092 location = necp_arena_sysctls_obj(fd_data, &off, NULL);
11093 NECP_FD_UNLOCK(fd_data);
11094
11095 if (location == NULL) {
11096 *retval = ENOENT;
11097 break;
11098 }
11099
11100 uaddr = fd_data->sysctl_mmap.ami_mapaddr + off;
11101 *retval = copyout(&uaddr, uap->buffer, sizeof(uaddr));
11102 } while (false);
11103
11104 return *retval;
11105 }
11106
#endif /* SKYWALK */
11108
// Copy the nstat counters of the client's current route out to the
// caller's buffer as a struct necp_stat_counts.  Zeroed counts are
// returned when the client has no route (or no route stats).  RTT
// fields come from either the route's nstat counters or the route
// metrics, depending on the necp_client_stats_use_route_metrics knob.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_route_statistics(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
	    uap->buffer_size < sizeof(struct necp_stat_counts) || uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_copy_route_statistics bad input");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_copy_route_statistics copyin client_id error (%d)", error);
		goto done;
	}

	// Lock
	NECP_FD_LOCK(fd_data);
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client != NULL) {
		NECP_CLIENT_ROUTE_LOCK(client);
		// snapshot the route counters under the route lock
		struct necp_stat_counts route_stats = {};
		if (client->current_route != NULL && client->current_route->rt_stats != NULL) {
			struct nstat_counts *rt_stats = client->current_route->rt_stats;
			route_stats.necp_stat_rxpackets = os_atomic_load(&rt_stats->nstat_rxpackets, relaxed);
			route_stats.necp_stat_rxbytes = os_atomic_load(&rt_stats->nstat_rxbytes, relaxed);
			route_stats.necp_stat_txpackets = os_atomic_load(&rt_stats->nstat_txpackets, relaxed);
			route_stats.necp_stat_txbytes = os_atomic_load(&rt_stats->nstat_txbytes, relaxed);
			route_stats.necp_stat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
			route_stats.necp_stat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
			route_stats.necp_stat_txretransmit = rt_stats->nstat_txretransmit;
			route_stats.necp_stat_connectattempts = rt_stats->nstat_connectattempts;
			route_stats.necp_stat_connectsuccesses = rt_stats->nstat_connectsuccesses;
			// choose RTT source per the stats sysctl
			if (__probable(necp_client_stats_use_route_metrics == 0)) {
				route_stats.necp_stat_min_rtt = rt_stats->nstat_min_rtt;
				route_stats.necp_stat_avg_rtt = rt_stats->nstat_avg_rtt;
				route_stats.necp_stat_var_rtt = rt_stats->nstat_var_rtt;
			} else {
				route_stats.necp_stat_min_rtt = client->current_route->rtt_min;
				route_stats.necp_stat_avg_rtt = client->current_route->rt_rmx.rmx_rtt;
				route_stats.necp_stat_var_rtt = client->current_route->rt_rmx.rmx_rttvar;
			}
			route_stats.necp_stat_route_flags = client->current_route->rt_flags;
		}

		// Unlock before copying out
		NECP_CLIENT_ROUTE_UNLOCK(client);
		NECP_CLIENT_UNLOCK(client);
		NECP_FD_UNLOCK(fd_data);

		error = copyout(&route_stats, uap->buffer, sizeof(route_stats));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_copy_route_statistics copyout error (%d)", error);
		}
	} else {
		// Unlock
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
	}


done:
	*retval = error;
	return error;
}
11178
// Update the kernel's TCP heuristics cache (ECN or TFO state) for a
// client's flow, using results reported back from userspace.
//
// uap->client_id holds the client UUID (also used to look up the flow
// registration); uap->buffer holds a necp_cache_buffer header describing
// the type, version, size, and user address of the actual payload.
//
// Returns 0 on success, EINVAL for malformed input or an unsupported
// buffer type/version, ENOENT if the client or flow is not found.
// *retval mirrors the return value.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_update_cache(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_update_cache copyin client_id error (%d)", error);
		goto done;
	}

	// Lock order: fd lock, then client lock, then client route lock.
	NECP_FD_LOCK(fd_data);
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client == NULL) {
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
		goto done;
	}

	struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
	if (flow_registration == NULL) {
		NECP_CLIENT_UNLOCK(client);
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
		goto done;
	}

	NECP_CLIENT_ROUTE_LOCK(client);
	// This needs to be changed when TFO/ECN is supported by multiple flows
	struct necp_client_flow *flow = LIST_FIRST(&flow_registration->flow_list);
	// The heuristics below require concrete IPv4/IPv6 endpoints.
	if (flow == NULL ||
	    (flow->remote_addr.sa.sa_family != AF_INET &&
	    flow->remote_addr.sa.sa_family != AF_INET6) ||
	    (flow->local_addr.sa.sa_family != AF_INET &&
	    flow->local_addr.sa.sa_family != AF_INET6)) {
		error = EINVAL;
		NECPLOG(LOG_ERR, "necp_client_update_cache no flow error (%d)", error);
		goto done_unlock;
	}

	necp_cache_buffer cache_buffer;
	memset(&cache_buffer, 0, sizeof(cache_buffer));

	if (uap->buffer_size != sizeof(necp_cache_buffer) ||
	    uap->buffer == USER_ADDR_NULL) {
		error = EINVAL;
		goto done_unlock;
	}

	error = copyin(uap->buffer, &cache_buffer, sizeof(cache_buffer));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_update_cache copyin cache buffer error (%d)", error);
		goto done_unlock;
	}

	if (cache_buffer.necp_cache_buf_type == NECP_CLIENT_CACHE_TYPE_ECN &&
	    cache_buffer.necp_cache_buf_ver == NECP_CLIENT_CACHE_TYPE_ECN_VER_1) {
		// ECN heuristics payload: copy in and apply to the current route's interface.
		if (cache_buffer.necp_cache_buf_size != sizeof(necp_tcp_ecn_cache) ||
		    cache_buffer.necp_cache_buf_addr == USER_ADDR_NULL) {
			error = EINVAL;
			goto done_unlock;
		}

		necp_tcp_ecn_cache ecn_cache_buffer;
		memset(&ecn_cache_buffer, 0, sizeof(ecn_cache_buffer));

		error = copyin(cache_buffer.necp_cache_buf_addr, &ecn_cache_buffer, sizeof(necp_tcp_ecn_cache));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_update_cache copyin ecn cache buffer error (%d)", error);
			goto done_unlock;
		}

		if (client->current_route != NULL && client->current_route->rt_ifp != NULL) {
			// Non-platform binaries are not trusted to report heuristics successes.
			if (!client->platform_binary) {
				ecn_cache_buffer.necp_tcp_ecn_heuristics_success = 0;
			}
			tcp_heuristics_ecn_update(&ecn_cache_buffer, client->current_route->rt_ifp,
			    (union sockaddr_in_4_6 *)&flow->local_addr);
		}
	} else if (cache_buffer.necp_cache_buf_type == NECP_CLIENT_CACHE_TYPE_TFO &&
	    cache_buffer.necp_cache_buf_ver == NECP_CLIENT_CACHE_TYPE_TFO_VER_1) {
		// TCP Fast Open heuristics payload.
		if (cache_buffer.necp_cache_buf_size != sizeof(necp_tcp_tfo_cache) ||
		    cache_buffer.necp_cache_buf_addr == USER_ADDR_NULL) {
			error = EINVAL;
			goto done_unlock;
		}

		necp_tcp_tfo_cache tfo_cache_buffer;
		memset(&tfo_cache_buffer, 0, sizeof(tfo_cache_buffer));

		error = copyin(cache_buffer.necp_cache_buf_addr, &tfo_cache_buffer, sizeof(necp_tcp_tfo_cache));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_update_cache copyin tfo cache buffer error (%d)", error);
			goto done_unlock;
		}

		if (client->current_route != NULL && client->current_route->rt_ifp != NULL) {
			// Non-platform binaries are not trusted to report heuristics successes.
			if (!client->platform_binary) {
				tfo_cache_buffer.necp_tcp_tfo_heuristics_success = 0;
			}
			tcp_heuristics_tfo_update(&tfo_cache_buffer, client->current_route->rt_ifp,
			    (union sockaddr_in_4_6 *)&flow->local_addr,
			    (union sockaddr_in_4_6 *)&flow->remote_addr);
		}
	} else {
		// Unknown cache type/version combination.
		error = EINVAL;
	}
done_unlock:
	NECP_CLIENT_ROUTE_UNLOCK(client);
	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);
done:
	*retval = error;
	return error;
}
11301
// Most results will fit into this size
// Stack-allocated staging buffer for necp_client_sign: a signable header
// plus NECP_CLIENT_ACTION_SIGN_DEFAULT_DATA_LENGTH bytes of inline data,
// avoiding a heap allocation for typical request sizes. Packed so the
// layout matches the userspace wire format byte-for-byte.
struct necp_client_signable_default {
	uuid_t client_id;
	u_int32_t sign_type;
	u_int8_t signable_data[NECP_CLIENT_ACTION_SIGN_DEFAULT_DATA_LENGTH];
} __attribute__((__packed__));
11308
11309 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_sign(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11310 necp_client_sign(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11311 {
11312 int error = 0;
11313 u_int8_t tag[NECP_CLIENT_ACTION_SIGN_TAG_LENGTH] = {};
11314 struct necp_client_signable * __indexable signable = NULL;
11315 struct necp_client_signable * __indexable allocated_signable = NULL;
11316 struct necp_client_signable_default default_signable = {};
11317 size_t tag_size = sizeof(tag);
11318
11319 const size_t signable_length = uap->client_id_len;
11320 const size_t return_tag_length = uap->buffer_size;
11321
11322 *retval = 0;
11323
11324 const bool has_resolver_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_VALIDATED_RESOLVER, 0) == 0);
11325 if (!has_resolver_entitlement) {
11326 NECPLOG0(LOG_ERR, "Process does not hold the necessary entitlement to sign resolver answers");
11327 error = EPERM;
11328 goto done;
11329 }
11330
11331 if (uap->client_id == 0 || signable_length < sizeof(*signable) || signable_length > NECP_CLIENT_ACTION_SIGN_MAX_TOTAL_LENGTH) {
11332 error = EINVAL;
11333 goto done;
11334 }
11335
11336 if (uap->buffer == 0 || return_tag_length != NECP_CLIENT_ACTION_SIGN_TAG_LENGTH) {
11337 error = EINVAL;
11338 goto done;
11339 }
11340
11341 if (signable_length <= sizeof(default_signable)) {
11342 signable = (struct necp_client_signable *)&default_signable;
11343 } else {
11344 if ((allocated_signable = (struct necp_client_signable *)kalloc_data(signable_length, Z_WAITOK | Z_ZERO)) == NULL) {
11345 NECPLOG(LOG_ERR, "necp_client_sign allocate signable %zu failed", signable_length);
11346 error = ENOMEM;
11347 goto done;
11348 }
11349 signable = allocated_signable;
11350 }
11351
11352 error = copyin(uap->client_id, signable, signable_length);
11353 if (error) {
11354 NECPLOG(LOG_ERR, "necp_client_sign copyin signable error (%d)", error);
11355 goto done;
11356 }
11357
11358 size_t data_length = 0;
11359 switch (signable->sign_type) {
11360 case NECP_CLIENT_SIGN_TYPE_RESOLVER_ANSWER:
11361 case NECP_CLIENT_SIGN_TYPE_SYSTEM_RESOLVER_ANSWER: {
11362 data_length = (sizeof(struct necp_client_host_resolver_answer) - sizeof(struct necp_client_signable));
11363 if (signable_length < (sizeof(struct necp_client_signable) + data_length)) {
11364 error = EINVAL;
11365 goto done;
11366 }
11367 struct necp_client_host_resolver_answer * __single signable_struct = (struct necp_client_host_resolver_answer *)signable;
11368 if (signable_struct->hostname_length > NECP_CLIENT_ACTION_SIGN_MAX_STRING_LENGTH ||
11369 signable_length != (sizeof(struct necp_client_signable) + data_length + signable_struct->hostname_length)) {
11370 error = EINVAL;
11371 goto done;
11372 }
11373 data_length += signable_struct->hostname_length;
11374 break;
11375 }
11376 case NECP_CLIENT_SIGN_TYPE_BROWSE_RESULT:
11377 case NECP_CLIENT_SIGN_TYPE_SYSTEM_BROWSE_RESULT: {
11378 data_length = (sizeof(struct necp_client_browse_result) - sizeof(struct necp_client_signable));
11379 if (signable_length < (sizeof(struct necp_client_signable) + data_length)) {
11380 error = EINVAL;
11381 goto done;
11382 }
11383 struct necp_client_browse_result *signable_struct = (struct necp_client_browse_result *)signable;
11384 if (signable_struct->service_length > NECP_CLIENT_ACTION_SIGN_MAX_STRING_LENGTH ||
11385 signable_length != (sizeof(struct necp_client_signable) + data_length + signable_struct->service_length)) {
11386 error = EINVAL;
11387 goto done;
11388 }
11389 data_length += signable_struct->service_length;
11390 break;
11391 }
11392 case NECP_CLIENT_SIGN_TYPE_SERVICE_RESOLVER_ANSWER:
11393 case NECP_CLIENT_SIGN_TYPE_SYSTEM_SERVICE_RESOLVER_ANSWER: {
11394 data_length = (sizeof(struct necp_client_service_resolver_answer) - sizeof(struct necp_client_signable));
11395 if (signable_length < (sizeof(struct necp_client_signable) + data_length)) {
11396 error = EINVAL;
11397 goto done;
11398 }
11399 struct necp_client_service_resolver_answer * __single signable_struct = (struct necp_client_service_resolver_answer *)signable;
11400 if (signable_struct->service_length > NECP_CLIENT_ACTION_SIGN_MAX_STRING_LENGTH ||
11401 signable_struct->hostname_length > NECP_CLIENT_ACTION_SIGN_MAX_STRING_LENGTH ||
11402 signable_length != (sizeof(struct necp_client_signable) + data_length + signable_struct->service_length + signable_struct->hostname_length)) {
11403 error = EINVAL;
11404 goto done;
11405 }
11406 data_length += signable_struct->service_length;
11407 data_length += signable_struct->hostname_length;
11408 break;
11409 }
11410 default: {
11411 NECPLOG(LOG_ERR, "necp_client_sign unknown signable type (%u)", signable->sign_type);
11412 error = EINVAL;
11413 goto done;
11414 }
11415 }
11416
11417 error = necp_sign_resolver_answer(signable->client_id, signable->sign_type,
11418 signable_get_data(signable, data_length), data_length,
11419 tag, &tag_size);
11420 if (tag_size != sizeof(tag)) {
11421 NECPLOG(LOG_ERR, "necp_client_sign unexpected tag size %zu", tag_size);
11422 error = EINVAL;
11423 goto done;
11424 }
11425 error = copyout(tag, uap->buffer, tag_size);
11426 if (error) {
11427 NECPLOG(LOG_ERR, "necp_client_sign copyout error (%d)", error);
11428 goto done;
11429 }
11430
11431 done:
11432 if (allocated_signable != NULL) {
11433 kfree_data(allocated_signable, signable_length);
11434 allocated_signable = NULL;
11435 }
11436 *retval = error;
11437 return error;
11438 }
11439
// Most results will fit into this size
// Stack-allocated staging buffer for necp_client_validate: a signature
// followed by a default-sized signable. Packed so the layout matches the
// userspace wire format byte-for-byte.
struct necp_client_validatable_default {
	struct necp_client_signature signature;
	struct necp_client_signable_default signable;
} __attribute__((__packed__));
11445
11446 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_validate(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11447 necp_client_validate(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11448 {
11449 int error = 0;
11450 struct necp_client_validatable *validatable = NULL;
11451 struct necp_client_validatable * __single allocated_validatable = NULL;
11452 struct necp_client_validatable_default default_validatable = {};
11453
11454 const size_t validatable_length = uap->client_id_len;
11455
11456 *retval = 0;
11457
11458 const bool has_resolver_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_VALIDATED_RESOLVER, 0) == 0);
11459 if (!has_resolver_entitlement) {
11460 NECPLOG0(LOG_ERR, "Process does not hold the necessary entitlement to directly validate resolver answers");
11461 error = EPERM;
11462 goto done;
11463 }
11464
11465 if (uap->client_id == 0 || validatable_length < sizeof(*validatable) ||
11466 validatable_length > (NECP_CLIENT_ACTION_SIGN_MAX_TOTAL_LENGTH + NECP_CLIENT_ACTION_SIGN_TAG_LENGTH)) {
11467 error = EINVAL;
11468 goto done;
11469 }
11470
11471 if (validatable_length <= sizeof(default_validatable)) {
11472 validatable = (struct necp_client_validatable *)&default_validatable;
11473 } else {
11474 if ((allocated_validatable = (struct necp_client_validatable *)kalloc_data(validatable_length, Z_WAITOK | Z_ZERO)) == NULL) {
11475 NECPLOG(LOG_ERR, "necp_client_validate allocate struct %zu failed", validatable_length);
11476 error = ENOMEM;
11477 goto done;
11478 }
11479 validatable = allocated_validatable;
11480 }
11481
11482 error = copyin(uap->client_id, validatable, validatable_length);
11483 if (error) {
11484 NECPLOG(LOG_ERR, "necp_client_validate copyin error (%d)", error);
11485 goto done;
11486 }
11487
11488 size_t signable_data_len = validatable_length - sizeof(struct necp_client_validatable);
11489 const bool validated = necp_validate_resolver_answer(validatable->signable.client_id, validatable->signable.sign_type,
11490 signable_get_data(&validatable->signable, signable_data_len), signable_data_len,
11491 validatable->signature.signed_tag, sizeof(validatable->signature.signed_tag));
11492 if (!validated) {
11493 // Return EAUTH to indicate that the signature failed
11494 error = EAUTH;
11495 }
11496
11497 done:
11498 if (allocated_validatable != NULL) {
11499 kfree_data(allocated_validatable, validatable_length);
11500 allocated_validatable = NULL;
11501 }
11502 *retval = error;
11503 return error;
11504 }
11505
11506 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_get_signed_client_id(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11507 necp_client_get_signed_client_id(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11508 {
11509 int error = 0;
11510 *retval = 0;
11511 u_int32_t request_type = 0;
11512 struct necp_client_signed_client_id_uuid client_id = { 0 };
11513 const size_t buffer_size = uap->buffer_size;
11514 u_int8_t tag[NECP_CLIENT_ACTION_SIGN_TAG_LENGTH] = {};
11515 size_t tag_size = sizeof(tag);
11516 proc_t proc = current_proc();
11517 if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
11518 buffer_size < sizeof(struct necp_client_signed_client_id_uuid) ||
11519 uap->buffer == 0) {
11520 NECPLOG0(LOG_ERR, "necp_client_get_signed_client_id bad input");
11521 error = EINVAL;
11522 goto done;
11523 }
11524
11525 error = copyin(uap->client_id, &request_type, sizeof(u_int32_t));
11526 if (error) {
11527 NECPLOG(LOG_ERR, "necp_client_get_signed_client_id copyin request_type error (%d)", error);
11528 goto done;
11529 }
11530
11531 if (request_type != NECP_CLIENT_SIGNED_CLIENT_ID_TYPE_UUID) {
11532 error = ENOENT;
11533 NECPLOG(LOG_ERR, "necp_client_get_signed_client_id bad request_type (%d)", request_type);
11534 goto done;
11535 }
11536
11537 uuid_t application_uuid;
11538 uuid_clear(application_uuid);
11539 proc_getexecutableuuid(proc, application_uuid, sizeof(application_uuid));
11540
11541 error = necp_sign_application_id(application_uuid,
11542 NECP_CLIENT_SIGNED_CLIENT_ID_TYPE_UUID,
11543 tag, &tag_size);
11544 if (tag_size != sizeof(tag)) {
11545 NECPLOG(LOG_ERR, "necp_client_get_signed_client_id unexpected tag size %zu", tag_size);
11546 error = EINVAL;
11547 goto done;
11548 }
11549 uuid_copy(client_id.client_id, application_uuid);
11550 client_id.signature_length = tag_size;
11551 memcpy(client_id.signature_data, tag, tag_size);
11552
11553 error = copyout(&client_id, uap->buffer, sizeof(client_id));
11554 if (error != 0) {
11555 NECPLOG(LOG_ERR, "necp_client_get_signed_client_id copyout error (%d)", error);
11556 goto done;
11557 }
11558
11559 done:
11560 *retval = error;
11561 return error;
11562 }
11563
// Adopt a previously signed client ID: after verifying the caller's
// delegation entitlement and the kernel signature over the UUID, set
// the process's "responsible" UUID to the signed client ID.
//
// uap->client_id points at a u_int32_t request type; uap->buffer holds
// the struct necp_client_signed_client_id_uuid to validate and adopt.
//
// Returns 0 on success, EAUTH if the caller lacks the entitlement or the
// signature does not verify, EINVAL/ENOENT for bad input.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_set_signed_client_id(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	*retval = 0;
	u_int32_t request_type = 0;
	struct necp_client_signed_client_id_uuid client_id = { 0 };
	const size_t buffer_size = uap->buffer_size;

	// Only allow entitled processes to set the client ID.
	// Accept either the web-browser entitlement or the privileged
	// socket-delegate privilege.
	proc_t proc = current_proc();
	task_t __single task = proc_task(proc);
	bool has_delegation_entitlement = task != NULL && IOTaskHasEntitlement(task, kCSWebBrowserNetworkEntitlement);
	if (!has_delegation_entitlement) {
		has_delegation_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0);
	}
	if (!has_delegation_entitlement) {
		NECPLOG0(LOG_ERR, "necp_client_set_signed_client_id client lacks the necessary entitlement");
		error = EAUTH;
		goto done;
	}

	if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
	    buffer_size < sizeof(struct necp_client_signed_client_id_uuid) ||
	    uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_set_signed_client_id bad input");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, &request_type, sizeof(u_int32_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_set_signed_client_id copyin request_type error (%d)", error);
		goto done;
	}

	if (request_type != NECP_CLIENT_SIGNED_CLIENT_ID_TYPE_UUID) {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_set_signed_client_id bad request_type (%d)", request_type);
		goto done;
	}

	error = copyin(uap->buffer, &client_id, sizeof(struct necp_client_signed_client_id_uuid));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_set_signed_client_id copyin request error (%d)", error);
		goto done;
	}

	// NOTE(review): validation passes the full fixed-size signature buffer
	// (sizeof(signature_data)) rather than client_id.signature_length, and
	// signature_length itself is never range-checked here — confirm the
	// signature field is always exactly that size.
	const bool validated = necp_validate_application_id(client_id.client_id,
	    NECP_CLIENT_SIGNED_CLIENT_ID_TYPE_UUID,
	    client_id.signature_data, sizeof(client_id.signature_data));
	if (!validated) {
		// Return EAUTH to indicate that the signature failed
		error = EAUTH;
		NECPLOG(LOG_ERR, "necp_client_set_signed_client_id signature validation failed (%d)", error);
		goto done;
	}

	proc_setresponsibleuuid(proc, client_id.client_id, sizeof(client_id.client_id));

done:
	*retval = error;
	return error;
}
11628
11629 static int
necp_client_copy_flow_stats(struct necp_client_flow_registration * flow_registration,struct necp_flow_statistics * flow_stats)11630 necp_client_copy_flow_stats(struct necp_client_flow_registration *flow_registration,
11631 struct necp_flow_statistics *flow_stats)
11632 {
11633 struct aop_flow_stats aop_flow_stats = {};
11634 int error = 0;
11635
11636 struct necp_client_flow *flow = LIST_FIRST(&flow_registration->flow_list);
11637 if (flow == NULL || !flow->aop_offload || !flow->aop_stat_index_valid) {
11638 NECPLOG0(LOG_ERR, "necp_client_copy_flow_stats only supported for aop flows");
11639 return EINVAL;
11640 }
11641 error = net_aop_get_flow_stats(flow->stats_index, &aop_flow_stats);
11642 if (error != 0) {
11643 NECPLOG(LOG_ERR, "net_aop_get_flow_stats failed (%d)", error);
11644 return error;
11645 }
11646
11647 if (flow_stats->transport_proto == IPPROTO_TCP) {
11648 struct tcp_info *tcpi = &flow_stats->transport.tcpi;
11649 struct tcp_info *a_tcpi = &aop_flow_stats.transport.tcp_stats.tcp_info;
11650 memcpy(tcpi, a_tcpi, sizeof(*tcpi));
11651 }
11652
11653 return 0;
11654 }
11655
11656 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_get_flow_statistics(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11657 necp_client_get_flow_statistics(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11658 {
11659 int error = 0;
11660 uuid_t flow_id = {};
11661 struct necp_flow_statistics flow_stats = {};
11662
11663 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
11664 error = EINVAL;
11665 NECPLOG(LOG_ERR, "necp_client_remove_flow invalid client_id (length %zu)", (size_t)uap->client_id_len);
11666 goto done;
11667 }
11668
11669 error = copyin(uap->client_id, flow_id, sizeof(uuid_t));
11670 if (error) {
11671 NECPLOG(LOG_ERR, "necp_client_get_flow_statistics copyin client_id error (%d)", error);
11672 goto done;
11673 }
11674
11675 if (uap->buffer_size < sizeof(flow_stats) || uap->buffer == 0) {
11676 error = EINVAL;
11677 goto done;
11678 }
11679
11680 error = copyin(uap->buffer, &flow_stats, sizeof(flow_stats));
11681 if (error) {
11682 NECPLOG(LOG_ERR, "necp_client_get_flow_statistics copyin protocol error (%d)", error);
11683 goto done;
11684 }
11685
11686 if (flow_stats.transport_proto != IPPROTO_TCP) {
11687 NECPLOG(LOG_ERR, "necp_client_get_flow_statistics, transport proto %u not supported",
11688 flow_stats.transport_proto);
11689 error = ENOTSUP;
11690 goto done;
11691 }
11692
11693 NECP_FD_LOCK(fd_data);
11694 struct necp_client *client = NULL;
11695 struct necp_client_flow_registration *flow_registration = necp_client_fd_find_flow(fd_data, flow_id);
11696 if (flow_registration != NULL) {
11697 client = flow_registration->client;
11698 if (client != NULL) {
11699 necp_client_retain(client);
11700 }
11701 }
11702 NECP_FD_UNLOCK(fd_data);
11703
11704 if (flow_registration != NULL && client != NULL) {
11705 NECP_CLIENT_LOCK(client);
11706 if (flow_registration->client == client) {
11707 error = necp_client_copy_flow_stats(flow_registration, &flow_stats);
11708 if (error == 0) {
11709 error = copyout(&flow_stats, uap->buffer, sizeof(flow_stats));
11710 if (error != 0) {
11711 NECPLOG(LOG_ERR, "necp_client_get_flow_statistics copyout failed (%d)", error);
11712 }
11713 }
11714 }
11715
11716 necp_client_release_locked(client);
11717 NECP_CLIENT_UNLOCK(client);
11718 }
11719
11720 done:
11721 *retval = error;
11722 if (error != 0) {
11723 NECPLOG(LOG_ERR, "get flow statistics error (%d)", error);
11724 }
11725
11726 return error;
11727 }
11728
// Entry point for the necp_client_action syscall: resolve the NECP fd,
// run the MAC policy check, and dispatch to the per-action handler.
// Each handler receives (fd_data, uap, retval) and returns an errno.
int
necp_client_action(struct proc *p, struct necp_client_action_args *uap, int *retval)
{
	struct fileproc * __single fp;
	int error = 0;
	int return_value = 0;
	struct necp_fd_data * __single fd_data = NULL;

	// Takes a reference on fp; dropped at done below.
	error = necp_find_fd_data(p, uap->necp_fd, &fp, &fd_data);
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_client_action find fd error (%d)", error);
		return error;
	}

	u_int32_t action = uap->action;

#if CONFIG_MACF
	error = mac_necp_check_client_action(p, fp->fp_glob, action);
	if (error) {
		return_value = error;
		goto done;
	}
#endif /* MACF */

	switch (action) {
	case NECP_CLIENT_ACTION_ADD: {
		return_value = necp_client_add(p, fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_CLAIM: {
		return_value = necp_client_claim(p, fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_REMOVE: {
		return_value = necp_client_remove(fd_data, uap, retval);
		break;
	}
	// All four copy variants share a single handler that switches on the
	// action internally.
	case NECP_CLIENT_ACTION_COPY_PARAMETERS:
	case NECP_CLIENT_ACTION_COPY_RESULT:
	case NECP_CLIENT_ACTION_COPY_UPDATED_RESULT:
	case NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL: {
		return_value = necp_client_copy(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_LIST: {
		return_value = necp_client_list(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_ADD_FLOW: {
		return_value = necp_client_add_flow(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_REMOVE_FLOW: {
		return_value = necp_client_remove_flow(fd_data, uap, retval);
		break;
	}
#if SKYWALK
	case NECP_CLIENT_ACTION_REQUEST_NEXUS_INSTANCE: {
		return_value = necp_client_request_nexus(fd_data, uap, retval);
		break;
	}
#endif /* SKYWALK */
	case NECP_CLIENT_ACTION_AGENT: {
		return_value = necp_client_agent_action(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_AGENT: {
		return_value = necp_client_copy_agent(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_AGENT_USE: {
		return_value = necp_client_agent_use(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_ACQUIRE_AGENT_TOKEN: {
		return_value = necp_client_acquire_agent_token(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_INTERFACE: {
		return_value = necp_client_copy_interface(fd_data, uap, retval);
		break;
	}
#if SKYWALK
	case NECP_CLIENT_ACTION_GET_INTERFACE_ADDRESS: {
		return_value = necp_client_get_interface_address(fd_data, uap, retval);
		break;
	}
	// SET_STATISTICS is accepted but no longer implemented.
	case NECP_CLIENT_ACTION_SET_STATISTICS: {
		return_value = ENOTSUP;
		break;
	}
	case NECP_CLIENT_ACTION_MAP_SYSCTLS: {
		return_value = necp_client_map_sysctls(fd_data, uap, retval);
		break;
	}
#endif /* SKYWALK */
	case NECP_CLIENT_ACTION_COPY_ROUTE_STATISTICS: {
		return_value = necp_client_copy_route_statistics(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_UPDATE_CACHE: {
		return_value = necp_client_update_cache(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_CLIENT_UPDATE: {
		return_value = necp_client_copy_client_update(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_SIGN: {
		return_value = necp_client_sign(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_VALIDATE: {
		return_value = necp_client_validate(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_GET_SIGNED_CLIENT_ID: {
		return_value = necp_client_get_signed_client_id(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_SET_SIGNED_CLIENT_ID: {
		return_value = necp_client_set_signed_client_id(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_GET_FLOW_STATISTICS: {
		return_value = necp_client_get_flow_statistics(fd_data, uap, retval);
		break;
	}
	default: {
		NECPLOG(LOG_ERR, "necp_client_action unknown action (%u)", action);
		return_value = EINVAL;
		break;
	}
	}

done:
	fp_drop(p, uap->necp_fd, fp, 0);
	return return_value;
}
11868
11869 #define NECP_MAX_MATCH_POLICY_PARAMETER_SIZE 1024
11870
// necp_match_policy syscall: copy in a TLV-encoded parameter blob
// (bounded by NECP_MAX_MATCH_POLICY_PARAMETER_SIZE), run the NECP policy
// match for the calling process, and copy the aggregate result back out.
// Returns 0 on success or an errno (EINVAL, ENOMEM, or a copyin/copyout
// or policy-match failure).
int
necp_match_policy(struct proc *p, struct necp_match_policy_args *uap, int32_t *retval)
{
#pragma unused(retval)
	size_t buffer_size = 0;
	u_int8_t * __sized_by(buffer_size) parameters = NULL;
	struct necp_aggregate_result returned_result;
	int error = 0;

	if (uap == NULL) {
		error = EINVAL;
		goto done;
	}

	if (uap->parameters == 0 || uap->parameters_size == 0 || uap->parameters_size > NECP_MAX_MATCH_POLICY_PARAMETER_SIZE || uap->returned_result == 0) {
		error = EINVAL;
		goto done;
	}

	// Pointer and its __sized_by length are assigned back-to-back to keep
	// the bounds-safety annotation consistent.
	parameters = (u_int8_t *)kalloc_data(uap->parameters_size, Z_WAITOK | Z_ZERO);
	buffer_size = uap->parameters_size;
	if (parameters == NULL) {
		error = ENOMEM;
		goto done;
	}
	// Copy parameters in
	error = copyin(uap->parameters, parameters, buffer_size);
	if (error) {
		goto done;
	}

	error = necp_application_find_policy_match_internal(p, parameters, buffer_size,
	    &returned_result, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, false, false, NULL);
	if (error) {
		goto done;
	}

	// Copy return value back
	error = copyout(&returned_result, uap->returned_result, sizeof(struct necp_aggregate_result));
	if (error) {
		goto done;
	}
done:
	if (parameters != NULL) {
		kfree_data_sized_by(parameters, buffer_size);
	}
	return error;
}
11919
11920 /// Socket operations
11921
11922 static errno_t
necp_set_socket_attribute(u_int8_t * __sized_by (buffer_length)buffer,size_t buffer_length,u_int8_t type,char * __null_terminated * buffer_p,bool * single_tlv)11923 necp_set_socket_attribute(u_int8_t * __sized_by(buffer_length)buffer, size_t buffer_length, u_int8_t type, char *__null_terminated *buffer_p, bool *single_tlv)
11924 {
11925 int error = 0;
11926 int cursor = 0;
11927 size_t string_size = 0;
11928 size_t local_string_length = 0;
11929 char * __sized_by(local_string_length) local_string = NULL;
11930 u_int8_t * __indexable value = NULL;
11931 char * __indexable buffer_to_free = NULL;
11932
11933 cursor = necp_buffer_find_tlv(buffer, buffer_length, 0, type, NULL, 0);
11934 if (cursor < 0) {
11935 // This will clear out the parameter
11936 goto done;
11937 }
11938
11939 string_size = necp_buffer_get_tlv_length(buffer, buffer_length, cursor);
11940 if (single_tlv != NULL && (buffer_length == sizeof(struct necp_tlv_header) + string_size)) {
11941 *single_tlv = true;
11942 }
11943 if (string_size == 0 || string_size > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
11944 // This will clear out the parameter
11945 goto done;
11946 }
11947
11948 local_string = (char *)kalloc_data(string_size + 1, Z_WAITOK | Z_ZERO);
11949 local_string_length = string_size + 1;
11950 if (local_string == NULL) {
11951 NECPLOG(LOG_ERR, "Failed to allocate a socket attribute buffer (size %zu)", string_size);
11952 goto fail;
11953 }
11954
11955 value = necp_buffer_get_tlv_value(buffer, buffer_length, cursor, NULL);
11956 if (value == NULL) {
11957 NECPLOG0(LOG_ERR, "Failed to get socket attribute");
11958 goto fail;
11959 }
11960
11961 memcpy(local_string, value, string_size);
11962 local_string[string_size] = 0;
11963
11964 done:
11965 if (*buffer_p != NULL) {
11966 buffer_to_free = __unsafe_null_terminated_to_indexable(*buffer_p);
11967 }
11968
11969 // Protect switching of buffer pointer
11970 necp_lock_socket_attributes();
11971 if (local_string != NULL) {
11972 *buffer_p = __unsafe_null_terminated_from_indexable(local_string, &local_string[string_size]);
11973 } else {
11974 *buffer_p = NULL;
11975 }
11976 necp_unlock_socket_attributes();
11977
11978 if (buffer_to_free != NULL) {
11979 kfree_data_addr(buffer_to_free);
11980 }
11981 return 0;
11982 fail:
11983 if (local_string != NULL) {
11984 kfree_data_sized_by(local_string, local_string_length);
11985 }
11986 return error;
11987 }
11988
// Handle the NECP socket-attribute setsockopt: copy in the TLV buffer
// from userspace and update each of the inp_necp_attributes string
// fields (domain context, domain, domain owner, tracker domain, account)
// via necp_set_socket_attribute. An oversized or empty option silently
// leaves the attributes unchanged.
errno_t
necp_set_socket_attributes(struct inp_necp_attributes *attributes, struct sockopt *sopt)
{
	int error = 0;
	u_int8_t *buffer = NULL;
	bool single_tlv = false;
	size_t valsize = sopt->sopt_valsize;
	// Cap at four maximum-size attribute TLVs.
	if (valsize == 0 ||
	    valsize > ((sizeof(struct necp_tlv_header) + NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) * 4)) {
		goto done;
	}

	buffer = (u_int8_t *)kalloc_data(valsize, Z_WAITOK | Z_ZERO);
	if (buffer == NULL) {
		goto done;
	}

	error = sooptcopyin(sopt, buffer, valsize, 0);
	if (error) {
		goto done;
	}

	// If NECP_TLV_ATTRIBUTE_DOMAIN_CONTEXT is being set/cleared separately from the other attributes,
	// do not clear other attributes.
	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN_CONTEXT, &attributes->inp_domain_context, &single_tlv);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set domain context TLV for socket attributes");
		goto done;
	}
	if (single_tlv == true) {
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN, &attributes->inp_domain, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set domain TLV for socket attributes");
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN_OWNER, &attributes->inp_domain_owner, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set domain owner TLV for socket attributes");
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_TRACKER_DOMAIN, &attributes->inp_tracker_domain, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set tracker domain TLV for socket attributes");
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_ACCOUNT, &attributes->inp_account, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set account TLV for socket attributes");
		goto done;
	}

done:
	NECP_SOCKET_ATTRIBUTE_LOG("NECP ATTRIBUTES SOCKET - domain <%s> owner <%s> context <%s> tracker domain <%s> account <%s>",
	    attributes->inp_domain,
	    attributes->inp_domain_owner,
	    attributes->inp_domain_context,
	    attributes->inp_tracker_domain,
	    attributes->inp_account);

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Set on socket: Domain %s, Domain owner %s, Domain context %s, Tracker domain %s, Account %s",
		    attributes->inp_domain,
		    attributes->inp_domain_owner,
		    attributes->inp_domain_context,
		    attributes->inp_tracker_domain,
		    attributes->inp_account);
	}

	if (buffer != NULL) {
		kfree_data(buffer, valsize);
	}

	return error;
}
12069
12070 errno_t
necp_get_socket_attributes(struct inp_necp_attributes * attributes,struct sockopt * sopt)12071 necp_get_socket_attributes(struct inp_necp_attributes *attributes, struct sockopt *sopt)
12072 {
12073 int error = 0;
12074 size_t valsize = 0;
12075 u_int8_t *buffer = NULL;
12076 u_int8_t * __indexable cursor = NULL;
12077
12078 if (attributes->inp_domain != NULL) {
12079 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_domain);
12080 }
12081 if (attributes->inp_domain_owner != NULL) {
12082 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_domain_owner);
12083 }
12084 if (attributes->inp_domain_context != NULL) {
12085 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_domain_context);
12086 }
12087 if (attributes->inp_tracker_domain != NULL) {
12088 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_tracker_domain);
12089 }
12090 if (attributes->inp_account != NULL) {
12091 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_account);
12092 }
12093 if (valsize == 0) {
12094 goto done;
12095 }
12096
12097 buffer = (u_int8_t *)kalloc_data(valsize, Z_WAITOK | Z_ZERO);
12098 if (buffer == NULL) {
12099 goto done;
12100 }
12101
12102 cursor = buffer;
12103 if (attributes->inp_domain != NULL) {
12104 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN, strlen(attributes->inp_domain), __terminated_by_to_indexable(attributes->inp_domain),
12105 buffer, valsize);
12106 }
12107
12108 if (attributes->inp_domain_owner != NULL) {
12109 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN_OWNER, strlen(attributes->inp_domain_owner), __terminated_by_to_indexable(attributes->inp_domain_owner),
12110 buffer, valsize);
12111 }
12112
12113 if (attributes->inp_domain_context != NULL) {
12114 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN_CONTEXT, strlen(attributes->inp_domain_context), __terminated_by_to_indexable(attributes->inp_domain_context),
12115 buffer, valsize);
12116 }
12117
12118 if (attributes->inp_tracker_domain != NULL) {
12119 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_TRACKER_DOMAIN, strlen(attributes->inp_tracker_domain), __terminated_by_to_indexable(attributes->inp_tracker_domain),
12120 buffer, valsize);
12121 }
12122
12123 if (attributes->inp_account != NULL) {
12124 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_ACCOUNT, strlen(attributes->inp_account), __terminated_by_to_indexable(attributes->inp_account),
12125 buffer, valsize);
12126 }
12127
12128 error = sooptcopyout(sopt, buffer, valsize);
12129 if (error) {
12130 goto done;
12131 }
12132 done:
12133 if (buffer != NULL) {
12134 kfree_data(buffer, valsize);
12135 }
12136
12137 return error;
12138 }
12139
12140 int
necp_set_socket_resolver_signature(struct inpcb * inp,struct sockopt * sopt)12141 necp_set_socket_resolver_signature(struct inpcb *inp, struct sockopt *sopt)
12142 {
12143 const size_t valsize = sopt->sopt_valsize;
12144 if (valsize > NECP_CLIENT_ACTION_SIGN_MAX_TOTAL_LENGTH + NECP_CLIENT_ACTION_SIGN_TAG_LENGTH) {
12145 return EINVAL;
12146 }
12147
12148 necp_lock_socket_attributes();
12149 if (inp->inp_resolver_signature != NULL) {
12150 kfree_data_sized_by(inp->inp_resolver_signature, inp->inp_resolver_signature_length);
12151 }
12152
12153 int error = 0;
12154 if (valsize > 0) {
12155 inp->inp_resolver_signature = kalloc_data(valsize, Z_WAITOK | Z_ZERO);
12156 inp->inp_resolver_signature_length = valsize;
12157 if ((error = sooptcopyin(sopt, inp->inp_resolver_signature, valsize,
12158 valsize)) != 0) {
12159 // Free the signature buffer if the copyin failed
12160 kfree_data_sized_by(inp->inp_resolver_signature, inp->inp_resolver_signature_length);
12161 }
12162 }
12163 necp_unlock_socket_attributes();
12164
12165 return error;
12166 }
12167
12168 int
necp_get_socket_resolver_signature(struct inpcb * inp,struct sockopt * sopt)12169 necp_get_socket_resolver_signature(struct inpcb *inp, struct sockopt *sopt)
12170 {
12171 int error = 0;
12172 necp_lock_socket_attributes();
12173 if (inp->inp_resolver_signature == NULL ||
12174 inp->inp_resolver_signature_length == 0) {
12175 error = ENOENT;
12176 } else {
12177 error = sooptcopyout(sopt, inp->inp_resolver_signature,
12178 inp->inp_resolver_signature_length);
12179 }
12180 necp_unlock_socket_attributes();
12181 return error;
12182 }
12183
12184 bool
necp_socket_has_resolver_signature(struct inpcb * inp)12185 necp_socket_has_resolver_signature(struct inpcb *inp)
12186 {
12187 necp_lock_socket_attributes();
12188 bool has_signature = (inp->inp_resolver_signature != NULL && inp->inp_resolver_signature_length != 0);
12189 necp_unlock_socket_attributes();
12190 return has_signature;
12191 }
12192
/*
 * necp_socket_resolver_signature_matches_address
 * Check whether the resolver signature stored on the inpcb is a signed
 * system-resolver answer for the given address. Returns true only when the
 * stored answer matches the candidate address (family, length, port, and
 * address bytes) AND the signature validates via
 * necp_validate_resolver_answer().
 */
bool
necp_socket_resolver_signature_matches_address(struct inpcb *inp, union necp_sockaddr_union *address)
{
	bool matches_address = false;
	necp_lock_socket_attributes();
	if (inp->inp_resolver_signature != NULL && inp->inp_resolver_signature_length > 0 && address->sa.sa_len > 0) {
		struct necp_client_validatable *validatable = (struct necp_client_validatable *)inp->inp_resolver_signature;
		// Only consider blobs larger than the validatable header that are
		// typed as system resolver answers.
		if (inp->inp_resolver_signature_length > sizeof(struct necp_client_validatable) &&
		    validatable->signable.sign_type == NECP_CLIENT_SIGN_TYPE_SYSTEM_RESOLVER_ANSWER) {
			// Length of the signed payload that follows the validatable header.
			size_t data_length = inp->inp_resolver_signature_length - sizeof(struct necp_client_validatable);
			if (data_length >= (sizeof(struct necp_client_host_resolver_answer) - sizeof(struct necp_client_signable))) {
				struct necp_client_host_resolver_answer * __single answer_struct = (struct necp_client_host_resolver_answer *)&validatable->signable;
				// Local copy of the answer's sockaddr_in6 for the memcmp below.
				struct sockaddr_in6 sin6 = answer_struct->address_answer.sin6;
				// The payload must be exactly the answer struct plus its hostname,
				// and the signed address must match the candidate: same family and
				// sockaddr length, same port unless the signed port is the
				// wildcard 0, and identical IPv4 or IPv6 address bytes.
				if (data_length == (sizeof(struct necp_client_host_resolver_answer) + answer_struct->hostname_length - sizeof(struct necp_client_signable)) &&
				    answer_struct->address_answer.sa.sa_family == address->sa.sa_family &&
				    answer_struct->address_answer.sa.sa_len == address->sa.sa_len &&
				    (answer_struct->address_answer.sin.sin_port == 0 ||
				    answer_struct->address_answer.sin.sin_port == address->sin.sin_port) &&
				    ((answer_struct->address_answer.sa.sa_family == AF_INET &&
				    answer_struct->address_answer.sin.sin_addr.s_addr == address->sin.sin_addr.s_addr) ||
				    (answer_struct->address_answer.sa.sa_family == AF_INET6 &&
				    memcmp(&sin6.sin6_addr, &address->sin6.sin6_addr, sizeof(struct in6_addr)) == 0))) {
					// Address matches; now cryptographically validate the answer.
					const bool validated = necp_validate_resolver_answer(validatable->signable.client_id,
					    validatable->signable.sign_type,
					    signable_get_data(&validatable->signable, data_length), data_length,
					    validatable->signature.signed_tag, sizeof(validatable->signature.signed_tag));
					if (validated) {
						// Answer is validated
						matches_address = true;
					}
				}
			}
		}
	}
	necp_unlock_socket_attributes();
	return matches_address;
}
12231
12232 /*
12233 * necp_set_socket_domain_attributes
12234 * Called from soconnectlock/soconnectxlock to directly set the tracker domain and owner for
12235 * a newly marked tracker socket.
12236 */
12237 errno_t
necp_set_socket_domain_attributes(struct socket * so,const char * domain __null_terminated,const char * domain_owner __null_terminated)12238 necp_set_socket_domain_attributes(struct socket *so, const char *domain __null_terminated, const char *domain_owner __null_terminated)
12239 {
12240 int error = 0;
12241 struct inpcb * __single inp = NULL;
12242 size_t valsize = 0;
12243 size_t buffer_size = 0;
12244 u_int8_t * __sized_by(buffer_size) buffer = NULL;
12245 char * __indexable buffer_to_free = NULL;
12246
12247 if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
12248 error = EINVAL;
12249 goto fail;
12250 }
12251
12252 // Set domain (required)
12253
12254 valsize = strlen(domain);
12255 if (valsize == 0 || valsize > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
12256 error = EINVAL;
12257 goto fail;
12258 }
12259
12260 buffer = (u_int8_t *)kalloc_data(valsize + 1, Z_WAITOK | Z_ZERO);
12261 buffer_size = valsize + 1;
12262 if (buffer == NULL) {
12263 error = ENOMEM;
12264 goto fail;
12265 }
12266 strlcpy((char *)buffer, domain, buffer_size);
12267 buffer[valsize] = 0;
12268
12269 inp = sotoinpcb(so);
12270 // Do not overwrite a previously set domain if tracker domain is different.
12271 if (inp->inp_necp_attributes.inp_domain != NULL) {
12272 if (strlen(inp->inp_necp_attributes.inp_domain) != strlen(domain) ||
12273 strcmp(inp->inp_necp_attributes.inp_domain, domain) != 0) {
12274 buffer_to_free = (inp->inp_necp_attributes.inp_tracker_domain != NULL) ? __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_tracker_domain) : NULL;
12275 // Protect switching of buffer pointer
12276 necp_lock_socket_attributes();
12277 inp->inp_necp_attributes.inp_tracker_domain = __unsafe_null_terminated_from_indexable((char *)buffer, (char *)&buffer[valsize]);
12278 necp_unlock_socket_attributes();
12279 if (buffer_to_free != NULL) {
12280 kfree_data_addr(buffer_to_free);
12281 }
12282 } else {
12283 kfree_data_sized_by(buffer, buffer_size);
12284 }
12285 } else {
12286 // Protect switching of buffer pointer
12287 necp_lock_socket_attributes();
12288 inp->inp_necp_attributes.inp_domain = __unsafe_null_terminated_from_indexable((char *)buffer, (char *)&buffer[valsize]);
12289 necp_unlock_socket_attributes();
12290 }
12291 buffer = NULL;
12292 buffer_size = 0;
12293
12294 // set domain_owner (required only for tracker)
12295 if (!(so->so_flags1 & SOF1_KNOWN_TRACKER)) {
12296 goto done;
12297 }
12298
12299 valsize = strlen(domain_owner);
12300 if (valsize == 0 || valsize > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
12301 error = EINVAL;
12302 goto fail;
12303 }
12304
12305 buffer = (u_int8_t *)kalloc_data(valsize + 1, Z_WAITOK | Z_ZERO);
12306 buffer_size = valsize + 1;
12307 if (buffer == NULL) {
12308 error = ENOMEM;
12309 goto fail;
12310 }
12311 strlcpy((char *)buffer, domain_owner, buffer_size);
12312 buffer[valsize] = 0;
12313
12314 inp = sotoinpcb(so);
12315
12316 buffer_to_free = (inp->inp_necp_attributes.inp_domain_owner != NULL) ? __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_domain_owner) : NULL;
12317 // Protect switching of buffer pointer
12318 necp_lock_socket_attributes();
12319 inp->inp_necp_attributes.inp_domain_owner = __unsafe_null_terminated_from_indexable((char *)buffer, (char *)&buffer[valsize]);
12320 necp_unlock_socket_attributes();
12321 buffer = NULL;
12322 buffer_size = 0;
12323
12324 if (buffer_to_free != NULL) {
12325 kfree_data_addr(buffer_to_free);
12326 }
12327
12328 done:
12329 NECP_SOCKET_PARAMS_LOG(so, "NECP ATTRIBUTES SOCKET - domain <%s> owner <%s> context <%s> tracker domain <%s> account <%s> "
12330 "<so flags - is_tracker %X non-app-initiated %X app-approved-domain %X",
12331 inp->inp_necp_attributes.inp_domain,
12332 inp->inp_necp_attributes.inp_domain_owner,
12333 inp->inp_necp_attributes.inp_domain_context,
12334 inp->inp_necp_attributes.inp_tracker_domain,
12335 inp->inp_necp_attributes.inp_account,
12336 so->so_flags1 & SOF1_KNOWN_TRACKER,
12337 so->so_flags1 & SOF1_TRACKER_NON_APP_INITIATED,
12338 so->so_flags1 & SOF1_APPROVED_APP_DOMAIN);
12339
12340 if (necp_debug) {
12341 NECPLOG(LOG_DEBUG, "Set on socket: Domain <%s> Domain owner <%s> Domain context <%s> Tracker domain <%s> Account <%s> ",
12342 inp->inp_necp_attributes.inp_domain,
12343 inp->inp_necp_attributes.inp_domain_owner,
12344 inp->inp_necp_attributes.inp_domain_context,
12345 inp->inp_necp_attributes.inp_tracker_domain,
12346 inp->inp_necp_attributes.inp_account);
12347 }
12348 fail:
12349 if (buffer != NULL) {
12350 kfree_data_sized_by(buffer, buffer_size);
12351 }
12352 return error;
12353 }
12354
/*
 * necp_create_nexus_assign_message
 * Build a TLV-encoded nexus flow assignment message from the supplied,
 * individually optional, pieces: nexus instance/port, flow advisory index,
 * nexus key, local/remote endpoints, local ethernet address, a kernel
 * flow-stats pointer, and a unique flow tag.
 *
 * Returns a kalloc_data'd buffer (caller frees via kfree_data with
 * *message_length) and stores its size in *message_length, or NULL with
 * *message_length set to 0 when nothing was requested or allocation failed.
 */
void *
__sized_by(*message_length)
necp_create_nexus_assign_message(uuid_t nexus_instance, nexus_port_t nexus_port, void * __sized_by(key_length) key, uint32_t key_length,
    struct necp_client_endpoint *local_endpoint, struct necp_client_endpoint *remote_endpoint, struct ether_addr *local_ether_addr,
    u_int32_t flow_adv_index, void *flow_stats, uint32_t flow_id, size_t *message_length)
{
	u_int8_t * __indexable buffer = NULL;
	u_int8_t * __indexable cursor = NULL;
	size_t valsize = 0;
	bool has_nexus_assignment = FALSE;

	// First pass: total up the size of every TLV that will be emitted.
	if (!uuid_is_null(nexus_instance)) {
		// A nexus assignment emits two TLVs: instance UUID and port.
		has_nexus_assignment = TRUE;
		valsize += sizeof(struct necp_tlv_header) + sizeof(uuid_t);
		valsize += sizeof(struct necp_tlv_header) + sizeof(nexus_port_t);
	}
	if (flow_adv_index != NECP_FLOWADV_IDX_INVALID) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(u_int32_t);
	}
	if (key != NULL && key_length > 0) {
		valsize += sizeof(struct necp_tlv_header) + key_length;
	}
	if (local_endpoint != NULL) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct necp_client_endpoint);
	}
	if (remote_endpoint != NULL) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct necp_client_endpoint);
	}
	if (local_ether_addr != NULL) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct ether_addr);
	}
	if (flow_stats != NULL) {
		// Note: serializes the pointer value itself, not what it points to.
		valsize += sizeof(struct necp_tlv_header) + sizeof(void *);
	}
	if (flow_id != 0) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(u_int32_t);
	}
	if (valsize == 0) {
		*message_length = 0;
		return NULL;
	}

	buffer = kalloc_data(valsize, Z_WAITOK | Z_ZERO);
	if (buffer == NULL) {
		*message_length = 0;
		return NULL;
	}

	// Second pass: write the TLVs in the same order they were sized.
	cursor = buffer;
	if (has_nexus_assignment) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_INSTANCE, sizeof(uuid_t), nexus_instance, buffer, valsize);
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_PORT, sizeof(nexus_port_t), &nexus_port, buffer, valsize);
	}
	if (flow_adv_index != NECP_FLOWADV_IDX_INVALID) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_PORT_FLOW_INDEX, sizeof(u_int32_t), &flow_adv_index, buffer, valsize);
	}
	if (key != NULL && key_length > 0) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_PARAMETER_NEXUS_KEY, key_length, key, buffer, valsize);
	}
	if (local_endpoint != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_LOCAL_ENDPOINT, sizeof(struct necp_client_endpoint), (uint8_t *)(struct necp_client_endpoint * __bidi_indexable)local_endpoint, buffer, valsize);
	}
	if (remote_endpoint != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_REMOTE_ENDPOINT, sizeof(struct necp_client_endpoint), (uint8_t *)(struct necp_client_endpoint * __bidi_indexable)remote_endpoint, buffer, valsize);
	}
	if (local_ether_addr != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_LOCAL_ETHER_ADDR, sizeof(struct ether_addr), (uint8_t *)(struct ether_addr * __bidi_indexable)local_ether_addr, buffer, valsize);
	}
	if (flow_stats != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_FLOW_STATS, sizeof(void *), &flow_stats, buffer, valsize);
	}
	if (flow_id != 0) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_UNIQUE_FLOW_TAG, sizeof(u_int32_t), &flow_id, buffer, valsize);
	}

	*message_length = valsize;

	return buffer;
}
12434
12435 void
necp_inpcb_remove_cb(struct inpcb * inp)12436 necp_inpcb_remove_cb(struct inpcb *inp)
12437 {
12438 if (!uuid_is_null(inp->necp_client_uuid)) {
12439 necp_client_unregister_socket_flow(inp->necp_client_uuid, inp);
12440 uuid_clear(inp->necp_client_uuid);
12441 }
12442 }
12443
12444 void
necp_inpcb_dispose(struct inpcb * inp)12445 necp_inpcb_dispose(struct inpcb *inp)
12446 {
12447 char * __indexable buffer = NULL;
12448
12449 necp_inpcb_remove_cb(inp); // Clear out socket registrations if not yet done
12450 if (inp->inp_necp_attributes.inp_domain != NULL) {
12451 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_domain);
12452 kfree_data_addr(buffer);
12453 inp->inp_necp_attributes.inp_domain = NULL;
12454 }
12455 if (inp->inp_necp_attributes.inp_account != NULL) {
12456 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_account);
12457 kfree_data_addr(buffer);
12458 inp->inp_necp_attributes.inp_account = NULL;
12459 }
12460 if (inp->inp_necp_attributes.inp_domain_owner != NULL) {
12461 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_domain_owner);
12462 kfree_data_addr(buffer);
12463 inp->inp_necp_attributes.inp_domain_owner = NULL;
12464 }
12465 if (inp->inp_necp_attributes.inp_domain_context != NULL) {
12466 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_domain_context);
12467 kfree_data_addr(buffer);
12468 inp->inp_necp_attributes.inp_domain_context = NULL;
12469 }
12470 if (inp->inp_necp_attributes.inp_tracker_domain != NULL) {
12471 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_tracker_domain);
12472 kfree_data_addr(buffer);
12473 inp->inp_necp_attributes.inp_tracker_domain = NULL;
12474 }
12475 if (inp->inp_resolver_signature != NULL) {
12476 kfree_data_sized_by(inp->inp_resolver_signature, inp->inp_resolver_signature_length);
12477 }
12478 }
12479
12480 void
necp_mppcb_dispose(struct mppcb * mpp)12481 necp_mppcb_dispose(struct mppcb *mpp)
12482 {
12483 char * __indexable buffer = NULL;
12484
12485 if (!uuid_is_null(mpp->necp_client_uuid)) {
12486 necp_client_unregister_multipath_cb(mpp->necp_client_uuid, mpp);
12487 uuid_clear(mpp->necp_client_uuid);
12488 }
12489
12490 if (mpp->inp_necp_attributes.inp_domain != NULL) {
12491 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_domain);
12492 kfree_data_addr(buffer);
12493 mpp->inp_necp_attributes.inp_domain = NULL;
12494 }
12495 if (mpp->inp_necp_attributes.inp_account != NULL) {
12496 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_account);
12497 kfree_data_addr(buffer);
12498 mpp->inp_necp_attributes.inp_account = NULL;
12499 }
12500 if (mpp->inp_necp_attributes.inp_domain_owner != NULL) {
12501 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_domain_owner);
12502 kfree_data_addr(buffer);
12503 mpp->inp_necp_attributes.inp_domain_owner = NULL;
12504 }
12505 if (mpp->inp_necp_attributes.inp_tracker_domain != NULL) {
12506 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_tracker_domain);
12507 kfree_data_addr(buffer);
12508 mpp->inp_necp_attributes.inp_tracker_domain = NULL;
12509 }
12510 if (mpp->inp_necp_attributes.inp_domain_context != NULL) {
12511 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_domain_context);
12512 kfree_data_addr(buffer);
12513 mpp->inp_necp_attributes.inp_domain_context = NULL;
12514 }
12515 }
12516
12517 /// Module init
12518
12519 void
necp_client_init(void)12520 necp_client_init(void)
12521 {
12522 necp_client_update_tcall = thread_call_allocate_with_options(necp_update_all_clients_callout, NULL,
12523 THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
12524 VERIFY(necp_client_update_tcall != NULL);
12525 #if SKYWALK
12526
12527 necp_client_collect_stats_tcall = thread_call_allocate_with_options(necp_collect_stats_client_callout, NULL,
12528 THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
12529 VERIFY(necp_client_collect_stats_tcall != NULL);
12530
12531 necp_close_empty_arenas_tcall = thread_call_allocate_with_options(necp_close_empty_arenas_callout, NULL,
12532 THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
12533 VERIFY(necp_close_empty_arenas_tcall != NULL);
12534 #endif /* SKYWALK */
12535
12536 LIST_INIT(&necp_fd_list);
12537 LIST_INIT(&necp_fd_observer_list);
12538 LIST_INIT(&necp_collect_stats_flow_list);
12539
12540 RB_INIT(&necp_client_global_tree);
12541 RB_INIT(&necp_client_flow_global_tree);
12542 }
12543
#if SKYWALK
/*
 * necp_client_get_proc_pid_from_arena_info
 * Map an arena mmap info back to the pid that owns it. A NECP-type arena's
 * mmap info is embedded in a struct necp_arena_info; a system (sysctl)
 * arena's mmap info is embedded in the owning necp_fd_data.
 */
pid_t
necp_client_get_proc_pid_from_arena_info(struct skmem_arena_mmap_info *arena_info)
{
	ASSERT((arena_info->ami_arena->ar_type == SKMEM_ARENA_TYPE_NECP) || (arena_info->ami_arena->ar_type == SKMEM_ARENA_TYPE_SYSTEM));

	if (arena_info->ami_arena->ar_type == SKMEM_ARENA_TYPE_NECP) {
		struct necp_arena_info * __single nai = __unsafe_forge_single(struct necp_arena_info *, container_of(arena_info, struct necp_arena_info, nai_mmap));
		return nai->nai_proc_pid;
	} else {
		struct necp_fd_data * __single fd_data = __unsafe_forge_single(struct necp_fd_data *, container_of(arena_info, struct necp_fd_data, sysctl_mmap));
		return fd_data->proc_pid;
	}
}
#endif /* SKYWALK */
12559