1 /*
2 * Copyright (c) 2015-2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30
31 #include <kern/thread_call.h>
32 #include <kern/uipc_domain.h>
33 #include <kern/zalloc.h>
34
35 #include <net/if.h>
36 #include <net/if_types.h>
37 #include <net/if_var.h>
38 #include <net/net_api_stats.h>
39 #include <net/necp.h>
40 #include <net/network_agent.h>
41 #include <net/ntstat.h>
42 #include <net/aop/kpi_aop.h>
43 #include <net/aop/aop_stats.h>
44
45 #include <netinet/in_pcb.h>
46 #include <netinet/in_var.h>
47 #include <netinet/ip.h>
48 #include <netinet/ip6.h>
49 #include <netinet/mp_pcb.h>
50 #include <netinet/tcp_cc.h>
51 #include <netinet/tcp_fsm.h>
52 #include <netinet/tcp_cache.h>
53 #include <netinet6/in6_var.h>
54
55 #include <sys/domain.h>
56 #include <sys/file_internal.h>
57 #include <sys/kauth.h>
58 #include <sys/kernel.h>
59 #include <sys/malloc.h>
60 #include <sys/poll.h>
61 #include <sys/priv.h>
62 #include <sys/protosw.h>
63 #include <sys/queue.h>
64 #include <sys/socket.h>
65 #include <sys/socketvar.h>
66 #include <sys/sysproto.h>
67 #include <sys/systm.h>
68 #include <sys/types.h>
69 #include <sys/codesign.h>
70 #include <libkern/section_keywords.h>
71 #include <IOKit/IOBSD.h>
72
73 #include <os/refcnt.h>
74
75 #include <CodeSignature/Entitlements.h>
76
77 #if SKYWALK
78 #include <skywalk/os_skywalk_private.h>
79 #include <skywalk/nexus/flowswitch/flow/flow_var.h>
80 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
81 #endif /* SKYWALK */
82
83 #if CONFIG_MACF
84 #include <security/mac_framework.h>
85 #endif
86
87 #include <net/sockaddr_utils.h>
88
89 /*
90 * NECP Client Architecture
91 * ------------------------------------------------
92 * See <net/necp.c> for a discussion on NECP database architecture.
93 *
94 * Each client of NECP provides a set of parameters for a connection or network state
95 * evaluation, on which NECP policy evaluation is run. This produces a policy result
 * which can be accessed by the originating process, along with events for when policy
 * results have changed.
98 *
99 * ------------------------------------------------
100 * NECP Client FD
101 * ------------------------------------------------
102 * A process opens an NECP file descriptor using necp_open(). This is a very simple
103 * file descriptor, upon which the process may do the following operations:
104 * - necp_client_action(...), to add/remove/query clients
105 * - kqueue, to watch for readable events
106 * - close(), to close the client session and release all clients
107 *
108 * Client objects are allocated structures that hang off of the file descriptor. Each
109 * client contains:
110 * - Client ID, a UUID that references the client across the system
111 * - Parameters, a buffer of TLVs that describe the client's connection parameters,
112 * such as the remote and local endpoints, interface requirements, etc.
113 * - Result, a buffer of TLVs containing the current policy evaluation for the client.
114 * This result will be updated whenever a network change occurs that impacts the
115 * policy result for that client.
116 *
117 * +--------------+
118 * | NECP fd |
119 * +--------------+
120 * ||
121 * ==================================
122 * || || ||
123 * +--------------+ +--------------+ +--------------+
124 * | Client ID | | Client ID | | Client ID |
125 * | ---- | | ---- | | ---- |
126 * | Parameters | | Parameters | | Parameters |
127 * | ---- | | ---- | | ---- |
128 * | Result | | Result | | Result |
129 * +--------------+ +--------------+ +--------------+
130 *
131 * ------------------------------------------------
132 * Client Actions
133 * ------------------------------------------------
134 * - Add. Input parameters as a buffer of TLVs, and output a client ID. Allocates a
135 * new client structure on the file descriptor.
136 * - Remove. Input a client ID. Removes a client structure from the file descriptor.
137 * - Copy Parameters. Input a client ID, and output parameter TLVs.
138 * - Copy Result. Input a client ID, and output result TLVs. Alternatively, input empty
139 * client ID and get next unread client result.
140 * - Copy List. List all client IDs.
141 *
142 * ------------------------------------------------
143 * Client Policy Evaluation
144 * ------------------------------------------------
145 * Policies are evaluated for clients upon client creation, and upon update events,
146 * which are network/agent/policy changes coalesced by a timer.
147 *
148 * The policy evaluation goes through the following steps:
149 * 1. Parse client parameters.
150 * 2. Select a scoped interface if applicable. This involves using require/prohibit
151 * parameters, along with the local address, to select the most appropriate interface
152 * if not explicitly set by the client parameters.
 * 3. Run NECP application-level policy evaluation.
154 * 4. Set policy result into client result buffer.
155 *
156 * ------------------------------------------------
157 * Client Observers
158 * ------------------------------------------------
159 * If necp_open() is called with the NECP_OPEN_FLAG_OBSERVER flag, and the process
160 * passes the necessary privilege check, the fd is allowed to use necp_client_action()
161 * to copy client state attached to the file descriptors of other processes, and to
162 * list all client IDs on the system.
163 */
164
// Global NECP debug level; defined elsewhere (necp.c).
extern u_int32_t necp_debug;

// fileops callbacks for NECP file descriptors (wired into necp_fd_ops below).
static int necpop_select(struct fileproc *, int, void *, vfs_context_t);
static int necpop_close(struct fileglob *, vfs_context_t);
static int necpop_kqfilter(struct fileproc *, struct knote *, struct kevent_qos_s *);

// Timer functions
static int necp_timeout_microseconds = 1000 * 100; // 100ms
static int necp_timeout_leeway_microseconds = 1000 * 50; // 50ms
#if SKYWALK
static int necp_collect_stats_timeout_microseconds = 1000 * 1000 * 1; // 1s
static int necp_collect_stats_timeout_leeway_microseconds = 1000 * 500; // 500ms
static int necp_close_arenas_timeout_microseconds = 1000 * 1000 * 10; // 10s
static int necp_close_arenas_timeout_leeway_microseconds = 1000 * 1000 * 1; // 1s
#endif /* SKYWALK */

// Live object counters, exported read-only via the SYSCTL_INT declarations below.
static int necp_client_fd_count = 0;
static int necp_observer_fd_count = 0;
static int necp_client_count = 0;
static int necp_socket_flow_count = 0;
static int necp_if_flow_count = 0;
// Cap on queued observer update messages per fd (read-write sysctl).
static int necp_observer_message_limit = 256;
187
/*
 * NECP client tracing control -
 *
 * necp_client_tracing_level : 1 for client trace, 2 for flow trace, 3 for parameter details
 * necp_client_tracing_pid : match client with pid
 */
static int necp_client_tracing_level = 0;
static int necp_client_tracing_pid = 0;

#define NECP_CLIENT_TRACE_LEVEL_CLIENT 1
#define NECP_CLIENT_TRACE_LEVEL_FLOW 2
#define NECP_CLIENT_TRACE_LEVEL_PARAMS 3

// True if `pid` matches the trace-pid filter. Argument parenthesized so
// expression arguments expand safely.
#define NECP_CLIENT_TRACE_PID_MATCHED(pid) \
	((pid) == necp_client_tracing_pid)

// Expands to the current tracing level when tracing at `level` is enabled
// (and the pid filter, if set, matches), otherwise 0.
// NOTE: references a variable named `client` in scope at the call site.
#define NECP_ENABLE_CLIENT_TRACE(level) \
	((necp_client_tracing_level >= (level) && \
	(!necp_client_tracing_pid || NECP_CLIENT_TRACE_PID_MATCHED(client->proc_pid))) ? necp_client_tracing_level : 0)

// Logging macros below are wrapped in do { } while (0) so they behave as a
// single statement (safe in unbraced if/else) instead of a bare if-block.
#define NECP_CLIENT_LOG(client, fmt, ...) \
	do { \
		if (client && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_CLIENT)) { \
			uuid_string_t client_uuid_str = { }; \
			uuid_unparse_lower(client->client_id, client_uuid_str); \
			NECPLOG(LOG_NOTICE, "NECP_CLIENT_LOG <pid %d %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, ##__VA_ARGS__); \
		} \
	} while (0)

#define NECP_CLIENT_FLOW_LOG(client, flow, fmt, ...) \
	do { \
		if (client && flow && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) { \
			uuid_string_t client_uuid_str = { }; \
			uuid_unparse_lower(client->client_id, client_uuid_str); \
			uuid_string_t flow_uuid_str = { }; \
			uuid_unparse_lower(flow->registration_id, flow_uuid_str); \
			NECPLOG(LOG_NOTICE, "NECP CLIENT FLOW TRACE <pid %d %s> <flow %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, flow_uuid_str, ##__VA_ARGS__); \
		} \
	} while (0)

#define NECP_CLIENT_PARAMS_LOG(client, fmt, ...) \
	do { \
		if (client && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) { \
			uuid_string_t client_uuid_str = { }; \
			uuid_unparse_lower(client->client_id, client_uuid_str); \
			NECPLOG(LOG_NOTICE, "NECP_CLIENT_PARAMS_LOG <pid %d %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, ##__VA_ARGS__); \
		} \
	} while (0)

// Effective pid for a socket: the delegated pid when SOF_DELEGATED is set,
// otherwise the last pid that used the socket.
#define NECP_SOCKET_PID(so) \
	(((so)->so_flags & SOF_DELEGATED) ? (so)->e_pid : (so)->last_pid)

// Socket-flavored analogue of NECP_ENABLE_CLIENT_TRACE; references `so`
// in scope at the call site.
#define NECP_ENABLE_SOCKET_TRACE(level) \
	((necp_client_tracing_level >= (level) && \
	(!necp_client_tracing_pid || NECP_CLIENT_TRACE_PID_MATCHED(NECP_SOCKET_PID(so)))) ? necp_client_tracing_level : 0)

#define NECP_SOCKET_PARAMS_LOG(so, fmt, ...) \
	do { \
		if (so && NECP_ENABLE_SOCKET_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) { \
			NECPLOG(LOG_NOTICE, "NECP_SOCKET_PARAMS_LOG <pid %d>: " fmt "\n", NECP_SOCKET_PID(so), ##__VA_ARGS__); \
		} \
	} while (0)

#define NECP_SOCKET_ATTRIBUTE_LOG(fmt, ...) \
	do { \
		if (necp_client_tracing_level >= NECP_CLIENT_TRACE_LEVEL_PARAMS) { \
			NECPLOG(LOG_NOTICE, "NECP_SOCKET_ATTRIBUTE_LOG: " fmt "\n", ##__VA_ARGS__); \
		} \
	} while (0)

#define NECP_CLIENT_TRACKER_LOG(pid, fmt, ...) \
	do { \
		if (pid) { \
			NECPLOG(LOG_NOTICE, "NECP_CLIENT_TRACKER_LOG <pid %d>: " fmt "\n", pid, ##__VA_ARGS__); \
		} \
	} while (0)
252
#if SKYWALK
// Arena/flow counters exported via the SKYWALK sysctls below.
static int necp_arena_count = 0;
static int necp_sysctl_arena_count = 0;
static int necp_nexus_flow_count = 0;

/* userspace stats sanity check range, same unit as TCP (see TCP_RTT_SCALE) */
static uint32_t necp_client_stats_rtt_floor = 1; // 32us
static uint32_t necp_client_stats_rtt_ceiling = 1920000; // 60s
// Storage-class specifier placed first ("static const"): any other ordering
// is an obsolescent feature per C11 6.11.5.
static const struct sk_stats_flow ntstat_sk_stats_zero;
#endif /* SKYWALK */

// Tunable (sysctl below); presumably selects route metrics as the source of
// client stats when non-zero — semantics defined where it is consumed.
static int necp_client_stats_use_route_metrics = 0;
265
/*
 * Global lock to protect socket inp_necp_attributes across updates.
 * NECP updating these attributes and clients accessing these attributes
 * must take this lock.
 */
static LCK_GRP_DECLARE(necp_socket_attr_lock_grp, "necpSocketAttrGroup");
LCK_MTX_DECLARE(necp_socket_attr_lock, &necp_socket_attr_lock_grp);

// Refcount group for struct necp_client reference counting.
os_refgrp_decl(static, necp_client_refgrp, "NECPClientRefGroup", NULL);

// Read-only counters and read-write tracing knobs under net.necp.
SYSCTL_INT(_net_necp, NECPCTL_CLIENT_FD_COUNT, client_fd_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_client_fd_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_OBSERVER_FD_COUNT, observer_fd_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_observer_fd_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_CLIENT_COUNT, client_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_client_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_SOCKET_FLOW_COUNT, socket_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_socket_flow_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_IF_FLOW_COUNT, if_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_if_flow_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_OBSERVER_MESSAGE_LIMIT, observer_message_limit, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_observer_message_limit, 256, "");
SYSCTL_INT(_net_necp, NECPCTL_CLIENT_TRACING_LEVEL, necp_client_tracing_level, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_client_tracing_level, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_CLIENT_TRACING_PID, necp_client_tracing_pid, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_client_tracing_pid, 0, "");

#if SKYWALK
SYSCTL_INT(_net_necp, NECPCTL_ARENA_COUNT, arena_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_arena_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_SYSCTL_ARENA_COUNT, sysctl_arena_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_sysctl_arena_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_NEXUS_FLOW_COUNT, nexus_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_nexus_flow_count, 0, "");
#if (DEVELOPMENT || DEBUG)
// Writable tuning knobs only on development/debug kernels.
SYSCTL_UINT(_net_necp, OID_AUTO, collect_stats_interval_us, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_collect_stats_timeout_microseconds, 0, "");
SYSCTL_UINT(_net_necp, OID_AUTO, necp_client_stats_rtt_floor, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_rtt_floor, 0, "");
SYSCTL_UINT(_net_necp, OID_AUTO, necp_client_stats_rtt_ceiling, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_rtt_ceiling, 0, "");
SYSCTL_INT(_net_necp, OID_AUTO, necp_client_stats_use_route_metrics, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_use_route_metrics, 0, "");
#endif /* (DEVELOPMENT || DEBUG) */
#endif /* SKYWALK */
296
// Parenthesized so the constants expand safely inside larger expressions
// (e.g. division or shifts at the use site).
#define NECP_MAX_CLIENT_LIST_SIZE (1024 * 1024) // 1MB
#define NECP_MAX_AGENT_ACTION_SIZE (10 * 1024) // 10K

extern int tvtohz(struct timeval *);
extern unsigned int get_maxmtu(struct rtentry *);
302
// Parsed parameters
// Bit flags stored in necp_client_parsed_parameters.valid_fields, marking
// which members below were present in the client's parameter TLVs.
#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR 0x00001
#define NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR 0x00002
#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF 0x00004
#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF 0x00008
#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE 0x00010
#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE 0x00020
#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT 0x00040
#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT 0x00080
#define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT 0x00100
#define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT 0x00200
#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE 0x00400
#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE 0x00800
#define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE 0x01000
#define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE 0x02000
#define NECP_PARSED_PARAMETERS_FIELD_FLAGS 0x04000
#define NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL 0x08000
#define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID 0x10000
#define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID 0x20000
#define NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS 0x40000
#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT 0x80000
#define NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID 0x100000
#define NECP_PARSED_PARAMETERS_FIELD_ETHERTYPE 0x200000
#define NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL 0x400000
#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE 0x800000
#define NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER 0x1000000
#define NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID 0x2000000
#define NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN 0x4000000
#define NECP_PARSED_PARAMETERS_FIELD_UID 0x8000000
#define NECP_PARSED_PARAMETERS_FIELD_PERSONA_ID 0x10000000
#define NECP_PARSED_PARAMETERS_FIELD_EXTENDED_FLAGS 0x20000000


// Fixed-size caps on list-style parameters.
#define NECP_MAX_INTERFACE_PARAMETERS 16
#define NECP_MAX_AGENT_PARAMETERS 4
// In-kernel representation of a client's parameter TLVs after parsing.
// valid_fields is a bitmask of the NECP_PARSED_PARAMETERS_FIELD_* values
// above indicating which members carry meaningful data.
struct necp_client_parsed_parameters {
	u_int32_t valid_fields;
	u_int32_t flags;
	u_int64_t delegated_upid;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	u_int32_t required_interface_index;
	char prohibited_interfaces[NECP_MAX_INTERFACE_PARAMETERS][IFXNAMSIZ];
	u_int8_t required_interface_type;
	u_int8_t local_address_preference;
	u_int8_t prohibited_interface_types[NECP_MAX_INTERFACE_PARAMETERS];
	struct necp_client_parameter_netagent_type required_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	struct necp_client_parameter_netagent_type prohibited_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	struct necp_client_parameter_netagent_type preferred_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	struct necp_client_parameter_netagent_type avoided_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	uuid_t required_netagents[NECP_MAX_AGENT_PARAMETERS];
	uuid_t prohibited_netagents[NECP_MAX_AGENT_PARAMETERS];
	uuid_t preferred_netagents[NECP_MAX_AGENT_PARAMETERS];
	uuid_t avoided_netagents[NECP_MAX_AGENT_PARAMETERS];
	u_int8_t ip_protocol;
	u_int8_t transport_protocol;
	u_int16_t ethertype;
	pid_t effective_pid;
	uuid_t effective_uuid;
	uuid_t parent_uuid;
	u_int32_t traffic_class;
	struct necp_demux_pattern demux_patterns[NECP_MAX_DEMUX_PATTERNS];
	u_int8_t demux_pattern_count; // number of valid entries in demux_patterns
	uid_t uid;
	uid_t persona_id;
	u_int64_t extended_flags;
};
370
// Interface selection helpers (defined later in this file).
static bool
necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_parameters,
    u_int *return_ifindex, bool *validate_agents);

static bool
necp_ifnet_matches_local_address(struct ifnet *ifp, struct sockaddr *sa);

static bool
necp_ifnet_matches_parameters(struct ifnet *ifp,
    struct necp_client_parsed_parameters *parsed_parameters,
    u_int32_t override_flags,
    u_int32_t *preferred_count,
    bool secondary_interface,
    bool require_scoped_field);

// fileops vector for NECP fds: read/write/ioctl/drain are disallowed;
// only select, close, and kqfilter are implemented.
static const struct fileops necp_fd_ops = {
	.fo_type = DTYPE_NETPOLICY,
	.fo_read = fo_no_read,
	.fo_write = fo_no_write,
	.fo_ioctl = fo_no_ioctl,
	.fo_select = necpop_select,
	.fo_close = necpop_close,
	.fo_drain = fo_no_drain,
	.fo_kqfilter = necpop_kqfilter,
};
396
// One asserted netagent on a client; linked into necp_client.assertion_list.
struct necp_client_assertion {
	LIST_ENTRY(necp_client_assertion) assertion_chain;
	uuid_t asserted_netagent;
};

// Packed wire layouts for flow result TLVs copied out to userspace.
// The packed attribute keeps these structs matching the TLV byte stream.
struct necp_client_flow_header {
	struct necp_tlv_header outer_header;
	struct necp_tlv_header flow_id_tlv_header;
	uuid_t flow_id;
	struct necp_tlv_header flags_tlv_header;
	u_int32_t flags_value;
	struct necp_tlv_header interface_tlv_header;
	struct necp_client_result_interface interface_value;
} __attribute__((__packed__));

struct necp_client_flow_protoctl_event_header {
	struct necp_tlv_header protoctl_tlv_header;
	struct necp_client_flow_protoctl_event protoctl_event;
} __attribute__((__packed__));

struct necp_client_flow_stats_index_header {
	struct necp_tlv_header stats_index_tlv_header;
	uint32_t stats_index;
} __attribute__((__packed__));

// Extended flow header for nexus flows: adds agent and TFO-cookie TLVs.
struct necp_client_nexus_flow_header {
	struct necp_client_flow_header flow_header;
	struct necp_tlv_header agent_tlv_header;
	struct necp_client_result_netagent agent_value;
	struct necp_tlv_header tfo_cookie_tlv_header;
	u_int8_t tfo_cookie_value[NECP_TFO_COOKIE_LEN_MAX];
} __attribute__((__packed__));

#if SKYWALK
struct necp_arena_info;
#endif
433
// A single flow belonging to a flow registration; linked into
// necp_client_flow_registration.flow_list.
struct necp_client_flow {
	LIST_ENTRY(necp_client_flow) flow_chain;
	unsigned invalid : 1;
	unsigned nexus : 1; // If true, flow is a nexus; if false, flow is attached to socket
	unsigned socket : 1;
	unsigned viable : 1;
	unsigned assigned : 1;
	unsigned has_protoctl_event : 1;
	unsigned check_tcp_heuristics : 1;
	unsigned aop_offload : 1;
	unsigned aop_stat_index_valid : 1; // when set, stats_index below is valid
	union {
		// Used for nexus flows (nexus bit set).
		uuid_t nexus_agent;
		// Used for socket flows: opaque handle plus callback.
		struct {
			void *socket_handle;
			necp_client_flow_cb cb;
		};
	} u;
	uint32_t interface_index;
	u_short delegated_interface_index;
	uint32_t interface_flags;
	uint32_t necp_flow_flags;
	struct necp_client_flow_protoctl_event protoctl_event;
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;
	uint32_t flow_tag;
	uint32_t stats_index; // Index associated with AOP flows

	// Result TLVs assigned to this flow; buffer bounded by
	// assigned_results_length via __counted_by.
	size_t assigned_results_length;
	u_int8_t *__counted_by(assigned_results_length) assigned_results;
};
465
// A flow registration: the unit tracked in the per-fd, global, and per-client
// red-black trees (fd_link/global_link/client_link), owning a list of flows.
struct necp_client_flow_registration {
	RB_ENTRY(necp_client_flow_registration) fd_link;
	RB_ENTRY(necp_client_flow_registration) global_link;
	RB_ENTRY(necp_client_flow_registration) client_link;
	LIST_ENTRY(necp_client_flow_registration) collect_stats_chain;
	uuid_t registration_id;
	u_int32_t flags;
	unsigned flow_result_read : 1;
	unsigned defunct : 1;
	unsigned aop_offload : 1;
	void *interface_handle;
	necp_client_flow_cb interface_cb;
	struct necp_client *client; // back-pointer to owning client
	LIST_HEAD(_necp_registration_flow_list, necp_client_flow) flow_list;
#if SKYWALK
	struct necp_arena_info *stats_arena; /* arena where the stats objects came from */
	void * kstats_kaddr; /* kernel snapshot of untrusted userspace stats, for calculating delta */
	mach_vm_address_t ustats_uaddr; /* userspace stats (untrusted) */
	nstat_userland_context stats_handler_context;
	struct flow_stats *nexus_stats; /* shared stats objects between necp_client and skywalk */
#endif /* SKYWALK */
	u_int64_t last_interface_details __attribute__((aligned(sizeof(u_int64_t))));
};

// Comparator for the flow-registration RB trees (keyed by registration_id).
static int necp_client_flow_id_cmp(struct necp_client_flow_registration *flow0, struct necp_client_flow_registration *flow1);

RB_HEAD(_necp_client_flow_tree, necp_client_flow_registration);
RB_PROTOTYPE_PREV(_necp_client_flow_tree, necp_client_flow_registration, client_link, necp_client_flow_id_cmp);
RB_GENERATE_PREV(_necp_client_flow_tree, necp_client_flow_registration, client_link, necp_client_flow_id_cmp);
495
// Interface options: a small static array inline in the client, spilling to a
// heap-allocated extra array up to the overall maximum.
#define NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT 4
#define NECP_CLIENT_MAX_INTERFACE_OPTIONS 32

#define NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT (NECP_CLIENT_MAX_INTERFACE_OPTIONS - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT)

// A single NECP client: parameters, current policy result, and flow
// registrations. Lives in a per-fd tree (link) and the global tree
// (global_link); reference-counted via reference_count.
struct necp_client {
	RB_ENTRY(necp_client) link;
	RB_ENTRY(necp_client) global_link;

	decl_lck_mtx_data(, lock);          // protects client state (NECP_CLIENT_LOCK)
	decl_lck_mtx_data(, route_lock);    // protects current_route (NECP_CLIENT_ROUTE_LOCK)
	os_refcnt_t reference_count;

	uuid_t client_id;
	unsigned result_read : 1;
	unsigned group_members_read : 1;
	unsigned allow_multiple_flows : 1;
	unsigned legacy_client_is_flow : 1;

	unsigned platform_binary : 1;
	unsigned validated_parent : 1;

	// Current policy result TLVs (length used within the fixed buffer).
	size_t result_length;
	u_int8_t result[NECP_BASE_CLIENT_RESULT_SIZE];

	necp_policy_id policy_id;
	necp_policy_id skip_policy_id;

	u_int8_t ip_protocol;
	int proc_pid;

	u_int64_t delegated_upid;

	struct _necp_client_flow_tree flow_registrations;
	LIST_HEAD(_necp_client_assertion_list, necp_client_assertion) assertion_list;

	size_t assigned_group_members_length;
	u_int8_t *__counted_by(assigned_group_members_length) assigned_group_members;

	struct rtentry *current_route;

	struct necp_client_interface_option interface_options[NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
	struct necp_client_interface_option * __indexable extra_interface_options;
	u_int8_t interface_option_count; // Number in interface_options + extra_interface_options

	struct necp_client_result_netagent failed_trigger_agent;

	void *agent_handle;

	uuid_t override_euuid;

#if SKYWALK
	netns_token port_reservation;
	nstat_context nstat_context;
	uuid_t latest_flow_registration_id;
	uuid_t parent_client_id;
	struct necp_client *original_parameters_source;
#endif /* SKYWALK */

	// Raw parameter TLVs provided by the process at client-add time.
	size_t parameters_length;
	u_int8_t * __sized_by(parameters_length) parameters;
};
558
559 #define NECP_CLIENT_LOCK(_c) lck_mtx_lock(&_c->lock)
560 #define NECP_CLIENT_UNLOCK(_c) lck_mtx_unlock(&_c->lock)
561 #define NECP_CLIENT_ASSERT_LOCKED(_c) LCK_MTX_ASSERT(&_c->lock, LCK_MTX_ASSERT_OWNED)
562 #define NECP_CLIENT_ASSERT_UNLOCKED(_c) LCK_MTX_ASSERT(&_c->lock, LCK_MTX_ASSERT_NOTOWNED)
563
564 #define NECP_CLIENT_ROUTE_LOCK(_c) lck_mtx_lock(&_c->route_lock)
565 #define NECP_CLIENT_ROUTE_UNLOCK(_c) lck_mtx_unlock(&_c->route_lock)
566
567 static void necp_client_retain_locked(struct necp_client *client);
568 static void necp_client_retain(struct necp_client *client);
569
570 static bool necp_client_release_locked(struct necp_client *client);
571 static bool necp_client_release(struct necp_client *client);
572
573 static void
574 necp_client_add_assertion(struct necp_client *client, uuid_t netagent_uuid);
575
576 static bool
577 necp_client_remove_assertion(struct necp_client *client, uuid_t netagent_uuid);
578
579 static int
580 necp_client_copy_parameters_locked(struct necp_client *client,
581 struct necp_client_nexus_parameters *parameters);
582
// List of flow registrations pending stats collection (see
// NECP_STATS_LIST_LOCK notes below).
LIST_HEAD(_necp_flow_registration_list, necp_client_flow_registration);
static struct _necp_flow_registration_list necp_collect_stats_flow_list;

// Snapshot of a flow being defuncted, so defunct work can proceed without
// holding the client lock.
struct necp_flow_defunct {
	LIST_ENTRY(necp_flow_defunct) chain;

	uuid_t flow_id;
	uuid_t nexus_agent;
	void *agent_handle;
	void *socket_handle;
	int proc_pid;
	u_int32_t flags;
	struct necp_client_agent_parameters close_parameters;
	bool has_close_parameters; // when false, close_parameters is unused
};

LIST_HEAD(_necp_flow_defunct_list, necp_flow_defunct);

// Comparator for the client RB trees (keyed by client_id).
static int necp_client_id_cmp(struct necp_client *client0, struct necp_client *client1);

// Per-fd client tree.
RB_HEAD(_necp_client_tree, necp_client);
RB_PROTOTYPE_PREV(_necp_client_tree, necp_client, link, necp_client_id_cmp);
RB_GENERATE_PREV(_necp_client_tree, necp_client, link, necp_client_id_cmp);

// System-wide client tree.
RB_HEAD(_necp_client_global_tree, necp_client);
RB_PROTOTYPE_PREV(_necp_client_global_tree, necp_client, global_link, necp_client_id_cmp);
RB_GENERATE_PREV(_necp_client_global_tree, necp_client, global_link, necp_client_id_cmp);

// Per-fd flow-registration tree.
RB_HEAD(_necp_fd_flow_tree, necp_client_flow_registration);
RB_PROTOTYPE_PREV(_necp_fd_flow_tree, necp_client_flow_registration, fd_link, necp_client_flow_id_cmp);
RB_GENERATE_PREV(_necp_fd_flow_tree, necp_client_flow_registration, fd_link, necp_client_flow_id_cmp);

// System-wide flow-registration tree.
RB_HEAD(_necp_client_flow_global_tree, necp_client_flow_registration);
RB_PROTOTYPE_PREV(_necp_client_flow_global_tree, necp_client_flow_registration, global_link, necp_client_flow_id_cmp);
RB_GENERATE_PREV(_necp_client_flow_global_tree, necp_client_flow_registration, global_link, necp_client_flow_id_cmp);

static struct _necp_client_global_tree necp_client_global_tree;
static struct _necp_client_flow_global_tree necp_client_flow_global_tree;

// A queued observer update message for a client, delivered via the fd's
// update_list.
struct necp_client_update {
	TAILQ_ENTRY(necp_client_update) chain;

	uuid_t client_id;

	size_t update_length;
	struct necp_client_observer_update *__sized_by(update_length) update;
};
630
#if SKYWALK
// Per-process stats arena bookkeeping; see the discussion on
// struct necp_fd_data's stats arena fields.
struct necp_arena_info {
	LIST_ENTRY(necp_arena_info) nai_chain;
	u_int32_t nai_flags;     // NAIF_* flags below
	pid_t nai_proc_pid;
	struct skmem_arena *nai_arena;
	struct skmem_arena_mmap_info nai_mmap;
	mach_vm_offset_t nai_roff;
	u_int32_t nai_use_count;
};
#endif /* SKYWALK */

#define NAIF_ATTACHED 0x1 // arena is attached to list
#define NAIF_REDIRECT 0x2 // arena mmap has been redirected
#define NAIF_DEFUNCT 0x4 // arena is now defunct

#define NECP_FD_REPORTED_AGENT_COUNT 2

// Small cache of agent UUIDs already reported on an fd.
struct necp_fd_reported_agents {
	uuid_t agent_uuid[NECP_FD_REPORTED_AGENT_COUNT];
};
652
// Per-fd state for an NECP file descriptor: its clients, flow registrations,
// and queued observer updates. Protected by fd_lock (NECP_FD_LOCK).
struct necp_fd_data {
	u_int8_t necp_fd_type;
	LIST_ENTRY(necp_fd_data) chain;
	struct _necp_client_tree clients;
	struct _necp_fd_flow_tree flows;
	TAILQ_HEAD(_necp_client_update_list, necp_client_update) update_list;
	int update_count;  // entries in update_list (bounded by necp_observer_message_limit)
	int flags;
	unsigned background : 1;
	unsigned request_in_process_flow_divert : 1;

	int proc_pid;
	decl_lck_mtx_data(, fd_lock);
	struct selinfo si;  // select/kqueue wakeup state

	struct necp_fd_reported_agents reported_agents;
#if SKYWALK
	// Arenas and their mmap info for per-process stats. Stats objects are allocated from an active arena
	// that is not redirected/defunct. The stats_arena_active keeps track of such an arena, and it also
	// holds a reference count on the object. Each flow allocating a stats object also holds a reference
	// the necp_arena_info (where the object got allocated from). During defunct, we redirect the mapping
	// of the arena such that any attempt to access (read/write) will result in getting zero-filled pages.
	// We then go thru all of the flows for the process and free the stats objects associated with them,
	// followed by destroying the skmem region(s) associated with the arena. The stats_arena_list keeps
	// track of all current and defunct stats arenas; there could be more than one arena created for the
	// process as the arena destruction happens when its reference count drops to 0.
	struct necp_arena_info *stats_arena_active;
	LIST_HEAD(_necp_arena_info_list, necp_arena_info) stats_arena_list;
	u_int32_t stats_arena_gencnt;

	struct skmem_arena *sysctl_arena;
	struct skmem_arena_mmap_info sysctl_mmap;
	mach_vm_offset_t system_sysctls_roff;
#endif /* SKYWALK */
};
689
// Per-fd lock wrappers; `_f` parenthesized so any pointer expression expands safely.
#define NECP_FD_LOCK(_f) lck_mtx_lock(&(_f)->fd_lock)
#define NECP_FD_UNLOCK(_f) lck_mtx_unlock(&(_f)->fd_lock)
#define NECP_FD_ASSERT_LOCKED(_f) LCK_MTX_ASSERT(&(_f)->fd_lock, LCK_MTX_ASSERT_OWNED)
#define NECP_FD_ASSERT_UNLOCKED(_f) LCK_MTX_ASSERT(&(_f)->fd_lock, LCK_MTX_ASSERT_NOTOWNED)
694
// All NECP fds, plus the subset opened as observers.
static LIST_HEAD(_necp_fd_list, necp_fd_data) necp_fd_list;
static LIST_HEAD(_necp_fd_observer_list, necp_fd_data) necp_fd_observer_list;

#if SKYWALK
static KALLOC_TYPE_DEFINE(necp_arena_info_zone, struct necp_arena_info, NET_KT_DEFAULT);
#endif /* SKYWALK */

// Shared attr/group for all the rw locks declared below.
static LCK_ATTR_DECLARE(necp_fd_mtx_attr, 0, 0);
static LCK_GRP_DECLARE(necp_fd_mtx_grp, "necp_fd");

static LCK_RW_DECLARE_ATTR(necp_fd_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
static LCK_RW_DECLARE_ATTR(necp_observer_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
static LCK_RW_DECLARE_ATTR(necp_client_tree_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
static LCK_RW_DECLARE_ATTR(necp_flow_tree_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
static LCK_RW_DECLARE_ATTR(necp_collect_stats_list_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);


// Reader/writer lock wrappers; see "Locking Notes" below for ordering rules.
#define NECP_STATS_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_collect_stats_list_lock)
#define NECP_STATS_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_collect_stats_list_lock)
#define NECP_STATS_LIST_UNLOCK() lck_rw_done(&necp_collect_stats_list_lock)

#define NECP_CLIENT_TREE_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_client_tree_lock)
#define NECP_CLIENT_TREE_LOCK_SHARED() lck_rw_lock_shared(&necp_client_tree_lock)
#define NECP_CLIENT_TREE_UNLOCK() lck_rw_done(&necp_client_tree_lock)
#define NECP_CLIENT_TREE_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_client_tree_lock, LCK_RW_ASSERT_HELD)

#define NECP_FLOW_TREE_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_flow_tree_lock)
#define NECP_FLOW_TREE_LOCK_SHARED() lck_rw_lock_shared(&necp_flow_tree_lock)
#define NECP_FLOW_TREE_UNLOCK() lck_rw_done(&necp_flow_tree_lock)
#define NECP_FLOW_TREE_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_flow_tree_lock, LCK_RW_ASSERT_HELD)

#define NECP_FD_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_fd_lock)
#define NECP_FD_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_fd_lock)
#define NECP_FD_LIST_UNLOCK() lck_rw_done(&necp_fd_lock)
#define NECP_FD_LIST_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_fd_lock, LCK_RW_ASSERT_HELD)

#define NECP_OBSERVER_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_observer_lock)
#define NECP_OBSERVER_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_observer_lock)
#define NECP_OBSERVER_LIST_UNLOCK() lck_rw_done(&necp_observer_lock)
734
735 // Locking Notes
736
737 // Take NECP_FD_LIST_LOCK when accessing or modifying the necp_fd_list
738 // Take NECP_CLIENT_TREE_LOCK when accessing or modifying the necp_client_global_tree
739 // Take NECP_FLOW_TREE_LOCK when accessing or modifying the necp_client_flow_global_tree
740 // Take NECP_STATS_LIST_LOCK when accessing or modifying the necp_collect_stats_flow_list
741 // Take NECP_FD_LOCK when accessing or modifying an necp_fd_data entry
742 // Take NECP_CLIENT_LOCK when accessing or modifying a single necp_client
743 // Take NECP_CLIENT_ROUTE_LOCK when accessing or modifying a client's route
744
745 // Precedence, where 1 is the first lock that must be taken
746 // 1. NECP_FD_LIST_LOCK
747 // 2. NECP_FD_LOCK (any)
748 // 3. NECP_CLIENT_TREE_LOCK
749 // 4. NECP_CLIENT_LOCK (any)
750 // 5. NECP_FLOW_TREE_LOCK
751 // 6. NECP_STATS_LIST_LOCK
752 // 7. NECP_CLIENT_ROUTE_LOCK (any)
753
754 static thread_call_t necp_client_update_tcall;
755 static uint32_t necp_update_all_clients_sched_cnt = 0;
756 static uint64_t necp_update_all_clients_sched_abstime = 0;
757 static LCK_RW_DECLARE_ATTR(necp_update_all_clients_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
758 #define NECP_UPDATE_ALL_CLIENTS_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_update_all_clients_lock)
759 #define NECP_UPDATE_ALL_CLIENTS_SHARED_TO_EXCLUSIVE() lck_rw_lock_shared_to_exclusive(&necp_update_all_clients_lock)
760 #define NECP_UPDATE_ALL_CLIENTS_SHARED() lck_rw_lock_shared(&necp_update_all_clients_lock)
761 #define NECP_UPDATE_ALL_CLIENTS_UNLOCK() lck_rw_done(&necp_update_all_clients_lock)
762
763 // Array of PIDs that will trigger in-process flow divert, protected by NECP_FD_LIST_LOCK
764 #define NECP_MAX_FLOW_DIVERT_NEEDED_PIDS 4
765 static pid_t necp_flow_divert_needed_pids[NECP_MAX_FLOW_DIVERT_NEEDED_PIDS];
766
767 #if SKYWALK
768 static thread_call_t necp_client_collect_stats_tcall;
769 static thread_call_t necp_close_empty_arenas_tcall;
770
771 static void necp_fd_insert_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai);
772 static void necp_fd_remove_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai);
773 static struct necp_arena_info *necp_fd_mredirect_stats_arena(struct necp_fd_data *fd_data, struct proc *proc);
774
775 static void necp_arena_info_retain(struct necp_arena_info *nai);
776 static void necp_arena_info_release(struct necp_arena_info *nai);
777 static struct necp_arena_info *necp_arena_info_alloc(void);
778 static void necp_arena_info_free(struct necp_arena_info *nai);
779
780 static int necp_arena_initialize(struct necp_fd_data *fd_data, bool locked);
781 static int necp_stats_initialize(struct necp_fd_data *fd_data, struct necp_client *client,
782 struct necp_client_flow_registration *flow_registration, struct necp_stats_bufreq *bufreq);
783 static int necp_arena_create(struct necp_fd_data *fd_data, size_t obj_size, size_t obj_cnt, struct proc *p);
784 static int necp_arena_stats_obj_alloc(struct necp_fd_data *fd_data, mach_vm_offset_t *off, struct necp_arena_info **stats_arena, void **kstats_kaddr, boolean_t cansleep);
785 static void necp_arena_stats_obj_free(struct necp_fd_data *fd_data, struct necp_arena_info *stats_arena, void **kstats_kaddr, mach_vm_address_t *ustats_uaddr);
786 static void necp_stats_arenas_destroy(struct necp_fd_data *fd_data, boolean_t closing);
787
788 static int necp_sysctl_arena_initialize(struct necp_fd_data *fd_data, bool locked);
789 static void necp_sysctl_arena_destroy(struct necp_fd_data *fd_data);
790 static void *necp_arena_sysctls_obj(struct necp_fd_data *fd_data, mach_vm_offset_t *off, size_t *size);
791 #endif /* !SKYWALK */
792
793 static int necp_aop_offload_stats_initialize(struct necp_client_flow_registration *flow_registration, uuid_t netagent_uuid);
794 static void necp_aop_offload_stats_destroy(struct necp_client_flow *flow);
795
796 void necp_copy_inp_domain_info(struct inpcb *, struct socket *, nstat_domain_info *);
797 void necp_with_inp_domain_name(struct socket *so, void *ctx, void (*with_func)(char *domain_name __null_terminated, void *ctx));
798
#if __has_ptrcheck
// Under -fbounds-safety: recover an indexable pointer to the variable-length
// stats_requests array trailing a necp_client_add_flow request. The forged
// bounds cover req->stats_request_count entries; the count is taken from the
// request itself, so it must already have been validated by the caller.
// Returns NULL for a NULL request.
static inline
__attribute__((always_inline)) __pure
struct necp_client_flow_stats * __indexable
necp_client_get_flow_stats(const struct necp_client_add_flow *req)
{
	if (req == NULL) {
		return NULL;
	}

	return __unsafe_forge_bidi_indexable(struct necp_client_flow_stats *, req->stats_requests, sizeof(struct necp_client_flow_stats) * req->stats_request_count);
}
#else
// Without pointer bounds checking, just point at the trailing array.
#define necp_client_get_flow_stats(req) ((struct necp_client_flow_stats *)&(req)->stats_requests[0])
#endif
814
#if __has_ptrcheck
// Under -fbounds-safety: view a signable's data as a bounded byte buffer of
// data_length bytes. The length is supplied by the caller, not derived from
// the signable, so the caller is responsible for its accuracy.
// Returns NULL for a NULL signable.
static inline
__attribute__((always_inline)) __pure
uint8_t * __bidi_indexable
signable_get_data(const struct necp_client_signable *signable, size_t data_length)
{
	if (signable == NULL) {
		return NULL;
	}

	return __unsafe_forge_bidi_indexable(uint8_t *, signable->signable_data, data_length);
}
#else
#define signable_get_data(signable, data_length) ((signable)->signable_data)
#endif
830
#if __has_ptrcheck
// Under -fbounds-safety: return a sockaddr located offset_of_address bytes
// into an add-flow request. Returns NULL for a NULL request.
// NOTE(review): the forged region is only sizeof(struct necp_client_add_flow)
// bytes, yet offset_of_address presumably points into trailing variable-length
// data beyond the fixed head — confirm the intended bounds here.
static inline
__attribute__((always_inline)) __pure
struct sockaddr * __single
flow_req_get_address(const struct necp_client_add_flow *req, size_t offset_of_address)
{
	if (req == NULL) {
		return NULL;
	}

	uint8_t * __indexable req_ptr = __unsafe_forge_bidi_indexable(uint8_t *, req, sizeof(struct necp_client_add_flow));
	return __unsafe_forge_single(struct sockaddr *, req_ptr + offset_of_address);
}
#else
#define flow_req_get_address(req, offset_of_address) ((struct sockaddr *)(((uint8_t *)req) + offset_of_address))
#endif
847
#if __has_ptrcheck
// Under -fbounds-safety: return a pointer to the protocol byte located
// offset_of_proto bytes into an add-flow request. Returns NULL for a NULL
// request. Same bounds caveat as flow_req_get_address: the forge covers only
// the fixed-size head of the request.
static inline
__attribute__((always_inline)) __pure
uint8_t * __single
flow_req_get_proto(const struct necp_client_add_flow *req, size_t offset_of_proto)
{
	if (req == NULL) {
		return NULL;
	}

	uint8_t * __indexable req_ptr = __unsafe_forge_bidi_indexable(uint8_t *, req, sizeof(struct necp_client_add_flow));
	return __unsafe_forge_single(uint8_t *, req_ptr + offset_of_proto);
}
#else
#define flow_req_get_proto(req, offset_of_proto) ((uint8_t *)(((uint8_t *)req) + offset_of_proto))
#endif
864
#if __has_ptrcheck
// Under -fbounds-safety: view an observer update's TLV payload as a bounded
// byte buffer of buffer_size bytes (caller-supplied length).
// Returns NULL for a NULL update.
static inline
__attribute__((always_inline)) __pure
uint8_t * __bidi_indexable
necp_update_get_tlv_buffer(const struct necp_client_observer_update *update, size_t buffer_size)
{
	if (update == NULL) {
		return NULL;
	}

	return __unsafe_forge_bidi_indexable(uint8_t *, update->tlv_buffer, buffer_size);
}
#else
#define necp_update_get_tlv_buffer(update, buffer_size) ((update)->tlv_buffer)
#endif
880
#if __has_ptrcheck
// Under -fbounds-safety: view a resolver answer's hostname as a bounded char
// buffer of hostname_length bytes (caller-supplied length; not guaranteed to
// be NUL-terminated by this helper). Returns NULL for a NULL answer.
static inline
__attribute__((always_inline)) __pure
char * __bidi_indexable
necp_answer_get_hostname(const struct necp_client_host_resolver_answer *answer, size_t hostname_length)
{
	if (answer == NULL) {
		return NULL;
	}

	return __unsafe_forge_bidi_indexable(char *, answer->hostname, hostname_length);
}
#else
#define necp_answer_get_hostname(answer, hostname_length) ((answer)->hostname)
#endif
896
// Acquire the global mutex protecting NECP socket attribute state.
// Pairs with necp_unlock_socket_attributes().
static void
necp_lock_socket_attributes(void)
{
	lck_mtx_lock(&necp_socket_attr_lock);
}
902
// Release the global mutex protecting NECP socket attribute state.
static void
necp_unlock_socket_attributes(void)
{
	lck_mtx_unlock(&necp_socket_attr_lock);
}
908
909 /// NECP file descriptor functions
910
911 static void
necp_fd_notify(struct necp_fd_data * fd_data,bool locked)912 necp_fd_notify(struct necp_fd_data *fd_data, bool locked)
913 {
914 struct selinfo *si = &fd_data->si;
915
916 if (!locked) {
917 NECP_FD_LOCK(fd_data);
918 }
919
920 selwakeup(si);
921
922 // use a non-zero hint to tell the notification from the
923 // call done in kqueue_scan() which uses 0
924 KNOTE(&si->si_note, 1); // notification
925
926 if (!locked) {
927 NECP_FD_UNLOCK(fd_data);
928 }
929 }
930
931 static inline bool
necp_client_has_unread_flows(struct necp_client * client)932 necp_client_has_unread_flows(struct necp_client *client)
933 {
934 NECP_CLIENT_ASSERT_LOCKED(client);
935 struct necp_client_flow_registration *flow_registration = NULL;
936 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
937 if (!flow_registration->flow_result_read) {
938 return true;
939 }
940 }
941 return false;
942 }
943
944 static int
necp_fd_poll(struct necp_fd_data * fd_data,int events,void * wql,struct proc * p,int is_kevent)945 necp_fd_poll(struct necp_fd_data *fd_data, int events, void *wql, struct proc *p, int is_kevent)
946 {
947 #pragma unused(wql, p, is_kevent)
948 u_int revents = 0;
949
950 u_int want_rx = events & (POLLIN | POLLRDNORM);
951 if (want_rx) {
952 if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
953 // Push-mode observers are readable when they have a new update
954 if (!TAILQ_EMPTY(&fd_data->update_list)) {
955 revents |= want_rx;
956 }
957 } else {
958 // Standard fds are readable when some client is unread
959 struct necp_client *client = NULL;
960 bool has_unread_clients = FALSE;
961 RB_FOREACH(client, _necp_client_tree, &fd_data->clients) {
962 NECP_CLIENT_LOCK(client);
963 if (!client->result_read || !client->group_members_read || necp_client_has_unread_flows(client)) {
964 has_unread_clients = TRUE;
965 }
966 NECP_CLIENT_UNLOCK(client);
967 if (has_unread_clients) {
968 break;
969 }
970 }
971
972 if (has_unread_clients || fd_data->request_in_process_flow_divert) {
973 revents |= want_rx;
974 }
975 }
976 }
977
978 return revents;
979 }
980
981 static inline void
necp_generate_client_id(uuid_t client_id,bool is_flow)982 necp_generate_client_id(uuid_t client_id, bool is_flow)
983 {
984 uuid_generate_random(client_id);
985
986 if (is_flow) {
987 client_id[9] |= 0x01;
988 } else {
989 client_id[9] &= ~0x01;
990 }
991 }
992
993 static inline bool
necp_client_id_is_flow(uuid_t client_id)994 necp_client_id_is_flow(uuid_t client_id)
995 {
996 return client_id[9] & 0x01;
997 }
998
// Look up a client in the global trees by either a plain client id or a flow
// registration id (distinguished by the tag bit, see necp_client_id_is_flow)
// and return it locked, or NULL if not found. Caller must already hold the
// client tree lock, and is responsible for unlocking the returned client.
static struct necp_client *
necp_find_client_and_lock(uuid_t client_id)
{
	NECP_CLIENT_TREE_ASSERT_LOCKED();

	struct necp_client *client = NULL;

	if (necp_client_id_is_flow(client_id)) {
		// Flow ids resolve through the global flow tree to the owning client
		NECP_FLOW_TREE_LOCK_SHARED();
		struct necp_client_flow_registration find;
		uuid_copy(find.registration_id, client_id);
		struct necp_client_flow_registration *flow = RB_FIND(_necp_client_flow_global_tree, &necp_client_flow_global_tree, &find);
		if (flow != NULL) {
			client = flow->client;
		}
		NECP_FLOW_TREE_UNLOCK();
	} else {
		struct necp_client find;
		uuid_copy(find.client_id, client_id);
		client = RB_FIND(_necp_client_global_tree, &necp_client_global_tree, &find);
	}

	if (client != NULL) {
		NECP_CLIENT_LOCK(client);
	}

	return client;
}
1027
1028 static struct necp_client_flow_registration *
necp_client_find_flow(struct necp_client * client,uuid_t flow_id)1029 necp_client_find_flow(struct necp_client *client, uuid_t flow_id)
1030 {
1031 NECP_CLIENT_ASSERT_LOCKED(client);
1032 struct necp_client_flow_registration *flow = NULL;
1033
1034 if (necp_client_id_is_flow(flow_id)) {
1035 struct necp_client_flow_registration find;
1036 uuid_copy(find.registration_id, flow_id);
1037 flow = RB_FIND(_necp_client_flow_tree, &client->flow_registrations, &find);
1038 } else {
1039 flow = RB_ROOT(&client->flow_registrations);
1040 }
1041
1042 return flow;
1043 }
1044
1045 static struct necp_client *
necp_client_fd_find_client_unlocked(struct necp_fd_data * client_fd,uuid_t client_id)1046 necp_client_fd_find_client_unlocked(struct necp_fd_data *client_fd, uuid_t client_id)
1047 {
1048 NECP_FD_ASSERT_LOCKED(client_fd);
1049 struct necp_client *client = NULL;
1050
1051 if (necp_client_id_is_flow(client_id)) {
1052 struct necp_client_flow_registration find;
1053 uuid_copy(find.registration_id, client_id);
1054 struct necp_client_flow_registration *flow = RB_FIND(_necp_fd_flow_tree, &client_fd->flows, &find);
1055 if (flow != NULL) {
1056 client = flow->client;
1057 }
1058 } else {
1059 struct necp_client find;
1060 uuid_copy(find.client_id, client_id);
1061 client = RB_FIND(_necp_client_tree, &client_fd->clients, &find);
1062 }
1063
1064 return client;
1065 }
1066
1067 static struct necp_client *
necp_client_fd_find_client_and_lock(struct necp_fd_data * client_fd,uuid_t client_id)1068 necp_client_fd_find_client_and_lock(struct necp_fd_data *client_fd, uuid_t client_id)
1069 {
1070 struct necp_client *client = necp_client_fd_find_client_unlocked(client_fd, client_id);
1071 if (client != NULL) {
1072 NECP_CLIENT_LOCK(client);
1073 }
1074
1075 return client;
1076 }
1077
// RB-tree comparator for clients, ordered by client UUID.
static inline int
necp_client_id_cmp(struct necp_client *client0, struct necp_client *client1)
{
	return uuid_compare(client0->client_id, client1->client_id);
}
1083
// RB-tree comparator for flow registrations, ordered by registration UUID.
static inline int
necp_client_flow_id_cmp(struct necp_client_flow_registration *flow0, struct necp_client_flow_registration *flow1)
{
	return uuid_compare(flow0->registration_id, flow1->registration_id);
}
1089
1090 static int
necpop_select(struct fileproc * fp,int which,void * wql,vfs_context_t ctx)1091 necpop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
1092 {
1093 #pragma unused(fp, which, wql, ctx)
1094 return 0;
1095 struct necp_fd_data *fd_data = NULL;
1096 int revents = 0;
1097 int events = 0;
1098 proc_t procp;
1099
1100 fd_data = (struct necp_fd_data *)fp_get_data(fp);
1101 if (fd_data == NULL) {
1102 return 0;
1103 }
1104
1105 procp = vfs_context_proc(ctx);
1106
1107 switch (which) {
1108 case FREAD: {
1109 events = POLLIN;
1110 break;
1111 }
1112
1113 default: {
1114 return 1;
1115 }
1116 }
1117
1118 NECP_FD_LOCK(fd_data);
1119 revents = necp_fd_poll(fd_data, events, wql, procp, 0);
1120 NECP_FD_UNLOCK(fd_data);
1121
1122 return (events & revents) ? 1 : 0;
1123 }
1124
1125 static void
necp_fd_knrdetach(struct knote * kn)1126 necp_fd_knrdetach(struct knote *kn)
1127 {
1128 struct necp_fd_data *fd_data = (struct necp_fd_data *)knote_kn_hook_get_raw(kn);
1129 struct selinfo *si = &fd_data->si;
1130
1131 NECP_FD_LOCK(fd_data);
1132 KNOTE_DETACH(&si->si_note, kn);
1133 NECP_FD_UNLOCK(fd_data);
1134 }
1135
// f_event: always report the knote as active; the real readability check is
// deferred to f_process/f_touch, which call necp_fd_poll().
static int
necp_fd_knread(struct knote *kn, long hint)
{
#pragma unused(kn, hint)
	return 1; /* assume we are ready */
}
1142
1143 static int
necp_fd_knrprocess(struct knote * kn,struct kevent_qos_s * kev)1144 necp_fd_knrprocess(struct knote *kn, struct kevent_qos_s *kev)
1145 {
1146 struct necp_fd_data *fd_data;
1147 int revents;
1148 int res;
1149
1150 fd_data = (struct necp_fd_data *)knote_kn_hook_get_raw(kn);
1151
1152 NECP_FD_LOCK(fd_data);
1153 revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
1154 res = ((revents & POLLIN) != 0);
1155 if (res) {
1156 knote_fill_kevent(kn, kev, 0);
1157 }
1158 NECP_FD_UNLOCK(fd_data);
1159 return res;
1160 }
1161
1162 static int
necp_fd_knrtouch(struct knote * kn,struct kevent_qos_s * kev)1163 necp_fd_knrtouch(struct knote *kn, struct kevent_qos_s *kev)
1164 {
1165 #pragma unused(kev)
1166 struct necp_fd_data *fd_data;
1167 int revents;
1168
1169 fd_data = (struct necp_fd_data *)knote_kn_hook_get_raw(kn);
1170
1171 NECP_FD_LOCK(fd_data);
1172 revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
1173 NECP_FD_UNLOCK(fd_data);
1174
1175 return (revents & POLLIN) != 0;
1176 }
1177
// Read-filter operations for NECP fds; attached by necpop_kqfilter().
// f_event always fires; the real readiness decision lives in
// f_touch/f_process via necp_fd_poll().
SECURITY_READ_ONLY_EARLY(struct filterops) necp_fd_rfiltops = {
	.f_isfd = 1,
	.f_detach = necp_fd_knrdetach,
	.f_event = necp_fd_knread,
	.f_touch = necp_fd_knrtouch,
	.f_process = necp_fd_knrprocess,
};
1185
1186 static int
necpop_kqfilter(struct fileproc * fp,struct knote * kn,__unused struct kevent_qos_s * kev)1187 necpop_kqfilter(struct fileproc *fp, struct knote *kn,
1188 __unused struct kevent_qos_s *kev)
1189 {
1190 struct necp_fd_data *fd_data = NULL;
1191 int revents;
1192
1193 if (kn->kn_filter != EVFILT_READ) {
1194 NECPLOG(LOG_ERR, "bad filter request %d", kn->kn_filter);
1195 knote_set_error(kn, EINVAL);
1196 return 0;
1197 }
1198
1199 fd_data = (struct necp_fd_data *)fp_get_data(fp);
1200 if (fd_data == NULL) {
1201 NECPLOG0(LOG_ERR, "No channel for kqfilter");
1202 knote_set_error(kn, ENOENT);
1203 return 0;
1204 }
1205
1206 NECP_FD_LOCK(fd_data);
1207 kn->kn_filtid = EVFILTID_NECP_FD;
1208 knote_kn_hook_set_raw(kn, fd_data);
1209 KNOTE_ATTACH(&fd_data->si.si_note, kn);
1210
1211 revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
1212
1213 NECP_FD_UNLOCK(fd_data);
1214
1215 return (revents & POLLIN) != 0;
1216 }
1217
#define INTERFACE_FLAGS_SHIFT 32
#define INTERFACE_FLAGS_MASK 0xffffffff
#define INTERFACE_INDEX_SHIFT 0
#define INTERFACE_INDEX_MASK 0xffffffff

// Pack an interface index (low 32 bits) and its flags (high 32 bits) into a
// single 64-bit value; the inverse of split_interface_details().
static uint64_t
combine_interface_details(uint32_t interface_index, uint32_t interface_flags)
{
	const uint64_t flags_part = ((uint64_t)interface_flags & INTERFACE_FLAGS_MASK) << INTERFACE_FLAGS_SHIFT;
	const uint64_t index_part = ((uint64_t)interface_index & INTERFACE_INDEX_MASK) << INTERFACE_INDEX_SHIFT;
	return flags_part | index_part;
}
1229
1230 #if SKYWALK
1231
1232 static void
split_interface_details(uint64_t combined_details,uint32_t * interface_index,uint32_t * interface_flags)1233 split_interface_details(uint64_t combined_details, uint32_t *interface_index, uint32_t *interface_flags)
1234 {
1235 *interface_index = (combined_details >> INTERFACE_INDEX_SHIFT) & INTERFACE_INDEX_MASK;
1236 *interface_flags = (combined_details >> INTERFACE_FLAGS_SHIFT) & INTERFACE_FLAGS_MASK;
1237 }
1238
// Cache the interface index + flags of the registration's first nexus flow
// into last_interface_details as one packed 64-bit value, published with
// release ordering so readers see a consistent snapshot. Only the first
// nexus flow in the list is recorded.
static void
necp_flow_save_current_interface_details(struct necp_client_flow_registration *flow_registration)
{
	struct necp_client_flow *flow = NULL;
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		if (flow->nexus) {
			uint64_t combined_details = combine_interface_details(flow->interface_index, flow->interface_flags);
			os_atomic_store(&flow_registration->last_interface_details, combined_details, release);
			break;
		}
	}
}
1251
// Fold userspace-reported per-flow interface stats into the kernel's trusted
// per-ifnet counters. Stats are only folded when both tx and rx packet
// counts are non-zero (an all-zero side suggests the app never published
// them, e.g. after a crash).
static void
necp_client_collect_interface_stats(struct necp_client_flow_registration *flow_registration, struct ifnet_stats_per_flow *ifs)
{
	struct necp_client_flow *flow = NULL;

	if (ifs == NULL || ifs->txpackets == 0 || ifs->rxpackets == 0) {
		return; // App might have crashed without publishing ifs
	}

	// Do malicious stats detection here

	// Fold userspace stats into (trusted) kernel stats (stored in ifp).
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		uint32_t if_idx = flow->interface_index;
		ifnet_t ifp = NULL;
		ifnet_head_lock_shared();
		if (if_idx != IFSCOPE_NONE && if_idx <= (uint32_t)if_index) {
			// NOTE(review): ifindex2ifnet[if_idx] can be NULL for a
			// detached index; presumably ifnet_update_stats_per_flow
			// tolerates a NULL ifp — confirm.
			ifp = ifindex2ifnet[if_idx];
			ifnet_update_stats_per_flow(ifs, ifp);
		}
		ifnet_head_done();

		// Currently there is only one flow that uses the shared necp
		// stats region, so this loop should exit after updating an ifp
		break;
	}
}
1279
// Pull per-flow stats from the AOP (always-on processor) offload engine and
// fold the deltas since the last collection into the shared kernel stats
// region, the route stats, and the nexus flow stats. Expects an AOP-offload
// registration with a valid kstats region; at most one flow is expected on
// the registration's flow list (asserted below).
static void
necp_client_collect_aop_flow_stats(struct necp_client_flow_registration *flow_registration)
{
	struct aop_flow_stats flow_stats = {};
	// tcpi aliases into flow_stats; it is filled by net_aop_get_flow_stats()
	struct tcp_info *tcpi = &flow_stats.transport.tcp_stats.tcp_info;
	uint32_t aop_flow_count = 0;
	int err = 0;

	ASSERT(flow_registration->aop_offload);
	struct necp_all_kstats *kstats = (struct necp_all_kstats *)flow_registration->kstats_kaddr;
	if (kstats == NULL) {
		return;
	}

	// Previously-folded totals live in the kernel-trusted half of the
	// shared stats region; deltas below are computed against them.
	struct necp_stat_counts *prev_tcpstats = &(((struct necp_tcp_stats *)&kstats->necp_stats_comm)->necp_tcp_counts);
	// NOTE(review): nexus_stats is dereferenced here to form sf before the
	// sf != NULL check further down; this is only safe if fs_stats is at
	// offset 0 of *nexus_stats (or nexus_stats is never NULL) — confirm.
	struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;

	struct necp_client_flow *flow = NULL;
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		aop_flow_count++;
		ASSERT(flow->aop_offload && aop_flow_count == 1);
		if (flow->flow_tag > 0 && flow->aop_stat_index_valid) {
			err = net_aop_get_flow_stats(flow->stats_index, &flow_stats);
			if (err != 0) {
				NECPLOG(LOG_ERR, "failed to get aop flow stats "
				    "for flow id %u with error %d", flow->flow_tag, err);
				continue;
			}

			// Sanity: the stats entry must belong to this flow tag
			if (__improbable(flow->flow_tag != flow_stats.flow_id)) {
				NECPLOG(LOG_NOTICE, "aop flow stats, flow tag 0x%x != 0x%x",
				    flow->flow_tag, flow_stats.flow_id);
				continue;
			}

			// No packet movement since the last collection: skip
			if ((prev_tcpstats->necp_stat_rxpackets == tcpi->tcpi_rxpackets) &&
			    prev_tcpstats->necp_stat_txpackets == tcpi->tcpi_txpackets) {
				continue;
			}

			// Compute deltas (current - previous) and accumulate
			uint32_t d_rxpackets = tcpi->tcpi_rxpackets - prev_tcpstats->necp_stat_rxpackets;
			prev_tcpstats->necp_stat_rxpackets += d_rxpackets;

			uint32_t d_txpackets = tcpi->tcpi_txpackets - prev_tcpstats->necp_stat_txpackets;
			prev_tcpstats->necp_stat_txpackets += d_txpackets;

			uint32_t d_rxbytes = tcpi->tcpi_rxbytes - prev_tcpstats->necp_stat_rxbytes;
			prev_tcpstats->necp_stat_rxbytes += d_rxbytes;

			uint32_t d_txbytes = tcpi->tcpi_txbytes - prev_tcpstats->necp_stat_txbytes;
			prev_tcpstats->necp_stat_txbytes += d_txbytes;

			uint32_t d_rxduplicatebytes = tcpi->tcpi_rxduplicatebytes - prev_tcpstats->necp_stat_rxduplicatebytes;
			prev_tcpstats->necp_stat_rxduplicatebytes += d_rxduplicatebytes;

			uint32_t d_rxoutoforderbytes = tcpi->tcpi_rxoutoforderbytes - prev_tcpstats->necp_stat_rxoutoforderbytes;
			prev_tcpstats->necp_stat_rxoutoforderbytes += d_rxoutoforderbytes;

			uint32_t d_txretransmit = tcpi->tcpi_txretransmitbytes - prev_tcpstats->necp_stat_txretransmit;
			prev_tcpstats->necp_stat_txretransmit += d_txretransmit;

			// NOTE(review): the two deltas below are computed as
			// (previous - current), the reverse of every other delta in
			// this function; this looks suspicious — confirm intended.
			uint32_t d_connectattempts = prev_tcpstats->necp_stat_connectattempts - (tcpi->tcpi_state >= TCPS_SYN_SENT ? 1 : 0);
			prev_tcpstats->necp_stat_connectattempts += d_connectattempts;

			uint32_t d_connectsuccesses = prev_tcpstats->necp_stat_connectsuccesses - (tcpi->tcpi_state >= TCPS_ESTABLISHED ? 1 : 0);
			prev_tcpstats->necp_stat_connectsuccesses += d_connectsuccesses;

			// RTT values are snapshots, not deltas
			prev_tcpstats->necp_stat_avg_rtt = tcpi->tcpi_srtt;
			prev_tcpstats->necp_stat_var_rtt = tcpi->tcpi_rttvar;

			/* Update route stats */
			NECP_CLIENT_ROUTE_LOCK(flow_registration->client);
			struct rtentry *route = flow_registration->client->current_route;
			if (route != NULL) {
				nstat_route_update(route, d_connectattempts,
				    d_connectsuccesses, d_rxpackets, d_rxbytes,
				    d_rxduplicatebytes, d_rxoutoforderbytes,
				    d_txpackets, d_txbytes, d_txretransmit,
				    prev_tcpstats->necp_stat_avg_rtt, prev_tcpstats->necp_stat_var_rtt);
			}
			NECP_CLIENT_ROUTE_UNLOCK(flow_registration->client);

			/* Update nexus flow stats */
			if (sf != NULL) {
				sf->sf_ibytes = flow_stats.rxbytes;
				sf->sf_obytes = flow_stats.txbytes;
				sf->sf_ipackets = flow_stats.rxpkts;
				sf->sf_opackets = flow_stats.txpkts;
				sf->sf_lseq = tcpi->tcpi_snd_nxt - 1;
				sf->sf_rseq = tcpi->tcpi_rcv_nxt - 1;
				sf->sf_lrtt = tcpi->tcpi_srtt;
				sf->sf_rrtt = tcpi->tcpi_rcv_srtt;
				sf->sf_ltrack.sft_state = tcpi->tcpi_state;
				sf->sf_lwscale = tcpi->tcpi_snd_wscale;
				sf->sf_rwscale = tcpi->tcpi_rcv_wscale;

				memcpy(&sf->sf_activity, &flow_stats.activity_bitmap,
				    sizeof(sf->sf_activity));
			}
		}
	}
}
1382
// Fold the userspace-published (untrusted) TCP stats deltas for a nexus flow
// into the kernel-trusted copy in the shared stats region, then propagate the
// deltas to the client's route stats. Bails out early when no packets moved
// since the last collection. RTT values outside the configured sanity window
// are zeroed so nstat_route_update() skips them.
static void
necp_client_collect_nexus_flow_stats(struct necp_client_flow_registration *flow_registration)
{
	ASSERT(!flow_registration->aop_offload);

	struct necp_all_kstats *kstats = (struct necp_all_kstats *)flow_registration->kstats_kaddr;
	if (kstats == NULL) {
		return;
	}

	// Grab userspace stats delta (untrusted).
	struct necp_tcp_stats *curr_tcpstats = (struct necp_tcp_stats *)kstats->necp_stats_ustats;
	struct necp_tcp_stats *prev_tcpstats = (struct necp_tcp_stats *)&kstats->necp_stats_comm;
// For each counter: compute delta = current - previous, then advance the
// trusted previous copy by that delta. Declares a local d_<field>.
#define diff_n_update(field) \
	u_int32_t d_##field = (curr_tcpstats->necp_tcp_counts.necp_stat_##field - prev_tcpstats->necp_tcp_counts.necp_stat_##field); \
	prev_tcpstats->necp_tcp_counts.necp_stat_##field += d_##field;
	diff_n_update(rxpackets);
	diff_n_update(txpackets);
	if (d_rxpackets == 0 && d_txpackets == 0) {
		return; // no activity since last collection, stop here
	}
	diff_n_update(rxbytes);
	diff_n_update(txbytes);
	diff_n_update(rxduplicatebytes);
	diff_n_update(rxoutoforderbytes);
	diff_n_update(txretransmit);
	diff_n_update(connectattempts);
	diff_n_update(connectsuccesses);
	// RTT values are copied as snapshots (not deltas)
	uint32_t rtt = prev_tcpstats->necp_tcp_counts.necp_stat_avg_rtt = curr_tcpstats->necp_tcp_counts.necp_stat_avg_rtt;
	uint32_t rtt_var = prev_tcpstats->necp_tcp_counts.necp_stat_var_rtt = curr_tcpstats->necp_tcp_counts.necp_stat_var_rtt;
#undef diff_n_update

	// Do malicious stats detection with the deltas here.
	// RTT check (not necessarily attacks, might just be not measured since we report stats async periodically).
	if (rtt < necp_client_stats_rtt_floor || rtt > necp_client_stats_rtt_ceiling) {
		rtt = rtt_var = 0; // nstat_route_update to skip 0 rtt
	}

	// Fold userspace stats into (trusted) kernel stats (stored in route).
	NECP_CLIENT_ROUTE_LOCK(flow_registration->client);
	struct rtentry *route = flow_registration->client->current_route;
	if (route != NULL) {
		nstat_route_update(route, d_connectattempts, d_connectsuccesses, d_rxpackets, d_rxbytes, d_rxduplicatebytes,
		    d_rxoutoforderbytes, d_txpackets, d_txbytes, d_txretransmit, rtt, rtt_var);
	}
	NECP_CLIENT_ROUTE_UNLOCK(flow_registration->client);
}
1430
1431 static void
necp_client_collect_stats(struct necp_client_flow_registration * flow_registration)1432 necp_client_collect_stats(struct necp_client_flow_registration *flow_registration)
1433 {
1434 if (__probable(!flow_registration->aop_offload)) {
1435 necp_client_collect_nexus_flow_stats(flow_registration);
1436 } else {
1437 necp_client_collect_aop_flow_stats(flow_registration);
1438 }
1439 }
1440
// This is called from various places; "closing" here implies the client being closed/removed if true, otherwise being
// defunct. In the former, we expect the caller to not hold the lock; for the latter it must have acquired it.
//
// Tears down all stats state on a flow registration: performs one final
// stats collection, removes the registration from the global collection
// list (dropping the reference that list held on the client), closes the
// ntstat handler, frees the shared stats object, and releases any nexus
// stats. Caller must hold the fd lock in both modes.
static void
necp_destroy_flow_stats(struct necp_fd_data *fd_data,
    struct necp_client_flow_registration *flow_registration,
    struct ifnet_stats_per_flow *flow_ifnet_stats,
    boolean_t closing)
{
	NECP_FD_ASSERT_LOCKED(fd_data);

	struct necp_client *client = flow_registration->client;

	if (closing) {
		NECP_CLIENT_ASSERT_UNLOCKED(client);
		NECP_CLIENT_LOCK(client);
	} else {
		NECP_CLIENT_ASSERT_LOCKED(client);
	}

	// the interface stats are independent of the flow stats, hence we check here
	if (flow_ifnet_stats != NULL) {
		necp_client_collect_interface_stats(flow_registration, flow_ifnet_stats);
	}

	if (flow_registration->kstats_kaddr != NULL) {
		// Final collection under the stats list lock, then unhook from
		// the periodic collection list
		NECP_STATS_LIST_LOCK_EXCLUSIVE();
		necp_client_collect_stats(flow_registration);
		const bool destroyed = necp_client_release_locked(client); // Drop the reference held by the stats list
		ASSERT(!destroyed);
		(void)destroyed;
		LIST_REMOVE(flow_registration, collect_stats_chain);
		NECP_STATS_LIST_UNLOCK();
		if (flow_registration->stats_handler_context != NULL) {
			ntstat_userland_stats_close(flow_registration->stats_handler_context);
			flow_registration->stats_handler_context = NULL;
		}
		necp_arena_stats_obj_free(fd_data, flow_registration->stats_arena, &flow_registration->kstats_kaddr, &flow_registration->ustats_uaddr);
		ASSERT(flow_registration->kstats_kaddr == NULL);
		ASSERT(flow_registration->ustats_uaddr == 0);
	}

	if (flow_registration->nexus_stats != NULL) {
		flow_stats_release(flow_registration->nexus_stats);
		flow_registration->nexus_stats = NULL;
	}

	if (closing) {
		NECP_CLIENT_UNLOCK(client);
	}
}
1491
1492 static void
necp_schedule_collect_stats_clients(bool recur)1493 necp_schedule_collect_stats_clients(bool recur)
1494 {
1495 if (necp_client_collect_stats_tcall == NULL ||
1496 (!recur && thread_call_isactive(necp_client_collect_stats_tcall))) {
1497 return;
1498 }
1499
1500 uint64_t deadline = 0;
1501 uint64_t leeway = 0;
1502 clock_interval_to_deadline(necp_collect_stats_timeout_microseconds, NSEC_PER_USEC, &deadline);
1503 clock_interval_to_absolutetime_interval(necp_collect_stats_timeout_leeway_microseconds, NSEC_PER_USEC, &leeway);
1504
1505 thread_call_enter_delayed_with_leeway(necp_client_collect_stats_tcall, NULL,
1506 deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
1507 }
1508
// Thread-call callout: walk every flow registration on the global stats
// collection list and fold in its latest stats, then rearm the timer.
// If the list is empty, the timer is NOT rearmed — it restarts the next
// time a registration is added and necp_schedule_collect_stats_clients()
// is called.
static void
necp_collect_stats_client_callout(__unused thread_call_param_t dummy,
    __unused thread_call_param_t arg)
{
	struct necp_client_flow_registration *flow_registration;

	net_update_uptime();
	NECP_STATS_LIST_LOCK_SHARED();
	if (LIST_EMPTY(&necp_collect_stats_flow_list)) {
		NECP_STATS_LIST_UNLOCK();
		return;
	}
	LIST_FOREACH(flow_registration, &necp_collect_stats_flow_list, collect_stats_chain) {
		// Collecting stats should be cheap (atomic increments)
		// Values like flow_registration->kstats_kaddr are guaranteed to be valid
		// as long as the flow_registration is in the stats list
		necp_client_collect_stats(flow_registration);
	}
	NECP_STATS_LIST_UNLOCK();

	necp_schedule_collect_stats_clients(TRUE); // recurring collection
}
1531
1532 #endif /* !SKYWALK */
1533
1534 static void
necp_defunct_flow_registration(struct necp_client * client,struct necp_client_flow_registration * flow_registration,struct _necp_flow_defunct_list * defunct_list,bool defunct_socket_flows)1535 necp_defunct_flow_registration(struct necp_client *client,
1536 struct necp_client_flow_registration *flow_registration,
1537 struct _necp_flow_defunct_list *defunct_list,
1538 bool defunct_socket_flows)
1539 {
1540 NECP_CLIENT_ASSERT_LOCKED(client);
1541
1542 if (!flow_registration->defunct) {
1543 bool needs_defunct = false;
1544 struct necp_client_flow *search_flow = NULL;
1545 LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
1546 bool should_defunct_flow = false;
1547 if (search_flow->nexus &&
1548 !uuid_is_null(search_flow->u.nexus_agent)) {
1549 should_defunct_flow = true;
1550 } else if (defunct_socket_flows &&
1551 search_flow->socket &&
1552 search_flow->u.socket_handle != NULL) {
1553 should_defunct_flow = true;
1554 }
1555
1556 if (should_defunct_flow) {
1557 // Save defunct values for the nexus/socket
1558 if (defunct_list != NULL) {
1559 // Sleeping alloc won't fail; copy only what's necessary
1560 struct necp_flow_defunct *flow_defunct = kalloc_type(struct necp_flow_defunct,
1561 Z_WAITOK | Z_ZERO);
1562 if (search_flow->nexus) {
1563 uuid_copy(flow_defunct->nexus_agent, search_flow->u.nexus_agent);
1564 } else if (search_flow->socket) {
1565 flow_defunct->socket_handle = search_flow->u.socket_handle;
1566 }
1567 uuid_copy(flow_defunct->flow_id, ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
1568 client->client_id :
1569 flow_registration->registration_id));
1570 flow_defunct->proc_pid = client->proc_pid;
1571 flow_defunct->agent_handle = client->agent_handle;
1572 flow_defunct->flags = flow_registration->flags;
1573 #if SKYWALK
1574 if (flow_registration->kstats_kaddr != NULL) {
1575 struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
1576 struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
1577 if (quicstats != NULL) {
1578 memcpy(flow_defunct->close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(flow_defunct->close_parameters.u.close_token));
1579 flow_defunct->has_close_parameters = true;
1580 }
1581 }
1582 #endif /* SKYWALK */
1583 // Add to the list provided by caller
1584 LIST_INSERT_HEAD(defunct_list, flow_defunct, chain);
1585 }
1586
1587 needs_defunct = true;
1588 }
1589 }
1590
1591 if (needs_defunct) {
1592 #if SKYWALK
1593 // Close the stats early
1594 if (flow_registration->stats_handler_context != NULL) {
1595 ntstat_userland_stats_event(flow_registration->stats_handler_context,
1596 NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT);
1597 }
1598 #endif /* SKYWALK */
1599
1600 // Only set defunct if there was some assigned flow
1601 flow_registration->defunct = true;
1602 }
1603 }
1604 }
1605
1606 static void
necp_defunct_client_for_policy(struct necp_client * client,struct _necp_flow_defunct_list * defunct_list,bool defunct_socket_flows)1607 necp_defunct_client_for_policy(struct necp_client *client,
1608 struct _necp_flow_defunct_list *defunct_list, bool defunct_socket_flows)
1609 {
1610 NECP_CLIENT_ASSERT_LOCKED(client);
1611
1612 struct necp_client_flow_registration *flow_registration = NULL;
1613 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
1614 necp_defunct_flow_registration(client, flow_registration, defunct_list, defunct_socket_flows);
1615 }
1616 }
1617
1618 static void
necp_client_free(struct necp_client * client)1619 necp_client_free(struct necp_client *client)
1620 {
1621 NECP_CLIENT_ASSERT_UNLOCKED(client);
1622
1623 kfree_data(client->extra_interface_options,
1624 sizeof(struct necp_client_interface_option) * NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT);
1625 client->extra_interface_options = NULL;
1626
1627 kfree_data_sized_by(client->parameters, client->parameters_length);
1628 kfree_data_counted_by(client->assigned_group_members, client->assigned_group_members_length);
1629
1630 lck_mtx_destroy(&client->route_lock, &necp_fd_mtx_grp);
1631 lck_mtx_destroy(&client->lock, &necp_fd_mtx_grp);
1632
1633 kfree_type(struct necp_client, client);
1634 }
1635
1636 static void
necp_client_retain_locked(struct necp_client * client)1637 necp_client_retain_locked(struct necp_client *client)
1638 {
1639 NECP_CLIENT_ASSERT_LOCKED(client);
1640
1641 os_ref_retain_locked(&client->reference_count);
1642 }
1643
/*
 * Take an additional reference on a client, acquiring its lock around
 * the reference-count update.
 */
static void
necp_client_retain(struct necp_client *client)
{
	NECP_CLIENT_LOCK(client);
	necp_client_retain_locked(client);
	NECP_CLIENT_UNLOCK(client);
}
1651
1652 static bool
necp_client_release_locked(struct necp_client * client)1653 necp_client_release_locked(struct necp_client *client)
1654 {
1655 NECP_CLIENT_ASSERT_LOCKED(client);
1656
1657 os_ref_count_t count = os_ref_release_locked(&client->reference_count);
1658 if (count == 0) {
1659 NECP_CLIENT_UNLOCK(client);
1660 necp_client_free(client);
1661 }
1662
1663 return count == 0;
1664 }
1665
1666 static bool
necp_client_release(struct necp_client * client)1667 necp_client_release(struct necp_client *client)
1668 {
1669 bool last_ref;
1670
1671 NECP_CLIENT_LOCK(client);
1672 if (!(last_ref = necp_client_release_locked(client))) {
1673 NECP_CLIENT_UNLOCK(client);
1674 }
1675
1676 return last_ref;
1677 }
1678
1679 static struct necp_client_update *
necp_client_update_alloc(const void * __sized_by (length)data,size_t length)1680 necp_client_update_alloc(const void * __sized_by(length)data, size_t length)
1681 {
1682 struct necp_client_update *client_update;
1683 struct necp_client_observer_update *buffer;
1684 size_t alloc_size;
1685
1686 if (os_add_overflow(length, sizeof(*buffer), &alloc_size)) {
1687 return NULL;
1688 }
1689 buffer = kalloc_data(alloc_size, Z_WAITOK);
1690 if (buffer == NULL) {
1691 return NULL;
1692 }
1693
1694 client_update = kalloc_type(struct necp_client_update,
1695 Z_WAITOK | Z_ZERO | Z_NOFAIL);
1696 client_update->update_length = alloc_size;
1697 client_update->update = buffer;
1698 memcpy(necp_update_get_tlv_buffer(buffer, alloc_size), data, length);
1699 return client_update;
1700 }
1701
1702 static void
necp_client_update_free(struct necp_client_update * client_update)1703 necp_client_update_free(struct necp_client_update *client_update)
1704 {
1705 kfree_data_sized_by(client_update->update, client_update->update_length);
1706 kfree_type(struct necp_client_update, client_update);
1707 }
1708
1709 static void
necp_client_update_observer_add_internal(struct necp_fd_data * observer_fd,struct necp_client * client)1710 necp_client_update_observer_add_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1711 {
1712 struct necp_client_update *client_update;
1713
1714 NECP_FD_LOCK(observer_fd);
1715
1716 if (observer_fd->update_count >= necp_observer_message_limit) {
1717 NECP_FD_UNLOCK(observer_fd);
1718 return;
1719 }
1720
1721 client_update = necp_client_update_alloc(client->parameters, client->parameters_length);
1722 if (client_update != NULL) {
1723 uuid_copy(client_update->client_id, client->client_id);
1724 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_PARAMETERS;
1725 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1726 observer_fd->update_count++;
1727
1728 necp_fd_notify(observer_fd, true);
1729 }
1730
1731 NECP_FD_UNLOCK(observer_fd);
1732 }
1733
1734 static void
necp_client_update_observer_update_internal(struct necp_fd_data * observer_fd,struct necp_client * client)1735 necp_client_update_observer_update_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1736 {
1737 NECP_FD_LOCK(observer_fd);
1738
1739 if (observer_fd->update_count >= necp_observer_message_limit) {
1740 NECP_FD_UNLOCK(observer_fd);
1741 return;
1742 }
1743
1744 struct necp_client_update *client_update = necp_client_update_alloc(client->result, client->result_length);
1745 if (client_update != NULL) {
1746 uuid_copy(client_update->client_id, client->client_id);
1747 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_RESULT;
1748 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1749 observer_fd->update_count++;
1750
1751 necp_fd_notify(observer_fd, true);
1752 }
1753
1754 NECP_FD_UNLOCK(observer_fd);
1755 }
1756
1757 static void
necp_client_update_observer_remove_internal(struct necp_fd_data * observer_fd,struct necp_client * client)1758 necp_client_update_observer_remove_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1759 {
1760 NECP_FD_LOCK(observer_fd);
1761
1762 if (observer_fd->update_count >= necp_observer_message_limit) {
1763 NECP_FD_UNLOCK(observer_fd);
1764 return;
1765 }
1766
1767 struct necp_client_update *client_update = necp_client_update_alloc(NULL, 0);
1768 if (client_update != NULL) {
1769 uuid_copy(client_update->client_id, client->client_id);
1770 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_REMOVE;
1771 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1772 observer_fd->update_count++;
1773
1774 necp_fd_notify(observer_fd, true);
1775 }
1776
1777 NECP_FD_UNLOCK(observer_fd);
1778 }
1779
1780 static void
necp_client_update_observer_add(struct necp_client * client)1781 necp_client_update_observer_add(struct necp_client *client)
1782 {
1783 NECP_OBSERVER_LIST_LOCK_SHARED();
1784
1785 if (LIST_EMPTY(&necp_fd_observer_list)) {
1786 // No observers, bail
1787 NECP_OBSERVER_LIST_UNLOCK();
1788 return;
1789 }
1790
1791 struct necp_fd_data *observer_fd = NULL;
1792 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1793 necp_client_update_observer_add_internal(observer_fd, client);
1794 }
1795
1796 NECP_OBSERVER_LIST_UNLOCK();
1797 }
1798
1799 static void
necp_client_update_observer_update(struct necp_client * client)1800 necp_client_update_observer_update(struct necp_client *client)
1801 {
1802 NECP_OBSERVER_LIST_LOCK_SHARED();
1803
1804 if (LIST_EMPTY(&necp_fd_observer_list)) {
1805 // No observers, bail
1806 NECP_OBSERVER_LIST_UNLOCK();
1807 return;
1808 }
1809
1810 struct necp_fd_data *observer_fd = NULL;
1811 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1812 necp_client_update_observer_update_internal(observer_fd, client);
1813 }
1814
1815 NECP_OBSERVER_LIST_UNLOCK();
1816 }
1817
1818 static void
necp_client_update_observer_remove(struct necp_client * client)1819 necp_client_update_observer_remove(struct necp_client *client)
1820 {
1821 NECP_OBSERVER_LIST_LOCK_SHARED();
1822
1823 if (LIST_EMPTY(&necp_fd_observer_list)) {
1824 // No observers, bail
1825 NECP_OBSERVER_LIST_UNLOCK();
1826 return;
1827 }
1828
1829 struct necp_fd_data *observer_fd = NULL;
1830 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1831 necp_client_update_observer_remove_internal(observer_fd, client);
1832 }
1833
1834 NECP_OBSERVER_LIST_UNLOCK();
1835 }
1836
/*
 * Tear down a single flow registration: notify the backing nexus agent
 * for each non-defunct nexus flow (close, abort, or unassert), free every
 * flow on the registration, unlink the registration from the client's
 * tree, and free it.  Caller holds the client lock.
 *
 * pid:   process to attribute netagent messages to.
 * abort: when true, nexus flows are aborted rather than closed cleanly.
 */
static void
necp_destroy_client_flow_registration(struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    pid_t pid, bool abort)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	bool has_close_parameters = false;
	struct necp_client_agent_parameters close_parameters = {};
	memset(close_parameters.u.close_token, 0, sizeof(close_parameters.u.close_token));
#if SKYWALK
	// If this registration kept QUIC stats, recover the SSR token so the
	// nexus can be handed close parameters below.
	if (flow_registration->kstats_kaddr != NULL) {
		struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
		struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
		// Only copy the token when the stats block is actually QUIC-typed
		if (quicstats != NULL &&
		    quicstats->necp_quic_udp_stats.necp_udp_hdr.necp_stats_type == NECP_CLIENT_STATISTICS_TYPE_QUIC) {
			memcpy(close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(close_parameters.u.close_token));
			has_close_parameters = true;
		}
	}

	// Release reference held on the stats arena
	if (flow_registration->stats_arena != NULL) {
		necp_arena_info_release(flow_registration->stats_arena);
		flow_registration->stats_arena = NULL;
	}
#endif /* SKYWALK */

	struct necp_client_flow * __single search_flow = NULL;
	struct necp_client_flow *temp_flow = NULL;
	LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
		if (search_flow->nexus &&
		    !uuid_is_null(search_flow->u.nexus_agent)) {
			// Don't unregister for defunct flows
			if (!flow_registration->defunct) {
				u_int8_t message_type = (abort ? NETAGENT_MESSAGE_TYPE_ABORT_NEXUS :
				    NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS);
				// Browse/resolve-only registrations asserted the agent
				// rather than opening a nexus; unassert in that case
				if (((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) ||
				    (flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) &&
				    !(flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) {
					message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
				}
				size_t dummy_length = 0;
				void * __sized_by(dummy_length) dummy_results = NULL;
				int netagent_error = netagent_client_message_with_params(search_flow->u.nexus_agent,
				    ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
				    client->client_id :
				    flow_registration->registration_id),
				    pid, client->agent_handle,
				    message_type,
				    has_close_parameters ? &close_parameters : NULL,
				    &dummy_results, &dummy_length);
				// ENOENT just means the agent is already gone; not an error
				if (netagent_error != 0 && netagent_error != ENOENT) {
					NECPLOG(LOG_ERR, "necp_client_remove close nexus error (%d) MESSAGE TYPE %u", netagent_error, message_type);
				}
			}
			uuid_clear(search_flow->u.nexus_agent);
		}
		if (search_flow->assigned_results != NULL) {
			kfree_data_counted_by(search_flow->assigned_results, search_flow->assigned_results_length);
		}
		LIST_REMOVE(search_flow, flow_chain);
		// Keep the global per-kind flow counters in sync
#if SKYWALK
		if (search_flow->nexus) {
			OSDecrementAtomic(&necp_nexus_flow_count);
		} else
#endif /* SKYWALK */
		if (search_flow->socket) {
			OSDecrementAtomic(&necp_socket_flow_count);
		} else {
			OSDecrementAtomic(&necp_if_flow_count);
		}

		necp_aop_offload_stats_destroy(search_flow);

		kfree_type(struct necp_client_flow, search_flow);
	}

	// Unlink from the client and free; the registration must not be
	// reachable from any global tree at this point
	RB_REMOVE(_necp_client_flow_tree, &client->flow_registrations, flow_registration);
	flow_registration->client = NULL;

	kfree_type(struct necp_client_flow_registration, flow_registration);
}
1920
/*
 * Destroy a client that has already been unlinked from its fd and from
 * the global trees: close stats, drop the route, destroy all flow
 * registrations, release any port reservation, unassert netagents, and
 * drop the destroy-path reference (which usually frees the client).
 * Must be entered with the client unlocked.
 */
static void
necp_destroy_client(struct necp_client *client, pid_t pid, bool abort)
{
	NECP_CLIENT_ASSERT_UNLOCKED(client);

#if SKYWALK
	if (client->nstat_context != NULL) {
		// This is a catch-all that should be rarely used.
		nstat_provider_stats_close(client->nstat_context);
		client->nstat_context = NULL;
	}
	if (client->original_parameters_source != NULL) {
		necp_client_release(client->original_parameters_source);
		client->original_parameters_source = NULL;
	}
#endif /* SKYWALK */
	// Tell observers the client is going away
	necp_client_update_observer_remove(client);

	NECP_CLIENT_LOCK(client);

	// Free route
	NECP_CLIENT_ROUTE_LOCK(client);
	if (client->current_route != NULL) {
		rtfree(client->current_route);
		client->current_route = NULL;
	}
	NECP_CLIENT_ROUTE_UNLOCK(client);

	// Remove flow assignments
	struct necp_client_flow_registration *flow_registration = NULL;
	struct necp_client_flow_registration *temp_flow_registration = NULL;
	RB_FOREACH_SAFE(flow_registration, _necp_client_flow_tree, &client->flow_registrations, temp_flow_registration) {
		necp_destroy_client_flow_registration(client, flow_registration, pid, abort);
	}

#if SKYWALK
	// Remove port reservation
	if (NETNS_TOKEN_VALID(&client->port_reservation)) {
		netns_release(&client->port_reservation);
	}
#endif /* SKYWALK */

	// Remove agent assertions
	struct necp_client_assertion * __single search_assertion = NULL;
	struct necp_client_assertion *temp_assertion = NULL;
	LIST_FOREACH_SAFE(search_assertion, &client->assertion_list, assertion_chain, temp_assertion) {
		int netagent_error = netagent_client_message(search_assertion->asserted_netagent, client->client_id, pid,
		    client->agent_handle, NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT);
		if (netagent_error != 0) {
			// An already-gone agent (ENOENT) is only debug-worthy
			NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR),
			    "necp_client_remove unassert agent error (%d)", netagent_error);
		}
		LIST_REMOVE(search_assertion, assertion_chain);
		kfree_type(struct necp_client_assertion, search_assertion);
	}

	// Drop our reference; on the final release the client is unlocked
	// and freed inside necp_client_release_locked, so only unlock here
	// when a reference remains
	if (!necp_client_release_locked(client)) {
		NECP_CLIENT_UNLOCK(client);
	}

	OSDecrementAtomic(&necp_client_count);
}
1983
1984 static bool
1985 necp_defunct_client_fd_locked_inner(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, bool destroy_stats);
1986
/*
 * Drain a list of defunct-flow records built while client locks were
 * held.  For each record: tell the backing nexus agent to abort (or
 * unassert, for browse/resolve-only flows), or defunct the socket for
 * socket-backed flows, then free the record.  Runs without any client
 * lock held, which is why the records snapshot everything they need.
 * The list is empty on return.
 */
static void
necp_process_defunct_list(struct _necp_flow_defunct_list *defunct_list)
{
	if (!LIST_EMPTY(defunct_list)) {
		struct necp_flow_defunct * __single flow_defunct = NULL;
		struct necp_flow_defunct *temp_flow_defunct = NULL;

		// For each newly defunct client, send a message to the nexus to remove the flow
		LIST_FOREACH_SAFE(flow_defunct, defunct_list, chain, temp_flow_defunct) {
			if (!uuid_is_null(flow_defunct->nexus_agent)) {
				// Nexus-backed flow: abort the nexus, or unassert for
				// browse/resolve-only registrations
				u_int8_t message_type = NETAGENT_MESSAGE_TYPE_ABORT_NEXUS;
				if (((flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) ||
				    (flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) &&
				    !(flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) {
					message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
				}
				size_t dummy_length = 0;
				void * __sized_by(dummy_length) dummy_results = NULL;
				int netagent_error = netagent_client_message_with_params(flow_defunct->nexus_agent,
				    flow_defunct->flow_id,
				    flow_defunct->proc_pid,
				    flow_defunct->agent_handle,
				    message_type,
				    flow_defunct->has_close_parameters ? &flow_defunct->close_parameters : NULL,
				    &dummy_results, &dummy_length);
				if (netagent_error != 0) {
					char namebuf[MAXCOMLEN + 1];
					(void) strlcpy(namebuf, "unknown", sizeof(namebuf));
					proc_name(flow_defunct->proc_pid, namebuf, sizeof(namebuf));
					NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR), "necp_update_client abort nexus error (%d) for pid %d %s", netagent_error, flow_defunct->proc_pid, namebuf);
				}
			} else if (flow_defunct->socket_handle != NULL) {
				// Socket-backed flow: defunct the socket under the owning
				// process's fd lock
				struct inpcb *inp = (struct inpcb *)flow_defunct->socket_handle;
				struct socket *so = inp->inp_socket;
				if (so != NULL) {
					proc_t proc = proc_find(flow_defunct->proc_pid);
					if (proc != PROC_NULL) {
						proc_fdlock(proc);
						(void)socket_defunct(proc, so, SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
						proc_fdunlock(proc);
						proc_rele(proc);
					}
				}
			}
			LIST_REMOVE(flow_defunct, chain);
			kfree_type(struct necp_flow_defunct, flow_defunct);
		}
	}
	ASSERT(LIST_EMPTY(defunct_list));
}
2037
/*
 * fileops close handler for a NECP fd.  Unlinks the fd from the global
 * fd/observer lists, defuncts and detaches all of its flows and clients
 * while holding the fd lock, frees pending observer updates and the fd
 * itself, and only then destroys the collected clients and processes the
 * defunct list — both of which must happen without the fd lock held.
 */
static int
necpop_close(struct fileglob *fg, vfs_context_t ctx)
{
#pragma unused(ctx)
	struct necp_fd_data * __single fd_data = NULL;
	int error = 0;

	fd_data = (struct necp_fd_data *)fg_get_data(fg);
	fg_set_data(fg, NULL);

	if (fd_data != NULL) {
		// Clients are moved here so they can be destroyed after the
		// fd lock is dropped
		struct _necp_client_tree clients_to_close;
		RB_INIT(&clients_to_close);

		// Remove from list quickly
		if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
			NECP_OBSERVER_LIST_LOCK_EXCLUSIVE();
			LIST_REMOVE(fd_data, chain);
			NECP_OBSERVER_LIST_UNLOCK();
		} else {
			NECP_FD_LIST_LOCK_EXCLUSIVE();
			LIST_REMOVE(fd_data, chain);
			NECP_FD_LIST_UNLOCK();
		}

		NECP_FD_LOCK(fd_data);
		pid_t pid = fd_data->proc_pid;

		// Defunct records accumulated here are processed after all
		// locks are released, at the bottom of this function
		struct _necp_flow_defunct_list defunct_list;
		LIST_INIT(&defunct_list);

		(void)necp_defunct_client_fd_locked_inner(fd_data, &defunct_list, false);

		// Detach every flow registration from the global flow tree
		struct necp_client_flow_registration *flow_registration = NULL;
		struct necp_client_flow_registration *temp_flow_registration = NULL;
		RB_FOREACH_SAFE(flow_registration, _necp_fd_flow_tree, &fd_data->flows, temp_flow_registration) {
#if SKYWALK
			necp_destroy_flow_stats(fd_data, flow_registration, NULL, TRUE);
#endif /* SKYWALK */
			NECP_FLOW_TREE_LOCK_EXCLUSIVE();
			RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
			NECP_FLOW_TREE_UNLOCK();
			RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
		}

		// Detach every client from the global client tree and park it
		// on clients_to_close for destruction later
		struct necp_client *client = NULL;
		struct necp_client *temp_client = NULL;
		RB_FOREACH_SAFE(client, _necp_client_tree, &fd_data->clients, temp_client) {
			// Clear out the agent_handle to avoid dangling pointers back to fd_data
			NECP_CLIENT_LOCK(client);
			client->agent_handle = NULL;
			NECP_CLIENT_UNLOCK(client);

			NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
			RB_REMOVE(_necp_client_global_tree, &necp_client_global_tree, client);
			NECP_CLIENT_TREE_UNLOCK();
			RB_REMOVE(_necp_client_tree, &fd_data->clients, client);
			RB_INSERT(_necp_client_tree, &clients_to_close, client);
		}

		struct necp_client_update *client_update = NULL;
		struct necp_client_update *temp_update = NULL;
		TAILQ_FOREACH_SAFE(client_update, &fd_data->update_list, chain, temp_update) {
			// Flush pending updates
			TAILQ_REMOVE(&fd_data->update_list, client_update, chain);
			necp_client_update_free(client_update);
		}
		fd_data->update_count = 0;

#if SKYWALK
		// Cleanup stats arena(s); indicate that we're closing
		necp_stats_arenas_destroy(fd_data, TRUE);
		ASSERT(fd_data->stats_arena_active == NULL);
		ASSERT(LIST_EMPTY(&fd_data->stats_arena_list));

		// Cleanup systctl arena
		necp_sysctl_arena_destroy(fd_data);
		ASSERT(fd_data->sysctl_arena == NULL);
#endif /* SKYWALK */

		NECP_FD_UNLOCK(fd_data);

		// Wake any selecting threads before the fd goes away
		selthreadclear(&fd_data->si);

		lck_mtx_destroy(&fd_data->fd_lock, &necp_fd_mtx_grp);

		if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
			OSDecrementAtomic(&necp_observer_fd_count);
		} else {
			OSDecrementAtomic(&necp_client_fd_count);
		}

		kfree_type(struct necp_fd_data, fd_data);

		// Now that no fd lock is held, destroy the parked clients
		RB_FOREACH_SAFE(client, _necp_client_tree, &clients_to_close, temp_client) {
			RB_REMOVE(_necp_client_tree, &clients_to_close, client);
			necp_destroy_client(client, pid, true);
		}

		necp_process_defunct_list(&defunct_list);
	}

	return error;
}
2142
2143 /// NECP client utilities
2144
2145 static inline bool
necp_address_is_wildcard(const union necp_sockaddr_union * const addr)2146 necp_address_is_wildcard(const union necp_sockaddr_union * const addr)
2147 {
2148 return (addr->sa.sa_family == AF_INET && addr->sin.sin_addr.s_addr == INADDR_ANY) ||
2149 (addr->sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&addr->sin6.sin6_addr));
2150 }
2151
2152 static int
necp_find_fd_data(struct proc * p,int fd,struct fileproc ** fpp,struct necp_fd_data ** fd_data)2153 necp_find_fd_data(struct proc *p, int fd,
2154 struct fileproc **fpp, struct necp_fd_data **fd_data)
2155 {
2156 struct fileproc * __single fp;
2157 int error = fp_get_ftype(p, fd, DTYPE_NETPOLICY, ENODEV, &fp);
2158
2159 if (error == 0) {
2160 *fd_data = (struct necp_fd_data *)fp_get_data(fp);
2161 *fpp = fp;
2162
2163 if ((*fd_data)->necp_fd_type != necp_fd_type_client) {
2164 // Not a client fd, ignore
2165 fp_drop(p, fd, fp, 0);
2166 error = EINVAL;
2167 }
2168 }
2169 return error;
2170 }
2171
2172 static void
necp_client_add_nexus_flow(struct necp_client_flow_registration * flow_registration,uuid_t nexus_agent,uint32_t interface_index,uint32_t interface_flags,bool aop_offload)2173 necp_client_add_nexus_flow(struct necp_client_flow_registration *flow_registration,
2174 uuid_t nexus_agent,
2175 uint32_t interface_index,
2176 uint32_t interface_flags,
2177 bool aop_offload)
2178 {
2179 struct necp_client_flow *new_flow = kalloc_type(struct necp_client_flow, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2180
2181 new_flow->nexus = TRUE;
2182 uuid_copy(new_flow->u.nexus_agent, nexus_agent);
2183 new_flow->interface_index = interface_index;
2184 new_flow->interface_flags = interface_flags;
2185 new_flow->check_tcp_heuristics = TRUE;
2186 new_flow->aop_offload = aop_offload ? TRUE : FALSE;
2187 #if SKYWALK
2188 OSIncrementAtomic(&necp_nexus_flow_count);
2189 #endif /* SKYWALK */
2190
2191 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
2192
2193 #if SKYWALK
2194 necp_flow_save_current_interface_details(flow_registration);
2195 #endif /* SKYWALK */
2196 }
2197
2198 static void
necp_client_add_nexus_flow_if_needed(struct necp_client_flow_registration * flow_registration,uuid_t nexus_agent,uint32_t interface_index,bool aop_offload)2199 necp_client_add_nexus_flow_if_needed(struct necp_client_flow_registration *flow_registration,
2200 uuid_t nexus_agent, uint32_t interface_index, bool aop_offload)
2201 {
2202 struct necp_client_flow *flow = NULL;
2203 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
2204 if (flow->nexus &&
2205 uuid_compare(flow->u.nexus_agent, nexus_agent) == 0) {
2206 return;
2207 }
2208 }
2209
2210 uint32_t interface_flags = 0;
2211 ifnet_t ifp = NULL;
2212 ifnet_head_lock_shared();
2213 if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
2214 ifp = ifindex2ifnet[interface_index];
2215 if (ifp != NULL) {
2216 ifnet_lock_shared(ifp);
2217 interface_flags = nstat_ifnet_to_flags(ifp);
2218 ifnet_lock_done(ifp);
2219 }
2220 }
2221 ifnet_head_done();
2222 necp_client_add_nexus_flow(flow_registration, nexus_agent, interface_index, interface_flags, aop_offload);
2223 }
2224
2225 static struct necp_client_flow *
necp_client_add_interface_flow(struct necp_client_flow_registration * flow_registration,uint32_t interface_index)2226 necp_client_add_interface_flow(struct necp_client_flow_registration *flow_registration,
2227 uint32_t interface_index)
2228 {
2229 struct necp_client_flow *new_flow = kalloc_type(struct necp_client_flow, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2230
2231 // Neither nexus nor socket
2232 new_flow->interface_index = interface_index;
2233 new_flow->u.socket_handle = flow_registration->interface_handle;
2234 new_flow->u.cb = flow_registration->interface_cb;
2235
2236 OSIncrementAtomic(&necp_if_flow_count);
2237
2238 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
2239
2240 return new_flow;
2241 }
2242
2243 static struct necp_client_flow *
necp_client_add_interface_flow_if_needed(struct necp_client * client,struct necp_client_flow_registration * flow_registration,uint32_t interface_index)2244 necp_client_add_interface_flow_if_needed(struct necp_client *client,
2245 struct necp_client_flow_registration *flow_registration,
2246 uint32_t interface_index)
2247 {
2248 if (!client->allow_multiple_flows ||
2249 interface_index == IFSCOPE_NONE) {
2250 // Interface not set, or client not allowed to use this mode
2251 return NULL;
2252 }
2253
2254 struct necp_client_flow *flow = NULL;
2255 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
2256 if (!flow->nexus && !flow->socket && flow->interface_index == interface_index) {
2257 // Already have the flow
2258 flow->invalid = FALSE;
2259 flow->u.socket_handle = flow_registration->interface_handle;
2260 flow->u.cb = flow_registration->interface_cb;
2261 return NULL;
2262 }
2263 }
2264 return necp_client_add_interface_flow(flow_registration, interface_index);
2265 }
2266
2267 static void
necp_client_add_interface_option_if_needed(struct necp_client * client,uint32_t interface_index,uint32_t interface_generation,uuid_t * nexus_agent,bool network_provider)2268 necp_client_add_interface_option_if_needed(struct necp_client *client,
2269 uint32_t interface_index,
2270 uint32_t interface_generation,
2271 uuid_t *nexus_agent,
2272 bool network_provider)
2273 {
2274 if ((interface_index == IFSCOPE_NONE && !network_provider) ||
2275 (client->interface_option_count != 0 && !client->allow_multiple_flows)) {
2276 // Interface not set, or client not allowed to use this mode
2277 return;
2278 }
2279
2280 if (client->interface_option_count >= NECP_CLIENT_MAX_INTERFACE_OPTIONS) {
2281 // Cannot take any more interface options
2282 return;
2283 }
2284
2285 // Check if already present
2286 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
2287 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2288 struct necp_client_interface_option *option = &client->interface_options[option_i];
2289 if (option->interface_index == interface_index) {
2290 if (nexus_agent == NULL) {
2291 return;
2292 }
2293 if (uuid_compare(option->nexus_agent, *nexus_agent) == 0) {
2294 return;
2295 }
2296 if (uuid_is_null(option->nexus_agent)) {
2297 uuid_copy(option->nexus_agent, *nexus_agent);
2298 return;
2299 }
2300 // If we get to this point, this is a new nexus flow
2301 }
2302 } else {
2303 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2304 if (option->interface_index == interface_index) {
2305 if (nexus_agent == NULL) {
2306 return;
2307 }
2308 if (uuid_compare(option->nexus_agent, *nexus_agent) == 0) {
2309 return;
2310 }
2311 if (uuid_is_null(option->nexus_agent)) {
2312 uuid_copy(option->nexus_agent, *nexus_agent);
2313 return;
2314 }
2315 // If we get to this point, this is a new nexus flow
2316 }
2317 }
2318 }
2319
2320 // Add a new entry
2321 if (client->interface_option_count < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2322 // Add to static
2323 struct necp_client_interface_option *option = &client->interface_options[client->interface_option_count];
2324 option->interface_index = interface_index;
2325 option->interface_generation = interface_generation;
2326 if (nexus_agent != NULL) {
2327 uuid_copy(option->nexus_agent, *nexus_agent);
2328 } else {
2329 uuid_clear(option->nexus_agent);
2330 }
2331 client->interface_option_count++;
2332 } else {
2333 // Add to extra
2334 if (client->extra_interface_options == NULL) {
2335 client->extra_interface_options = (struct necp_client_interface_option *)kalloc_data(
2336 sizeof(struct necp_client_interface_option) * NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT, Z_WAITOK | Z_ZERO);
2337 }
2338 if (client->extra_interface_options != NULL) {
2339 struct necp_client_interface_option *option = &client->extra_interface_options[client->interface_option_count - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2340 option->interface_index = interface_index;
2341 option->interface_generation = interface_generation;
2342 if (nexus_agent != NULL) {
2343 uuid_copy(option->nexus_agent, *nexus_agent);
2344 } else {
2345 uuid_clear(option->nexus_agent);
2346 }
2347 client->interface_option_count++;
2348 }
2349 }
2350 }
2351
2352 static bool
necp_client_flow_is_viable(proc_t proc,struct necp_client * client,struct necp_client_flow * flow)2353 necp_client_flow_is_viable(proc_t proc, struct necp_client *client,
2354 struct necp_client_flow *flow)
2355 {
2356 struct necp_aggregate_result result;
2357 bool ignore_address = (client->allow_multiple_flows && !flow->nexus && !flow->socket);
2358
2359 flow->necp_flow_flags = 0;
2360 int error = necp_application_find_policy_match_internal(proc, client->parameters,
2361 (u_int32_t)client->parameters_length,
2362 &result, &flow->necp_flow_flags, NULL,
2363 flow->interface_index,
2364 &flow->local_addr, &flow->remote_addr, NULL, NULL,
2365 NULL, ignore_address, true, NULL);
2366
2367 // Check for blocking agents
2368 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
2369 if (uuid_is_null(result.netagents[i])) {
2370 // Passed end of valid agents
2371 break;
2372 }
2373 if (result.netagent_use_flags[i] & NECP_AGENT_USE_FLAG_REMOVE) {
2374 // A removed agent, ignore
2375 continue;
2376 }
2377 u_int32_t flags = netagent_get_flags(result.netagents[i]);
2378 if ((flags & NETAGENT_FLAG_REGISTERED) &&
2379 !(flags & NETAGENT_FLAG_VOLUNTARY) &&
2380 !(flags & NETAGENT_FLAG_ACTIVE) &&
2381 !(flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY)) {
2382 // A required agent is not active, cause the flow to be marked non-viable
2383 return false;
2384 }
2385 }
2386
2387 if (flow->interface_index != IFSCOPE_NONE) {
2388 ifnet_head_lock_shared();
2389
2390 struct ifnet *ifp = ifindex2ifnet[flow->interface_index];
2391 if (ifp && ifp->if_delegated.ifp != IFSCOPE_NONE) {
2392 flow->delegated_interface_index = ifp->if_delegated.ifp->if_index;
2393 }
2394
2395 ifnet_head_done();
2396 }
2397
2398 return error == 0 &&
2399 result.routed_interface_index != IFSCOPE_NONE &&
2400 result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP;
2401 }
2402
2403 static void
necp_flow_add_interface_flows(proc_t proc,struct necp_client * client,struct necp_client_flow_registration * flow_registration,bool send_initial)2404 necp_flow_add_interface_flows(proc_t proc,
2405 struct necp_client *client,
2406 struct necp_client_flow_registration *flow_registration,
2407 bool send_initial)
2408 {
2409 // Traverse all interfaces and add a tracking flow if needed
2410 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
2411 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2412 struct necp_client_interface_option *option = &client->interface_options[option_i];
2413 struct necp_client_flow *flow = necp_client_add_interface_flow_if_needed(client, flow_registration, option->interface_index);
2414 if (flow != NULL && send_initial) {
2415 flow->viable = necp_client_flow_is_viable(proc, client, flow);
2416 if (flow->viable && flow->u.cb) {
2417 bool viable = flow->viable;
2418 flow->u.cb(flow_registration->interface_handle, NECP_CLIENT_CBACTION_INITIAL, flow->interface_index, flow->necp_flow_flags, &viable);
2419 flow->viable = viable;
2420 }
2421 }
2422 } else {
2423 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2424 struct necp_client_flow *flow = necp_client_add_interface_flow_if_needed(client, flow_registration, option->interface_index);
2425 if (flow != NULL && send_initial) {
2426 flow->viable = necp_client_flow_is_viable(proc, client, flow);
2427 if (flow->viable && flow->u.cb) {
2428 bool viable = flow->viable;
2429 flow->u.cb(flow_registration->interface_handle, NECP_CLIENT_CBACTION_INITIAL, flow->interface_index, flow->necp_flow_flags, &viable);
2430 flow->viable = viable;
2431 }
2432 }
2433 }
2434 }
2435 }
2436
/*
 * Re-evaluate the viability of every flow on each of the client's flow
 * registrations, deliver viability callbacks, and reap flows that no longer
 * match and carry no assigned data. Returns true if any flow changed state.
 *
 * Caller must hold the client lock (asserted below). defunct_list, when
 * non-NULL (SKYWALK only), collects registrations whose nexus agent has
 * unregistered so the caller can defunct them later.
 */
static bool
necp_client_update_flows(proc_t proc,
    struct necp_client *client,
    struct _necp_flow_defunct_list *defunct_list)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	bool any_client_updated = FALSE;
	struct necp_client_flow * __single flow = NULL;
	struct necp_client_flow *temp_flow = NULL;
	struct necp_client_flow_registration *flow_registration = NULL;
	RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
		if (flow_registration->interface_cb != NULL) {
			// Add any interface flows that are not already tracked
			necp_flow_add_interface_flows(proc, client, flow_registration, false);
		}

		// SAFE variant: flows may be unlinked and freed inside the loop.
		LIST_FOREACH_SAFE(flow, &flow_registration->flow_list, flow_chain, temp_flow) {
			bool client_updated = FALSE;

			// Check policy result for flow
			u_short old_delegated_ifindex = flow->delegated_interface_index;

			int old_flags = flow->necp_flow_flags;
			// Side effect: refreshes flow->necp_flow_flags and the
			// delegated interface index.
			bool viable = necp_client_flow_is_viable(proc, client, flow);

			// TODO: Defunct nexus flows that are blocked by policy

			if (flow->viable != viable) {
				flow->viable = viable;
				client_updated = TRUE;
			}

			// A flip of the force-update flag also counts as a change.
			if ((old_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE) !=
			    (flow->necp_flow_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE)) {
				client_updated = TRUE;
			}

			if (flow->delegated_interface_index != old_delegated_ifindex) {
				client_updated = TRUE;
			}

			// Notify socket flows and pure interface flows (not nexus flows).
			if (flow->viable && client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) {
				bool flow_viable = flow->viable;
				flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_VIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable);
				flow->viable = flow_viable;
			}

			if (!flow->viable || flow->invalid) {
				if (client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) {
					bool flow_viable = flow->viable;
					flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_NONVIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable);
					flow->viable = flow_viable;
				}
				// The callback might change the viable-flag of the
				// flow depending on its policy. Thus, we need to
				// check the flags again after the callback.
			}

#if SKYWALK
			if (defunct_list != NULL) {
				if (flow->invalid && flow->nexus && flow->assigned && !uuid_is_null(flow->u.nexus_agent)) {
					// This is a nexus flow that was assigned, but not found on path
					u_int32_t flags = netagent_get_flags(flow->u.nexus_agent);
					if (!(flags & NETAGENT_FLAG_REGISTERED)) {
						// The agent is no longer registered! Mark defunct.
						necp_defunct_flow_registration(client, flow_registration, defunct_list, false);
						client_updated = TRUE;
					}
				}
			}
#else /* !SKYWALK */
			(void)defunct_list;
#endif /* !SKYWALK */

			// Handle flows that no longer match
			if (!flow->viable || flow->invalid) {
				// Drop them as long as they aren't assigned data
				if (!flow->nexus && !flow->assigned) {
					if (flow->assigned_results != NULL) {
						kfree_data_counted_by(flow->assigned_results, flow->assigned_results_length);
						client_updated = TRUE;
					}
					LIST_REMOVE(flow, flow_chain);
#if SKYWALK
					// NOTE(review): unreachable — this path is guarded by
					// !flow->nexus above, so the nexus counter can never be
					// decremented here.
					if (flow->nexus) {
						OSDecrementAtomic(&necp_nexus_flow_count);
					} else
#endif /* SKYWALK */
					if (flow->socket) {
						OSDecrementAtomic(&necp_socket_flow_count);
					} else {
						OSDecrementAtomic(&necp_if_flow_count);
					}

					necp_aop_offload_stats_destroy(flow);

					kfree_type(struct necp_client_flow, flow);
				}
			}

			any_client_updated |= client_updated;
		}
#if SKYWALK
		necp_flow_save_current_interface_details(flow_registration);
#endif /* SKYWALK */
	}

	return any_client_updated;
}
2547
2548 static void
necp_client_mark_all_nonsocket_flows_as_invalid(struct necp_client * client)2549 necp_client_mark_all_nonsocket_flows_as_invalid(struct necp_client *client)
2550 {
2551 struct necp_client_flow_registration *flow_registration = NULL;
2552 struct necp_client_flow *flow = NULL;
2553 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
2554 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
2555 if (!flow->socket) { // Socket flows are not marked as invalid
2556 flow->invalid = TRUE;
2557 }
2558 }
2559 }
2560
2561 // Reset option count every update
2562 client->interface_option_count = 0;
2563 }
2564
/*
 * Determine whether the client's parsed parameters explicitly request the
 * given network agent, either by UUID or by domain/type, across both the
 * "required" and "preferred" agent lists. Used to decide whether a
 * SPECIFIC_USE_ONLY agent applies to a client.
 *
 * Returns true when the agent's UUID appears in the required or preferred
 * UUID lists, or when its domain/type (fetched lazily from the agent
 * registry) matches an entry in the required or preferred type lists.
 */
static inline bool
necp_netagent_is_requested(const struct necp_client_parsed_parameters *parameters,
    uuid_t *netagent_uuid)
{
	// Specific use agents only apply when requested
	bool requested = false;
	if (parameters != NULL) {
		// Check required agent UUIDs
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parameters->required_netagents[i])) {
				// Lists are packed; first null entry terminates.
				break;
			}
			if (uuid_compare(parameters->required_netagents[i], *netagent_uuid) == 0) {
				requested = true;
				break;
			}
		}

		if (!requested) {
			// Check required agent types
			bool fetched_type = false;
			char netagent_domain[NETAGENT_DOMAINSIZE];
			char netagent_type[NETAGENT_TYPESIZE];
			memset(&netagent_domain, 0, NETAGENT_DOMAINSIZE);
			memset(&netagent_type, 0, NETAGENT_TYPESIZE);

			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strbuflen(parameters->required_netagent_types[i].netagent_domain, sizeof(parameters->required_netagent_types[i].netagent_domain)) == 0 ||
				    strbuflen(parameters->required_netagent_types[i].netagent_type, sizeof(parameters->required_netagent_types[i].netagent_type)) == 0) {
					// First empty entry terminates the list.
					break;
				}

				// Fetch the agent's domain/type once, on first need.
				if (!fetched_type) {
					if (netagent_get_agent_domain_and_type(*netagent_uuid, netagent_domain, netagent_type)) {
						fetched_type = TRUE;
					} else {
						break;
					}
				}

				// NOTE(review): the strbuflen()==0 alternatives below are
				// redundant — both lengths are known non-zero here because
				// the loop breaks on empty entries above.
				if ((strbuflen(parameters->required_netagent_types[i].netagent_domain, sizeof(parameters->required_netagent_types[i].netagent_domain)) == 0 ||
				    strbufcmp(netagent_domain, NETAGENT_DOMAINSIZE, parameters->required_netagent_types[i].netagent_domain, sizeof(parameters->required_netagent_types[i].netagent_domain)) == 0) &&
				    (strbuflen(parameters->required_netagent_types[i].netagent_type, sizeof(parameters->required_netagent_types[i].netagent_type)) == 0 ||
				    strbufcmp(netagent_type, NETAGENT_TYPESIZE, parameters->required_netagent_types[i].netagent_type, sizeof(parameters->required_netagent_types[i].netagent_type)) == 0)) {
					requested = true;
					break;
				}
			}
		}

		// Check preferred agent UUIDs
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parameters->preferred_netagents[i])) {
				break;
			}
			if (uuid_compare(parameters->preferred_netagents[i], *netagent_uuid) == 0) {
				requested = true;
				break;
			}
		}

		if (!requested) {
			// Check preferred agent types
			bool fetched_type = false;
			char netagent_domain[NETAGENT_DOMAINSIZE];
			char netagent_type[NETAGENT_TYPESIZE];
			memset(&netagent_domain, 0, NETAGENT_DOMAINSIZE);
			memset(&netagent_type, 0, NETAGENT_TYPESIZE);

			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strbuflen(parameters->preferred_netagent_types[i].netagent_domain, sizeof(parameters->preferred_netagent_types[i].netagent_domain)) == 0 ||
				    strbuflen(parameters->preferred_netagent_types[i].netagent_type, sizeof(parameters->preferred_netagent_types[i].netagent_type)) == 0) {
					break;
				}

				if (!fetched_type) {
					if (netagent_get_agent_domain_and_type(*netagent_uuid, netagent_domain, netagent_type)) {
						fetched_type = TRUE;
					} else {
						break;
					}
				}

				if ((strbuflen(parameters->preferred_netagent_types[i].netagent_domain, sizeof(parameters->preferred_netagent_types[i].netagent_domain)) == 0 ||
				    strbufcmp(netagent_domain, NETAGENT_DOMAINSIZE, parameters->preferred_netagent_types[i].netagent_domain, sizeof(parameters->preferred_netagent_types[i].netagent_domain)) == 0) &&
				    (strbuflen(parameters->preferred_netagent_types[i].netagent_type, sizeof(parameters->preferred_netagent_types[i].netagent_type)) == 0 ||
				    strbufcmp(netagent_type, NETAGENT_TYPESIZE, parameters->preferred_netagent_types[i].netagent_type, sizeof(parameters->preferred_netagent_types[i].netagent_type)) == 0)) {
					requested = true;
					break;
				}
			}
		}
	}

	return requested;
}
2661
/*
 * Decide whether a network agent applies to a client given its parsed
 * parameters. Filters out unregistered agents, nexus providers when a nexus
 * is not allowed, nexus flavors that don't match the client's requested
 * flavor flags, and agents whose last trigger failed at the same generation.
 * SPECIFIC_USE_ONLY agents apply only when explicitly requested.
 *
 * Side effect (SKYWALK): applicable nexus agents are recorded as interface
 * options on the client, unless this is a browse path or a listener on a
 * nexus that doesn't support listeners.
 */
static bool
necp_netagent_applies_to_client(struct necp_client *client,
    const struct necp_client_parsed_parameters *parameters,
    uuid_t *netagent_uuid, bool allow_nexus,
    uint32_t interface_index, uint32_t interface_generation)
{
#pragma unused(interface_index, interface_generation)
	bool applies = FALSE;
	u_int32_t flags = netagent_get_flags(*netagent_uuid);
	if (!(flags & NETAGENT_FLAG_REGISTERED)) {
		// Unregistered agents never apply
		return applies;
	}

	// Any of these flags marks the agent as a nexus of some flavor.
	const bool is_nexus_agent = ((flags & NETAGENT_FLAG_NEXUS_PROVIDER) ||
	    (flags & NETAGENT_FLAG_NEXUS_LISTENER) ||
	    (flags & NETAGENT_FLAG_CUSTOM_ETHER_NEXUS) ||
	    (flags & NETAGENT_FLAG_CUSTOM_IP_NEXUS) ||
	    (flags & NETAGENT_FLAG_INTERPOSE_NEXUS));
	if (is_nexus_agent) {
		if (!allow_nexus) {
			// Hide nexus providers unless allowed
			// Direct interfaces and direct policies are allowed to use a nexus
			// Delegate interfaces or re-scoped interfaces are not allowed
			return applies;
		}

		if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) &&
		    !(flags & NETAGENT_FLAG_CUSTOM_ETHER_NEXUS)) {
			// Client requested a custom ether nexus, but this nexus isn't one
			return applies;
		}

		if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP) &&
		    !(flags & NETAGENT_FLAG_CUSTOM_IP_NEXUS)) {
			// Client requested a custom IP nexus, but this nexus isn't one
			return applies;
		}

		if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) &&
		    !(flags & NETAGENT_FLAG_INTERPOSE_NEXUS)) {
			// Client requested an interpose nexus, but this nexus isn't one
			return applies;
		}

		if (!(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) &&
		    !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP) &&
		    !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) &&
		    !(flags & NETAGENT_FLAG_NEXUS_PROVIDER)) {
			// Client requested default parameters, but this nexus isn't generic
			return applies;
		}
	}

	if (uuid_compare(client->failed_trigger_agent.netagent_uuid, *netagent_uuid) == 0) {
		if (client->failed_trigger_agent.generation == netagent_get_generation(*netagent_uuid)) {
			// If this agent was triggered, and failed, and hasn't changed, keep hiding it
			return applies;
		} else {
			// Mismatch generation, clear out old trigger
			uuid_clear(client->failed_trigger_agent.netagent_uuid);
			client->failed_trigger_agent.generation = 0;
		}
	}

	if (flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY) {
		// Specific use agents only apply when requested
		applies = necp_netagent_is_requested(parameters, netagent_uuid);
	} else {
		applies = TRUE;
	}

#if SKYWALK
	// Add nexus agent if it is a nexus, and either is not a listener, or the nexus supports listeners
	if (applies && is_nexus_agent &&
	    !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) && // Don't add for browse paths
	    ((flags & NETAGENT_FLAG_NEXUS_LISTENER) || !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER))) {
		necp_client_add_interface_option_if_needed(client, interface_index,
		    interface_generation, netagent_uuid,
		    (flags & NETAGENT_FLAG_NETWORK_PROVIDER));
	}
#endif /* SKYWALK */

	return applies;
}
2747
2748 static void
necp_client_add_agent_interface_options(struct necp_client * client,const struct necp_client_parsed_parameters * parsed_parameters,ifnet_t ifp)2749 necp_client_add_agent_interface_options(struct necp_client *client,
2750 const struct necp_client_parsed_parameters *parsed_parameters,
2751 ifnet_t ifp)
2752 {
2753 if (ifp == NULL) {
2754 return;
2755 }
2756
2757 ifnet_lock_shared(ifp);
2758 if (ifp->if_agentids != NULL) {
2759 for (u_int32_t i = 0; i < ifp->if_agentcount; i++) {
2760 if (uuid_is_null(ifp->if_agentids[i])) {
2761 continue;
2762 }
2763 // Relies on the side effect that nexus agents that apply will create flows
2764 (void)necp_netagent_applies_to_client(client, parsed_parameters, &ifp->if_agentids[i], TRUE,
2765 ifp->if_index, ifnet_get_generation(ifp));
2766 }
2767 }
2768 ifnet_lock_done(ifp);
2769 }
2770
2771 static void
necp_client_add_browse_interface_options(struct necp_client * client,const struct necp_client_parsed_parameters * parsed_parameters,ifnet_t ifp)2772 necp_client_add_browse_interface_options(struct necp_client *client,
2773 const struct necp_client_parsed_parameters *parsed_parameters,
2774 ifnet_t ifp)
2775 {
2776 if (ifp == NULL) {
2777 return;
2778 }
2779
2780 ifnet_lock_shared(ifp);
2781 if (ifp->if_agentids != NULL) {
2782 for (u_int32_t i = 0; i < ifp->if_agentcount; i++) {
2783 if (uuid_is_null(ifp->if_agentids[i])) {
2784 continue;
2785 }
2786
2787 u_int32_t flags = netagent_get_flags(ifp->if_agentids[i]);
2788 if ((flags & NETAGENT_FLAG_REGISTERED) &&
2789 (flags & NETAGENT_FLAG_ACTIVE) &&
2790 (flags & NETAGENT_FLAG_SUPPORTS_BROWSE) &&
2791 (!(flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY) ||
2792 necp_netagent_is_requested(parsed_parameters, &ifp->if_agentids[i]))) {
2793 necp_client_add_interface_option_if_needed(client, ifp->if_index, ifnet_get_generation(ifp), &ifp->if_agentids[i], (flags & NETAGENT_FLAG_NETWORK_PROVIDER));
2794
2795 // Finding one is enough
2796 break;
2797 }
2798 }
2799 }
2800 ifnet_lock_done(ifp);
2801 }
2802
2803 static inline bool
_necp_client_address_is_valid(struct sockaddr * address)2804 _necp_client_address_is_valid(struct sockaddr *address)
2805 {
2806 if (address->sa_family == AF_INET) {
2807 return address->sa_len == sizeof(struct sockaddr_in);
2808 } else if (address->sa_family == AF_INET6) {
2809 return address->sa_len == sizeof(struct sockaddr_in6);
2810 } else {
2811 return FALSE;
2812 }
2813 }
2814
2815 #define necp_client_address_is_valid(S) _necp_client_address_is_valid(SA(S))
2816
2817 static inline bool
necp_client_endpoint_is_unspecified(struct necp_client_endpoint * endpoint)2818 necp_client_endpoint_is_unspecified(struct necp_client_endpoint *endpoint)
2819 {
2820 if (necp_client_address_is_valid(&endpoint->u.sa)) {
2821 if (endpoint->u.sa.sa_family == AF_INET) {
2822 return endpoint->u.sin.sin_addr.s_addr == INADDR_ANY;
2823 } else if (endpoint->u.sa.sa_family == AF_INET6) {
2824 return IN6_IS_ADDR_UNSPECIFIED(&endpoint->u.sin6.sin6_addr);
2825 } else {
2826 return TRUE;
2827 }
2828 } else {
2829 return TRUE;
2830 }
2831 }
2832
#if SKYWALK
/*
 * Walk a client's parameter TLV buffer in place and overwrite the port field
 * of any LOCAL_ADDRESS or LOCAL_ENDPOINT parameter with local_port.
 * local_port is expected in the byte order stored in sin_port/sin6_port.
 * Malformed TLVs (length overrunning the buffer) abort the walk.
 */
static void
necp_client_update_local_port_parameters(u_int8_t * __sized_by(parameters_size)parameters,
    u_int32_t parameters_size,
    uint16_t local_port)
{
	size_t offset = 0;
	while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);

		if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
			// If the length is larger than what can fit in the remaining parameters size, bail
			NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
			break;
		}

		if (length > 0) {
			u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
			if (value != NULL) {
				switch (type) {
				case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						// Only rewrite well-formed AF_INET/AF_INET6 addresses
						if (necp_client_address_is_valid(&address_struct->address.sa)) {
							if (address_struct->address.sa.sa_family == AF_INET) {
								address_struct->address.sin.sin_port = local_port;
							} else if (address_struct->address.sa.sa_family == AF_INET6) {
								address_struct->address.sin6.sin6_port = local_port;
							}
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: {
					if (length >= sizeof(struct necp_client_endpoint)) {
						struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
						if (necp_client_address_is_valid(&endpoint->u.sa)) {
							if (endpoint->u.sa.sa_family == AF_INET) {
								endpoint->u.sin.sin_port = local_port;
							} else if (endpoint->u.sa.sa_family == AF_INET6) {
								endpoint->u.sin6.sin6_port = local_port;
							}
						}
					}
					break;
				}
				default: {
					break;
				}
				}
			}
		}

		// Advance past this TLV's header and payload
		offset += sizeof(struct necp_tlv_header) + length;
	}
}
#endif /* SKYWALK */
2891
2892 #define NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH 253
2893
2894 static void
necp_client_trace_parameter_parsing(struct necp_client * client,u_int8_t type,u_int8_t * __sized_by (length)value,u_int32_t length)2895 necp_client_trace_parameter_parsing(struct necp_client *client, u_int8_t type, u_int8_t * __sized_by(length)value, u_int32_t length)
2896 {
2897 uint64_t num = 0;
2898 uint16_t shortBuf;
2899 uint32_t intBuf;
2900 char buffer[NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH + 1];
2901
2902 if (value != NULL && length > 0) {
2903 switch (length) {
2904 case 1:
2905 num = *value;
2906 break;
2907 case 2:
2908 memcpy(&shortBuf, value, sizeof(shortBuf));
2909 num = shortBuf;
2910 break;
2911 case 4:
2912 memcpy(&intBuf, value, sizeof(intBuf));
2913 num = intBuf;
2914 break;
2915 case 8:
2916 memcpy(&num, value, sizeof(num));
2917 break;
2918 default:
2919 num = 0;
2920 break;
2921 }
2922 int len = NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH < length ? NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH : length;
2923 memcpy(buffer, value, len);
2924 buffer[len] = 0;
2925 NECP_CLIENT_PARAMS_LOG(client, "Parsing param - type %d length %d value <%llu (%llX)> %s", type, length, num, num, buffer);
2926 } else {
2927 NECP_CLIENT_PARAMS_LOG(client, "Parsing param - type %d length %d", type, length);
2928 }
2929 }
2930
/*
 * Debug-log the fully parsed client parameters: scalar fields, local/remote
 * addresses (rendered with inet_ntop), tracker-related flags, and every
 * populated entry of the interface/agent constraint lists. Array walks stop
 * at the first empty entry, matching how the lists are packed.
 */
static void
necp_client_trace_parsed_parameters(struct necp_client *client, struct necp_client_parsed_parameters *parsed_parameters)
{
	int i;
	char local_buffer[64] = { };
	char remote_buffer[64] = { };
	uuid_string_t uuid_str = { };
	uuid_unparse_lower(parsed_parameters->effective_uuid, uuid_str);

	// Render the local address when its length matches its family.
	switch (parsed_parameters->local_addr.sa.sa_family) {
	case AF_INET:
		if (parsed_parameters->local_addr.sa.sa_len == sizeof(struct sockaddr_in)) {
			struct sockaddr_in *addr = &parsed_parameters->local_addr.sin;
			inet_ntop(AF_INET, &(addr->sin_addr), local_buffer, sizeof(local_buffer));
		}
		break;
	case AF_INET6:
		if (parsed_parameters->local_addr.sa.sa_len == sizeof(struct sockaddr_in6)) {
			struct sockaddr_in6 *addr6 = &parsed_parameters->local_addr.sin6;
			inet_ntop(AF_INET6, &(addr6->sin6_addr), local_buffer, sizeof(local_buffer));
		}
		break;
	default:
		break;
	}

	// Same for the remote address.
	switch (parsed_parameters->remote_addr.sa.sa_family) {
	case AF_INET:
		if (parsed_parameters->remote_addr.sa.sa_len == sizeof(struct sockaddr_in)) {
			struct sockaddr_in *addr = &parsed_parameters->remote_addr.sin;
			inet_ntop(AF_INET, &(addr->sin_addr), remote_buffer, sizeof(remote_buffer));
		}
		break;
	case AF_INET6:
		if (parsed_parameters->remote_addr.sa.sa_len == sizeof(struct sockaddr_in6)) {
			struct sockaddr_in6 *addr6 = &parsed_parameters->remote_addr.sin6;
			inet_ntop(AF_INET6, &(addr6->sin6_addr), remote_buffer, sizeof(remote_buffer));
		}
		break;
	default:
		break;
	}

	NECP_CLIENT_PARAMS_LOG(client, "Parsed params - valid_fields %X flags %X "
	    "extended flags %llX delegated_upid %llu local_addr %s remote_addr %s "
	    "required_interface_index %u required_interface_type %d local_address_preference %d "
	    "ip_protocol %d transport_protocol %d ethertype %d effective_pid %d "
	    "effective_uuid %s uid %d persona_id %d traffic_class %d",
	    parsed_parameters->valid_fields,
	    parsed_parameters->flags,
	    parsed_parameters->extended_flags,
	    parsed_parameters->delegated_upid,
	    local_buffer, remote_buffer,
	    parsed_parameters->required_interface_index,
	    parsed_parameters->required_interface_type,
	    parsed_parameters->local_address_preference,
	    parsed_parameters->ip_protocol,
	    parsed_parameters->transport_protocol,
	    parsed_parameters->ethertype,
	    parsed_parameters->effective_pid,
	    uuid_str,
	    parsed_parameters->uid,
	    parsed_parameters->persona_id,
	    parsed_parameters->traffic_class);

	NECP_CLIENT_PARAMS_LOG(client, "Parsed params - tracker flags <known-tracker %X> <non-app-initiated %X> <silent %X> <app-approved %X>",
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_KNOWN_TRACKER,
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_NON_APP_INITIATED,
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_SILENT,
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_APPROVED_APP_DOMAIN);

	// Interface name constraints.
	for (i = 0; i < NECP_MAX_INTERFACE_PARAMETERS && parsed_parameters->prohibited_interfaces[i][0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_interfaces[%d] <%s>", i, parsed_parameters->prohibited_interfaces[i]);
	}

	// Agent domain/type constraints, one list per requirement class.
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->required_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed required_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->required_netagent_types[i].netagent_domain,
		    parsed_parameters->required_netagent_types[i].netagent_type);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->prohibited_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->prohibited_netagent_types[i].netagent_domain,
		    parsed_parameters->prohibited_netagent_types[i].netagent_type);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->preferred_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed preferred_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->preferred_netagent_types[i].netagent_domain,
		    parsed_parameters->preferred_netagent_types[i].netagent_type);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->avoided_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed avoided_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->avoided_netagent_types[i].netagent_domain,
		    parsed_parameters->avoided_netagent_types[i].netagent_type);
	}

	// Agent UUID constraints, one list per requirement class.
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->required_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->required_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed required_netagents[%d] <%s>", i, uuid_str);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->prohibited_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->prohibited_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_netagents[%d] <%s>", i, uuid_str);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->preferred_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->preferred_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed preferred_netagents[%d] <%s>", i, uuid_str);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->avoided_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->avoided_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed avoided_netagents[%d] <%s>", i, uuid_str);
	}
}
3044
3045 static bool
necp_client_strings_are_equal(const char * __sized_by (string1_length)string1,size_t string1_length,const char * __sized_by (string2_length)string2,size_t string2_length)3046 necp_client_strings_are_equal(const char * __sized_by(string1_length)string1, size_t string1_length,
3047 const char * __sized_by(string2_length)string2, size_t string2_length)
3048 {
3049 if (string1 == NULL || string2 == NULL) {
3050 return false;
3051 }
3052 const size_t string1_actual_length = strnlen(string1, string1_length);
3053 const size_t string2_actual_length = strnlen(string2, string2_length);
3054 if (string1_actual_length != string2_actual_length) {
3055 return false;
3056 }
3057 return strbufcmp(string1, string1_actual_length, string2, string2_actual_length) == 0;
3058 }
3059
3060 static int
necp_client_parse_parameters(struct necp_client * client,u_int8_t * __sized_by (parameters_size)parameters,u_int32_t parameters_size,struct necp_client_parsed_parameters * parsed_parameters)3061 necp_client_parse_parameters(struct necp_client *client, u_int8_t * __sized_by(parameters_size)parameters,
3062 u_int32_t parameters_size,
3063 struct necp_client_parsed_parameters *parsed_parameters)
3064 {
3065 int error = 0;
3066 size_t offset = 0;
3067
3068 u_int32_t num_prohibited_interfaces = 0;
3069 u_int32_t num_prohibited_interface_types = 0;
3070 u_int32_t num_required_agents = 0;
3071 u_int32_t num_prohibited_agents = 0;
3072 u_int32_t num_preferred_agents = 0;
3073 u_int32_t num_avoided_agents = 0;
3074 u_int32_t num_required_agent_types = 0;
3075 u_int32_t num_prohibited_agent_types = 0;
3076 u_int32_t num_preferred_agent_types = 0;
3077 u_int32_t num_avoided_agent_types = 0;
3078 u_int32_t resolver_tag_length = 0;
3079 u_int8_t * __sized_by(resolver_tag_length) resolver_tag = NULL;
3080 u_int32_t hostname_length = 0;
3081 u_int8_t * __sized_by(hostname_length) client_hostname = NULL;
3082 uuid_t parent_id = {};
3083
3084 if (parsed_parameters == NULL) {
3085 return EINVAL;
3086 }
3087
3088 memset(parsed_parameters, 0, sizeof(struct necp_client_parsed_parameters));
3089
3090 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
3091 u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
3092 u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);
3093
3094 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
3095 // If the length is larger than what can fit in the remaining parameters size, bail
3096 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
3097 break;
3098 }
3099
3100 if (length > 0) {
3101 u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
3102 if (value != NULL) {
3103 switch (type) {
3104 case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
3105 if (length <= IFXNAMSIZ && length > 0) {
3106 ifnet_t __single bound_interface = NULL;
3107 char interface_name[IFXNAMSIZ];
3108 memcpy(interface_name, value, length);
3109 interface_name[length - 1] = 0; // Make sure the string is NULL terminated
3110 if (ifnet_find_by_name(__unsafe_null_terminated_from_indexable(interface_name, &interface_name[length - 1]), &bound_interface) == 0) {
3111 parsed_parameters->required_interface_index = bound_interface->if_index;
3112 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF;
3113 ifnet_release(bound_interface);
3114 }
3115 }
3116 break;
3117 }
3118 case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
3119 if (length >= sizeof(struct necp_policy_condition_addr)) {
3120 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
3121 if (necp_client_address_is_valid(&address_struct->address.sa)) {
3122 parsed_parameters->local_addr.sin6 = address_struct->address.sin6;
3123 if (!necp_address_is_wildcard(&parsed_parameters->local_addr)) {
3124 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR;
3125 }
3126 if ((parsed_parameters->local_addr.sa.sa_family == AF_INET && parsed_parameters->local_addr.sin.sin_port) ||
3127 (parsed_parameters->local_addr.sa.sa_family == AF_INET6 && parsed_parameters->local_addr.sin6.sin6_port)) {
3128 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT;
3129 }
3130 }
3131 }
3132 break;
3133 }
3134 case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: {
3135 if (length >= sizeof(struct necp_client_endpoint)) {
3136 struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
3137 if (necp_client_address_is_valid(&endpoint->u.sa)) {
3138 parsed_parameters->local_addr.sin6 = endpoint->u.sin6;
3139 if (!necp_address_is_wildcard(&parsed_parameters->local_addr)) {
3140 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR;
3141 }
3142 if ((parsed_parameters->local_addr.sa.sa_family == AF_INET && parsed_parameters->local_addr.sin.sin_port) ||
3143 (parsed_parameters->local_addr.sa.sa_family == AF_INET6 && parsed_parameters->local_addr.sin6.sin6_port)) {
3144 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT;
3145 }
3146 }
3147 }
3148 break;
3149 }
3150 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
3151 if (length >= sizeof(struct necp_policy_condition_addr)) {
3152 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
3153 if (necp_client_address_is_valid(&address_struct->address.sa)) {
3154 parsed_parameters->remote_addr.sin6 = address_struct->address.sin6;
3155 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR;
3156 }
3157 }
3158 break;
3159 }
3160 case NECP_CLIENT_PARAMETER_REMOTE_ENDPOINT: {
3161 if (length >= sizeof(struct necp_client_endpoint)) {
3162 struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
3163 if (necp_client_address_is_valid(&endpoint->u.sa)) {
3164 parsed_parameters->remote_addr.sin6 = endpoint->u.sin6;
3165 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR;
3166 }
3167 }
3168 break;
3169 }
3170 case NECP_CLIENT_PARAMETER_PROHIBIT_INTERFACE: {
3171 if (num_prohibited_interfaces >= NECP_MAX_INTERFACE_PARAMETERS) {
3172 break;
3173 }
3174 if (length <= IFXNAMSIZ && length > 0) {
3175 memcpy(parsed_parameters->prohibited_interfaces[num_prohibited_interfaces], value, length);
3176 parsed_parameters->prohibited_interfaces[num_prohibited_interfaces][length - 1] = 0; // Make sure the string is NULL terminated
3177 num_prohibited_interfaces++;
3178 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF;
3179 }
3180 break;
3181 }
3182 case NECP_CLIENT_PARAMETER_REQUIRE_IF_TYPE: {
3183 if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) {
3184 break;
3185 }
3186 if (length >= sizeof(u_int8_t)) {
3187 memcpy(&parsed_parameters->required_interface_type, value, sizeof(u_int8_t));
3188 if (parsed_parameters->required_interface_type) {
3189 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE;
3190 }
3191 }
3192 break;
3193 }
3194 case NECP_CLIENT_PARAMETER_PROHIBIT_IF_TYPE: {
3195 if (num_prohibited_interface_types >= NECP_MAX_INTERFACE_PARAMETERS) {
3196 break;
3197 }
3198 if (length >= sizeof(u_int8_t)) {
3199 memcpy(&parsed_parameters->prohibited_interface_types[num_prohibited_interface_types], value, sizeof(u_int8_t));
3200 num_prohibited_interface_types++;
3201 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE;
3202 }
3203 break;
3204 }
3205 case NECP_CLIENT_PARAMETER_REQUIRE_AGENT: {
3206 if (num_required_agents >= NECP_MAX_AGENT_PARAMETERS) {
3207 break;
3208 }
3209 if (length >= sizeof(uuid_t)) {
3210 memcpy(&parsed_parameters->required_netagents[num_required_agents], value, sizeof(uuid_t));
3211 num_required_agents++;
3212 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT;
3213 }
3214 break;
3215 }
3216 case NECP_CLIENT_PARAMETER_PROHIBIT_AGENT: {
3217 if (num_prohibited_agents >= NECP_MAX_AGENT_PARAMETERS) {
3218 break;
3219 }
3220 if (length >= sizeof(uuid_t)) {
3221 memcpy(&parsed_parameters->prohibited_netagents[num_prohibited_agents], value, sizeof(uuid_t));
3222 num_prohibited_agents++;
3223 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT;
3224 }
3225 break;
3226 }
3227 case NECP_CLIENT_PARAMETER_PREFER_AGENT: {
3228 if (num_preferred_agents >= NECP_MAX_AGENT_PARAMETERS) {
3229 break;
3230 }
3231 if (length >= sizeof(uuid_t)) {
3232 memcpy(&parsed_parameters->preferred_netagents[num_preferred_agents], value, sizeof(uuid_t));
3233 num_preferred_agents++;
3234 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT;
3235 }
3236 break;
3237 }
3238 case NECP_CLIENT_PARAMETER_AVOID_AGENT: {
3239 if (num_avoided_agents >= NECP_MAX_AGENT_PARAMETERS) {
3240 break;
3241 }
3242 if (length >= sizeof(uuid_t)) {
3243 memcpy(&parsed_parameters->avoided_netagents[num_avoided_agents], value, sizeof(uuid_t));
3244 num_avoided_agents++;
3245 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT;
3246 }
3247 break;
3248 }
3249 case NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE: {
3250 if (num_required_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
3251 break;
3252 }
3253 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
3254 memcpy(&parsed_parameters->required_netagent_types[num_required_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
3255 num_required_agent_types++;
3256 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE;
3257 }
3258 break;
3259 }
3260 case NECP_CLIENT_PARAMETER_PROHIBIT_AGENT_TYPE: {
3261 if (num_prohibited_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
3262 break;
3263 }
3264 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
3265 memcpy(&parsed_parameters->prohibited_netagent_types[num_prohibited_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
3266 num_prohibited_agent_types++;
3267 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE;
3268 }
3269 break;
3270 }
3271 case NECP_CLIENT_PARAMETER_PREFER_AGENT_TYPE: {
3272 if (num_preferred_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
3273 break;
3274 }
3275 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
3276 memcpy(&parsed_parameters->preferred_netagent_types[num_preferred_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
3277 num_preferred_agent_types++;
3278 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE;
3279 }
3280 break;
3281 }
3282 case NECP_CLIENT_PARAMETER_AVOID_AGENT_TYPE: {
3283 if (num_avoided_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
3284 break;
3285 }
3286 if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
3287 memcpy(&parsed_parameters->avoided_netagent_types[num_avoided_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
3288 num_avoided_agent_types++;
3289 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE;
3290 }
3291 break;
3292 }
3293 case NECP_CLIENT_PARAMETER_FLAGS: {
3294 if (length >= sizeof(u_int32_t)) {
3295 memcpy(&parsed_parameters->flags, value, sizeof(parsed_parameters->flags));
3296 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_FLAGS;
3297 }
3298 break;
3299 }
3300 case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
3301 if (length == sizeof(u_int16_t)) {
3302 u_int16_t large_ip_protocol = 0;
3303 memcpy(&large_ip_protocol, value, sizeof(large_ip_protocol));
3304 parsed_parameters->ip_protocol = (u_int8_t)large_ip_protocol;
3305 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL;
3306 } else if (length >= sizeof(parsed_parameters->ip_protocol)) {
3307 memcpy(&parsed_parameters->ip_protocol, value, sizeof(parsed_parameters->ip_protocol));
3308 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL;
3309 }
3310 break;
3311 }
3312 case NECP_CLIENT_PARAMETER_TRANSPORT_PROTOCOL: {
3313 if (length >= sizeof(parsed_parameters->transport_protocol)) {
3314 memcpy(&parsed_parameters->transport_protocol, value, sizeof(parsed_parameters->transport_protocol));
3315 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL;
3316 }
3317 break;
3318 }
3319 case NECP_CLIENT_PARAMETER_PID: {
3320 if (length >= sizeof(parsed_parameters->effective_pid)) {
3321 memcpy(&parsed_parameters->effective_pid, value, sizeof(parsed_parameters->effective_pid));
3322 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID;
3323 }
3324 break;
3325 }
3326 case NECP_CLIENT_PARAMETER_DELEGATED_UPID: {
3327 if (length >= sizeof(parsed_parameters->delegated_upid)) {
3328 memcpy(&parsed_parameters->delegated_upid, value, sizeof(parsed_parameters->delegated_upid));
3329 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID;
3330 }
3331 break;
3332 }
3333 case NECP_CLIENT_PARAMETER_ETHERTYPE: {
3334 if (length >= sizeof(parsed_parameters->ethertype)) {
3335 memcpy(&parsed_parameters->ethertype, value, sizeof(parsed_parameters->ethertype));
3336 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_ETHERTYPE;
3337 }
3338 break;
3339 }
3340 case NECP_CLIENT_PARAMETER_APPLICATION: {
3341 if (length >= sizeof(parsed_parameters->effective_uuid)) {
3342 memcpy(&parsed_parameters->effective_uuid, value, sizeof(parsed_parameters->effective_uuid));
3343 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID;
3344 }
3345 break;
3346 }
3347 case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
3348 if (length >= sizeof(parsed_parameters->traffic_class)) {
3349 memcpy(&parsed_parameters->traffic_class, value, sizeof(parsed_parameters->traffic_class));
3350 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS;
3351 }
3352 break;
3353 }
3354 case NECP_CLIENT_PARAMETER_RESOLVER_TAG: {
3355 if (length > 0) {
3356 if (resolver_tag != NULL) {
3357 // Multiple resolver tags is invalid
3358 NECPLOG0(LOG_ERR, "Multiple resolver tags are not supported");
3359 error = EINVAL;
3360 } else {
3361 resolver_tag = (u_int8_t *)value;
3362 resolver_tag_length = length;
3363 }
3364 }
3365 break;
3366 }
3367 case NECP_CLIENT_PARAMETER_DOMAIN: {
3368 if (length > 0) {
3369 client_hostname = (u_int8_t *)value;
3370 hostname_length = length;
3371 }
3372 break;
3373 }
3374 case NECP_CLIENT_PARAMETER_PARENT_ID: {
3375 if (length == sizeof(parent_id)) {
3376 uuid_copy(parent_id, value);
3377 memcpy(&parsed_parameters->parent_uuid, value, sizeof(parsed_parameters->parent_uuid));
3378 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID;
3379 }
3380 break;
3381 }
3382 case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE: {
3383 if (length >= sizeof(parsed_parameters->local_address_preference)) {
3384 memcpy(&parsed_parameters->local_address_preference, value, sizeof(parsed_parameters->local_address_preference));
3385 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE;
3386 }
3387 break;
3388 }
3389 case NECP_CLIENT_PARAMETER_ATTRIBUTED_BUNDLE_IDENTIFIER: {
3390 if (length > 0) {
3391 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER;
3392 }
3393 break;
3394 }
3395 case NECP_CLIENT_PARAMETER_FLOW_DEMUX_PATTERN: {
3396 if (parsed_parameters->demux_pattern_count >= NECP_MAX_DEMUX_PATTERNS) {
3397 break;
3398 }
3399 if (length >= sizeof(struct necp_demux_pattern)) {
3400 memcpy(&parsed_parameters->demux_patterns[parsed_parameters->demux_pattern_count], value, sizeof(struct necp_demux_pattern));
3401 parsed_parameters->demux_pattern_count++;
3402 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN;
3403 }
3404 break;
3405 }
3406 case NECP_CLIENT_PARAMETER_APPLICATION_ID: {
3407 if (length >= sizeof(necp_application_id_t)) {
3408 necp_application_id_t *application_id = (necp_application_id_t *)(void *)value;
3409 // UID
3410 parsed_parameters->uid = application_id->uid;
3411 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_UID;
3412 // EUUID
3413 uuid_copy(parsed_parameters->effective_uuid, application_id->effective_uuid);
3414 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID;
3415 // PERSONA
3416 parsed_parameters->persona_id = application_id->persona_id;
3417 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PERSONA_ID;
3418 }
3419 break;
3420 }
3421 case NECP_CLIENT_PARAMETER_EXTENDED_FLAGS: {
3422 if (length >= sizeof(u_int64_t)) {
3423 memcpy(&parsed_parameters->extended_flags, value, sizeof(parsed_parameters->extended_flags));
3424 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EXTENDED_FLAGS;
3425 }
3426 break;
3427 }
3428 default: {
3429 break;
3430 }
3431 }
3432 }
3433
3434 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) {
3435 necp_client_trace_parameter_parsing(client, type, value, length);
3436 }
3437 }
3438
3439 offset += sizeof(struct necp_tlv_header) + length;
3440 }
3441
3442 if (resolver_tag != NULL) {
3443 struct necp_client_validatable *validatable = (struct necp_client_validatable *)resolver_tag;
3444 if (resolver_tag_length <= sizeof(struct necp_client_validatable)) {
3445 error = EINVAL;
3446 NECPLOG(LOG_ERR, "Resolver tag length too short: %u", resolver_tag_length);
3447 } else {
3448 bool matches = true;
3449
3450 // Check the client UUID for client-specific results
3451 if (validatable->signable.sign_type == NECP_CLIENT_SIGN_TYPE_RESOLVER_ANSWER ||
3452 validatable->signable.sign_type == NECP_CLIENT_SIGN_TYPE_BROWSE_RESULT ||
3453 validatable->signable.sign_type == NECP_CLIENT_SIGN_TYPE_SERVICE_RESOLVER_ANSWER) {
3454 if (uuid_compare(parent_id, validatable->signable.client_id) != 0 &&
3455 uuid_compare(client->client_id, validatable->signable.client_id) != 0) {
3456 NECPLOG0(LOG_ERR, "Resolver tag invalid client ID");
3457 matches = false;
3458 }
3459 }
3460
3461 size_t data_length = resolver_tag_length - sizeof(struct necp_client_validatable);
3462 switch (validatable->signable.sign_type) {
3463 case NECP_CLIENT_SIGN_TYPE_RESOLVER_ANSWER:
3464 case NECP_CLIENT_SIGN_TYPE_SYSTEM_RESOLVER_ANSWER: {
3465 if (data_length < (sizeof(struct necp_client_host_resolver_answer) - sizeof(struct necp_client_signable))) {
3466 NECPLOG0(LOG_ERR, "Resolver tag invalid length for resolver answer");
3467 matches = false;
3468 } else {
3469 struct necp_client_host_resolver_answer * __single answer_struct = (struct necp_client_host_resolver_answer *)&validatable->signable;
3470 if (data_length != (sizeof(struct necp_client_host_resolver_answer) + answer_struct->hostname_length - sizeof(struct necp_client_signable))) {
3471 NECPLOG0(LOG_ERR, "Resolver tag invalid length for resolver answer");
3472 matches = false;
3473 } else {
3474 struct sockaddr_in6 sin6 = answer_struct->address_answer.sin6;
3475 if (answer_struct->hostname_length != 0 && // If the hostname on the signed answer is empty, ignore
3476 !necp_client_strings_are_equal((const char *)client_hostname, hostname_length,
3477 necp_answer_get_hostname(answer_struct, answer_struct->hostname_length), answer_struct->hostname_length)) {
3478 NECPLOG0(LOG_ERR, "Resolver tag hostname does not match");
3479 matches = false;
3480 } else if (answer_struct->address_answer.sa.sa_family != parsed_parameters->remote_addr.sa.sa_family ||
3481 answer_struct->address_answer.sa.sa_len != parsed_parameters->remote_addr.sa.sa_len) {
3482 NECPLOG0(LOG_ERR, "Resolver tag address type does not match");
3483 matches = false;
3484 } else if (answer_struct->address_answer.sin.sin_port != 0 && // If the port on the signed answer is empty, ignore
3485 answer_struct->address_answer.sin.sin_port != parsed_parameters->remote_addr.sin.sin_port) {
3486 NECPLOG0(LOG_ERR, "Resolver tag port does not match");
3487 matches = false;
3488 } else if ((answer_struct->address_answer.sa.sa_family == AF_INET &&
3489 answer_struct->address_answer.sin.sin_addr.s_addr != parsed_parameters->remote_addr.sin.sin_addr.s_addr) ||
3490 (answer_struct->address_answer.sa.sa_family == AF_INET6 &&
3491 memcmp(&sin6.sin6_addr, &parsed_parameters->remote_addr.sin6.sin6_addr, sizeof(struct in6_addr)) != 0)) {
3492 NECPLOG0(LOG_ERR, "Resolver tag address does not match");
3493 matches = false;
3494 }
3495 }
3496 }
3497 break;
3498 }
3499 case NECP_CLIENT_SIGN_TYPE_BROWSE_RESULT:
3500 case NECP_CLIENT_SIGN_TYPE_SYSTEM_BROWSE_RESULT: {
3501 if (data_length < (sizeof(struct necp_client_browse_result) - sizeof(struct necp_client_signable))) {
3502 NECPLOG0(LOG_ERR, "Resolver tag invalid length for browse result");
3503 matches = false;
3504 } else {
3505 struct necp_client_browse_result * __single answer_struct = (struct necp_client_browse_result *)&validatable->signable;
3506 if (data_length != (sizeof(struct necp_client_browse_result) + answer_struct->service_length - sizeof(struct necp_client_signable))) {
3507 NECPLOG0(LOG_ERR, "Resolver tag invalid length for browse result");
3508 matches = false;
3509 }
3510 }
3511 break;
3512 }
3513 case NECP_CLIENT_SIGN_TYPE_SERVICE_RESOLVER_ANSWER:
3514 case NECP_CLIENT_SIGN_TYPE_SYSTEM_SERVICE_RESOLVER_ANSWER: {
3515 if (data_length < (sizeof(struct necp_client_service_resolver_answer) - sizeof(struct necp_client_signable))) {
3516 NECPLOG0(LOG_ERR, "Resolver tag invalid length for service resolver answer");
3517 matches = false;
3518 } else {
3519 struct necp_client_service_resolver_answer * __single answer_struct = (struct necp_client_service_resolver_answer *)&validatable->signable;
3520 if (data_length != (sizeof(struct necp_client_service_resolver_answer) + answer_struct->service_length + answer_struct->hostname_length - sizeof(struct necp_client_signable))) {
3521 NECPLOG0(LOG_ERR, "Resolver tag invalid length for service resolver answer");
3522 matches = false;
3523 }
3524 }
3525 break;
3526 }
3527 default: {
3528 NECPLOG(LOG_ERR, "Resolver tag unknown sign type: %u", validatable->signable.sign_type);
3529 matches = false;
3530 break;
3531 }
3532 }
3533 if (!matches) {
3534 error = EAUTH;
3535 } else {
3536 const bool validated = necp_validate_resolver_answer(validatable->signable.client_id,
3537 validatable->signable.sign_type,
3538 signable_get_data(&validatable->signable, data_length), data_length,
3539 validatable->signature.signed_tag, sizeof(validatable->signature.signed_tag));
3540 if (!validated) {
3541 error = EAUTH;
3542 NECPLOG0(LOG_ERR, "Failed to validate resolve answer");
3543 }
3544 }
3545 }
3546 }
3547
3548 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) {
3549 necp_client_trace_parsed_parameters(client, parsed_parameters);
3550 }
3551
3552 return error;
3553 }
3554
3555 static int
necp_client_parse_result(u_int8_t * __indexable result,u_int32_t result_size,struct necp_client_flow * flow,void ** flow_stats)3556 necp_client_parse_result(u_int8_t * __indexable result,
3557 u_int32_t result_size,
3558 struct necp_client_flow *flow,
3559 void **flow_stats)
3560 {
3561 #pragma unused(flow_stats)
3562 int error = 0;
3563 size_t offset = 0;
3564
3565 while ((offset + sizeof(struct necp_tlv_header)) <= result_size) {
3566 u_int8_t type = necp_buffer_get_tlv_type(result, result_size, offset);
3567 u_int32_t length = necp_buffer_get_tlv_length(result, result_size, offset);
3568
3569 if (length > 0 && (offset + sizeof(struct necp_tlv_header) + length) <= result_size) {
3570 u_int8_t * __indexable value = necp_buffer_get_tlv_value(result, result_size, offset, NULL);
3571 if (value != NULL) {
3572 switch (type) {
3573 case NECP_CLIENT_RESULT_LOCAL_ENDPOINT: {
3574 if (length >= sizeof(struct necp_client_endpoint)) {
3575 struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
3576 if (necp_client_address_is_valid(&endpoint->u.sa)) {
3577 flow->local_addr.sin6 = endpoint->u.sin6;
3578 }
3579 }
3580 break;
3581 }
3582 case NECP_CLIENT_RESULT_REMOTE_ENDPOINT: {
3583 if (length >= sizeof(struct necp_client_endpoint)) {
3584 struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
3585 if (necp_client_address_is_valid(&endpoint->u.sa)) {
3586 flow->remote_addr.sin6 = endpoint->u.sin6;
3587 }
3588 }
3589 break;
3590 }
3591 #if SKYWALK
3592 case NECP_CLIENT_RESULT_NEXUS_FLOW_STATS: {
3593 // this TLV contains flow_stats pointer which is refcnt'ed.
3594 if (flow_stats != NULL && length >= sizeof(struct sk_stats_flow *)) {
3595 struct flow_stats * __single fs = *(void **)(void *)value;
3596 // transfer the refcnt to flow_stats pointer
3597 *flow_stats = fs;
3598 }
3599 memset(value, 0, length); // nullify TLV always
3600 break;
3601 }
3602 case NECP_CLIENT_RESULT_UNIQUE_FLOW_TAG: {
3603 if (length >= sizeof(uint32_t)) {
3604 flow->flow_tag = *(uint32_t *)(void *)value;
3605 break;
3606 }
3607 }
3608 #endif /* SKYWALK */
3609 default: {
3610 break;
3611 }
3612 }
3613 }
3614 }
3615
3616 offset += sizeof(struct necp_tlv_header) + length;
3617 }
3618
3619 return error;
3620 }
3621
/*
 * Allocate a new flow registration for 'client' and link it into the three
 * lookup structures: the client's registration tree, the fd's flow tree, and
 * the global flow tree. Requires both the fd lock and the client lock held.
 *
 * Returns the new registration; allocation uses Z_NOFAIL, so this never
 * returns NULL.
 */
static struct necp_client_flow_registration *
necp_client_create_flow_registration(struct necp_fd_data *fd_data, struct necp_client *client)
{
	NECP_FD_ASSERT_LOCKED(fd_data);
	NECP_CLIENT_ASSERT_LOCKED(client);

	struct necp_client_flow_registration *new_registration = kalloc_type(struct necp_client_flow_registration, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	// Interface details start out unknown until a flow is actually assigned
	new_registration->last_interface_details = combine_interface_details(IFSCOPE_NONE, NSTAT_IFNET_IS_UNKNOWN_TYPE);

	// 'true' marks this ID as a flow registration ID (distinct from client IDs)
	necp_generate_client_id(new_registration->registration_id, true);
	LIST_INIT(&new_registration->flow_list);

	// Add registration to client list
	RB_INSERT(_necp_client_flow_tree, &client->flow_registrations, new_registration);

	// Add registration to fd list
	RB_INSERT(_necp_fd_flow_tree, &fd_data->flows, new_registration);

	// Add registration to global tree for lookup
	NECP_FLOW_TREE_LOCK_EXCLUSIVE();
	RB_INSERT(_necp_client_flow_global_tree, &necp_client_flow_global_tree, new_registration);
	NECP_FLOW_TREE_UNLOCK();

	new_registration->client = client;

#if SKYWALK
	{
		// The uuid caching here is something of a hack, but saves a dynamic lookup with attendant lock hierarchy issues
		uint64_t stats_event_type = (uuid_is_null(client->latest_flow_registration_id)) ? NSTAT_EVENT_SRC_FLOW_UUID_ASSIGNED : NSTAT_EVENT_SRC_FLOW_UUID_CHANGED;
		uuid_copy(client->latest_flow_registration_id, new_registration->registration_id);

		// With the flow uuid known, push a new statistics update to ensure the uuid gets known by any clients before the flow can close
		if (client->nstat_context != NULL) {
			nstat_provider_stats_event(client->nstat_context, stats_event_type);
		}
	}
#endif /* SKYWALK */

	// Start out assuming there is nothing to read from the flow
	new_registration->flow_result_read = true;

	return new_registration;
}
3666
3667 static void
necp_client_add_socket_flow(struct necp_client_flow_registration * flow_registration,struct inpcb * inp)3668 necp_client_add_socket_flow(struct necp_client_flow_registration *flow_registration,
3669 struct inpcb *inp)
3670 {
3671 struct necp_client_flow *new_flow = kalloc_type(struct necp_client_flow, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3672
3673 new_flow->socket = TRUE;
3674 new_flow->u.socket_handle = inp;
3675 new_flow->u.cb = inp->necp_cb;
3676
3677 OSIncrementAtomic(&necp_socket_flow_count);
3678
3679 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
3680 }
3681
3682 static int
necp_client_register_socket_inner(pid_t pid,uuid_t client_id,struct inpcb * inp,bool is_listener)3683 necp_client_register_socket_inner(pid_t pid, uuid_t client_id, struct inpcb *inp, bool is_listener)
3684 {
3685 int error = 0;
3686 struct necp_fd_data *client_fd = NULL;
3687 bool found_client = FALSE;
3688
3689 NECP_FD_LIST_LOCK_SHARED();
3690 LIST_FOREACH(client_fd, &necp_fd_list, chain) {
3691 NECP_FD_LOCK(client_fd);
3692 struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
3693 if (client != NULL) {
3694 if (!pid || client->proc_pid == pid) {
3695 if (is_listener) {
3696 found_client = TRUE;
3697 #if SKYWALK
3698 // Check netns token for registration
3699 if (!NETNS_TOKEN_VALID(&client->port_reservation)) {
3700 error = EINVAL;
3701 }
3702 #endif /* !SKYWALK */
3703 } else {
3704 // Find client flow and assign from socket
3705 struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
3706 if (flow_registration != NULL) {
3707 // Found the right client and flow registration, add a new flow
3708 found_client = TRUE;
3709 necp_client_add_socket_flow(flow_registration, inp);
3710 } else if (RB_EMPTY(&client->flow_registrations) && !necp_client_id_is_flow(client_id)) {
3711 // No flows yet on this client, add a new registration
3712 flow_registration = necp_client_create_flow_registration(client_fd, client);
3713 if (flow_registration == NULL) {
3714 error = ENOMEM;
3715 } else {
3716 // Add a new flow
3717 found_client = TRUE;
3718 necp_client_add_socket_flow(flow_registration, inp);
3719 }
3720 }
3721 }
3722 }
3723
3724 NECP_CLIENT_UNLOCK(client);
3725 }
3726 NECP_FD_UNLOCK(client_fd);
3727
3728 if (found_client) {
3729 break;
3730 }
3731 }
3732 NECP_FD_LIST_UNLOCK();
3733
3734 if (!found_client) {
3735 error = ENOENT;
3736 } else {
3737 // Count the sockets that have the NECP client UUID set
3738 struct socket *so = inp->inp_socket;
3739 if (!(so->so_flags1 & SOF1_HAS_NECP_CLIENT_UUID)) {
3740 so->so_flags1 |= SOF1_HAS_NECP_CLIENT_UUID;
3741 INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_necp_clientuuid_total);
3742 }
3743 }
3744
3745 return error;
3746 }
3747
// Register a socket as a data flow on an existing NECP client (non-listener).
int
necp_client_register_socket_flow(pid_t pid, uuid_t client_id, struct inpcb *inp)
{
	return necp_client_register_socket_inner(pid, client_id, inp, false);
}
3753
// Register a socket as a listener on an existing NECP client (validation only,
// no flow is attached).
int
necp_client_register_socket_listener(pid_t pid, uuid_t client_id, struct inpcb *inp)
{
	return necp_client_register_socket_inner(pid, client_id, inp, true);
}
3759
#if SKYWALK
/*
 * Look up the client matching 'client_id' and copy the flow info from its
 * netns port reservation into 'flow_info'.
 *
 * Returns 0 on success, ENOENT if no client matched, EINVAL if the client's
 * port reservation token is not valid, or the error from
 * netns_get_flow_info().
 */
int
necp_client_get_netns_flow_info(uuid_t client_id, struct ns_flow_info *flow_info)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			found_client = TRUE;
			if (!NETNS_TOKEN_VALID(&client->port_reservation)) {
				error = EINVAL;
			} else {
				error = netns_get_flow_info(&client->port_reservation, flow_info);
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		// Client IDs are unique; stop scanning once found
		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	}

	return error;
}
#endif /* SKYWALK */
3797
/*
 * Point a flow registration at a multipath PCB and add per-interface
 * tracking flows for the owning process.
 *
 * NOTE(review): if proc_find() fails, this returns after having already set
 * interface_handle/interface_cb on the registration — presumably intentional
 * so callbacks still fire, but worth confirming against callers.
 */
static void
necp_client_add_multipath_interface_flows(struct necp_client_flow_registration *flow_registration,
    struct necp_client *client,
    struct mppcb *mpp)
{
	flow_registration->interface_handle = mpp;
	flow_registration->interface_cb = mpp->necp_cb;

	proc_t proc = proc_find(client->proc_pid);
	if (proc == PROC_NULL) {
		return;
	}

	// Traverse all interfaces and add a tracking flow if needed
	necp_flow_add_interface_flows(proc, client, flow_registration, true);

	proc_rele(proc);
	proc = PROC_NULL;
}
3817
/*
 * Find the NECP client matching 'client_id' (and 'pid', unless pid is 0) and
 * register a multipath PCB callback on its flow registration, creating a
 * fresh registration when the client has none. Mirrors
 * necp_client_register_socket_inner() for the multipath case.
 *
 * Returns 0 on success, ENOENT if no matching client was found, or ENOMEM if
 * registration creation fails.
 */
int
necp_client_register_multipath_cb(pid_t pid, uuid_t client_id, struct mppcb *mpp)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			// pid 0 means "any owning process"
			if (!pid || client->proc_pid == pid) {
				struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
				if (flow_registration != NULL) {
					// Found the right client and flow registration, add a new flow
					found_client = TRUE;
					necp_client_add_multipath_interface_flows(flow_registration, client, mpp);
				} else if (RB_EMPTY(&client->flow_registrations) && !necp_client_id_is_flow(client_id)) {
					// No flows yet on this client, add a new registration
					flow_registration = necp_client_create_flow_registration(client_fd, client);
					if (flow_registration == NULL) {
						error = ENOMEM;
					} else {
						// Add a new flow
						found_client = TRUE;
						necp_client_add_multipath_interface_flows(flow_registration, client, mpp);
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	// Preserve any earlier error (e.g. ENOMEM) rather than masking it
	if (!found_client && error == 0) {
		error = ENOENT;
	}

	return error;
}
3865
3866 #define NETAGENT_DOMAIN_RADIO_MANAGER "WirelessRadioManager"
3867 #define NETAGENT_TYPE_RADIO_MANAGER "WirelessRadioManager:BB Manager"
3868
3869 static int
necp_client_lookup_bb_radio_manager(struct necp_client * client,uuid_t netagent_uuid)3870 necp_client_lookup_bb_radio_manager(struct necp_client *client,
3871 uuid_t netagent_uuid)
3872 {
3873 char netagent_domain[NETAGENT_DOMAINSIZE];
3874 char netagent_type[NETAGENT_TYPESIZE];
3875 struct necp_aggregate_result result;
3876 proc_t proc;
3877 int error;
3878
3879 proc = proc_find(client->proc_pid);
3880 if (proc == PROC_NULL) {
3881 return ESRCH;
3882 }
3883
3884 error = necp_application_find_policy_match_internal(proc, client->parameters, (u_int32_t)client->parameters_length,
3885 &result, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, true, true, NULL);
3886
3887 proc_rele(proc);
3888 proc = PROC_NULL;
3889
3890 if (error) {
3891 return error;
3892 }
3893
3894 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
3895 if (uuid_is_null(result.netagents[i])) {
3896 // Passed end of valid agents
3897 break;
3898 }
3899
3900 memset(&netagent_domain, 0, NETAGENT_DOMAINSIZE);
3901 memset(&netagent_type, 0, NETAGENT_TYPESIZE);
3902 if (netagent_get_agent_domain_and_type(result.netagents[i], netagent_domain, netagent_type) == FALSE) {
3903 continue;
3904 }
3905
3906 if (strlcmp(netagent_domain, NETAGENT_DOMAIN_RADIO_MANAGER, NETAGENT_DOMAINSIZE) != 0) {
3907 continue;
3908 }
3909
3910 if (strlcmp(netagent_type, NETAGENT_TYPE_RADIO_MANAGER, NETAGENT_TYPESIZE) != 0) {
3911 continue;
3912 }
3913
3914 uuid_copy(netagent_uuid, result.netagents[i]);
3915
3916 break;
3917 }
3918
3919 return 0;
3920 }
3921
// Common implementation for asserting or unasserting the BB radio manager
// agent for a client. Caller holds the client lock (see
// necp_client_assert_bb_radio_manager, which locks the client around this
// call).
//
// Returns 0 on success; an errno if the agent lookup fails, the unassert
// had no matching prior assertion, or the agent message fails.
static int
necp_client_assert_bb_radio_manager_common(struct necp_client *client, bool assert)
{
	uuid_t netagent_uuid;
	uint8_t assert_type;
	int error;

	// Resolve the radio manager agent UUID that applies to this client
	error = necp_client_lookup_bb_radio_manager(client, netagent_uuid);
	if (error) {
		NECPLOG0(LOG_ERR, "BB radio manager agent not found");
		return error;
	}

	// Before unasserting, verify that the assertion was already taken
	if (assert == FALSE) {
		assert_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;

		if (!necp_client_remove_assertion(client, netagent_uuid)) {
			return EINVAL;
		}
	} else {
		assert_type = NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT;
	}

	// Deliver the assert/unassert message to the agent
	error = netagent_client_message(netagent_uuid, client->client_id, client->proc_pid, client->agent_handle, assert_type);
	if (error) {
		NECPLOG0(LOG_ERR, "netagent_client_message failed");
		return error;
	}

	// Only save the assertion if the action succeeded
	if (assert == TRUE) {
		necp_client_add_assertion(client, netagent_uuid);
	}

	return 0;
}
3959
3960 int
necp_client_assert_bb_radio_manager(uuid_t client_id,bool assert)3961 necp_client_assert_bb_radio_manager(uuid_t client_id, bool assert)
3962 {
3963 struct necp_client *client;
3964 int error = 0;
3965
3966 NECP_CLIENT_TREE_LOCK_SHARED();
3967
3968 client = necp_find_client_and_lock(client_id);
3969
3970 if (client) {
3971 // Found the right client!
3972 error = necp_client_assert_bb_radio_manager_common(client, assert);
3973
3974 NECP_CLIENT_UNLOCK(client);
3975 } else {
3976 NECPLOG0(LOG_ERR, "Couldn't find client");
3977 error = ENOENT;
3978 }
3979
3980 NECP_CLIENT_TREE_UNLOCK();
3981
3982 return error;
3983 }
3984
// Remove the socket-backed flow identified by "handle" from the client
// registered under client_id. Frees any results previously assigned to
// the removed flow and notifies the owning fd that the client changed.
//
// Returns 0 if the client and its flow registration were found, ENOENT
// otherwise.
static int
necp_client_unregister_socket_flow(uuid_t client_id, void *handle)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	// Walk every NECP fd to find the one that owns this client
	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);

		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;

				// Remove flow assignment
				struct necp_client_flow * __single search_flow = NULL;
				struct necp_client_flow *temp_flow = NULL;
				LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
					// Match only socket flows whose handle is this one
					if (search_flow->socket && search_flow->u.socket_handle == handle) {
						if (search_flow->assigned_results != NULL) {
							kfree_data_counted_by(search_flow->assigned_results, search_flow->assigned_results_length);
						}
						client_updated = TRUE;
						// Force readers to pick up the changed flow state
						flow_registration->flow_result_read = FALSE;
						LIST_REMOVE(search_flow, flow_chain);
						OSDecrementAtomic(&necp_socket_flow_count);
						kfree_type(struct necp_client_flow, search_flow);
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}

		// Notify while still holding the fd lock, then release it
		if (client_updated) {
			necp_fd_notify(client_fd, true);
		}
		NECP_FD_UNLOCK(client_fd);

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	}

	return error;
}
4041
// Detach a multipath callback registration from a client's flows: for any
// flow that is neither a socket flow nor a nexus flow and whose stored
// handle matches, clear the handle and callback; also clear the
// registration-level interface handle/callback.
//
// Returns 0 if the client and its flow registration were found, ENOENT
// otherwise.
static int
necp_client_unregister_multipath_cb(uuid_t client_id, void *handle)
{
	int error = 0;
	bool found_client = FALSE;

	NECP_CLIENT_TREE_LOCK_SHARED();

	struct necp_client *client = necp_find_client_and_lock(client_id);
	if (client != NULL) {
		struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
		if (flow_registration != NULL) {
			// Found the right client and flow!
			found_client = TRUE;

			// Remove flow assignment
			struct necp_client_flow *search_flow = NULL;
			struct necp_client_flow *temp_flow = NULL;
			LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
				// Only non-socket, non-nexus flows carry a multipath callback
				if (!search_flow->socket && !search_flow->nexus &&
				    search_flow->u.socket_handle == handle) {
					search_flow->u.socket_handle = NULL;
					search_flow->u.cb = NULL;
				}
			}

			// Clear the interface callback state on the registration itself
			flow_registration->interface_handle = NULL;
			flow_registration->interface_cb = NULL;
		}

		NECP_CLIENT_UNLOCK(client);
	}

	NECP_CLIENT_TREE_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	}

	return error;
}
4083
// Assign flow results for a socket-backed flow directly from its inpcb:
// copies the bound/last-output interface and the local/remote addresses
// out of the inpcb into the matching flow, builds an assignment message,
// and notifies the owning fd.
//
// pid may be 0 to search all NECP fds; otherwise only fds owned by that
// pid are considered. Returns 0 on success, ENOENT if no client/flow was
// found, ENOMEM if a new flow registration could not be created, or
// EINVAL if a client was found but no matching flow was updated.
int
necp_client_assign_from_socket(pid_t pid, uuid_t client_id, struct inpcb *inp)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		// Filter by owning pid when one was specified
		if (pid && client_fd->proc_pid != pid) {
			continue;
		}

		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);

		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration == NULL && RB_EMPTY(&client->flow_registrations) && !necp_client_id_is_flow(client_id)) {
				// No flows yet on this client, add a new registration
				flow_registration = necp_client_create_flow_registration(client_fd, client);
				if (flow_registration == NULL) {
					error = ENOMEM;
				}
			}
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;

				struct necp_client_flow *flow = NULL;
				LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
					if (flow->socket && flow->u.socket_handle == inp) {
						// Release prior results and route
						if (flow->assigned_results != NULL) {
							kfree_data_counted_by(flow->assigned_results, flow->assigned_results_length);
						}

						// Prefer an explicitly bound interface; fall back
						// to the last interface the socket sent over
						ifnet_t ifp = NULL;
						if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
							ifp = inp->inp_boundifp;
						} else {
							ifp = inp->inp_last_outifp;
						}

						if (ifp != NULL) {
							flow->interface_index = ifp->if_index;
						} else {
							flow->interface_index = IFSCOPE_NONE;
						}

						// Copy addresses/ports out of the inpcb in the
						// address family the socket is using
						if (inp->inp_vflag & INP_IPV4) {
							flow->local_addr.sin.sin_family = AF_INET;
							flow->local_addr.sin.sin_len = sizeof(struct sockaddr_in);
							flow->local_addr.sin.sin_port = inp->inp_lport;
							memcpy(&flow->local_addr.sin.sin_addr, &inp->inp_laddr, sizeof(struct in_addr));

							flow->remote_addr.sin.sin_family = AF_INET;
							flow->remote_addr.sin.sin_len = sizeof(struct sockaddr_in);
							flow->remote_addr.sin.sin_port = inp->inp_fport;
							memcpy(&flow->remote_addr.sin.sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
						} else if (inp->inp_vflag & INP_IPV6) {
							in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope, &flow->local_addr.sin6, sizeof(flow->local_addr));
							in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope, &flow->remote_addr.sin6, sizeof(flow->remote_addr));
						}

						flow->viable = necp_client_flow_is_viable(proc, client, flow);

						// No nexus agent for socket-assigned flows: build the
						// assignment message with a null agent UUID
						uuid_t empty_uuid;
						uuid_clear(empty_uuid);
						flow->assigned = TRUE;

						size_t message_length;
						void *message = necp_create_nexus_assign_message(empty_uuid, 0, NULL, 0,
						    (struct necp_client_endpoint *)&flow->local_addr,
						    (struct necp_client_endpoint *)&flow->remote_addr,
						    NULL, 0, NULL, 0, &message_length);
						flow->assigned_results = message;
						flow->assigned_results_length = message_length;
						flow_registration->flow_result_read = FALSE;
						client_updated = TRUE;
						break;
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		if (client_updated) {
			necp_fd_notify(client_fd, true);
		}
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	// Preserve any earlier error (e.g. ENOMEM); otherwise report whether
	// the client/flow was found and updated
	if (error == 0) {
		if (!found_client) {
			error = ENOENT;
		} else if (!client_updated) {
			error = EINVAL;
		}
	}

	return error;
}
4201
// Check whether a listener socket is allowed to receive traffic arriving
// on the given interface, by re-parsing the associated NECP client's
// parameters and matching them against the interface. Non-listener
// (connected) sockets always pass, since outbound connections were
// already validated for policy.
bool
necp_socket_is_allowed_to_recv_on_interface(struct inpcb *inp, ifnet_t interface)
{
	if (interface == NULL ||
	    inp == NULL ||
	    !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
	    uuid_is_null(inp->necp_client_uuid)) {
		// If there's no interface or client ID to check,
		// or if this is not a listener, pass.
		// Outbound connections will have already been
		// validated for policy.
		return TRUE;
	}

	// Only filter out listener sockets (no remote address specified)
	if ((inp->inp_vflag & INP_IPV4) &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		return TRUE;
	}
	if ((inp->inp_vflag & INP_IPV6) &&
	    !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
		return TRUE;
	}

	// Default-allow: only an explicit parameter mismatch denies receipt
	bool allowed = TRUE;

	NECP_CLIENT_TREE_LOCK_SHARED();

	struct necp_client *client = necp_find_client_and_lock(inp->necp_client_uuid);
	if (client != NULL) {
		struct necp_client_parsed_parameters * __single parsed_parameters = NULL;

		parsed_parameters = kalloc_type(struct necp_client_parsed_parameters,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		int error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, parsed_parameters);
		if (error == 0) {
			// Deny only when the interface fails the client's requirements
			if (!necp_ifnet_matches_parameters(interface, parsed_parameters, 0, NULL, true, false)) {
				allowed = FALSE;
			}
		}
		kfree_type(struct necp_client_parsed_parameters, parsed_parameters);

		NECP_CLIENT_UNLOCK(client);
	}

	NECP_CLIENT_TREE_UNLOCK();

	return allowed;
}
4251
// Record a protocol-control event (code/value/TCP sequence number) on the
// flow identified by client_id, and notify the owning fd so userspace can
// read the event. The event is attached to the first flow that is either
// a nexus flow whose agent matches netagent_uuid, or any socket flow.
//
// Returns 0 on success, ENOENT if no client/flow was found, or EINVAL if
// a flow registration was found but no flow matched.
int
necp_update_flow_protoctl_event(uuid_t netagent_uuid, uuid_t client_id,
    uint32_t protoctl_event_code, uint32_t protoctl_event_val,
    uint32_t protoctl_event_tcp_seq_number)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);

		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;

				struct necp_client_flow *flow = NULL;
				LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
					// Verify that the client nexus agent matches
					if ((flow->nexus && uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) ||
					    flow->socket) {
						// Stash the event on the flow for userspace to read
						flow->has_protoctl_event = TRUE;
						flow->protoctl_event.protoctl_event_code = protoctl_event_code;
						flow->protoctl_event.protoctl_event_val = protoctl_event_val;
						flow->protoctl_event.protoctl_event_tcp_seq_num = protoctl_event_tcp_seq_number;
						flow_registration->flow_result_read = FALSE;
						client_updated = TRUE;
						break;
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}

		if (client_updated) {
			necp_fd_notify(client_fd, true);
		}

		NECP_FD_UNLOCK(client_fd);
		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	} else if (!client_updated) {
		error = EINVAL;
	}
	return error;
}
4318
// Assign result bytes to the flow in flow_registration whose nexus agent
// matches netagent_uuid. Requires the fd lock and client lock to be held
// (asserted below).
//
// Ownership: on return TRUE the flow has taken ownership of
// assigned_results (freed later with the flow); on return FALSE the
// caller retains ownership and must free the buffer (see trailing note).
static bool
necp_assign_client_result_locked(struct proc *proc,
    struct necp_fd_data *client_fd,
    struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    uuid_t netagent_uuid,
    u_int8_t * __indexable assigned_results,
    size_t assigned_results_length,
    bool notify_fd,
    bool assigned_from_userspace_agent)
{
	bool client_updated = FALSE;

	NECP_FD_ASSERT_LOCKED(client_fd);
	NECP_CLIENT_ASSERT_LOCKED(client);

	struct necp_client_flow *flow = NULL;
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		// Verify that the client nexus agent matches
		if (flow->nexus &&
		    uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) {
			// Release prior results and route
			if (flow->assigned_results != NULL) {
				kfree_data_counted_by(flow->assigned_results, flow->assigned_results_length);
			}

			void * __single nexus_stats = NULL;
			if (assigned_results != NULL && assigned_results_length > 0) {
				int error = necp_client_parse_result(assigned_results, (u_int32_t)assigned_results_length,
				    flow, assigned_from_userspace_agent ? NULL : &nexus_stats);                         // Only assign stats from kernel agents
				VERIFY(error == 0);
			}

			// Recompute viability with the new results in place
			flow->viable = necp_client_flow_is_viable(proc, client, flow);

			flow->assigned = TRUE;
			flow->assigned_results = assigned_results;
			flow->assigned_results_length = assigned_results_length;
			flow_registration->flow_result_read = FALSE;
#if SKYWALK
			// Swap in the new nexus stats, releasing any prior reference
			if (nexus_stats != NULL) {
				if (flow_registration->nexus_stats != NULL) {
					flow_stats_release(flow_registration->nexus_stats);
				}
				flow_registration->nexus_stats = nexus_stats;
			}
#endif /* SKYWALK */
			client_updated = TRUE;
			break;
		}
	}

	if (client_updated && notify_fd) {
		necp_fd_notify(client_fd, true);
	}

	// if not updated, client must free assigned_results
	return client_updated;
}
4378
// Public entry for agents to assign results to a client's flow: locates
// the client/flow by client_id across all NECP fds and delegates to
// necp_assign_client_result_locked.
//
// Ownership: on a non-zero return the caller still owns and must free
// assigned_results. Returns ENOENT if no client/flow matched, EINVAL if
// the flow was found but not updated.
int
necp_assign_client_result(uuid_t netagent_uuid, uuid_t client_id,
    u_int8_t * __sized_by(assigned_results_length)assigned_results, size_t assigned_results_length)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;
				if (necp_assign_client_result_locked(proc, client_fd, client, flow_registration, netagent_uuid,
				    assigned_results, assigned_results_length, true, true)) {
					client_updated = TRUE;
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();

	// upon error, client must free assigned_results
	if (!found_client) {
		error = ENOENT;
	} else if (!client_updated) {
		error = EINVAL;
	}

	return error;
}
4432
// Assign a group-members buffer to the client identified by client_id,
// replacing (and freeing) any previously assigned buffer, and notify the
// owning fd. The netagent_uuid parameter is currently unused.
//
// Ownership: on success (return 0) the client takes ownership of
// assigned_group_members; upon error the caller must free it. Returns
// ENOENT if no client matched.
int
necp_assign_client_group_members(uuid_t netagent_uuid, uuid_t client_id,
    u_int8_t *__counted_by(assigned_group_members_length) assigned_group_members,
    size_t assigned_group_members_length)
{
#pragma unused(netagent_uuid)
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = false;
	bool client_updated = false;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			found_client = true;
			// Release prior results
			if (client->assigned_group_members != NULL) {
				kfree_data_counted_by(client->assigned_group_members, client->assigned_group_members_length);
			}

			// Save new results
			client->assigned_group_members = assigned_group_members;
			client->assigned_group_members_length = assigned_group_members_length;
			// Mark unread so userspace will fetch the new members
			client->group_members_read = false;

			client_updated = true;
			necp_fd_notify(client_fd, true);

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();

	// upon error, client must free assigned_results
	if (!found_client) {
		error = ENOENT;
	} else if (!client_updated) {
		error = EINVAL;
	}

	return error;
}
4492
4493 /// Client updating
4494
4495 static bool
necp_update_parsed_parameters(struct necp_client_parsed_parameters * parsed_parameters,struct necp_aggregate_result * result)4496 necp_update_parsed_parameters(struct necp_client_parsed_parameters *parsed_parameters,
4497 struct necp_aggregate_result *result)
4498 {
4499 if (parsed_parameters == NULL ||
4500 result == NULL) {
4501 return false;
4502 }
4503
4504 bool updated = false;
4505 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
4506 if (uuid_is_null(result->netagents[i])) {
4507 // Passed end of valid agents
4508 break;
4509 }
4510
4511 if (!(result->netagent_use_flags[i] & NECP_AGENT_USE_FLAG_SCOPE)) {
4512 // Not a scoped agent, ignore
4513 continue;
4514 }
4515
4516 // This is a scoped agent. Add it to the required agents.
4517 if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT) {
4518 // Already some required agents, add this at the end
4519 for (int j = 0; j < NECP_MAX_AGENT_PARAMETERS; j++) {
4520 if (uuid_compare(parsed_parameters->required_netagents[j], result->netagents[i]) == 0) {
4521 // Already required, break
4522 break;
4523 }
4524 if (uuid_is_null(parsed_parameters->required_netagents[j])) {
4525 // Add here
4526 memcpy(&parsed_parameters->required_netagents[j], result->netagents[i], sizeof(uuid_t));
4527 updated = true;
4528 break;
4529 }
4530 }
4531 } else {
4532 // No required agents yet, add this one
4533 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT;
4534 memcpy(&parsed_parameters->required_netagents[0], result->netagents[i], sizeof(uuid_t));
4535 updated = true;
4536 }
4537
4538 // Remove requirements for agents of the same type
4539 if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
4540 char remove_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
4541 char remove_agent_type[NETAGENT_TYPESIZE] = { 0 };
4542 if (netagent_get_agent_domain_and_type(result->netagents[i], remove_agent_domain, remove_agent_type)) {
4543 for (int j = 0; j < NECP_MAX_AGENT_PARAMETERS; j++) {
4544 if (strbuflen(parsed_parameters->required_netagent_types[j].netagent_domain, sizeof(parsed_parameters->required_netagent_types[j].netagent_domain)) == 0 &&
4545 strbuflen(parsed_parameters->required_netagent_types[j].netagent_type, sizeof(parsed_parameters->required_netagent_types[j].netagent_type)) == 0) {
4546 break;
4547 }
4548
4549 if (strbufcmp(parsed_parameters->required_netagent_types[j].netagent_domain, sizeof(parsed_parameters->required_netagent_types[j].netagent_domain), remove_agent_domain, NETAGENT_DOMAINSIZE) == 0 &&
4550 strbufcmp(parsed_parameters->required_netagent_types[j].netagent_type, sizeof(parsed_parameters->required_netagent_types[j].netagent_type), remove_agent_type, NETAGENT_TYPESIZE) == 0) {
4551 updated = true;
4552
4553 if (j == NECP_MAX_AGENT_PARAMETERS - 1) {
4554 // Last field, just clear and break
4555 memset(&parsed_parameters->required_netagent_types[NECP_MAX_AGENT_PARAMETERS - 1], 0, sizeof(struct necp_client_parameter_netagent_type));
4556 break;
4557 } else {
4558 // Move the parameters down, clear the last entry
4559 memmove(&parsed_parameters->required_netagent_types[j],
4560 &parsed_parameters->required_netagent_types[j + 1],
4561 sizeof(struct necp_client_parameter_netagent_type) * (NECP_MAX_AGENT_PARAMETERS - (j + 1)));
4562 memset(&parsed_parameters->required_netagent_types[NECP_MAX_AGENT_PARAMETERS - 1], 0, sizeof(struct necp_client_parameter_netagent_type));
4563 // Continue, don't increment but look at the new shifted item instead
4564 continue;
4565 }
4566 }
4567
4568 // Increment j to look at the next agent type parameter
4569 j++;
4570 }
4571 }
4572 }
4573 }
4574
4575 if (updated &&
4576 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4577 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) == 0) {
4578 // A required interface index was added after the fact. Clear it.
4579 parsed_parameters->required_interface_index = IFSCOPE_NONE;
4580 }
4581
4582
4583 return updated;
4584 }
4585
4586 static inline bool
necp_agent_types_match(const char * __sized_by (NETAGENT_DOMAINSIZE)agent_domain1,const char * __sized_by (NETAGENT_TYPESIZE)agent_type1,const char * __sized_by (NETAGENT_DOMAINSIZE)agent_domain2,const char * __sized_by (NETAGENT_TYPESIZE)agent_type2)4587 necp_agent_types_match(const char * __sized_by(NETAGENT_DOMAINSIZE)agent_domain1, const char * __sized_by(NETAGENT_TYPESIZE)agent_type1,
4588 const char * __sized_by(NETAGENT_DOMAINSIZE)agent_domain2, const char * __sized_by(NETAGENT_TYPESIZE)agent_type2)
4589 {
4590 return (strbuflen(agent_domain1, NETAGENT_DOMAINSIZE) == 0 ||
4591 strbufcmp(agent_domain2, NETAGENT_DOMAINSIZE, agent_domain1, NETAGENT_DOMAINSIZE) == 0) &&
4592 (strbuflen(agent_type1, NETAGENT_TYPESIZE) == 0 ||
4593 strbufcmp(agent_type2, NETAGENT_TYPESIZE, agent_type1, NETAGENT_TYPESIZE) == 0);
4594 }
4595
// Compute the policy result for a client: find the best matching
// interface for the parsed parameters, run policy matching, validate any
// required agents / agent types against the result, and install the
// resulting route on the client.
//
// Returns FALSE only if policy matching itself fails; otherwise returns
// TRUE, with *result zeroed out when no interface was found or an agent
// requirement failed (so everything downstream fails cleanly).
static inline bool
necp_calculate_client_result(proc_t proc,
    struct necp_client *client,
    struct necp_client_parsed_parameters *parsed_parameters,
    struct necp_aggregate_result *result,
    u_int32_t *flags,
    u_int32_t *reason,
    struct necp_client_endpoint *v4_gateway,
    struct necp_client_endpoint *v6_gateway,
    uuid_t *override_euuid)
{
	struct rtentry * __single route = NULL;

	// Check parameters to find best interface
	bool validate_agents = false;
	u_int matching_if_index = 0;
	if (necp_find_matching_interface_index(parsed_parameters, &matching_if_index, &validate_agents)) {
		if (matching_if_index != 0) {
			parsed_parameters->required_interface_index = matching_if_index;
		}
		// Interface found or not needed, match policy.
		memset(result, 0, sizeof(*result));
		int error = necp_application_find_policy_match_internal(proc, client->parameters,
		    (u_int32_t)client->parameters_length,
		    result, flags, reason, matching_if_index,
		    NULL, NULL,
		    v4_gateway, v6_gateway,
		    &route, false, true,
		    override_euuid);
		if (error != 0) {
			// Matching failed; drop the route reference before bailing
			if (route != NULL) {
				rtfree(route);
			}
			return FALSE;
		}

		if (validate_agents) {
			bool requirement_failed = FALSE;
			// First pass: every required agent UUID must appear in the
			// result (ignoring agents flagged for removal)
			if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT) {
				for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
					if (uuid_is_null(parsed_parameters->required_netagents[i])) {
						break;
					}

					bool requirement_found = FALSE;
					for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
						if (uuid_is_null(result->netagents[j])) {
							break;
						}

						if (result->netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) {
							// A removed agent, ignore
							continue;
						}

						if (uuid_compare(parsed_parameters->required_netagents[i], result->netagents[j]) == 0) {
							requirement_found = TRUE;
							break;
						}
					}

					if (!requirement_found) {
						requirement_failed = TRUE;
						break;
					}
				}
			}

			// Second pass: every required agent domain/type must be
			// satisfied by some agent in the result
			if (!requirement_failed && parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
				for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
					if (strbuflen(parsed_parameters->required_netagent_types[i].netagent_domain, sizeof(parsed_parameters->required_netagent_types[i].netagent_domain)) == 0 &&
					    strbuflen(parsed_parameters->required_netagent_types[i].netagent_type, sizeof(parsed_parameters->required_netagent_types[i].netagent_type)) == 0) {
						break;
					}

					bool requirement_found = FALSE;
					for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
						if (uuid_is_null(result->netagents[j])) {
							break;
						}

						if (result->netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) {
							// A removed agent, ignore
							continue;
						}

						char policy_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
						char policy_agent_type[NETAGENT_TYPESIZE] = { 0 };

						if (netagent_get_agent_domain_and_type(result->netagents[j], policy_agent_domain, policy_agent_type)) {
							if (necp_agent_types_match(parsed_parameters->required_netagent_types[i].netagent_domain,
							    parsed_parameters->required_netagent_types[i].netagent_type,
							    policy_agent_domain, policy_agent_type)) {
								requirement_found = TRUE;
								break;
							}
						}
					}

					if (!requirement_found) {
						requirement_failed = TRUE;
						break;
					}
				}
			}

			if (requirement_failed) {
				// Agent requirement failed. Clear out the whole result, make everything fail.
				memset(result, 0, sizeof(*result));
				if (route != NULL) {
					rtfree(route);
				}
				return TRUE;
			}
		}

		// Reset current route
		NECP_CLIENT_ROUTE_LOCK(client);
		if (client->current_route != NULL) {
			rtfree(client->current_route);
		}
		client->current_route = route;
		NECP_CLIENT_ROUTE_UNLOCK(client);
	} else {
		// Interface not found. Clear out the whole result, make everything fail.
		memset(result, 0, sizeof(*result));
	}

	return TRUE;
}
4726
4727 static bool
necp_agent_is_removed_by_type(struct necp_aggregate_result * result,uuid_t agent_uuid)4728 necp_agent_is_removed_by_type(struct necp_aggregate_result *result,
4729 uuid_t agent_uuid)
4730 {
4731 for (int i = 0; i < NECP_MAX_REMOVE_NETAGENT_TYPES; i++) {
4732 if (result->remove_netagent_types[i].agent_domain[0] == 0 &&
4733 result->remove_netagent_types[i].agent_type[0] == 0) {
4734 // Empty type, hit the end of the list
4735 return false;
4736 }
4737
4738 char compare_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
4739 char compare_agent_type[NETAGENT_TYPESIZE] = { 0 };
4740 if (netagent_get_agent_domain_and_type(agent_uuid, compare_agent_domain, compare_agent_type)) {
4741 if (necp_agent_types_match(result->remove_netagent_types[i].agent_domain,
4742 result->remove_netagent_types[i].agent_type,
4743 compare_agent_domain, compare_agent_type)) {
4744 return true;
4745 }
4746 }
4747 }
4748 return false;
4749 }
4750
4751 #define NECP_PARSED_PARAMETERS_REQUIRED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF | \
4752 NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
4753 NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
4754 NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE)
4755
4756 static bool
necp_update_client_result(proc_t proc,struct necp_fd_data * client_fd,struct necp_client * client,struct _necp_flow_defunct_list * defunct_list)4757 necp_update_client_result(proc_t proc,
4758 struct necp_fd_data *client_fd,
4759 struct necp_client *client,
4760 struct _necp_flow_defunct_list *defunct_list)
4761 {
4762 struct necp_client_result_netagent netagent;
4763 struct necp_aggregate_result result;
4764 struct necp_client_parsed_parameters * __single parsed_parameters = NULL;
4765 u_int32_t flags = 0;
4766 u_int32_t reason = 0;
4767
4768 NECP_CLIENT_ASSERT_LOCKED(client);
4769
4770 parsed_parameters = kalloc_type(struct necp_client_parsed_parameters,
4771 Z_WAITOK | Z_ZERO | Z_NOFAIL);
4772
4773 // Nexus flows will be brought back if they are still valid
4774 necp_client_mark_all_nonsocket_flows_as_invalid(client);
4775
4776 int error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, parsed_parameters);
4777 if (error != 0) {
4778 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4779 return FALSE;
4780 }
4781 bool originally_scoped = (parsed_parameters->required_interface_index != IFSCOPE_NONE);
4782
4783 // Update saved IP protocol
4784 client->ip_protocol = parsed_parameters->ip_protocol;
4785
4786 // Calculate the policy result
4787 struct necp_client_endpoint v4_gateway = {};
4788 struct necp_client_endpoint v6_gateway = {};
4789 uuid_t override_euuid;
4790 uuid_clear(override_euuid);
4791 if (!necp_calculate_client_result(proc, client, parsed_parameters, &result, &flags, &reason, &v4_gateway, &v6_gateway, &override_euuid)) {
4792 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4793 return FALSE;
4794 }
4795
4796 if (necp_update_parsed_parameters(parsed_parameters, &result)) {
4797 // Changed the parameters based on result, try again (only once)
4798 if (!necp_calculate_client_result(proc, client, parsed_parameters, &result, &flags, &reason, &v4_gateway, &v6_gateway, &override_euuid)) {
4799 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4800 return FALSE;
4801 }
4802 }
4803
4804 if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) &&
4805 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4806 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) == 0) {
4807 // Listener should not apply required interface index if
4808 parsed_parameters->required_interface_index = IFSCOPE_NONE;
4809 }
4810
4811 // Save the last policy id on the client
4812 client->policy_id = result.policy_id;
4813 client->skip_policy_id = result.skip_policy_id;
4814 uuid_copy(client->override_euuid, override_euuid);
4815
4816 if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_MULTIPATH) ||
4817 (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) ||
4818 ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) &&
4819 result.routing_result != NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED)) {
4820 client->allow_multiple_flows = TRUE;
4821 } else {
4822 client->allow_multiple_flows = FALSE;
4823 }
4824
4825 // If the original request was scoped, and the policy result matches, make sure the result is scoped
4826 if ((result.routing_result == NECP_KERNEL_POLICY_RESULT_NONE ||
4827 result.routing_result == NECP_KERNEL_POLICY_RESULT_PASS) &&
4828 result.routed_interface_index != IFSCOPE_NONE &&
4829 parsed_parameters->required_interface_index == result.routed_interface_index) {
4830 result.routing_result = NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED;
4831 result.routing_result_parameter.scoped_interface_index = result.routed_interface_index;
4832 }
4833
4834 if (defunct_list != NULL &&
4835 result.routing_result == NECP_KERNEL_POLICY_RESULT_DROP) {
4836 // If we are forced to drop the client, defunct it if it has flows
4837 bool defunct_socket_flows = false;
4838 if (result.routing_result_parameter.drop_flags & NECP_KERNEL_POLICY_DROP_FLAG_DEFUNCT_ALL_FLOWS) {
4839 defunct_socket_flows = true;
4840 }
4841 necp_defunct_client_for_policy(client, defunct_list, defunct_socket_flows);
4842 }
4843
4844 // Recalculate flags
4845 if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) {
4846 // Listeners are valid as long as they aren't dropped
4847 if (result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP) {
4848 flags |= NECP_CLIENT_RESULT_FLAG_SATISFIED;
4849 }
4850 } else if (result.routed_interface_index != 0) {
4851 // Clients without flows determine viability based on having some routable interface
4852 flags |= NECP_CLIENT_RESULT_FLAG_SATISFIED;
4853 }
4854
4855 bool updated = FALSE;
4856 u_int8_t * __indexable cursor = client->result;
4857 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FLAGS, sizeof(flags), &flags, &updated, client->result, sizeof(client->result));
4858 if (reason != 0) {
4859 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_REASON, sizeof(reason), &reason, &updated, client->result, sizeof(client->result));
4860 }
4861 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_CLIENT_ID, sizeof(uuid_t), client->client_id, &updated,
4862 client->result, sizeof(client->result));
4863 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_POLICY_RESULT, sizeof(result.routing_result), &result.routing_result, &updated,
4864 client->result, sizeof(client->result));
4865 if (result.routing_result_parameter.tunnel_interface_index != 0) {
4866 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_POLICY_RESULT_PARAMETER,
4867 sizeof(result.routing_result_parameter), &result.routing_result_parameter, &updated,
4868 client->result, sizeof(client->result));
4869 }
4870 if (result.filter_control_unit != 0) {
4871 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FILTER_CONTROL_UNIT,
4872 sizeof(result.filter_control_unit), &result.filter_control_unit, &updated,
4873 client->result, sizeof(client->result));
4874 }
4875 if (result.flow_divert_aggregate_unit != 0) {
4876 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FLOW_DIVERT_AGGREGATE_UNIT,
4877 sizeof(result.flow_divert_aggregate_unit), &result.flow_divert_aggregate_unit, &updated,
4878 client->result, sizeof(client->result));
4879 }
4880 if (result.routed_interface_index != 0) {
4881 u_int routed_interface_index = result.routed_interface_index;
4882 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
4883 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_REQUIRED_FIELDS) &&
4884 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4885 parsed_parameters->required_interface_index != result.routed_interface_index) {
4886 routed_interface_index = parsed_parameters->required_interface_index;
4887 }
4888
4889 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_INDEX,
4890 sizeof(routed_interface_index), &routed_interface_index, &updated,
4891 client->result, sizeof(client->result));
4892 }
4893 if (client_fd && client_fd->flags & NECP_OPEN_FLAG_BACKGROUND) {
4894 u_int32_t effective_traffic_class = SO_TC_BK_SYS;
4895 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_EFFECTIVE_TRAFFIC_CLASS,
4896 sizeof(effective_traffic_class), &effective_traffic_class, &updated,
4897 client->result, sizeof(client->result));
4898 }
4899
4900 if (client_fd->background) {
4901 bool has_assigned_flow = FALSE;
4902 struct necp_client_flow_registration *flow_registration = NULL;
4903 struct necp_client_flow *search_flow = NULL;
4904 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
4905 LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
4906 if (search_flow->assigned) {
4907 has_assigned_flow = TRUE;
4908 break;
4909 }
4910 }
4911 }
4912
4913 if (has_assigned_flow) {
4914 u_int32_t background = client_fd->background;
4915 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_TRAFFIC_MGMT_BG,
4916 sizeof(background), &background, &updated,
4917 client->result, sizeof(client->result));
4918 }
4919 }
4920
4921 bool write_v4_gateway = !necp_client_endpoint_is_unspecified(&v4_gateway);
4922 bool write_v6_gateway = !necp_client_endpoint_is_unspecified(&v6_gateway);
4923
4924 NECP_CLIENT_ROUTE_LOCK(client);
4925 if (client->current_route != NULL) {
4926 const u_int32_t route_mtu = get_maxmtu(client->current_route);
4927 if (route_mtu != 0) {
4928 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_EFFECTIVE_MTU,
4929 sizeof(route_mtu), &route_mtu, &updated,
4930 client->result, sizeof(client->result));
4931 }
4932 bool has_remote_addr = parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR;
4933 if (has_remote_addr && client->current_route->rt_gateway != NULL) {
4934 if (client->current_route->rt_gateway->sa_family == AF_INET) {
4935 write_v6_gateway = false;
4936 } else if (client->current_route->rt_gateway->sa_family == AF_INET6) {
4937 write_v4_gateway = false;
4938 }
4939 }
4940
4941 if (client->current_route->rt_ifp != NULL) {
4942 int8_t if_lqm = client->current_route->rt_ifp->if_interface_state.lqm_state;
4943
4944 // Upgrade to enhancedLQM for cellular interfaces that support it
4945 if (client->current_route->rt_ifp->if_type == IFT_CELLULAR && client->current_route->rt_ifp->if_link_status != NULL) {
4946 struct if_cellular_status_v1 *cell_link_status = &client->current_route->rt_ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
4947
4948 if (cell_link_status->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
4949 if_lqm = ifnet_lqm_normalize(cell_link_status->link_quality_metric);
4950 }
4951 }
4952
4953 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_LINK_QUALITY,
4954 sizeof(if_lqm), &if_lqm, &updated,
4955 client->result, sizeof(client->result));
4956 }
4957 }
4958 NECP_CLIENT_ROUTE_UNLOCK(client);
4959
4960 if (write_v4_gateway) {
4961 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_GATEWAY,
4962 sizeof(struct necp_client_endpoint), (uint8_t *)(struct necp_client_endpoint * __bidi_indexable)&v4_gateway, &updated,
4963 client->result, sizeof(client->result));
4964 }
4965
4966 if (write_v6_gateway) {
4967 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_GATEWAY,
4968 sizeof(struct necp_client_endpoint), (uint8_t *)(struct necp_client_endpoint * __bidi_indexable)&v6_gateway, &updated,
4969 client->result, sizeof(client->result));
4970 }
4971
4972 for (int i = 0; i < NAT64_MAX_NUM_PREFIXES; i++) {
4973 if (result.nat64_prefixes[i].prefix_len != 0) {
4974 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NAT64,
4975 sizeof(result.nat64_prefixes), result.nat64_prefixes, &updated,
4976 client->result, sizeof(client->result));
4977 break;
4978 }
4979 }
4980
4981 if (result.mss_recommended != 0) {
4982 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_RECOMMENDED_MSS,
4983 sizeof(result.mss_recommended), &result.mss_recommended, &updated,
4984 client->result, sizeof(client->result));
4985 }
4986
4987 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
4988 if (uuid_is_null(result.netagents[i])) {
4989 break;
4990 }
4991 if (result.netagent_use_flags[i] & NECP_AGENT_USE_FLAG_REMOVE) {
4992 // A removed agent, ignore
4993 continue;
4994 }
4995
4996 if (necp_agent_is_removed_by_type(&result, result.netagents[i])) {
4997 // A removed agent, ignore
4998 continue;
4999 }
5000
5001 uuid_copy(netagent.netagent_uuid, result.netagents[i]);
5002 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
5003 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, TRUE, 0, 0)) {
5004 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
5005 client->result, sizeof(client->result));
5006 }
5007 }
5008
5009 ifnet_head_lock_shared();
5010 ifnet_t direct_interface = NULL;
5011 ifnet_t delegate_interface = NULL;
5012 ifnet_t original_scoped_interface = NULL;
5013
5014 if (result.routed_interface_index != IFSCOPE_NONE && result.routed_interface_index <= (u_int32_t)if_index) {
5015 direct_interface = ifindex2ifnet[result.routed_interface_index];
5016 } else if (parsed_parameters->required_interface_index != IFSCOPE_NONE &&
5017 parsed_parameters->required_interface_index <= (u_int32_t)if_index) {
5018 // If the request was scoped, but the route didn't match, still grab the agents
5019 direct_interface = ifindex2ifnet[parsed_parameters->required_interface_index];
5020 } else if (result.routed_interface_index == IFSCOPE_NONE &&
5021 result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED &&
5022 result.routing_result_parameter.scoped_interface_index != IFSCOPE_NONE) {
5023 direct_interface = ifindex2ifnet[result.routing_result_parameter.scoped_interface_index];
5024 }
5025 if (direct_interface != NULL) {
5026 delegate_interface = direct_interface->if_delegated.ifp;
5027 }
5028 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
5029 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_REQUIRED_FIELDS) &&
5030 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
5031 parsed_parameters->required_interface_index != result.routing_result_parameter.tunnel_interface_index &&
5032 parsed_parameters->required_interface_index <= (u_int32_t)if_index) {
5033 original_scoped_interface = ifindex2ifnet[parsed_parameters->required_interface_index];
5034 }
5035 // Add interfaces
5036 if (original_scoped_interface != NULL) {
5037 struct necp_client_result_interface interface_struct;
5038 interface_struct.index = original_scoped_interface->if_index;
5039 interface_struct.generation = ifnet_get_generation(original_scoped_interface);
5040 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
5041 client->result, sizeof(client->result));
5042 }
5043 if (direct_interface != NULL) {
5044 struct necp_client_result_interface interface_struct;
5045 interface_struct.index = direct_interface->if_index;
5046 interface_struct.generation = ifnet_get_generation(direct_interface);
5047 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
5048 client->result, sizeof(client->result));
5049
5050 // Set the delta time since interface up/down
5051 struct timeval updown_delta = {};
5052 if (ifnet_updown_delta(direct_interface, &updown_delta) == 0) {
5053 u_int32_t delta = updown_delta.tv_sec;
5054 bool ignore_updated = FALSE;
5055 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_TIME_DELTA,
5056 sizeof(delta), &delta, &ignore_updated,
5057 client->result, sizeof(client->result));
5058 }
5059 }
5060 if (delegate_interface != NULL) {
5061 struct necp_client_result_interface interface_struct;
5062 interface_struct.index = delegate_interface->if_index;
5063 interface_struct.generation = ifnet_get_generation(delegate_interface);
5064 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
5065 client->result, sizeof(client->result));
5066 }
5067
5068 // Update multipath/listener interface flows
5069 if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_MULTIPATH && !(parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE)) {
5070 // Add the interface option for the routed interface first
5071 if (direct_interface != NULL) {
5072 // Add nexus agent
5073 necp_client_add_agent_interface_options(client, parsed_parameters, direct_interface);
5074
5075 // Add interface option in case it is not a nexus
5076 necp_client_add_interface_option_if_needed(client, direct_interface->if_index,
5077 ifnet_get_generation(direct_interface), NULL, false);
5078 }
5079 if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INBOUND) {
5080 // For inbound multipath, add from the global list (like a listener)
5081 struct ifnet *multi_interface = NULL;
5082 TAILQ_FOREACH(multi_interface, &ifnet_head, if_link) {
5083 if ((multi_interface->if_flags & (IFF_UP | IFF_RUNNING)) &&
5084 necp_ifnet_matches_parameters(multi_interface, parsed_parameters, 0, NULL, true, false)) {
5085 // Add nexus agents for inbound multipath
5086 necp_client_add_agent_interface_options(client, parsed_parameters, multi_interface);
5087 }
5088 }
5089 } else {
5090 // Get other multipath interface options from ordered list
5091 struct ifnet *multi_interface = NULL;
5092 TAILQ_FOREACH(multi_interface, &ifnet_ordered_head, if_ordered_link) {
5093 if (multi_interface != direct_interface &&
5094 necp_ifnet_matches_parameters(multi_interface, parsed_parameters, 0, NULL, true, false)) {
5095 // Add nexus agents for multipath
5096 necp_client_add_agent_interface_options(client, parsed_parameters, multi_interface);
5097
5098 // Add multipath interface flows for kernel MPTCP
5099 necp_client_add_interface_option_if_needed(client, multi_interface->if_index,
5100 ifnet_get_generation(multi_interface), NULL, false);
5101 }
5102 }
5103 }
5104 } else if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) {
5105 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
5106 if (direct_interface != NULL) {
5107 // If scoped, only listen on that interface
5108 // Add nexus agents for listeners
5109 necp_client_add_agent_interface_options(client, parsed_parameters, direct_interface);
5110
5111 // Add interface option in case it is not a nexus
5112 necp_client_add_interface_option_if_needed(client, direct_interface->if_index,
5113 ifnet_get_generation(direct_interface), NULL, false);
5114 }
5115 } else {
5116 // Get listener interface options from global list
5117 struct ifnet *listen_interface = NULL;
5118 TAILQ_FOREACH(listen_interface, &ifnet_head, if_link) {
5119 if ((listen_interface->if_flags & (IFF_UP | IFF_RUNNING)) &&
5120 necp_ifnet_matches_parameters(listen_interface, parsed_parameters, 0, NULL, true, false)) {
5121 // Add nexus agents for listeners
5122 necp_client_add_agent_interface_options(client, parsed_parameters, listen_interface);
5123 }
5124 }
5125 }
5126 } else if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) {
5127 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED && originally_scoped) {
5128 if (direct_interface != NULL) {
5129 // Add browse option if it has an agent
5130 necp_client_add_browse_interface_options(client, parsed_parameters, direct_interface);
5131 }
5132 } else {
5133 // Get browse interface options from global list
5134 struct ifnet *browse_interface = NULL;
5135 TAILQ_FOREACH(browse_interface, &ifnet_head, if_link) {
5136 if (necp_ifnet_matches_parameters(browse_interface, parsed_parameters, 0, NULL, true, false)) {
5137 necp_client_add_browse_interface_options(client, parsed_parameters, browse_interface);
5138 }
5139 }
5140 }
5141 }
5142
5143 struct necp_client_result_estimated_throughput throughput = {
5144 .up = 0,
5145 .down = 0,
5146 };
5147
5148 // Add agents
5149 if (original_scoped_interface != NULL) {
5150 ifnet_lock_shared(original_scoped_interface);
5151 if (original_scoped_interface->if_agentids != NULL) {
5152 for (u_int32_t i = 0; i < original_scoped_interface->if_agentcount; i++) {
5153 if (uuid_is_null(original_scoped_interface->if_agentids[i])) {
5154 continue;
5155 }
5156 bool skip_agent = false;
5157 for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
5158 if (uuid_is_null(result.netagents[j])) {
5159 break;
5160 }
5161 if ((result.netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) &&
5162 uuid_compare(original_scoped_interface->if_agentids[i], result.netagents[j]) == 0) {
5163 skip_agent = true;
5164 break;
5165 }
5166 }
5167
5168 if (!skip_agent && necp_agent_is_removed_by_type(&result, original_scoped_interface->if_agentids[i])) {
5169 skip_agent = true;
5170 }
5171
5172 if (skip_agent) {
5173 continue;
5174 }
5175
5176 uuid_copy(netagent.netagent_uuid, original_scoped_interface->if_agentids[i]);
5177 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
5178 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, FALSE,
5179 original_scoped_interface->if_index, ifnet_get_generation(original_scoped_interface))) {
5180 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
5181 client->result, sizeof(client->result));
5182 }
5183 }
5184 }
5185 ifnet_lock_done(original_scoped_interface);
5186 }
5187 if (direct_interface != NULL) {
5188 ifnet_lock_shared(direct_interface);
5189 throughput.up = direct_interface->if_estimated_up_bucket;
5190 throughput.down = direct_interface->if_estimated_down_bucket;
5191 if (direct_interface->if_agentids != NULL) {
5192 for (u_int32_t i = 0; i < direct_interface->if_agentcount; i++) {
5193 if (uuid_is_null(direct_interface->if_agentids[i])) {
5194 continue;
5195 }
5196 bool skip_agent = false;
5197 for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
5198 if (uuid_is_null(result.netagents[j])) {
5199 break;
5200 }
5201 if ((result.netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) &&
5202 uuid_compare(direct_interface->if_agentids[i], result.netagents[j]) == 0) {
5203 skip_agent = true;
5204 break;
5205 }
5206 }
5207
5208 if (!skip_agent && necp_agent_is_removed_by_type(&result, direct_interface->if_agentids[i])) {
5209 skip_agent = true;
5210 }
5211
5212 if (skip_agent) {
5213 continue;
5214 }
5215 uuid_copy(netagent.netagent_uuid, direct_interface->if_agentids[i]);
5216 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
5217 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, TRUE,
5218 direct_interface->if_index, ifnet_get_generation(direct_interface))) {
5219 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
5220 client->result, sizeof(client->result));
5221 }
5222 }
5223 }
5224 ifnet_lock_done(direct_interface);
5225 }
5226 if (delegate_interface != NULL) {
5227 ifnet_lock_shared(delegate_interface);
5228 if (throughput.up == 0 && throughput.down == 0) {
5229 throughput.up = delegate_interface->if_estimated_up_bucket;
5230 throughput.down = delegate_interface->if_estimated_down_bucket;
5231 }
5232 if (delegate_interface->if_agentids != NULL) {
5233 for (u_int32_t i = 0; i < delegate_interface->if_agentcount; i++) {
5234 if (uuid_is_null(delegate_interface->if_agentids[i])) {
5235 continue;
5236 }
5237 bool skip_agent = false;
5238 for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
5239 if (uuid_is_null(result.netagents[j])) {
5240 break;
5241 }
5242 if ((result.netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) &&
5243 uuid_compare(delegate_interface->if_agentids[i], result.netagents[j]) == 0) {
5244 skip_agent = true;
5245 break;
5246 }
5247 }
5248
5249 if (!skip_agent && necp_agent_is_removed_by_type(&result, delegate_interface->if_agentids[i])) {
5250 skip_agent = true;
5251 }
5252
5253 if (skip_agent) {
5254 continue;
5255 }
5256 uuid_copy(netagent.netagent_uuid, delegate_interface->if_agentids[i]);
5257 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
5258 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, FALSE,
5259 delegate_interface->if_index, ifnet_get_generation(delegate_interface))) {
5260 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
5261 client->result, sizeof(client->result));
5262 }
5263 }
5264 }
5265 ifnet_lock_done(delegate_interface);
5266 }
5267 ifnet_head_done();
5268
5269 if (throughput.up != 0 || throughput.down != 0) {
5270 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_ESTIMATED_THROUGHPUT,
5271 sizeof(throughput), &throughput, &updated, client->result, sizeof(client->result));
5272 }
5273
5274 // Add interface options
5275 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
5276 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
5277 struct necp_client_interface_option *option = &client->interface_options[option_i];
5278 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_OPTION, sizeof(*option), option, &updated,
5279 client->result, sizeof(client->result));
5280 } else {
5281 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
5282 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_OPTION, sizeof(*option), option, &updated,
5283 client->result, sizeof(client->result));
5284 }
5285 }
5286
5287 size_t new_result_length = (cursor - client->result);
5288 if (new_result_length != client->result_length) {
5289 client->result_length = new_result_length;
5290 updated = TRUE;
5291 }
5292
5293 // Update flow viability/flags
5294 if (necp_client_update_flows(proc, client, defunct_list)) {
5295 updated = TRUE;
5296 }
5297
5298 if (updated) {
5299 client->result_read = FALSE;
5300 necp_client_update_observer_update(client);
5301 }
5302
5303 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
5304 return updated;
5305 }
5306
/*
 * Walk every client on client_fd and queue defunct work for each
 * nexus-backed flow: a necp_flow_defunct entry is allocated and added to
 * the caller-provided defunct_list so the caller can later send close
 * events to the nexus (after dropping the locks held here). Flow
 * registrations with queued work are marked defunct and their results
 * marked unread. If destroy_stats is true, any remaining SKYWALK stats
 * objects for each client are also released back to their arena.
 *
 * Caller must hold the fd lock (asserted); the per-client lock is taken
 * and dropped for each client in turn.
 *
 * Returns TRUE if at least one flow was newly marked defunct.
 */
static bool
necp_defunct_client_fd_locked_inner(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, bool destroy_stats)
{
	bool updated_result = FALSE;
	struct necp_client *client = NULL;

	NECP_FD_ASSERT_LOCKED(client_fd);

	RB_FOREACH(client, _necp_client_tree, &client_fd->clients) {
		struct necp_client_flow_registration *flow_registration = NULL;

		NECP_CLIENT_LOCK(client);

		// Prepare close events to be sent to the nexus to effectively remove the flows
		struct necp_client_flow *search_flow = NULL;
		RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
			LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
				// Only nexus flows with a valid agent need a close event
				if (search_flow->nexus &&
				    !uuid_is_null(search_flow->u.nexus_agent)) {
					// Sleeping alloc won't fail; copy only what's necessary
					struct necp_flow_defunct *flow_defunct = kalloc_type(struct necp_flow_defunct, Z_WAITOK | Z_ZERO);
					uuid_copy(flow_defunct->nexus_agent, search_flow->u.nexus_agent);
					// Some registrations are keyed by the client id rather than
					// their own registration id; pick whichever the flags say
					uuid_copy(flow_defunct->flow_id, ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
					    client->client_id :
					    flow_registration->registration_id));
					flow_defunct->proc_pid = client->proc_pid;
					flow_defunct->agent_handle = client->agent_handle;
					flow_defunct->flags = flow_registration->flags;
#if SKYWALK
					// Capture QUIC close parameters (SSR token) from the
					// userland stats region, if this flow has QUIC stats
					if (flow_registration->kstats_kaddr != NULL) {
						struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
						struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
						if (quicstats != NULL &&
						    quicstats->necp_quic_udp_stats.necp_udp_hdr.necp_stats_type == NECP_CLIENT_STATISTICS_TYPE_QUIC) {
							memcpy(flow_defunct->close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(flow_defunct->close_parameters.u.close_token));
							flow_defunct->has_close_parameters = true;
						}
					}
#endif /* SKYWALK */
					// Add to the list provided by caller
					LIST_INSERT_HEAD(defunct_list, flow_defunct, chain);

					flow_registration->defunct = true;
					flow_registration->flow_result_read = false;
					updated_result = true;
				}
			}
		}
		if (destroy_stats) {
#if SKYWALK
			// Free any remaining stats objects back to the arena where they came from;
			// do this independent of the above defunct check, as the client may have
			// been marked as defunct separately via necp_defunct_client_for_policy().
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
				necp_destroy_flow_stats(client_fd, flow_registration, NULL, FALSE);
			}
#endif /* SKYWALK */
		}
		NECP_CLIENT_UNLOCK(client);
	}

	return updated_result;
}
5370
/*
 * Defunct all clients on client_fd: redirect the fd's active SKYWALK stats
 * arena to zero-filled pages, queue defunct work for every nexus flow
 * (via necp_defunct_client_fd_locked_inner, which also frees remaining
 * stats objects), then tear down the arena's regions. Finally notify the
 * fd if any flow state changed.
 *
 * Caller must hold the fd lock (asserted).
 */
static inline void
necp_defunct_client_fd_locked(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, struct proc *proc)
{
#pragma unused(proc)
	bool updated_result = FALSE;

	NECP_FD_ASSERT_LOCKED(client_fd);
#if SKYWALK
	// redirect regions of currently-active stats arena to zero-filled pages
	struct necp_arena_info *nai = necp_fd_mredirect_stats_arena(client_fd, proc);
#endif /* SKYWALK */

	// destroy_stats == true: stats objects must be released before the
	// arena itself is defuncted below
	updated_result = necp_defunct_client_fd_locked_inner(client_fd, defunct_list, true);

#if SKYWALK
	// and tear down the currently-active arena's regions now that the redirection and freeing are done
	if (nai != NULL) {
		// Arena must be redirected but not yet defuncted at this point
		ASSERT((nai->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT)) == NAIF_REDIRECT);
		ASSERT(nai->nai_arena != NULL);
		ASSERT(nai->nai_mmap.ami_mapref != NULL);

		int err = skmem_arena_defunct(nai->nai_arena);
		VERIFY(err == 0);

		nai->nai_flags |= NAIF_DEFUNCT;
	}
#endif /* SKYWALK */

	if (updated_result) {
		// Wake up anyone polling/waiting on this fd
		necp_fd_notify(client_fd, true);
	}
}
5403
/*
 * Recompute policy results for every client on one fd, collecting any
 * newly-defunct flows into defunct_list for the caller to process after
 * the locks are dropped. Also checks whether this fd's process is on the
 * pending in-process flow-divert list and, if so, flags the fd so the
 * request is delivered. Notifies the fd when anything changed.
 *
 * Caller must hold both the fd-list lock and the fd lock (both asserted).
 */
static inline void
necp_update_client_fd_locked(struct necp_fd_data *client_fd,
    proc_t proc,
    struct _necp_flow_defunct_list *defunct_list)
{
	struct necp_client *client = NULL;
	bool updated_result = FALSE;
	NECP_FD_ASSERT_LOCKED(client_fd);
	RB_FOREACH(client, _necp_client_tree, &client_fd->clients) {
		NECP_CLIENT_LOCK(client);
		if (necp_update_client_result(proc, client_fd, client, defunct_list)) {
			updated_result = TRUE;
		}
		NECP_CLIENT_UNLOCK(client);
	}

	// Check if this PID needs to request in-process flow divert
	NECP_FD_LIST_ASSERT_LOCKED();
	for (int i = 0; i < NECP_MAX_FLOW_DIVERT_NEEDED_PIDS; i++) {
		if (necp_flow_divert_needed_pids[i] == 0) {
			// A zero entry terminates the pending-pid list
			break;
		}
		if (necp_flow_divert_needed_pids[i] == client_fd->proc_pid) {
			client_fd->request_in_process_flow_divert = true;
			break;
		}
	}

	if (updated_result || client_fd->request_in_process_flow_divert) {
		necp_fd_notify(client_fd, true);
	}
}
5436
#if SKYWALK
/*
 * Thread-call callout that sweeps all NECP fds and destroys any stats
 * arenas that are no longer in use (the FALSE argument requests the
 * non-forced/empty-only teardown path in necp_stats_arenas_destroy).
 * Takes the fd-list lock shared and each fd lock in turn.
 */
static void
necp_close_empty_arenas_callout(__unused thread_call_param_t dummy,
    __unused thread_call_param_t arg)
{
	struct necp_fd_data *client_fd = NULL;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);
		necp_stats_arenas_destroy(client_fd, FALSE);
		NECP_FD_UNLOCK(client_fd);
	}

	NECP_FD_LIST_UNLOCK();
}
#endif /* SKYWALK */
5455
/*
 * Thread-call callout that performs one coalesced "update all clients"
 * pass: resets the scheduling state (count and deadline) under the
 * update-all lock, then walks every NECP fd, updating all of its clients
 * and gathering newly-defunct flows. The defunct list is processed last,
 * after all locks are dropped.
 */
static void
necp_update_all_clients_callout(__unused thread_call_param_t dummy,
    __unused thread_call_param_t arg)
{
	struct necp_fd_data *client_fd = NULL;

	// Snapshot and clear the coalescing state so a concurrent
	// necp_update_all_clients() call will schedule a fresh pass
	NECP_UPDATE_ALL_CLIENTS_LOCK_EXCLUSIVE();
	uint32_t count = necp_update_all_clients_sched_cnt;
	necp_update_all_clients_sched_cnt = 0;
	necp_update_all_clients_sched_abstime = 0;
	NECP_UPDATE_ALL_CLIENTS_UNLOCK();

	if (necp_debug > 0) {
		NECPLOG(LOG_DEBUG,
		    "necp_update_all_clients_callout running for coalesced %u updates",
		    count);
	}

	struct _necp_flow_defunct_list defunct_list;
	LIST_INIT(&defunct_list);

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		// Hold a proc reference while evaluating this fd's clients;
		// skip fds whose owning process is already gone
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		// Update all clients on one fd
		NECP_FD_LOCK(client_fd);
		necp_update_client_fd_locked(client_fd, proc, &defunct_list);
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;
	}

	// Reset the necp_flow_divert_needed_pids list
	for (int i = 0; i < NECP_MAX_FLOW_DIVERT_NEEDED_PIDS; i++) {
		necp_flow_divert_needed_pids[i] = 0;
	}

	NECP_FD_LIST_UNLOCK();

	// Handle the case in which some clients became newly defunct
	necp_process_defunct_list(&defunct_list);
}
5504
/*
 * Request a (coalesced, timer-delayed) re-evaluation of results for all
 * NECP clients. See necp_update_all_clients_immediately_if_needed() for
 * the scheduling details; false selects the default (non-immediate)
 * timeout.
 */
void
necp_update_all_clients(void)
{
	necp_update_all_clients_immediately_if_needed(false);
}
5510
/*
 * Schedule the client-update thread call (necp_update_all_clients_callout)
 * with coalescing: if a run is already pending, a new one is not scheduled
 * unless should_update_immediately is set and the pending run is further
 * out than the immediate (10ms) deadline, in which case the pending call
 * is cancelled (waiting for it to finish if it is executing) and
 * rescheduled sooner. The coalesced-request counter is bumped on every
 * call for the debug log in the callout.
 */
void
necp_update_all_clients_immediately_if_needed(bool should_update_immediately)
{
	if (necp_client_update_tcall == NULL) {
		// Don't try to update clients if the module is not initialized
		return;
	}

	uint64_t deadline = 0;
	uint64_t leeway = 0;

	uint32_t timeout_to_use = necp_timeout_microseconds;
	uint32_t leeway_to_use = necp_timeout_leeway_microseconds;
	if (should_update_immediately) {
		timeout_to_use = 1000 * 10; // 10ms
		leeway_to_use = 1000 * 10; // 10ms;
	}

	// deadline is an absolute time; leeway stays an interval
	clock_interval_to_deadline(timeout_to_use, NSEC_PER_USEC, &deadline);
	clock_interval_to_absolutetime_interval(leeway_to_use, NSEC_PER_USEC, &leeway);

	NECP_UPDATE_ALL_CLIENTS_LOCK_EXCLUSIVE();
	bool need_cancel = false;
	bool need_schedule = true;
	uint64_t sched_abstime;

	clock_absolutetime_interval_to_deadline(deadline + leeway, &sched_abstime);

	/*
	 * Do not push the timer if it is already scheduled
	 */
	if (necp_update_all_clients_sched_abstime != 0) {
		need_schedule = false;

		if (should_update_immediately) {
			/*
			 * To update immediately we may have to cancel the current timer
			 * if it's scheduled too far out.
			 */
			if (necp_update_all_clients_sched_abstime > sched_abstime) {
				need_cancel = true;
				need_schedule = true;
			}
		}
	}

	/*
	 * Record the time of the deadline with leeway
	 */
	if (need_schedule) {
		necp_update_all_clients_sched_abstime = sched_abstime;
	}

	necp_update_all_clients_sched_cnt += 1;
	uint32_t count = necp_update_all_clients_sched_cnt;
	NECP_UPDATE_ALL_CLIENTS_UNLOCK();

	if (need_schedule) {
		/*
		 * Wait if the thread call is currently executing to make sure the
		 * next update will be delivered to all clients
		 */
		if (need_cancel) {
			(void) thread_call_cancel_wait(necp_client_update_tcall);
		}

		(void) thread_call_enter_delayed_with_leeway(necp_client_update_tcall, NULL,
		    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
	}
	if (necp_debug > 0) {
		NECPLOG(LOG_DEBUG,
		    "necp_update_all_clients immediate %s update %u",
		    should_update_immediately ? "true" : "false", count);
	}
}
5586
5587 bool
necp_set_client_as_background(proc_t proc,struct fileproc * fp,bool background)5588 necp_set_client_as_background(proc_t proc,
5589 struct fileproc *fp,
5590 bool background)
5591 {
5592 if (proc == PROC_NULL) {
5593 NECPLOG0(LOG_ERR, "NULL proc");
5594 return FALSE;
5595 }
5596
5597 if (fp == NULL) {
5598 NECPLOG0(LOG_ERR, "NULL fp");
5599 return FALSE;
5600 }
5601
5602 struct necp_fd_data *client_fd = (struct necp_fd_data *)fp_get_data(fp);
5603 if (client_fd == NULL) {
5604 NECPLOG0(LOG_ERR, "Could not find client structure for backgrounded client");
5605 return FALSE;
5606 }
5607
5608 if (client_fd->necp_fd_type != necp_fd_type_client) {
5609 // Not a client fd, ignore
5610 NECPLOG0(LOG_ERR, "Not a client fd, ignore");
5611 return FALSE;
5612 }
5613
5614 client_fd->background = background;
5615
5616 return TRUE;
5617 }
5618
// Memory-status callback hook for a NECP client fd. Currently a no-op
// placeholder: nothing is reaped for the process or client yet.
void
necp_fd_memstatus(proc_t proc, uint32_t status,
    struct necp_fd_data *client_fd)
{
#pragma unused(proc, status, client_fd)
	ASSERT(proc != PROC_NULL);
	ASSERT(client_fd != NULL);

	// Nothing to reap for the process or client for now,
	// but this is where we would trigger that in future.
}
5630
// Defunct all clients on this fd: collect their flows into a local list
// while holding the fd lock, then tear the flows down after dropping it.
void
necp_fd_defunct(proc_t proc, struct necp_fd_data *client_fd)
{
	struct _necp_flow_defunct_list defunct_list;

	ASSERT(proc != PROC_NULL);
	ASSERT(client_fd != NULL);

	if (client_fd->necp_fd_type != necp_fd_type_client) {
		// Not a client fd, ignore
		return;
	}

	// Our local temporary list
	LIST_INIT(&defunct_list);

	// Need to hold lock so ntstats defunct the same set of clients
	NECP_FD_LOCK(client_fd);
#if SKYWALK
	// Shut down statistics
	nstats_userland_stats_defunct_for_process(proc_getpid(proc));
#endif /* SKYWALK */
	necp_defunct_client_fd_locked(client_fd, &defunct_list, proc);
	NECP_FD_UNLOCK(client_fd);

	// Process the collected flows outside the fd lock.
	necp_process_defunct_list(&defunct_list);
}
5658
// Record that clients belonging to 'pid' need in-process flow divert; the
// pid is queued in a fixed-size slot array consumed by the next client
// update pass.
void
necp_client_request_in_process_flow_divert(pid_t pid)
{
	if (pid == 0) {
		return;
	}

	// Add to the list of pids that should get an update. These will
	// get picked up on the next thread call to update client paths.
	NECP_FD_LIST_LOCK_SHARED();
	// NOTE(review): the slot array is written while holding the fd-list
	// lock in *shared* mode, so two concurrent callers could claim the
	// same empty slot; and if all NECP_MAX_FLOW_DIVERT_NEEDED_PIDS slots
	// are occupied the request is silently dropped. Confirm both are
	// acceptable (best-effort) behaviors.
	for (int i = 0; i < NECP_MAX_FLOW_DIVERT_NEEDED_PIDS; i++) {
		if (necp_flow_divert_needed_pids[i] == 0) {
			necp_flow_divert_needed_pids[i] = pid;
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();
}
5677
// Remove every NECP_CLIENT_RESULT_NETAGENT TLV matching 'netagent_uuid'
// from the client's cached result buffer, compacting the buffer in place
// and zeroing the freed tail.
static void
necp_client_remove_agent_from_result(struct necp_client *client, uuid_t netagent_uuid)
{
	size_t offset = 0;

	u_int8_t *result_buffer = client->result;
	// Walk the TLV stream; stop once a full header no longer fits.
	while ((offset + sizeof(struct necp_tlv_header)) <= client->result_length) {
		u_int8_t type = necp_buffer_get_tlv_type(result_buffer, client->result_length, offset);
		u_int32_t length = necp_buffer_get_tlv_length(result_buffer, client->result_length, offset);

		size_t tlv_total_length = (sizeof(struct necp_tlv_header) + length);
		// Only consider well-formed netagent TLVs that lie entirely
		// within the valid result length.
		if (type == NECP_CLIENT_RESULT_NETAGENT &&
		    length == sizeof(struct necp_client_result_netagent) &&
		    (offset + tlv_total_length) <= client->result_length) {
			struct necp_client_result_netagent *value = ((struct necp_client_result_netagent *)(void *)
			    necp_buffer_get_tlv_value(result_buffer, client->result_length, offset, NULL));
			if (uuid_compare(value->netagent_uuid, netagent_uuid) == 0) {
				// Found a netagent to remove
				// Shift bytes down to remove the tlv, and adjust total length
				// Don't adjust the current offset
				memmove(result_buffer + offset,
				    result_buffer + offset + tlv_total_length,
				    client->result_length - (offset + tlv_total_length));
				client->result_length -= tlv_total_length;
				// Zero the now-unused tail so stale TLV bytes are not left behind.
				memset(result_buffer + client->result_length, 0, sizeof(client->result) - client->result_length);
				// Re-examine the same offset: the next TLV was shifted here.
				continue;
			}
		}

		offset += tlv_total_length;
	}
}
5710
// Force the client identified by 'client_id' to re-evaluate: record the
// agent that failed to trigger, optionally strip that agent from the
// cached result, mark the result unread, and notify the owning fd.
void
necp_force_update_client(uuid_t client_id, uuid_t remove_netagent_uuid, u_int32_t agent_generation)
{
	struct necp_fd_data *client_fd = NULL;

	NECP_FD_LIST_LOCK_SHARED();

	// Scan each fd for the client; client IDs are unique, so stop at
	// the first fd that owns it.
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		bool updated_result = FALSE;
		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			client->failed_trigger_agent.generation = agent_generation;
			uuid_copy(client->failed_trigger_agent.netagent_uuid, remove_netagent_uuid);
			if (!uuid_is_null(remove_netagent_uuid)) {
				necp_client_remove_agent_from_result(client, remove_netagent_uuid);
			}
			// Mark the result dirty so the client re-reads it.
			client->result_read = FALSE;
			// Found the client, break
			updated_result = TRUE;
			NECP_CLIENT_UNLOCK(client);
		}
		if (updated_result) {
			// Wake the fd while still holding its lock.
			necp_fd_notify(client_fd, true);
		}
		NECP_FD_UNLOCK(client_fd);
		if (updated_result) {
			// Found the client, break
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();
}
5745
#if SKYWALK
// Signal the userland stats layer that the flow for 'client_id' is done
// early (TIME_WAIT-style event), so its statistics can be finalized.
void
necp_client_early_close(uuid_t client_id)
{
	NECP_CLIENT_TREE_LOCK_SHARED();

	struct necp_client *client = necp_find_client_and_lock(client_id);
	if (client != NULL) {
		// The flow registration is looked up by the same UUID here.
		struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
		if (flow_registration != NULL) {
			// Found the right client and flow, mark the stats as over
			if (flow_registration->stats_handler_context != NULL) {
				ntstat_userland_stats_event(flow_registration->stats_handler_context,
				    NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT);
			}
		}
		NECP_CLIENT_UNLOCK(client);
	}

	NECP_CLIENT_TREE_UNLOCK();
}
#endif /* SKYWALK */
5768
5769 /// Interface matching
5770
// Parameter fields whose evaluation requires examining real interfaces;
// if none of these are set (and no prohibit flags), the ifnet scan in
// necp_find_matching_interface_index can be skipped entirely.
#define NECP_PARSED_PARAMETERS_INTERESTING_IFNET_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
    NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF | \
    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
    NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE | \
    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
    NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT | \
    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT | \
    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE | \
    NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE | \
    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE | \
    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)

// Fields that can cause a client to be scoped to a specific interface.
#define NECP_PARSED_PARAMETERS_SCOPED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE | \
    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE)

// Subset of the scoped fields that identify a concrete interface directly.
#define NECP_PARSED_PARAMETERS_SCOPED_IFNET_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE)

// Soft preference/avoidance fields: these rank candidate interfaces
// (via preferred_count) rather than strictly filtering them.
#define NECP_PARSED_PARAMETERS_PREFERRED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT | \
    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE | \
    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)
5798
5799 static bool
necp_ifnet_matches_type(struct ifnet * ifp,u_int8_t interface_type,bool check_delegates)5800 necp_ifnet_matches_type(struct ifnet *ifp, u_int8_t interface_type, bool check_delegates)
5801 {
5802 struct ifnet *check_ifp = ifp;
5803 while (check_ifp) {
5804 if (if_functional_type(check_ifp, TRUE) == interface_type) {
5805 return TRUE;
5806 }
5807 if (!check_delegates) {
5808 break;
5809 }
5810 check_ifp = check_ifp->if_delegated.ifp;
5811 }
5812 return FALSE;
5813 }
5814
5815 static bool
necp_ifnet_matches_name(struct ifnet * ifp,const char * __sized_by (IFXNAMSIZ)interface_name,bool check_delegates)5816 necp_ifnet_matches_name(struct ifnet *ifp, const char * __sized_by(IFXNAMSIZ)interface_name, bool check_delegates)
5817 {
5818 struct ifnet *check_ifp = ifp;
5819 while (check_ifp) {
5820 if (strlcmp(interface_name, check_ifp->if_xname, IFXNAMSIZ) == 0) {
5821 return TRUE;
5822 }
5823 if (!check_delegates) {
5824 break;
5825 }
5826 check_ifp = check_ifp->if_delegated.ifp;
5827 }
5828 return FALSE;
5829 }
5830
5831 static bool
necp_ifnet_matches_agent(struct ifnet * ifp,uuid_t * agent_uuid,bool check_delegates)5832 necp_ifnet_matches_agent(struct ifnet *ifp, uuid_t *agent_uuid, bool check_delegates)
5833 {
5834 struct ifnet *check_ifp = ifp;
5835
5836 while (check_ifp != NULL) {
5837 ifnet_lock_shared(check_ifp);
5838 if (check_ifp->if_agentids != NULL) {
5839 for (u_int32_t index = 0; index < check_ifp->if_agentcount; index++) {
5840 if (uuid_compare(check_ifp->if_agentids[index], *agent_uuid) == 0) {
5841 ifnet_lock_done(check_ifp);
5842 return TRUE;
5843 }
5844 }
5845 }
5846 ifnet_lock_done(check_ifp);
5847
5848 if (!check_delegates) {
5849 break;
5850 }
5851 check_ifp = check_ifp->if_delegated.ifp;
5852 }
5853 return FALSE;
5854 }
5855
// Return TRUE if the interface (or, when check_delegates is set, any
// interface along its delegate chain) carries a netagent whose
// domain/type pair satisfies necp_agent_types_match.
static bool
necp_ifnet_matches_agent_type(struct ifnet *ifp, const char * __sized_by(NETAGENT_DOMAINSIZE)agent_domain, const char * __sized_by(NETAGENT_TYPESIZE)agent_type, bool check_delegates)
{
	struct ifnet *check_ifp = ifp;

	while (check_ifp != NULL) {
		// The agent ID array is protected by the ifnet lock.
		ifnet_lock_shared(check_ifp);
		if (check_ifp->if_agentids != NULL) {
			for (u_int32_t index = 0; index < check_ifp->if_agentcount; index++) {
				if (uuid_is_null(check_ifp->if_agentids[index])) {
					continue;
				}

				char if_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
				char if_agent_type[NETAGENT_TYPESIZE] = { 0 };

				if (netagent_get_agent_domain_and_type(check_ifp->if_agentids[index], if_agent_domain, if_agent_type)) {
					if (necp_agent_types_match(agent_domain, agent_type, if_agent_domain, if_agent_type)) {
						// Release the lock before returning the match.
						ifnet_lock_done(check_ifp);
						return TRUE;
					}
				}
			}
		}
		ifnet_lock_done(check_ifp);

		if (!check_delegates) {
			break;
		}
		check_ifp = check_ifp->if_delegated.ifp;
	}
	return FALSE;
}
5889
// Return TRUE if 'sa' is a local address assigned to 'ifp'.
static bool
necp_ifnet_matches_local_address(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifaddr *ifa = NULL;
	bool matched_local_address = FALSE;

	// Transform sa into the ifaddr form
	// IPv6 Scope IDs are always embedded in the ifaddr list
	struct sockaddr_storage address;
	u_int ifscope = IFSCOPE_NONE;
	(void)sa_copy(sa, &address, &ifscope);
	// Clear the port before lookup. NOTE(review): SIN() is applied for
	// both families — relies on sin_port/sin6_port sharing an offset;
	// confirm this matches the sockaddr layout assumption elsewhere.
	SIN(&address)->sin_port = 0;
	if (address.ss_family == AF_INET6) {
		if (in6_embedded_scope ||
		    !IN6_IS_SCOPE_EMBED(&SIN6(&address)->sin6_addr)) {
			SIN6(&address)->sin6_scope_id = 0;
		}
	}

	// Scoped lookup: only addresses on this interface can match.
	ifa = ifa_ifwithaddr_scoped_locked(SA(&address), ifp->if_index);
	matched_local_address = (ifa != NULL);

	if (ifa) {
		ifaddr_release(ifa);
	}

	return matched_local_address;
}
5918
5919 static bool
necp_interface_type_should_match_unranked_interfaces(u_int8_t interface_type)5920 necp_interface_type_should_match_unranked_interfaces(u_int8_t interface_type)
5921 {
5922 switch (interface_type) {
5923 // These are the interface types we allow a client to request even if the matching
5924 // interface isn't currently eligible to be primary (has default route, dns, etc)
5925 case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
5926 case IFRTYPE_FUNCTIONAL_INTCOPROC:
5927 case IFRTYPE_FUNCTIONAL_COMPANIONLINK:
5928 return true;
5929 default:
5930 break;
5931 }
5932 return false;
5933 }
5934
// TRUE if the interface is currently linked on the ordered (ranked) interface list.
#define NECP_IFP_IS_ON_ORDERED_LIST(_ifp) ((_ifp)->if_ordered_link.tqe_next != NULL || (_ifp)->if_ordered_link.tqe_prev != NULL)
5936
// Secondary interface flag indicates that the interface is being
// used for multipath or a listener as an extra path
//
// Evaluate whether 'ifp' satisfies the client's parsed parameters.
// override_flags, when non-zero, replaces the client's own flags for the
// expensive/constrained/ultra-constrained checks. If preferred_count is
// non-NULL, it is set to the number of preferred/avoided criteria this
// interface satisfies (higher ranks better). When require_scoped_field is
// true, the interface only matches if at least one scoped field (local
// address, required type, required agent/agent-type, or a satisfied
// preferred field) actually applied to it.
static bool
necp_ifnet_matches_parameters(struct ifnet *ifp,
    struct necp_client_parsed_parameters *parsed_parameters,
    u_int32_t override_flags,
    u_int32_t *preferred_count,
    bool secondary_interface,
    bool require_scoped_field)
{
	bool matched_some_scoped_field = FALSE;

	if (preferred_count) {
		*preferred_count = 0;
	}

	// A required interface index must match exactly.
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) {
		if (parsed_parameters->required_interface_index != ifp->if_index) {
			return FALSE;
		}
	}
#if SKYWALK
	else {
		// Low-latency interfaces only match when requested by index.
		if (ifnet_is_low_latency(ifp)) {
			return FALSE;
		}
	}
#endif /* SKYWALK */

	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR) {
		if (!necp_ifnet_matches_local_address(ifp, SA(&parsed_parameters->local_addr.sa))) {
			return FALSE;
		}
		if (require_scoped_field) {
			matched_some_scoped_field = TRUE;
		}
	}

	// Expensive/constrained prohibitions: override_flags (if non-zero)
	// take precedence over the client's own flags.
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) {
		if (override_flags != 0) {
			if ((override_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE) &&
			    IFNET_IS_EXPENSIVE(ifp)) {
				return FALSE;
			}
			if ((override_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED) &&
			    IFNET_IS_CONSTRAINED(ifp)) {
				return FALSE;
			}
			// Ultra-constrained interfaces are opt-in.
			if (!(override_flags & NECP_CLIENT_PARAMETER_FLAG_ALLOW_ULTRA_CONSTRAINED) &&
			    IFNET_IS_ULTRA_CONSTRAINED(ifp)) {
				return FALSE;
			}
		} else {
			if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE) &&
			    IFNET_IS_EXPENSIVE(ifp)) {
				return FALSE;
			}
			if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED) &&
			    IFNET_IS_CONSTRAINED(ifp)) {
				return FALSE;
			}
			// Ultra-constrained interfaces are opt-in.
			if (!(parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_ALLOW_ULTRA_CONSTRAINED) &&
			    IFNET_IS_ULTRA_CONSTRAINED(ifp)) {
				return FALSE;
			}
		}
	}

	// Enforce the required interface type unless this is a secondary
	// interface and the client opted into primary-only type enforcement.
	if ((!secondary_interface || // Enforce interface type if this is the primary interface
	    !(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) || // or if there are no flags
	    !(parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_ONLY_PRIMARY_REQUIRES_TYPE)) && // or if the flags don't give an exception
	    (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) &&
	    !necp_ifnet_matches_type(ifp, parsed_parameters->required_interface_type, FALSE)) {
		return FALSE;
	}

	// Having a required iftype counts as a scoped field once reached
	// (interfaces failing the type were rejected above when enforced).
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) {
		if (require_scoped_field) {
			matched_some_scoped_field = TRUE;
		}
	}

	// Prohibited interface types (delegates considered): any hit rejects.
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE) {
		for (int i = 0; i < NECP_MAX_INTERFACE_PARAMETERS; i++) {
			if (parsed_parameters->prohibited_interface_types[i] == 0) {
				break;
			}

			if (necp_ifnet_matches_type(ifp, parsed_parameters->prohibited_interface_types[i], TRUE)) {
				return FALSE;
			}
		}
	}

	// Prohibited interface names (delegates considered): any hit rejects.
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF) {
		for (int i = 0; i < NECP_MAX_INTERFACE_PARAMETERS; i++) {
			if (strbuflen(parsed_parameters->prohibited_interfaces[i], sizeof(parsed_parameters->prohibited_interfaces[i])) == 0) {
				break;
			}

			if (necp_ifnet_matches_name(ifp, parsed_parameters->prohibited_interfaces[i], TRUE)) {
				return FALSE;
			}
		}
	}

	// Required agents must all be present on the interface itself
	// (delegates not considered).
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parsed_parameters->required_netagents[i])) {
				break;
			}

			if (!necp_ifnet_matches_agent(ifp, &parsed_parameters->required_netagents[i], FALSE)) {
				return FALSE;
			}

			if (require_scoped_field) {
				matched_some_scoped_field = TRUE;
			}
		}
	}

	// Prohibited agents (delegates considered): any hit rejects.
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parsed_parameters->prohibited_netagents[i])) {
				break;
			}

			if (necp_ifnet_matches_agent(ifp, &parsed_parameters->prohibited_netagents[i], TRUE)) {
				return FALSE;
			}
		}
	}

	// Required agent domain/type pairs must all be satisfied
	// (delegates not considered).
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (strbuflen(parsed_parameters->required_netagent_types[i].netagent_domain, sizeof(parsed_parameters->required_netagent_types[i].netagent_domain)) == 0 &&
			    strbuflen(parsed_parameters->required_netagent_types[i].netagent_type, sizeof(parsed_parameters->required_netagent_types[i].netagent_type)) == 0) {
				break;
			}

			if (!necp_ifnet_matches_agent_type(ifp, parsed_parameters->required_netagent_types[i].netagent_domain, parsed_parameters->required_netagent_types[i].netagent_type, FALSE)) {
				return FALSE;
			}

			if (require_scoped_field) {
				matched_some_scoped_field = TRUE;
			}
		}
	}

	// Prohibited agent domain/type pairs (delegates considered).
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (strbuflen(parsed_parameters->prohibited_netagent_types[i].netagent_domain, sizeof(parsed_parameters->prohibited_netagent_types[i].netagent_domain)) == 0 &&
			    strbuflen(parsed_parameters->prohibited_netagent_types[i].netagent_type, sizeof(parsed_parameters->prohibited_netagent_types[i].netagent_type)) == 0) {
				break;
			}

			if (necp_ifnet_matches_agent_type(ifp, parsed_parameters->prohibited_netagent_types[i].netagent_domain, parsed_parameters->prohibited_netagent_types[i].netagent_type, TRUE)) {
				return FALSE;
			}
		}
	}

	// Checked preferred properties
	// These never reject the interface; they only increase its rank.
	if (preferred_count) {
		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (uuid_is_null(parsed_parameters->preferred_netagents[i])) {
					break;
				}

				if (necp_ifnet_matches_agent(ifp, &parsed_parameters->preferred_netagents[i], TRUE)) {
					(*preferred_count)++;
					if (require_scoped_field) {
						matched_some_scoped_field = TRUE;
					}
				}
			}
		}

		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strbuflen(parsed_parameters->preferred_netagent_types[i].netagent_domain, sizeof(parsed_parameters->preferred_netagent_types[i].netagent_domain)) == 0 &&
				    strbuflen(parsed_parameters->preferred_netagent_types[i].netagent_type, sizeof(parsed_parameters->preferred_netagent_types[i].netagent_type)) == 0) {
					break;
				}

				if (necp_ifnet_matches_agent_type(ifp, parsed_parameters->preferred_netagent_types[i].netagent_domain, parsed_parameters->preferred_netagent_types[i].netagent_type, TRUE)) {
					(*preferred_count)++;
					if (require_scoped_field) {
						matched_some_scoped_field = TRUE;
					}
				}
			}
		}

		// Avoided agents count toward preference when ABSENT.
		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (uuid_is_null(parsed_parameters->avoided_netagents[i])) {
					break;
				}

				if (!necp_ifnet_matches_agent(ifp, &parsed_parameters->avoided_netagents[i], TRUE)) {
					(*preferred_count)++;
				}
			}
		}

		// Avoided agent types also count toward preference when ABSENT.
		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strbuflen(parsed_parameters->avoided_netagent_types[i].netagent_domain, sizeof(parsed_parameters->avoided_netagent_types[i].netagent_domain)) == 0 &&
				    strbuflen(parsed_parameters->avoided_netagent_types[i].netagent_type, sizeof(parsed_parameters->avoided_netagent_types[i].netagent_type)) == 0) {
					break;
				}

				if (!necp_ifnet_matches_agent_type(ifp, parsed_parameters->avoided_netagent_types[i].netagent_domain,
				    parsed_parameters->avoided_netagent_types[i].netagent_type, TRUE)) {
					(*preferred_count)++;
				}
			}
		}
	}

	if (require_scoped_field) {
		return matched_some_scoped_field;
	}

	return TRUE;
}
6167
// Select the best interface index for the client's parsed parameters.
// Returns TRUE if the client can proceed (possibly unscoped, with
// *return_ifindex == 0); FALSE if required fields could not be satisfied.
// Sets *validate_agents when agent requirements should instead be checked
// against policy. Scans the ordered (ranked) interface list first, then
// the remaining interfaces, preferring higher preferred_count matches.
static bool
necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_parameters,
    u_int *return_ifindex, bool *validate_agents)
{
	struct ifnet *ifp = NULL;
	u_int32_t best_preferred_count = 0;
	bool has_preferred_fields = FALSE;
	*return_ifindex = 0;

	// An explicitly required index short-circuits all matching.
	if (parsed_parameters->required_interface_index != 0) {
		*return_ifindex = parsed_parameters->required_interface_index;
		return TRUE;
	}

	// Check and save off flags
	u_int32_t flags = 0;
	bool has_prohibit_flags = FALSE;
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) {
		flags = parsed_parameters->flags;
		has_prohibit_flags = (parsed_parameters->flags &
		    (NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE |
		    NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED));
	}

	// Nothing interface-related to evaluate: no scoping needed.
	if (!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_INTERESTING_IFNET_FIELDS) &&
	    !has_prohibit_flags) {
		return TRUE;
	}

	has_preferred_fields = (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_PREFERRED_FIELDS);

	// We have interesting parameters to parse and find a matching interface
	ifnet_head_lock_shared();

	if (!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_FIELDS) &&
	    !has_preferred_fields) {
		// We do have fields to match, but they are only prohibitory
		// If the first interface in the list matches, or there are no ordered interfaces, we don't need to scope
		ifp = TAILQ_FIRST(&ifnet_ordered_head);
		if (ifp == NULL || necp_ifnet_matches_parameters(ifp, parsed_parameters, 0, NULL, false, false)) {
			// Don't set return_ifindex, so the client doesn't need to scope
			ifnet_head_done();
			return TRUE;
		}

		// A v6 remote address carrying a scope ID already pins the
		// route; accept it unscoped if that interface passes.
		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR &&
		    parsed_parameters->remote_addr.sin6.sin6_family == AF_INET6 &&
		    parsed_parameters->remote_addr.sin6.sin6_scope_id != IFSCOPE_NONE &&
		    parsed_parameters->remote_addr.sin6.sin6_scope_id <= (u_int32_t)if_index) {
			ifp = ifindex2ifnet[parsed_parameters->remote_addr.sin6.sin6_scope_id];
			if (ifp != NULL && necp_ifnet_matches_parameters(ifp, parsed_parameters, 0, NULL, false, false)) {
				// Don't set return_ifindex, so the client doesn't need to scope since the v6 scope ID will
				// already route to the correct interface
				ifnet_head_done();
				return TRUE;
			}
		}
	}

	// First check the ordered interface list
	TAILQ_FOREACH(ifp, &ifnet_ordered_head, if_ordered_link) {
		u_int32_t preferred_count = 0;
		if (necp_ifnet_matches_parameters(ifp, parsed_parameters, flags, &preferred_count, false, false)) {
			if (preferred_count > best_preferred_count ||
			    *return_ifindex == 0) {
				// Everything matched, and is most preferred. Return this interface.
				*return_ifindex = ifp->if_index;
				best_preferred_count = preferred_count;

				// Without preferred fields the first match wins.
				if (!has_preferred_fields) {
					break;
				}
			}
		}

		if (has_prohibit_flags &&
		    ifp == TAILQ_FIRST(&ifnet_ordered_head)) {
			// This was the first interface. From here on, if the
			// client prohibited either expensive or constrained,
			// don't allow either as a secondary interface option.
			flags |= (NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE |
			    NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED);
		}
	}

	bool is_listener = ((parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
	    (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER));

	// Then check the remaining interfaces
	if ((parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_FIELDS) &&
	    ((!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE)) ||
	    necp_interface_type_should_match_unranked_interfaces(parsed_parameters->required_interface_type) ||
	    (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR) ||
	    is_listener) &&
	    (*return_ifindex == 0 || has_preferred_fields)) {
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			u_int32_t preferred_count = 0;
			if (NECP_IFP_IS_ON_ORDERED_LIST(ifp)) {
				// This interface was in the ordered list, skip
				continue;
			}
			// require_scoped_field is true here: unranked
			// interfaces must positively match a scoped field.
			if (necp_ifnet_matches_parameters(ifp, parsed_parameters, flags, &preferred_count, false, true)) {
				if (preferred_count > best_preferred_count ||
				    *return_ifindex == 0) {
					// Everything matched, and is most preferred. Return this interface.
					*return_ifindex = ifp->if_index;
					best_preferred_count = preferred_count;

					if (!has_preferred_fields) {
						break;
					}
				}
			}
		}
	}

	ifnet_head_done();

	if (has_preferred_fields && best_preferred_count == 0 &&
	    ((parsed_parameters->valid_fields & (NECP_PARSED_PARAMETERS_SCOPED_FIELDS | NECP_PARSED_PARAMETERS_PREFERRED_FIELDS)) ==
	    (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_PREFERRED_FIELDS))) {
		// If only has preferred ifnet fields, and nothing was found, clear the interface index and return TRUE
		*return_ifindex = 0;
		return TRUE;
	}

	if (*return_ifindex == 0 &&
	    !(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_IFNET_FIELDS)) {
		// Has required fields, but not including specific interface fields. Pass for now, and check
		// to see if agents are satisfied by policy.
		*validate_agents = TRUE;
		return TRUE;
	}

	return *return_ifindex != 0;
}
6304
// Populate domain_info with the NECP domain/tracker attributes attached to
// the socket and its inpcb, for reporting to ntstat. All attribute-string
// reads happen under the socket attribute lock.
void
necp_copy_inp_domain_info(struct inpcb *inp, struct socket *so, nstat_domain_info *domain_info)
{
	if (inp == NULL || so == NULL || domain_info == NULL) {
		return;
	}

	necp_lock_socket_attributes();

	// A "silent" socket exports no further domain details.
	domain_info->is_silent = !!(so->so_flags1 & SOF1_DOMAIN_INFO_SILENT);
	if (!domain_info->is_silent) {
		domain_info->is_tracker = !!(so->so_flags1 & SOF1_KNOWN_TRACKER);
		domain_info->is_non_app_initiated = !!(so->so_flags1 & SOF1_TRACKER_NON_APP_INITIATED);
		// Known trackers prefer the tracker domain; otherwise fall
		// back to the plain domain attribute.
		if (domain_info->is_tracker &&
		    inp->inp_necp_attributes.inp_tracker_domain != NULL) {
			strlcpy(domain_info->domain_name, inp->inp_necp_attributes.inp_tracker_domain,
			    sizeof(domain_info->domain_name));
		} else if (inp->inp_necp_attributes.inp_domain != NULL) {
			strlcpy(domain_info->domain_name, inp->inp_necp_attributes.inp_domain,
			    sizeof(domain_info->domain_name));
		}
		if (inp->inp_necp_attributes.inp_domain_owner != NULL) {
			strlcpy(domain_info->domain_owner, inp->inp_necp_attributes.inp_domain_owner,
			    sizeof(domain_info->domain_owner));
		}
		if (inp->inp_necp_attributes.inp_domain_context != NULL) {
			strlcpy(domain_info->domain_tracker_ctxt, inp->inp_necp_attributes.inp_domain_context,
			    sizeof(domain_info->domain_tracker_ctxt));
		}
	}

	necp_unlock_socket_attributes();
}
6338
6339 void
necp_with_inp_domain_name(struct socket * so,void * ctx,void (* with_func)(char * domain_name __null_terminated,void * ctx))6340 necp_with_inp_domain_name(struct socket *so, void *ctx, void (*with_func)(char *domain_name __null_terminated, void *ctx))
6341 {
6342 struct inpcb *inp = NULL;
6343
6344 if (so == NULL || with_func == NULL) {
6345 return;
6346 }
6347
6348 inp = (struct inpcb *)so->so_pcb;
6349 if (inp == NULL) {
6350 return;
6351 }
6352
6353 necp_lock_socket_attributes();
6354 with_func(inp->inp_necp_attributes.inp_domain, ctx);
6355 necp_unlock_socket_attributes();
6356 }
6357
6358 static size_t
necp_find_domain_info_common(struct necp_client * client,u_int8_t * __sized_by (parameters_size)parameters,size_t parameters_size,struct necp_client_flow_registration * flow_registration,nstat_domain_info * domain_info)6359 necp_find_domain_info_common(struct necp_client *client,
6360 u_int8_t * __sized_by(parameters_size)parameters,
6361 size_t parameters_size,
6362 struct necp_client_flow_registration *flow_registration, /* For logging purposes only */
6363 nstat_domain_info *domain_info)
6364 {
6365 if (client == NULL) {
6366 return 0;
6367 }
6368 if (domain_info == NULL) {
6369 return sizeof(nstat_domain_info);
6370 }
6371
6372 size_t offset = 0;
6373 u_int32_t flags = 0;
6374 u_int8_t *tracker_domain = NULL;
6375 u_int8_t *domain = NULL;
6376 size_t tracker_domain_length = 0;
6377 size_t domain_length = 0;
6378
6379 NECP_CLIENT_FLOW_LOG(client, flow_registration, "Collecting stats");
6380
6381 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
6382 u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
6383 u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);
6384
6385 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
6386 // If the length is larger than what can fit in the remaining parameters size, bail
6387 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
6388 break;
6389 }
6390
6391 if (length > 0) {
6392 u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
6393 if (value != NULL) {
6394 switch (type) {
6395 case NECP_CLIENT_PARAMETER_FLAGS: {
6396 if (length >= sizeof(u_int32_t)) {
6397 memcpy(&flags, value, sizeof(u_int32_t));
6398 }
6399
6400 domain_info->is_tracker =
6401 !!(flags & NECP_CLIENT_PARAMETER_FLAG_KNOWN_TRACKER);
6402 domain_info->is_non_app_initiated =
6403 !!(flags & NECP_CLIENT_PARAMETER_FLAG_NON_APP_INITIATED);
6404 domain_info->is_silent =
6405 !!(flags & NECP_CLIENT_PARAMETER_FLAG_SILENT);
6406 break;
6407 }
6408 case NECP_CLIENT_PARAMETER_TRACKER_DOMAIN: {
6409 tracker_domain_length = length;
6410 tracker_domain = value;
6411 break;
6412 }
6413 case NECP_CLIENT_PARAMETER_DOMAIN: {
6414 domain_length = length;
6415 domain = value;
6416 break;
6417 }
6418 case NECP_CLIENT_PARAMETER_DOMAIN_OWNER: {
6419 size_t length_to_copy = MIN(length, sizeof(domain_info->domain_owner));
6420 strbufcpy(domain_info->domain_owner, sizeof(domain_info->domain_owner), (const char *)value, length_to_copy);
6421 break;
6422 }
6423 case NECP_CLIENT_PARAMETER_DOMAIN_CONTEXT: {
6424 size_t length_to_copy = MIN(length, sizeof(domain_info->domain_tracker_ctxt));
6425 strbufcpy(domain_info->domain_tracker_ctxt, sizeof(domain_info->domain_tracker_ctxt), (const char *)value, length_to_copy);
6426 break;
6427 }
6428 case NECP_CLIENT_PARAMETER_ATTRIBUTED_BUNDLE_IDENTIFIER: {
6429 size_t length_to_copy = MIN(length, sizeof(domain_info->domain_attributed_bundle_id));
6430 strbufcpy(domain_info->domain_attributed_bundle_id, sizeof(domain_info->domain_attributed_bundle_id), (const char *)value, length_to_copy);
6431 break;
6432 }
6433 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
6434 if (length >= sizeof(struct necp_policy_condition_addr)) {
6435 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
6436 if (necp_client_address_is_valid(&address_struct->address.sa)) {
6437 domain_info->remote.v6 = address_struct->address.sin6;
6438 }
6439 }
6440 break;
6441 }
6442 default: {
6443 break;
6444 }
6445 }
6446 }
6447 }
6448 offset += sizeof(struct necp_tlv_header) + length;
6449 }
6450
6451 if (domain_info->is_silent) {
6452 memset(domain_info, 0, sizeof(*domain_info));
6453 domain_info->is_silent = true;
6454 } else if (domain_info->is_tracker && tracker_domain != NULL && tracker_domain_length > 0) {
6455 size_t length_to_copy = MIN(tracker_domain_length, sizeof(domain_info->domain_name));
6456 strbufcpy(domain_info->domain_name, sizeof(domain_info->domain_name), (const char *)tracker_domain, length_to_copy);
6457 } else if (domain != NULL && domain_length > 0) {
6458 size_t length_to_copy = MIN(domain_length, sizeof(domain_info->domain_name));
6459 strbufcpy(domain_info->domain_name, sizeof(domain_info->domain_name), (const char *)domain, length_to_copy);
6460 }
6461
6462 NECP_CLIENT_FLOW_LOG(client, flow_registration,
6463 "Collected stats - domain <%s> owner <%s> ctxt <%s> bundle id <%s> "
6464 "is_tracker %d is_non_app_initiated %d is_silent %d",
6465 domain_info->domain_name,
6466 domain_info->domain_owner,
6467 domain_info->domain_tracker_ctxt,
6468 domain_info->domain_attributed_bundle_id,
6469 domain_info->is_tracker,
6470 domain_info->is_non_app_initiated,
6471 domain_info->is_silent);
6472
6473 return sizeof(nstat_domain_info);
6474 }
6475
6476 static size_t
necp_find_conn_extension_info(nstat_provider_context ctx,int requested_extension,void * __sized_by (buf_size)buf,size_t buf_size)6477 necp_find_conn_extension_info(nstat_provider_context ctx,
6478 int requested_extension, /* The extension to be returned */
6479 void * __sized_by(buf_size)buf, /* If not NULL, the address for extensions to be returned in */
6480 size_t buf_size) /* The size of the buffer space, typically matching the return from a previous call with a NULL buf pointer */
6481 {
6482 // Note, the caller has guaranteed that any buffer has been zeroed, there is no need to clear it again
6483
6484 if (ctx == NULL) {
6485 return 0;
6486 }
6487 struct necp_client *client = (struct necp_client *)ctx;
6488 switch (requested_extension) {
6489 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
6490 // This is for completeness. The intent is that domain information can be extracted at user level from the TLV parameters
6491 if (buf == NULL) {
6492 return sizeof(nstat_domain_info);
6493 }
6494 if (buf_size < sizeof(nstat_domain_info)) {
6495 return 0;
6496 }
6497 return necp_find_domain_info_common(client, client->parameters, client->parameters_length, NULL, (nstat_domain_info *)buf);
6498
6499 case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV: {
6500 size_t parameters_length = client->parameters_length;
6501 if (buf == NULL) {
6502 return parameters_length;
6503 }
6504 if (buf_size < parameters_length) {
6505 return 0;
6506 }
6507 memcpy(buf, client->parameters, parameters_length);
6508 return parameters_length;
6509 }
6510 case NSTAT_EXTENDED_UPDATE_TYPE_ORIGINAL_NECP_TLV:
6511 if (buf == NULL) {
6512 return (client->original_parameters_source != NULL) ? client->original_parameters_source->parameters_length : 0;
6513 }
6514 if ((client->original_parameters_source == NULL) || (buf_size < client->original_parameters_source->parameters_length)) {
6515 return 0;
6516 }
6517 memcpy(buf, client->original_parameters_source->parameters, client->original_parameters_source->parameters_length);
6518 return client->original_parameters_source->parameters_length;
6519
6520 case NSTAT_EXTENDED_UPDATE_TYPE_ORIGINAL_DOMAIN:
6521 if (buf == NULL) {
6522 return (client->original_parameters_source != NULL) ? sizeof(nstat_domain_info) : 0;
6523 }
6524 if ((buf_size < sizeof(nstat_domain_info)) || (client->original_parameters_source == NULL)) {
6525 return 0;
6526 }
6527 return necp_find_domain_info_common(client, client->original_parameters_source->parameters, client->original_parameters_source->parameters_length,
6528 NULL, (nstat_domain_info *)buf);
6529
6530 default:
6531 return 0;
6532 }
6533 }
6534
6535 #if SKYWALK
6536
6537 static struct traffic_stats*
media_stats_embedded_ts(struct media_stats * media_stats,uint32_t ifflags)6538 media_stats_embedded_ts(struct media_stats *media_stats, uint32_t ifflags)
6539 {
6540 struct traffic_stats *ts = NULL;
6541 if (media_stats) {
6542 if (ifflags & NSTAT_IFNET_IS_WIFI) {
6543 if (ifflags & NSTAT_IFNET_IS_WIFI_INFRA) {
6544 ts = &media_stats->ms_wifi_infra;
6545 } else {
6546 ts = &media_stats->ms_wifi_non_infra;
6547 }
6548 } else if (ifflags & NSTAT_IFNET_IS_CELLULAR) {
6549 ts = &media_stats->ms_cellular;
6550 } else if (ifflags & NSTAT_IFNET_IS_WIRED) {
6551 ts = &media_stats->ms_wired;
6552 } else if (ifflags & NSTAT_IFNET_IS_COMPANIONLINK_BT) {
6553 ts = &media_stats->ms_bluetooth;
6554 } else if (!(ifflags & NSTAT_IFNET_IS_LOOPBACK)) {
6555 ts = &media_stats->ms_alternate;
6556 }
6557 }
6558 return ts;
6559 }
6560
6561 static size_t
necp_find_extension_info(userland_stats_provider_context * ctx,int requested_extension,void * __sized_by (buf_size)buf,size_t buf_size)6562 necp_find_extension_info(userland_stats_provider_context *ctx,
6563 int requested_extension, /* The extension to be returned */
6564 void * __sized_by(buf_size)buf, /* If not NULL, the address for extensions to be returned in */
6565 size_t buf_size) /* The size of the buffer space, typically matching the return from a previous call with a NULL buf pointer */
6566 {
6567 if (ctx == NULL) {
6568 return 0;
6569 }
6570 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
6571 struct necp_client *client = flow_registration->client;
6572
6573 switch (requested_extension) {
6574 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
6575 if (buf == NULL) {
6576 return sizeof(nstat_domain_info);
6577 }
6578 if (buf_size < sizeof(nstat_domain_info)) {
6579 return 0;
6580 }
6581 return necp_find_domain_info_common(client, client->parameters, client->parameters_length, flow_registration, (nstat_domain_info *)buf);
6582
6583 case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV:
6584 if (buf == NULL) {
6585 return client->parameters_length;
6586 }
6587 if (buf_size < client->parameters_length) {
6588 return 0;
6589 }
6590 memcpy(buf, client->parameters, client->parameters_length);
6591 return client->parameters_length;
6592
6593 case NSTAT_EXTENDED_UPDATE_TYPE_FUUID:
6594 if (buf == NULL) {
6595 return sizeof(uuid_t);
6596 }
6597 if (buf_size < sizeof(uuid_t)) {
6598 return 0;
6599 }
6600 uuid_copy(buf, flow_registration->registration_id);
6601 return sizeof(uuid_t);
6602
6603 case NSTAT_EXTENDED_UPDATE_TYPE_BLUETOOTH_COUNTS: {
6604 // Retrieve details from the last time the assigned flows were updated
6605 u_int32_t route_ifindex = IFSCOPE_NONE;
6606 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6607 u_int64_t combined_interface_details = 0;
6608
6609 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
6610 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
6611 bool is_companionlink_bluetooth = (route_ifflags & NSTAT_IFNET_IS_COMPANIONLINK_BT);
6612
6613 if (buf == NULL) {
6614 return (is_companionlink_bluetooth ||
6615 (route_ifflags & NSTAT_IFNET_PEEREGRESSINTERFACE_IS_CELLULAR)) ? sizeof(nstat_interface_counts):0;
6616 }
6617 if (buf_size < sizeof(nstat_interface_counts)) {
6618 return 0;
6619 }
6620
6621 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
6622 if ((sf != NULL) &&
6623 (is_companionlink_bluetooth || (route_ifflags & NSTAT_IFNET_PEEREGRESSINTERFACE_IS_CELLULAR))) {
6624 nstat_interface_counts *bt_counts = (nstat_interface_counts *)buf;
6625 bt_counts->nstat_rxbytes = sf->sf_ibytes;
6626 bt_counts->nstat_txbytes = sf->sf_obytes;
6627 return sizeof(nstat_interface_counts);
6628 } else {
6629 return 0;
6630 }
6631 }
6632
6633 default:
6634 return 0;
6635 }
6636 }
6637
/*
 * Walk the client's TLV-encoded parameter blob and extract the metadata
 * fields needed to populate a netstat descriptor: remote address, effective
 * pid, uid, effective UUID, persona id, traffic class and fallback mode.
 * Output parameters are only written when the corresponding TLV is present
 * (and, for uid/persona_id, when those pointers are non-NULL), so callers
 * should pre-initialize them with defaults.
 */
static void
necp_find_netstat_data(struct necp_client *client,
    union necp_sockaddr_union *remote,
    pid_t *effective_pid,
    uid_t *uid,
    uuid_t euuid,
    uid_t *persona_id,
    u_int32_t *traffic_class,
    u_int8_t *fallback_mode)
{
	// Tracks whether euuid was set from a full APPLICATION_ID TLV; only then
	// is the responsible-process fallback below skipped.
	bool have_set_euuid = false;
	size_t offset = 0;
	u_int8_t *parameters;
	u_int32_t parameters_size;

	parameters = client->parameters;
	parameters_size = (u_int32_t)client->parameters_length;

	// Iterate TLVs: each record is a necp_tlv_header followed by `length` bytes.
	while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);

		if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
			// If the length is larger than what can fit in the remaining parameters size, bail
			NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
			break;
		}

		if (length > 0) {
			u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
			if (value != NULL) {
				// Each case verifies `length` covers the expected payload
				// before copying; undersized TLVs are silently ignored.
				switch (type) {
				case NECP_CLIENT_PARAMETER_APPLICATION: {
					if (length >= sizeof(uuid_t)) {
						// NOTE: does not set have_set_euuid, so a later
						// APPLICATION_ID TLV or the responsible-process
						// fallback may still overwrite euuid.
						uuid_copy(euuid, value);
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_PID: {
					if (length >= sizeof(pid_t)) {
						memcpy(effective_pid, value, sizeof(pid_t));
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
					if (length >= sizeof(u_int32_t)) {
						memcpy(traffic_class, value, sizeof(u_int32_t));
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_FALLBACK_MODE: {
					if (length >= sizeof(u_int8_t)) {
						memcpy(fallback_mode, value, sizeof(u_int8_t));
					}
					break;
				}
				// It is an implementation quirk that the remote address can be found in the necp parameters
				// while the local address must be retrieved from the flowswitch
				case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						if (necp_client_address_is_valid(&address_struct->address.sa)) {
							remote->sin6 = address_struct->address.sin6;
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_APPLICATION_ID: {
					if (length >= sizeof(necp_application_id_t) && uid && persona_id) {
						necp_application_id_t *application_id = (necp_application_id_t *)(void *)value;
						memcpy(uid, &application_id->uid, sizeof(uid_t));
						uuid_copy(euuid, application_id->effective_uuid);
						memcpy(persona_id, &application_id->persona_id, sizeof(uid_t));
						have_set_euuid = true;
					}
					break;
				}
				default: {
					break;
				}
				}
			}
		}
		offset += sizeof(struct necp_tlv_header) + length;
	}

	// Fallback: when no APPLICATION_ID TLV supplied the effective UUID, use the
	// responsible process's UUID (if any) for the client's owning pid.
	if (!have_set_euuid) {
		proc_t proc = proc_find(client->proc_pid);
		if (proc != PROC_NULL) {
			uuid_t responsible_uuid = { 0 };
			proc_getresponsibleuuid(proc, responsible_uuid, sizeof(responsible_uuid));
			proc_rele(proc);
			if (!uuid_is_null(responsible_uuid)) {
				uuid_copy(euuid, responsible_uuid);
			}
		}
	}
}
6736
6737 static u_int64_t
necp_find_netstat_initial_properties(struct necp_client * client)6738 necp_find_netstat_initial_properties(struct necp_client *client)
6739 {
6740 size_t offset = 0;
6741 u_int64_t retval = 0;
6742 u_int8_t *parameters;
6743 u_int32_t parameters_size;
6744
6745 parameters = client->parameters;
6746 parameters_size = (u_int32_t)client->parameters_length;
6747
6748 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
6749 u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
6750 u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);
6751
6752 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
6753 // If the length is larger than what can fit in the remaining parameters size, bail
6754 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
6755 break;
6756 }
6757
6758 if (type == NECP_CLIENT_PARAMETER_FLAGS) {
6759 u_int32_t policy_condition_client_flags;
6760 u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
6761 if ((value != NULL) && (length >= sizeof(policy_condition_client_flags))) {
6762 memcpy(&policy_condition_client_flags, value, sizeof(policy_condition_client_flags));
6763 if (policy_condition_client_flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) {
6764 retval |= NSTAT_SOURCE_IS_LISTENER;
6765 }
6766 if (policy_condition_client_flags & NECP_CLIENT_PARAMETER_FLAG_INBOUND) {
6767 retval |= NSTAT_SOURCE_IS_INBOUND;
6768 }
6769 }
6770 break;
6771 }
6772 offset += sizeof(struct necp_tlv_header) + length;
6773 }
6774 if (retval == 0) {
6775 retval = NSTAT_SOURCE_IS_OUTBOUND;
6776 }
6777 return retval;
6778 }
6779
6780 static bool
necp_request_nexus_tcp_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)6781 necp_request_nexus_tcp_netstats(userland_stats_provider_context *ctx,
6782 u_int32_t *ifflagsp,
6783 nstat_progress_digest *digestp,
6784 nstat_counts *countsp,
6785 nstat_detailed_counts *detailed_countsp,
6786 void *metadatap)
6787 {
6788 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
6789 struct necp_client *client = flow_registration->client;
6790 struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
6791 struct necp_tcp_stats *tcpstats = (struct necp_tcp_stats *)ustats_kaddr;
6792 ASSERT(tcpstats != NULL);
6793 ASSERT(!flow_registration->aop_offload);
6794
6795 u_int32_t nstat_diagnostic_flags = 0;
6796
6797 // Retrieve details from the last time the assigned flows were updated
6798 u_int32_t route_ifindex = IFSCOPE_NONE;
6799 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6800 u_int64_t combined_interface_details = 0;
6801
6802 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
6803 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
6804
6805 if (route_ifindex == IFSCOPE_NONE) {
6806 // Mark no interface
6807 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
6808 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6809 NECPLOG(LOG_INFO, "req tcp stats, failed to get route details for pid %d curproc %d %s\n",
6810 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
6811 }
6812
6813 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
6814 if (sf == NULL) {
6815 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
6816 char namebuf[MAXCOMLEN + 1];
6817 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
6818 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
6819 NECPLOG(LOG_ERR, "req tcp stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
6820 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
6821 sf = &ntstat_sk_stats_zero;
6822 }
6823
6824 if (ifflagsp) {
6825 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
6826 *ifflagsp |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
6827 if (tcpstats->necp_tcp_extra.flags1 & SOF1_CELLFALLBACK) {
6828 *ifflagsp |= NSTAT_IFNET_VIA_CELLFALLBACK;
6829 }
6830 if ((digestp == NULL) && (countsp == NULL) && (detailed_countsp == NULL) && (metadatap == NULL)) {
6831 return true;
6832 }
6833 }
6834
6835 if (digestp) {
6836 // The digest is intended to give information that may help give insight into the state of the link
6837 digestp->rxbytes = tcpstats->necp_tcp_counts.necp_stat_rxbytes;
6838 digestp->txbytes = tcpstats->necp_tcp_counts.necp_stat_txbytes;
6839 digestp->rxduplicatebytes = tcpstats->necp_tcp_counts.necp_stat_rxduplicatebytes;
6840 digestp->rxoutoforderbytes = tcpstats->necp_tcp_counts.necp_stat_rxoutoforderbytes;
6841 digestp->txretransmit = tcpstats->necp_tcp_counts.necp_stat_txretransmit;
6842 digestp->ifindex = route_ifindex;
6843 digestp->state = tcpstats->necp_tcp_extra.state;
6844 digestp->txunacked = tcpstats->necp_tcp_extra.txunacked;
6845 digestp->txwindow = tcpstats->necp_tcp_extra.txwindow;
6846 digestp->connstatus.probe_activated = tcpstats->necp_tcp_extra.probestatus.probe_activated;
6847 digestp->connstatus.write_probe_failed = tcpstats->necp_tcp_extra.probestatus.write_probe_failed;
6848 digestp->connstatus.read_probe_failed = tcpstats->necp_tcp_extra.probestatus.read_probe_failed;
6849 digestp->connstatus.conn_probe_failed = tcpstats->necp_tcp_extra.probestatus.conn_probe_failed;
6850
6851 if ((countsp == NULL) && (metadatap == NULL)) {
6852 return true;
6853 }
6854 }
6855
6856 if (countsp) {
6857 countsp->nstat_rxbytes = tcpstats->necp_tcp_counts.necp_stat_rxbytes;
6858 countsp->nstat_txbytes = tcpstats->necp_tcp_counts.necp_stat_txbytes;
6859
6860 countsp->nstat_rxduplicatebytes = tcpstats->necp_tcp_counts.necp_stat_rxduplicatebytes;
6861 countsp->nstat_rxoutoforderbytes = tcpstats->necp_tcp_counts.necp_stat_rxoutoforderbytes;
6862 countsp->nstat_txretransmit = tcpstats->necp_tcp_counts.necp_stat_txretransmit;
6863
6864 countsp->nstat_min_rtt = tcpstats->necp_tcp_counts.necp_stat_min_rtt;
6865 countsp->nstat_avg_rtt = tcpstats->necp_tcp_counts.necp_stat_avg_rtt;
6866 countsp->nstat_var_rtt = tcpstats->necp_tcp_counts.necp_stat_var_rtt;
6867
6868 countsp->nstat_connectattempts = tcpstats->necp_tcp_extra.state >= TCPS_SYN_SENT ? 1 : 0;
6869 countsp->nstat_connectsuccesses = tcpstats->necp_tcp_extra.state >= TCPS_ESTABLISHED ? 1 : 0;
6870
6871 // Supplement what the user level has told us with what we know from the flowswitch
6872 // The nstat_counts structure has only one set of packet counts so set them from the
6873 // trusted flowswitch as clients may use them to calculate header overhead for cell/wifi/wired counts
6874 countsp->nstat_rxpackets = sf->sf_ipackets;
6875 countsp->nstat_txpackets = sf->sf_opackets;
6876 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
6877 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
6878 countsp->nstat_cell_txbytes = sf->sf_obytes;
6879 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
6880 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
6881 countsp->nstat_wifi_txbytes = sf->sf_obytes;
6882 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
6883 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
6884 countsp->nstat_wired_txbytes = sf->sf_obytes;
6885 }
6886 }
6887
6888 if (detailed_countsp) {
6889 detailed_countsp->nstat_media_stats.ms_total.ts_rxbytes = tcpstats->necp_tcp_counts.necp_stat_rxbytes;
6890 detailed_countsp->nstat_media_stats.ms_total.ts_txbytes = tcpstats->necp_tcp_counts.necp_stat_txbytes;
6891 detailed_countsp->nstat_media_stats.ms_total.ts_rxpackets = tcpstats->necp_tcp_counts.necp_stat_rxpackets;
6892 detailed_countsp->nstat_media_stats.ms_total.ts_txpackets = tcpstats->necp_tcp_counts.necp_stat_txpackets;
6893
6894 detailed_countsp->nstat_rxduplicatebytes = tcpstats->necp_tcp_counts.necp_stat_rxduplicatebytes;
6895 detailed_countsp->nstat_rxoutoforderbytes = tcpstats->necp_tcp_counts.necp_stat_rxoutoforderbytes;
6896 detailed_countsp->nstat_txretransmit = tcpstats->necp_tcp_counts.necp_stat_txretransmit;
6897
6898 detailed_countsp->nstat_min_rtt = tcpstats->necp_tcp_counts.necp_stat_min_rtt;
6899 detailed_countsp->nstat_avg_rtt = tcpstats->necp_tcp_counts.necp_stat_avg_rtt;
6900 detailed_countsp->nstat_var_rtt = tcpstats->necp_tcp_counts.necp_stat_var_rtt;
6901
6902 // Supplement what the user level has told us with what we know from the flowswitch
6903 // The user level statistics don't include a bitmap so use the one within the kernel,
6904 memcpy(&detailed_countsp->nstat_media_stats.ms_total.ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
6905
6906 struct traffic_stats *ts = media_stats_embedded_ts(&detailed_countsp->nstat_media_stats, route_ifflags);
6907 if (ts) {
6908 ts->ts_rxpackets = sf->sf_ipackets;
6909 ts->ts_txpackets = sf->sf_opackets;
6910 ts->ts_rxbytes = sf->sf_ibytes;
6911 ts->ts_txbytes = sf->sf_obytes;
6912 memcpy(&ts->ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
6913 }
6914 }
6915
6916 if (metadatap) {
6917 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)metadatap;
6918 memset(desc, 0, sizeof(*desc));
6919
6920 // Metadata from the flow registration
6921 uuid_copy(desc->fuuid, flow_registration->registration_id);
6922
6923 // Metadata that the necp client should have in TLV format.
6924 pid_t effective_pid = client->proc_pid;
6925 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, &desc->uid, desc->euuid, &desc->persona_id, &desc->traffic_class, &desc->fallback_mode);
6926 desc->epid = (u_int32_t)effective_pid;
6927
6928 // Metadata from the flow registration
6929 // This needs to revisited if multiple flows are created from one flow registration
6930 struct necp_client_flow *flow = NULL;
6931 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
6932 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
6933 break;
6934 }
6935
6936 // Metadata from the route
6937 desc->ifindex = route_ifindex;
6938 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
6939 desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
6940 if (tcpstats->necp_tcp_extra.flags1 & SOF1_CELLFALLBACK) {
6941 desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
6942 }
6943
6944 // Basic metadata from userland
6945 desc->rcvbufsize = tcpstats->necp_tcp_basic.rcvbufsize;
6946 desc->rcvbufused = tcpstats->necp_tcp_basic.rcvbufused;
6947
6948 // Additional TCP specific data
6949 desc->sndbufsize = tcpstats->necp_tcp_extra.sndbufsize;
6950 desc->sndbufused = tcpstats->necp_tcp_extra.sndbufused;
6951 desc->txunacked = tcpstats->necp_tcp_extra.txunacked;
6952 desc->txwindow = tcpstats->necp_tcp_extra.txwindow;
6953 desc->txcwindow = tcpstats->necp_tcp_extra.txcwindow;
6954 desc->traffic_mgt_flags = tcpstats->necp_tcp_extra.traffic_mgt_flags;
6955 desc->state = tcpstats->necp_tcp_extra.state;
6956
6957 u_int32_t cc_alg_index = tcpstats->necp_tcp_extra.cc_alg_index;
6958 if (cc_alg_index < TCP_CC_ALGO_COUNT) {
6959 strbufcpy(desc->cc_algo, sizeof(desc->cc_algo), tcp_cc_algo_list[cc_alg_index]->name, sizeof(tcp_cc_algo_list[cc_alg_index]->name));
6960 } else {
6961 strlcpy(desc->cc_algo, "unknown", sizeof(desc->cc_algo));
6962 }
6963
6964 desc->connstatus.probe_activated = tcpstats->necp_tcp_extra.probestatus.probe_activated;
6965 desc->connstatus.write_probe_failed = tcpstats->necp_tcp_extra.probestatus.write_probe_failed;
6966 desc->connstatus.read_probe_failed = tcpstats->necp_tcp_extra.probestatus.read_probe_failed;
6967 desc->connstatus.conn_probe_failed = tcpstats->necp_tcp_extra.probestatus.conn_probe_failed;
6968
6969 memcpy(&desc->activity_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
6970
6971 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) {
6972 uuid_string_t euuid_str = { 0 };
6973 uuid_unparse(desc->euuid, euuid_str);
6974 NECPLOG(LOG_NOTICE, "Collected stats - TCP - epid %d uid %d euuid %s persona id %d", desc->epid, desc->uid, euuid_str, desc->persona_id);
6975 }
6976 }
6977
6978 return true;
6979 }
6980
6981 static bool
necp_request_aop_tcp_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)6982 necp_request_aop_tcp_netstats(userland_stats_provider_context *ctx,
6983 u_int32_t *ifflagsp,
6984 nstat_progress_digest *digestp,
6985 nstat_counts *countsp,
6986 nstat_detailed_counts *detailed_countsp,
6987 void *metadatap)
6988 {
6989 struct aop_flow_stats flow_stats = {};
6990 struct tcp_info *tcpi = &flow_stats.transport.tcp_stats.tcp_info;
6991 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
6992 struct necp_client *client = flow_registration->client;
6993 int err = 0;
6994
6995 ASSERT(flow_registration->aop_offload);
6996
6997 u_int32_t nstat_diagnostic_flags = 0;
6998
6999 // Retrieve details from the last time the assigned flows were updated
7000 u_int32_t route_ifindex = IFSCOPE_NONE;
7001 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
7002 u_int64_t combined_interface_details = 0;
7003
7004 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
7005 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
7006
7007 if (route_ifindex == IFSCOPE_NONE) {
7008 // Mark no interface
7009 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
7010 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
7011 NECPLOG(LOG_INFO, "req tcp stats, failed to get route details for pid %d curproc %d %s\n",
7012 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
7013 }
7014
7015 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
7016 if (sf == NULL) {
7017 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
7018 char namebuf[MAXCOMLEN + 1];
7019 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
7020 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
7021 NECPLOG(LOG_ERR, "req tcp stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
7022 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
7023 sf = &ntstat_sk_stats_zero;
7024 }
7025
7026 if (ifflagsp) {
7027 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
7028 *ifflagsp |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7029 if ((digestp == NULL) && (countsp == NULL) && (detailed_countsp == NULL) && (metadatap == NULL)) {
7030 return true;
7031 }
7032 }
7033
7034 // This needs to revisited if multiple flows are created from one flow registration
7035 struct necp_client_flow *flow = LIST_FIRST(&flow_registration->flow_list);
7036 if (flow == NULL) {
7037 return false;
7038 }
7039
7040 ASSERT(flow->aop_offload && flow->flow_tag > 0);
7041 if (!flow->aop_stat_index_valid) {
7042 return false;
7043 }
7044 err = net_aop_get_flow_stats(flow->stats_index, &flow_stats);
7045 if (err != 0) {
7046 NECPLOG(LOG_ERR, "failed to get aop flow stats "
7047 "for flow id %u with error %d", flow->flow_tag, err);
7048 return false;
7049 }
7050
7051 if (__improbable(flow->flow_tag != flow_stats.flow_id)) {
7052 NECPLOG(LOG_ERR, "aop flow stats, flow tag 0x%x != 0x%x",
7053 flow->flow_tag, flow_stats.flow_id);
7054 return false;
7055 }
7056
7057 if (digestp) {
7058 // The digest is intended to give information that may help give insight into the state of the link
7059 digestp->rxbytes = tcpi->tcpi_rxbytes;
7060 digestp->txbytes = tcpi->tcpi_txbytes;
7061 digestp->rxduplicatebytes = tcpi->tcpi_rxduplicatebytes;
7062 digestp->rxoutoforderbytes = tcpi->tcpi_rxoutoforderbytes;
7063 digestp->txretransmit = tcpi->tcpi_txretransmitbytes;
7064 digestp->ifindex = route_ifindex;
7065 digestp->state = tcpi->tcpi_state;
7066 digestp->txunacked = tcpi->tcpi_txunacked;
7067 digestp->txwindow = tcpi->tcpi_snd_wnd;
7068
7069 if ((countsp == NULL) && (metadatap == NULL)) {
7070 return true;
7071 }
7072 }
7073
7074 if (countsp) {
7075 countsp->nstat_rxbytes = tcpi->tcpi_rxbytes;
7076 countsp->nstat_txbytes = tcpi->tcpi_txbytes;
7077
7078 countsp->nstat_rxduplicatebytes = tcpi->tcpi_rxduplicatebytes;
7079 countsp->nstat_rxoutoforderbytes = tcpi->tcpi_rxoutoforderbytes;
7080 countsp->nstat_txretransmit = tcpi->tcpi_txretransmitbytes;
7081
7082 countsp->nstat_min_rtt = tcpi->tcpi_rttbest;
7083 countsp->nstat_avg_rtt = tcpi->tcpi_srtt;
7084 countsp->nstat_var_rtt = tcpi->tcpi_rttvar;
7085
7086 countsp->nstat_connectattempts = tcpi->tcpi_state >= TCPS_SYN_SENT ? 1 : 0;
7087 countsp->nstat_connectsuccesses = tcpi->tcpi_state >= TCPS_ESTABLISHED ? 1 : 0;
7088
7089 // Supplement what the user level has told us with what we know from the flowswitch
7090 // The nstat_counts structure has only one set of packet counts so set them from the
7091 // trusted flowswitch as clients may use them to calculate header overhead for cell/wifi/wired counts
7092 countsp->nstat_rxpackets = sf->sf_ipackets;
7093 countsp->nstat_txpackets = sf->sf_opackets;
7094 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
7095 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
7096 countsp->nstat_cell_txbytes = sf->sf_obytes;
7097 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
7098 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
7099 countsp->nstat_wifi_txbytes = sf->sf_obytes;
7100 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
7101 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
7102 countsp->nstat_wired_txbytes = sf->sf_obytes;
7103 }
7104 }
7105
7106 if (detailed_countsp) {
7107 detailed_countsp->nstat_media_stats.ms_total.ts_rxbytes = tcpi->tcpi_rxbytes;
7108 detailed_countsp->nstat_media_stats.ms_total.ts_txbytes = tcpi->tcpi_txbytes;
7109 detailed_countsp->nstat_media_stats.ms_total.ts_rxpackets = tcpi->tcpi_rxpackets;
7110 detailed_countsp->nstat_media_stats.ms_total.ts_txpackets = tcpi->tcpi_txpackets;
7111
7112 detailed_countsp->nstat_rxduplicatebytes = tcpi->tcpi_rxduplicatebytes;
7113 detailed_countsp->nstat_rxoutoforderbytes = tcpi->tcpi_rxoutoforderbytes;
7114 detailed_countsp->nstat_txretransmit = tcpi->tcpi_txretransmitbytes;
7115
7116 detailed_countsp->nstat_min_rtt = tcpi->tcpi_rttbest;
7117 detailed_countsp->nstat_avg_rtt = tcpi->tcpi_srtt;
7118 detailed_countsp->nstat_var_rtt = tcpi->tcpi_rttvar;
7119
7120 struct traffic_stats *ts = media_stats_embedded_ts(&detailed_countsp->nstat_media_stats, route_ifflags);
7121 if (ts) {
7122 ts->ts_rxpackets = sf->sf_ipackets;
7123 ts->ts_txpackets = sf->sf_opackets;
7124 ts->ts_rxbytes = sf->sf_ibytes;
7125 ts->ts_txbytes = sf->sf_obytes;
7126 }
7127 }
7128
7129 if (metadatap) {
7130 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)metadatap;
7131 memset(desc, 0, sizeof(*desc));
7132
7133 // Metadata from the flow registration
7134 uuid_copy(desc->fuuid, flow_registration->registration_id);
7135
7136 // Metadata that the necp client should have in TLV format.
7137 pid_t effective_pid = client->proc_pid;
7138 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, &desc->uid, desc->euuid, &desc->persona_id, &desc->traffic_class, &desc->fallback_mode);
7139 desc->epid = (u_int32_t)effective_pid;
7140
7141 // Metadata from the flow registration
7142 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
7143
7144 // Metadata from the route
7145 desc->ifindex = route_ifindex;
7146 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
7147 desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7148
7149 // Basic metadata from userland
7150 desc->rcvbufsize = flow_stats.rx_buffer_stats.bufsize;
7151 desc->rcvbufused = flow_stats.rx_buffer_stats.bufused;
7152
7153 // Additional TCP specific data
7154 desc->sndbufsize = flow_stats.tx_buffer_stats.bufsize;
7155 desc->sndbufused = flow_stats.tx_buffer_stats.bufused;
7156 desc->txunacked = tcpi->tcpi_txunacked;
7157 desc->txwindow = tcpi->tcpi_snd_wnd;
7158 desc->txcwindow = tcpi->tcpi_snd_cwnd;
7159 desc->traffic_mgt_flags = 0;
7160 desc->state = tcpi->tcpi_state;
7161
7162 u_int32_t cc_alg_index = flow_stats.transport.tcp_stats.tcp_cc_algo;
7163 if (cc_alg_index < TCP_CC_ALGO_COUNT) {
7164 strbufcpy(desc->cc_algo, sizeof(desc->cc_algo), tcp_cc_algo_list[cc_alg_index]->name, sizeof(tcp_cc_algo_list[cc_alg_index]->name));
7165 } else {
7166 strlcpy(desc->cc_algo, "unknown", sizeof(desc->cc_algo));
7167 }
7168
7169 desc->connstatus.probe_activated = 0;
7170 desc->connstatus.write_probe_failed = 0;
7171 desc->connstatus.read_probe_failed = 0;
7172 desc->connstatus.conn_probe_failed = 0;
7173
7174 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) {
7175 uuid_string_t euuid_str = { 0 };
7176 uuid_unparse(desc->euuid, euuid_str);
7177 NECPLOG(LOG_NOTICE, "Collected stats - TCP - epid %d uid %d euuid %s persona id %d", desc->epid, desc->uid, euuid_str, desc->persona_id);
7178 }
7179 }
7180
7181 return true;
7182 }
7183
7184 // Called from NetworkStatistics when it wishes to collect latest information for a TCP flow.
7185 // It is a responsibility of NetworkStatistics to have previously zeroed any supplied memory.
7186 static bool
necp_request_tcp_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)7187 necp_request_tcp_netstats(userland_stats_provider_context *ctx,
7188 u_int32_t *ifflagsp,
7189 nstat_progress_digest *digestp,
7190 nstat_counts *countsp,
7191 nstat_detailed_counts *detailed_countsp,
7192 void *metadatap)
7193 {
7194 if (ctx == NULL) {
7195 return false;
7196 }
7197
7198 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
7199 if (__probable(!flow_registration->aop_offload)) {
7200 return necp_request_nexus_tcp_netstats(ctx, ifflagsp, digestp, countsp, detailed_countsp, metadatap);
7201 } else {
7202 return necp_request_aop_tcp_netstats(ctx, ifflagsp, digestp, countsp, detailed_countsp, metadatap);
7203 }
7204 }
7205
7206 // Called from NetworkStatistics when it wishes to collect latest information for a UDP flow.
7207 static bool
necp_request_udp_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)7208 necp_request_udp_netstats(userland_stats_provider_context *ctx,
7209 u_int32_t *ifflagsp,
7210 nstat_progress_digest *digestp,
7211 nstat_counts *countsp,
7212 nstat_detailed_counts *detailed_countsp,
7213 void *metadatap)
7214 {
7215 #pragma unused(digestp)
7216
7217 if (ctx == NULL) {
7218 return false;
7219 }
7220
7221 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
7222 struct necp_client *client = flow_registration->client;
7223 struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
7224 struct necp_udp_stats *udpstats = (struct necp_udp_stats *)ustats_kaddr;
7225 ASSERT(udpstats != NULL);
7226
7227 u_int32_t nstat_diagnostic_flags = 0;
7228
7229 // Retrieve details from the last time the assigned flows were updated
7230 u_int32_t route_ifindex = IFSCOPE_NONE;
7231 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
7232 u_int64_t combined_interface_details = 0;
7233
7234 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
7235 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
7236
7237 if (route_ifindex == IFSCOPE_NONE) {
7238 // Mark no interface
7239 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
7240 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
7241 NECPLOG(LOG_INFO, "req udp stats, failed to get route details for pid %d curproc %d %s\n",
7242 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
7243 }
7244
7245 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
7246 if (sf == NULL) {
7247 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
7248 char namebuf[MAXCOMLEN + 1];
7249 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
7250 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
7251 NECPLOG(LOG_ERR, "req udp stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
7252 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
7253 sf = &ntstat_sk_stats_zero;
7254 }
7255
7256 if (ifflagsp) {
7257 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
7258 *ifflagsp |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7259 if ((digestp == NULL) && (countsp == NULL) && (detailed_countsp == NULL) && (metadatap == NULL)) {
7260 return true;
7261 }
7262 }
7263
7264 if (countsp) {
7265 countsp->nstat_rxbytes = udpstats->necp_udp_counts.necp_stat_rxbytes;
7266 countsp->nstat_txbytes = udpstats->necp_udp_counts.necp_stat_txbytes;
7267
7268 countsp->nstat_rxduplicatebytes = udpstats->necp_udp_counts.necp_stat_rxduplicatebytes;
7269 countsp->nstat_rxoutoforderbytes = udpstats->necp_udp_counts.necp_stat_rxoutoforderbytes;
7270 countsp->nstat_txretransmit = udpstats->necp_udp_counts.necp_stat_txretransmit;
7271
7272 countsp->nstat_min_rtt = udpstats->necp_udp_counts.necp_stat_min_rtt;
7273 countsp->nstat_avg_rtt = udpstats->necp_udp_counts.necp_stat_avg_rtt;
7274 countsp->nstat_var_rtt = udpstats->necp_udp_counts.necp_stat_var_rtt;
7275
7276 // Supplement what the user level has told us with what we know from the flowswitch
7277 // The nstat_counts structure has only one set of packet counts so set them from the
7278 // trusted flowswitch as clients may use them to calculate header overhead for cell/wifi/wired counts
7279 countsp->nstat_rxpackets = sf->sf_ipackets;
7280 countsp->nstat_txpackets = sf->sf_opackets;
7281 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
7282 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
7283 countsp->nstat_cell_txbytes = sf->sf_obytes;
7284 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
7285 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
7286 countsp->nstat_wifi_txbytes = sf->sf_obytes;
7287 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
7288 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
7289 countsp->nstat_wired_txbytes = sf->sf_obytes;
7290 }
7291 }
7292
7293 if (detailed_countsp) {
7294 detailed_countsp->nstat_media_stats.ms_total.ts_rxbytes = udpstats->necp_udp_counts.necp_stat_rxbytes;
7295 detailed_countsp->nstat_media_stats.ms_total.ts_txbytes = udpstats->necp_udp_counts.necp_stat_txbytes;
7296 detailed_countsp->nstat_media_stats.ms_total.ts_rxpackets = udpstats->necp_udp_counts.necp_stat_rxpackets;
7297 detailed_countsp->nstat_media_stats.ms_total.ts_txpackets = udpstats->necp_udp_counts.necp_stat_txpackets;
7298
7299 detailed_countsp->nstat_rxduplicatebytes = udpstats->necp_udp_counts.necp_stat_rxduplicatebytes;
7300 detailed_countsp->nstat_rxoutoforderbytes = udpstats->necp_udp_counts.necp_stat_rxoutoforderbytes;
7301 detailed_countsp->nstat_txretransmit = udpstats->necp_udp_counts.necp_stat_txretransmit;
7302
7303 detailed_countsp->nstat_min_rtt = udpstats->necp_udp_counts.necp_stat_min_rtt;
7304 detailed_countsp->nstat_avg_rtt = udpstats->necp_udp_counts.necp_stat_avg_rtt;
7305 detailed_countsp->nstat_var_rtt = udpstats->necp_udp_counts.necp_stat_var_rtt;
7306
7307 // Supplement what the user level has told us with what we know from the flowswitch
7308 // The user level statistics don't include a bitmap so use the one within the kernel,
7309 memcpy(&detailed_countsp->nstat_media_stats.ms_total.ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
7310
7311 struct traffic_stats *ts = media_stats_embedded_ts(&detailed_countsp->nstat_media_stats, route_ifflags);
7312 if (ts) {
7313 ts->ts_rxpackets = sf->sf_ipackets;
7314 ts->ts_txpackets = sf->sf_opackets;
7315 ts->ts_rxbytes = sf->sf_ibytes;
7316 ts->ts_txbytes = sf->sf_obytes;
7317 memcpy(&ts->ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
7318 }
7319 }
7320
7321 if (metadatap) {
7322 nstat_udp_descriptor *desc = (nstat_udp_descriptor *)metadatap;
7323 memset(desc, 0, sizeof(*desc));
7324
7325 // Metadata from the flow registration
7326 uuid_copy(desc->fuuid, flow_registration->registration_id);
7327
7328 // Metadata that the necp client should have in TLV format.
7329 pid_t effective_pid = client->proc_pid;
7330 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, &desc->uid, desc->euuid, &desc->persona_id, &desc->traffic_class, &desc->fallback_mode);
7331 desc->epid = (u_int32_t)effective_pid;
7332
7333 // Metadata from the flow registration
7334 // This needs to revisited if multiple flows are created from one flow registration
7335 struct necp_client_flow *flow = NULL;
7336 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
7337 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
7338 break;
7339 }
7340
7341 // Metadata from the route
7342 desc->ifindex = route_ifindex;
7343 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
7344 desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7345
7346 // Basic metadata is all that is required for UDP
7347 desc->rcvbufsize = udpstats->necp_udp_basic.rcvbufsize;
7348 desc->rcvbufused = udpstats->necp_udp_basic.rcvbufused;
7349
7350 memcpy(&desc->activity_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
7351
7352 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) {
7353 uuid_string_t euuid_str = { 0 };
7354 uuid_unparse(desc->euuid, euuid_str);
7355 NECPLOG(LOG_NOTICE, "Collected stats - UDP - epid %d uid %d euuid %s persona id %d", desc->epid, desc->uid, euuid_str, desc->persona_id);
7356 }
7357 }
7358
7359 return true;
7360 }
7361
7362 // Called from NetworkStatistics when it wishes to collect latest information for a QUIC flow.
7363 //
7364 // TODO: For now it is an exact implementation as that of TCP.
7365 // Still to keep the logic separate for future divergence, keeping the routines separate.
7366 // It also seems there are lots of common code between existing implementations and
7367 // it would be good to refactor this logic at some point.
7368 static bool
necp_request_quic_netstats(userland_stats_provider_context * ctx,u_int32_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,nstat_detailed_counts * detailed_countsp,void * metadatap)7369 necp_request_quic_netstats(userland_stats_provider_context *ctx,
7370 u_int32_t *ifflagsp,
7371 nstat_progress_digest *digestp,
7372 nstat_counts *countsp,
7373 nstat_detailed_counts *detailed_countsp,
7374 void *metadatap)
7375 {
7376 if (ctx == NULL) {
7377 return false;
7378 }
7379
7380 struct necp_client_flow_registration * __single flow_registration = (struct necp_client_flow_registration *)(void *)ctx;
7381 struct necp_client *client = flow_registration->client;
7382 struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
7383 struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
7384 ASSERT(quicstats != NULL);
7385
7386 u_int32_t nstat_diagnostic_flags = 0;
7387
7388 // Retrieve details from the last time the assigned flows were updated
7389 u_int32_t route_ifindex = IFSCOPE_NONE;
7390 u_int32_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
7391 u_int64_t combined_interface_details = 0;
7392
7393 combined_interface_details = os_atomic_load(&flow_registration->last_interface_details, relaxed);
7394 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
7395
7396 if (route_ifindex == IFSCOPE_NONE) {
7397 // Mark no interface
7398 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
7399 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
7400 NECPLOG(LOG_INFO, "req quic stats, failed to get route details for pid %d curproc %d %s\n",
7401 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
7402 }
7403
7404 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
7405 if (sf == NULL) {
7406 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
7407 char namebuf[MAXCOMLEN + 1];
7408 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
7409 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
7410 NECPLOG(LOG_ERR, "req quic stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
7411 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
7412 sf = &ntstat_sk_stats_zero;
7413 }
7414
7415 if (ifflagsp) {
7416 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
7417 *ifflagsp |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7418 if (quicstats->necp_quic_extra.fallback) {
7419 *ifflagsp |= NSTAT_IFNET_VIA_CELLFALLBACK;
7420 }
7421 if ((digestp == NULL) && (countsp == NULL) && (detailed_countsp == NULL) && (metadatap == NULL)) {
7422 return true;
7423 }
7424 }
7425
7426 if (digestp) {
7427 // The digest is intended to give information that may help give insight into the state of the link
7428 digestp->rxbytes = quicstats->necp_quic_counts.necp_stat_rxbytes;
7429 digestp->txbytes = quicstats->necp_quic_counts.necp_stat_txbytes;
7430 digestp->rxduplicatebytes = quicstats->necp_quic_counts.necp_stat_rxduplicatebytes;
7431 digestp->rxoutoforderbytes = quicstats->necp_quic_counts.necp_stat_rxoutoforderbytes;
7432 digestp->txretransmit = quicstats->necp_quic_counts.necp_stat_txretransmit;
7433 digestp->ifindex = route_ifindex;
7434 digestp->state = quicstats->necp_quic_extra.state;
7435 digestp->txunacked = quicstats->necp_quic_extra.txunacked;
7436 digestp->txwindow = quicstats->necp_quic_extra.txwindow;
7437 digestp->connstatus.probe_activated = quicstats->necp_quic_extra.probestatus.probe_activated;
7438 digestp->connstatus.write_probe_failed = quicstats->necp_quic_extra.probestatus.write_probe_failed;
7439 digestp->connstatus.read_probe_failed = quicstats->necp_quic_extra.probestatus.read_probe_failed;
7440 digestp->connstatus.conn_probe_failed = quicstats->necp_quic_extra.probestatus.conn_probe_failed;
7441
7442 if ((countsp == NULL) && (metadatap == NULL)) {
7443 return true;
7444 }
7445 }
7446
7447 if (countsp) {
7448 countsp->nstat_rxbytes = quicstats->necp_quic_counts.necp_stat_rxbytes;
7449 countsp->nstat_txbytes = quicstats->necp_quic_counts.necp_stat_txbytes;
7450
7451 countsp->nstat_rxduplicatebytes = quicstats->necp_quic_counts.necp_stat_rxduplicatebytes;
7452 countsp->nstat_rxoutoforderbytes = quicstats->necp_quic_counts.necp_stat_rxoutoforderbytes;
7453 countsp->nstat_txretransmit = quicstats->necp_quic_counts.necp_stat_txretransmit;
7454
7455 countsp->nstat_min_rtt = quicstats->necp_quic_counts.necp_stat_min_rtt;
7456 countsp->nstat_avg_rtt = quicstats->necp_quic_counts.necp_stat_avg_rtt;
7457 countsp->nstat_var_rtt = quicstats->necp_quic_counts.necp_stat_var_rtt;
7458
7459 // TODO: It would be good to expose QUIC stats for CH/SH retransmission and connection state
7460 // Supplement what the user level has told us with what we know from the flowswitch
7461 // The nstat_counts structure has only one set of packet counts so set them from the
7462 // trusted flowswitch as clients may use them to calculate header overhead for cell/wifi/wired counts
7463 countsp->nstat_rxpackets = sf->sf_ipackets;
7464 countsp->nstat_txpackets = sf->sf_opackets;
7465 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
7466 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
7467 countsp->nstat_cell_txbytes = sf->sf_obytes;
7468 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
7469 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
7470 countsp->nstat_wifi_txbytes = sf->sf_obytes;
7471 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
7472 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
7473 countsp->nstat_wired_txbytes = sf->sf_obytes;
7474 }
7475 }
7476
7477 if (detailed_countsp) {
7478 detailed_countsp->nstat_media_stats.ms_total.ts_rxbytes = quicstats->necp_quic_counts.necp_stat_rxbytes;
7479 detailed_countsp->nstat_media_stats.ms_total.ts_txbytes = quicstats->necp_quic_counts.necp_stat_txbytes;
7480 detailed_countsp->nstat_media_stats.ms_total.ts_rxpackets = quicstats->necp_quic_counts.necp_stat_rxpackets;
7481 detailed_countsp->nstat_media_stats.ms_total.ts_txpackets = quicstats->necp_quic_counts.necp_stat_txpackets;
7482
7483 detailed_countsp->nstat_rxduplicatebytes = quicstats->necp_quic_counts.necp_stat_rxduplicatebytes;
7484 detailed_countsp->nstat_rxoutoforderbytes = quicstats->necp_quic_counts.necp_stat_rxoutoforderbytes;
7485 detailed_countsp->nstat_txretransmit = quicstats->necp_quic_counts.necp_stat_txretransmit;
7486
7487 detailed_countsp->nstat_min_rtt = quicstats->necp_quic_counts.necp_stat_min_rtt;
7488 detailed_countsp->nstat_avg_rtt = quicstats->necp_quic_counts.necp_stat_avg_rtt;
7489 detailed_countsp->nstat_var_rtt = quicstats->necp_quic_counts.necp_stat_var_rtt;
7490
7491 // Supplement what the user level has told us with what we know from the flowswitch
7492 // The user level statistics don't include a bitmap so use the one within the kernel,
7493 memcpy(&detailed_countsp->nstat_media_stats.ms_total.ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
7494
7495 struct traffic_stats *ts = media_stats_embedded_ts(&detailed_countsp->nstat_media_stats, route_ifflags);
7496 if (ts) {
7497 ts->ts_rxpackets = sf->sf_ipackets;
7498 ts->ts_txpackets = sf->sf_opackets;
7499 ts->ts_rxbytes = sf->sf_ibytes;
7500 ts->ts_txbytes = sf->sf_obytes;
7501 memcpy(&ts->ts_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
7502 }
7503 }
7504
7505 if (metadatap) {
7506 nstat_quic_descriptor *desc = (nstat_quic_descriptor *)metadatap;
7507 memset(desc, 0, sizeof(*desc));
7508
7509 // Metadata from the flow registration
7510 uuid_copy(desc->fuuid, flow_registration->registration_id);
7511
7512 // Metadata, that the necp client should have, in TLV format.
7513 pid_t effective_pid = client->proc_pid;
7514 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, &desc->uid, desc->euuid, &desc->persona_id, &desc->traffic_class, &desc->fallback_mode);
7515 desc->epid = (u_int32_t)effective_pid;
7516
7517 // Metadata from the flow registration
7518 // This needs to revisited if multiple flows are created from one flow registration
7519 struct necp_client_flow *flow = NULL;
7520 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
7521 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
7522 break;
7523 }
7524
7525 // Metadata from the route
7526 desc->ifindex = route_ifindex;
7527 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
7528 desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
7529 if (quicstats->necp_quic_extra.fallback) {
7530 desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
7531 desc->fallback_mode = SO_FALLBACK_MODE_FAST;
7532 }
7533
7534 // Basic metadata from userland
7535 desc->rcvbufsize = quicstats->necp_quic_basic.rcvbufsize;
7536 desc->rcvbufused = quicstats->necp_quic_basic.rcvbufused;
7537
7538 // Additional QUIC specific data
7539 desc->sndbufsize = quicstats->necp_quic_extra.sndbufsize;
7540 desc->sndbufused = quicstats->necp_quic_extra.sndbufused;
7541 desc->txunacked = quicstats->necp_quic_extra.txunacked;
7542 desc->txwindow = quicstats->necp_quic_extra.txwindow;
7543 desc->txcwindow = quicstats->necp_quic_extra.txcwindow;
7544 desc->traffic_mgt_flags = quicstats->necp_quic_extra.traffic_mgt_flags;
7545 desc->state = quicstats->necp_quic_extra.state;
7546
7547 // TODO: CC algo defines should be named agnostic of the protocol
7548 u_int32_t cc_alg_index = quicstats->necp_quic_extra.cc_alg_index;
7549 if (cc_alg_index < TCP_CC_ALGO_COUNT) {
7550 strbufcpy(desc->cc_algo, sizeof(desc->cc_algo), tcp_cc_algo_list[cc_alg_index]->name, sizeof(tcp_cc_algo_list[cc_alg_index]->name));
7551 } else {
7552 strlcpy(desc->cc_algo, "unknown", sizeof(desc->cc_algo));
7553 }
7554
7555 memcpy(&desc->activity_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
7556
7557 desc->connstatus.probe_activated = quicstats->necp_quic_extra.probestatus.probe_activated;
7558 desc->connstatus.write_probe_failed = quicstats->necp_quic_extra.probestatus.write_probe_failed;
7559 desc->connstatus.read_probe_failed = quicstats->necp_quic_extra.probestatus.read_probe_failed;
7560 desc->connstatus.conn_probe_failed = quicstats->necp_quic_extra.probestatus.conn_probe_failed;
7561
7562 if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) {
7563 uuid_string_t euuid_str = { 0 };
7564 uuid_unparse(desc->euuid, euuid_str);
7565 NECPLOG(LOG_NOTICE, "Collected stats - QUIC - epid %d uid %d euuid %s persona id %d", desc->epid, desc->uid, euuid_str, desc->persona_id);
7566 }
7567 }
7568 return true;
7569 }
7570
7571 #endif /* SKYWALK */
7572
7573 // Support functions for NetworkStatistics support for necp_client connections
7574
7575 static void
necp_client_inherit_from_parent(struct necp_client * client,struct necp_client * parent)7576 necp_client_inherit_from_parent(
7577 struct necp_client *client,
7578 struct necp_client *parent)
7579 {
7580 assert(client->original_parameters_source == NULL);
7581
7582 if (parent->original_parameters_source != NULL) {
7583 client->original_parameters_source = parent->original_parameters_source;
7584 } else {
7585 client->original_parameters_source = parent;
7586 }
7587 necp_client_retain(client->original_parameters_source);
7588 }
7589
7590 static void
necp_find_conn_netstat_data(struct necp_client * client,u_int32_t * ntstat_flags,pid_t * effective_pid,uuid_t * puuid,uid_t * uid,uuid_t * euuid,uid_t * persona_id)7591 necp_find_conn_netstat_data(struct necp_client *client,
7592 u_int32_t *ntstat_flags,
7593 pid_t *effective_pid,
7594 uuid_t *puuid,
7595 uid_t *uid,
7596 uuid_t *euuid,
7597 uid_t *persona_id)
7598 {
7599 bool has_remote_address = false;
7600 bool has_ip_protocol = false;
7601 bool has_transport_protocol = false;
7602 size_t offset = 0;
7603 u_int8_t *parameters;
7604 u_int32_t parameters_size;
7605
7606
7607 parameters = client->parameters;
7608 parameters_size = (u_int32_t)client->parameters_length;
7609
7610 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
7611 u_int8_t type = necp_buffer_get_tlv_type(parameters, parameters_size, offset);
7612 u_int32_t length = necp_buffer_get_tlv_length(parameters, parameters_size, offset);
7613
7614 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
7615 // If the length is larger than what can fit in the remaining parameters size, bail
7616 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
7617 break;
7618 }
7619
7620 if (length > 0) {
7621 u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, parameters_size, offset, NULL);
7622 if (value != NULL) {
7623 switch (type) {
7624 case NECP_CLIENT_PARAMETER_APPLICATION: {
7625 if ((euuid) && (length >= sizeof(uuid_t))) {
7626 uuid_copy(*euuid, value);
7627 }
7628 break;
7629 }
7630 case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
7631 if (length >= 1) {
7632 has_ip_protocol = true;
7633 }
7634 break;
7635 }
7636 case NECP_CLIENT_PARAMETER_PID: {
7637 if ((effective_pid) && length >= sizeof(pid_t)) {
7638 memcpy(effective_pid, value, sizeof(pid_t));
7639 }
7640 break;
7641 }
7642 case NECP_CLIENT_PARAMETER_PARENT_ID: {
7643 if ((puuid) && (length == sizeof(uuid_t))) {
7644 uuid_copy(*puuid, value);
7645 }
7646 break;
7647 }
7648 // It is an implementation quirk that the remote address can be found in the necp parameters
7649 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
7650 if (length >= sizeof(struct necp_policy_condition_addr)) {
7651 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
7652 if (necp_client_address_is_valid(&address_struct->address.sa)) {
7653 has_remote_address = true;
7654 }
7655 }
7656 break;
7657 }
7658 case NECP_CLIENT_PARAMETER_TRANSPORT_PROTOCOL: {
7659 if (length >= 1) {
7660 has_transport_protocol = true;
7661 }
7662 break;
7663 }
7664 case NECP_CLIENT_PARAMETER_APPLICATION_ID: {
7665 if (length >= sizeof(necp_application_id_t) && uid && persona_id) {
7666 necp_application_id_t *application_id = (necp_application_id_t *)(void *)value;
7667 memcpy(uid, &application_id->uid, sizeof(uid_t));
7668 uuid_copy(*euuid, application_id->effective_uuid);
7669 memcpy(persona_id, &application_id->persona_id, sizeof(uid_t));
7670 }
7671 break;
7672 }
7673 default: {
7674 break;
7675 }
7676 }
7677 }
7678 }
7679 offset += sizeof(struct necp_tlv_header) + length;
7680 }
7681 if (ntstat_flags) {
7682 *ntstat_flags = (has_remote_address && has_ip_protocol && has_transport_protocol)? NSTAT_NECP_CONN_HAS_NET_ACCESS: 0;
7683 }
7684 }
7685
7686 static bool
necp_request_conn_netstats(nstat_provider_context ctx,u_int32_t * ifflagsp,nstat_counts * countsp,nstat_detailed_counts * detailsp,void * metadatap)7687 necp_request_conn_netstats(nstat_provider_context ctx,
7688 u_int32_t *ifflagsp,
7689 nstat_counts *countsp,
7690 nstat_detailed_counts *detailsp,
7691 void *metadatap)
7692 {
7693 if (ctx == NULL) {
7694 return false;
7695 }
7696 struct necp_client * __single client = (struct necp_client *)(void *)ctx;
7697 nstat_connection_descriptor *desc = (nstat_connection_descriptor *)metadatap;
7698
7699 if (ifflagsp) {
7700 necp_find_conn_netstat_data(client, ifflagsp, NULL, NULL, NULL, NULL, NULL);
7701 }
7702 if (countsp) {
7703 memset(countsp, 0, sizeof(*countsp));
7704 }
7705 if (detailsp) {
7706 memset(detailsp, 0, sizeof(*detailsp));
7707 }
7708 if (desc) {
7709 memset(desc, 0, sizeof(*desc));
7710 // Metadata, that the necp client should have, in TLV format.
7711 pid_t effective_pid = client->proc_pid;
7712 necp_find_conn_netstat_data(client, &desc->ifnet_properties, &effective_pid, &desc->puuid, &desc->uid, &desc->euuid, &desc->persona_id);
7713 desc->epid = (u_int32_t)effective_pid;
7714
7715 // User level should obtain almost all connection information from an extension
7716 // leaving little to do here
7717 uuid_copy(desc->fuuid, client->latest_flow_registration_id);
7718 uuid_copy(desc->cuuid, client->client_id);
7719 }
7720 return true;
7721 }
7722
/*
 * Check whether the process/credential may act as a Skywalk observer.
 * Returns 0 when permitted, non-zero otherwise. When SKYWALK is not
 * configured there is nothing to gate, so the check always passes.
 */
static int
necp_skywalk_priv_check_cred(proc_t p, kauth_cred_t cred)
{
#pragma unused(p, cred)
#if SKYWALK
	/* This includes Nexus controller and Skywalk observer privs */
	return skywalk_nxctl_check_privileges(p, cred);
#else /* !SKYWALK */
	return 0;
#endif /* !SKYWALK */
}
7734
7735 /// System calls
7736
/*
 * necp_open system call: create a NECP client file descriptor.
 *
 * p      - calling process
 * uap    - syscall arguments; uap->flags may carry NECP_OPEN_FLAG_OBSERVER
 *          and/or NECP_OPEN_FLAG_PUSH_OBSERVER
 * retval - out: the new file descriptor number
 *
 * Returns 0 on success, otherwise an errno value (EACCES when observer
 * flags are requested without the required privilege).
 */
int
necp_open(struct proc *p, struct necp_open_args *uap, int *retval)
{
#pragma unused(retval) // NOTE(review): pragma looks stale — retval IS set below; confirm before removing
	int error = 0;
	struct necp_fd_data * __single fd_data = NULL;
	struct fileproc * __single fp = NULL;
	int fd = -1;

	// Observer modes expose other processes' NECP clients, so require either
	// Skywalk observer privilege or the privileged network-statistics right.
	if (uap->flags & NECP_OPEN_FLAG_OBSERVER ||
	    uap->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
		if (necp_skywalk_priv_check_cred(p, kauth_cred_get()) != 0 &&
		    priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
			NECPLOG0(LOG_ERR, "Client does not hold necessary entitlement to observe other NECP clients");
			error = EACCES;
			goto done;
		}
	}

#if CONFIG_MACF
	error = mac_necp_check_open(p, uap->flags);
	if (error) {
		goto done;
	}
#endif /* MACF */

	error = falloc(p, &fp, &fd);
	if (error != 0) {
		goto done;
	}

	// Z_NOFAIL: this allocation cannot fail, so no error path follows it.
	fd_data = kalloc_type(struct necp_fd_data, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	fd_data->necp_fd_type = necp_fd_type_client;
	fd_data->flags = uap->flags;
	RB_INIT(&fd_data->clients);
	RB_INIT(&fd_data->flows);
	TAILQ_INIT(&fd_data->update_list);
	lck_mtx_init(&fd_data->fd_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
	klist_init(&fd_data->si.si_note);
	fd_data->proc_pid = proc_pid(p);
#if SKYWALK
	LIST_INIT(&fd_data->stats_arena_list);
#endif /* SKYWALK */

	fp->fp_flags |= FP_CLOEXEC | FP_CLOFORK;
	fp->fp_glob->fg_flag = FREAD;
	fp->fp_glob->fg_ops = &necp_fd_ops;
	fp_set_data(fp, fd_data);

	proc_fdlock(p);

	procfdtbl_releasefd(p, fd, NULL);
	fp_drop(p, fd, fp, 1);

	*retval = fd;

	if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
		// Push observers are registered on the observer list and then
		// immediately populated with every existing client.
		NECP_OBSERVER_LIST_LOCK_EXCLUSIVE();
		LIST_INSERT_HEAD(&necp_fd_observer_list, fd_data, chain);
		OSIncrementAtomic(&necp_observer_fd_count);
		NECP_OBSERVER_LIST_UNLOCK();

		// Walk all existing clients and add them
		NECP_CLIENT_TREE_LOCK_SHARED();
		struct necp_client *existing_client = NULL;
		RB_FOREACH(existing_client, _necp_client_global_tree, &necp_client_global_tree) {
			NECP_CLIENT_LOCK(existing_client);
			necp_client_update_observer_add_internal(fd_data, existing_client);
			necp_client_update_observer_update_internal(fd_data, existing_client);
			NECP_CLIENT_UNLOCK(existing_client);
		}
		NECP_CLIENT_TREE_UNLOCK();
	} else {
		NECP_FD_LIST_LOCK_EXCLUSIVE();
		LIST_INSERT_HEAD(&necp_fd_list, fd_data, chain);
		OSIncrementAtomic(&necp_client_fd_count);
		NECP_FD_LIST_UNLOCK();
	}

	proc_fdunlock(p);

done:
	// All failing paths jump here before fd_data is published on any list,
	// so it can be freed without further teardown.
	if (error != 0) {
		if (fp != NULL) {
			fp_free(p, fd, fp);
			fp = NULL;
		}
		if (fd_data != NULL) {
			kfree_type(struct necp_fd_data, fd_data);
		}
	}

	return error;
}
7832
7833 // All functions called directly from necp_client_action() to handle one of the
7834 // types should be marked with NECP_CLIENT_ACTION_FUNCTION. This ensures that
7835 // necp_client_action() does not inline all the actions into a single function.
7836 #define NECP_CLIENT_ACTION_FUNCTION __attribute__((noinline))
7837
// Handle NECP_CLIENT_ACTION_ADD: create a new client on fd_data from the
// caller-supplied parameter TLV buffer, generate its client ID, and copy the
// ID back out to userspace. On the Skywalk path this also enforces
// delegation/nexus/custom-protocol/AOP entitlements, reserves a listener port
// in the network namespace when requested, and validates parent/demux
// parameters for child flows.
//
// Returns 0 on success; EINVAL/EPERM/copyin-copyout errno on failure. The
// errno is also mirrored into *retval. On any error after allocation the
// client is freed in the done: path; on success a long-lived reference is
// held until close plus a temporary reference across the nstat plumbing.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_add(struct proc *p, struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client * __single client = NULL;
	const size_t buffer_size = uap->buffer_size;
	const task_t __single task = proc_task(p);

	// Push observers consume results; they are not allowed to create clients.
	if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
		NECPLOG0(LOG_ERR, "NECP client observers with push enabled may not add their own clients");
		return EINVAL;
	}

	// Validate user pointers and parameter buffer bounds before allocating.
	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
	    buffer_size == 0 || buffer_size > NECP_MAX_CLIENT_PARAMETERS_SIZE || uap->buffer == 0) {
		return EINVAL;
	}

	// Z_NOFAIL: these allocations cannot fail, so no NULL checks are needed.
	client = kalloc_type(struct necp_client, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	client->parameters = kalloc_data(buffer_size, Z_WAITOK | Z_NOFAIL);
	client->parameters_length = buffer_size;
	lck_mtx_init(&client->lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
	lck_mtx_init(&client->route_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);

	// Copy the raw parameter TLVs from userspace.
	error = copyin(uap->buffer, client->parameters, buffer_size);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_add parameters copyin error (%d)", error);
		goto done;
	}

	os_ref_init(&client->reference_count, &necp_client_refgrp); // Hold our reference until close

	client->proc_pid = fd_data->proc_pid; // Save off proc pid in case the client will persist past fd
	client->agent_handle = (void *)fd_data;
	client->platform_binary = ((csproc_get_platform_binary(p) == 0) ? 0 : 1);

	necp_generate_client_id(client->client_id, false);
	LIST_INIT(&client->assertion_list);
	RB_INIT(&client->flow_registrations);

	NECP_CLIENT_LOG(client, "Adding client");

	// Report the generated client ID back to the caller.
	error = copyout(client->client_id, uap->client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_add client_id copyout error (%d)", error);
		goto done;
	}

#if SKYWALK
	// Parse the TLVs once up front; the entitlement and listener checks
	// below only apply when parsing succeeded (parse_error == 0).
	struct necp_client_parsed_parameters parsed_parameters = {};
	int parse_error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, &parsed_parameters);

	// Delegating traffic by upid or overriding the attributed bundle ID
	// requires the socket-delegate privilege.
	if (parse_error == 0 &&
	    ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID) ||
	    (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER))) {
		bool has_delegation_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0);
		if (!has_delegation_entitlement) {
			if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID) {
				NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by upid",
				    proc_name_address(p), proc_pid(p));
			}
			if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER) {
				NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to set attributed bundle identifier",
				    proc_name_address(p), proc_pid(p));
			}
			error = EPERM;
			goto done;
		}

		if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID) {
			// Save off delegated unique PID
			client->delegated_upid = parsed_parameters.delegated_upid;
		}
	}

	// Interpose (custom nexus) clients need the Skywalk entitlement.
	if (parse_error == 0 && parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) {
		bool has_nexus_entitlement = (necp_skywalk_priv_check_cred(p, kauth_cred_get()) == 0);
		if (!has_nexus_entitlement) {
			NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to open a custom nexus client",
			    proc_name_address(p), proc_pid(p));
			error = EPERM;
			goto done;
		}
	}

	// Custom ethernet / custom IP framing requires its own privilege.
	if (parse_error == 0 && (parsed_parameters.flags &
	    (NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER | NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP))) {
		bool has_custom_protocol_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_CUSTOM_PROTOCOL, 0) == 0);
		if (!has_custom_protocol_entitlement) {
			NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement for custom protocol APIs",
			    proc_name_address(p), proc_pid(p));
			error = EPERM;
			goto done;
		}
	}

	// AOP offload is entitlement-gated and is not supported in combination
	// with multipath, browse, or listener clients.
	if (parse_error == 0 && (parsed_parameters.extended_flags & NECP_CLIENT_PARAMETER_EXTENDED_FLAG_AOP2_OFFLOAD)) {
		bool has_aop_offload_entitlement = IOTaskHasEntitlement(task, "com.apple.private.network.aop2_offload");
		if (!has_aop_offload_entitlement) {
			NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement for aop offload",
			    proc_name_address(p), proc_pid(p));
			error = EPERM;
			goto done;
		}

		if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_MULTIPATH) ||
		    (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) ||
		    (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER)) {
			NECPLOG0(LOG_INFO, "necp_client_add, aop_offload not supported for multipath/listener");
			error = EINVAL;
			goto done;
		}
	}

	// TCP/UDP listeners reserve their local port in the network namespace
	// so the flow is visible/claimable before any packets move.
	if (parse_error == 0 && parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER &&
	    (parsed_parameters.ip_protocol == IPPROTO_TCP || parsed_parameters.ip_protocol == IPPROTO_UDP)) {
		uint32_t *netns_addr = NULL;
		uint8_t netns_addr_len = 0;
		struct ns_flow_info flow_info = {};
		uint32_t netns_flags = NETNS_LISTENER;
		uuid_copy(flow_info.nfi_flow_uuid, client->client_id);
		flow_info.nfi_protocol = parsed_parameters.ip_protocol;
		flow_info.nfi_owner_pid = client->proc_pid;
		if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID) {
			flow_info.nfi_effective_pid = parsed_parameters.effective_pid;
		} else {
			flow_info.nfi_effective_pid = flow_info.nfi_owner_pid;
		}
		proc_name(flow_info.nfi_owner_pid, flow_info.nfi_owner_name, MAXCOMLEN);
		proc_name(flow_info.nfi_effective_pid, flow_info.nfi_effective_name, MAXCOMLEN);

		if (parsed_parameters.local_addr.sa.sa_family == AF_UNSPEC) {
			// Treat no local address as a wildcard IPv6
			// parsed_parameters is already initialized to all zeros
			parsed_parameters.local_addr.sin6.sin6_family = AF_INET6;
			parsed_parameters.local_addr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		}

		// Point netns_addr/netns_addr_len at the raw address bytes for
		// the reservation call, and capture the address in flow_info.
		switch (parsed_parameters.local_addr.sa.sa_family) {
		case AF_INET: {
			memcpy(&flow_info.nfi_laddr, &parsed_parameters.local_addr.sa, parsed_parameters.local_addr.sa.sa_len);
			netns_addr = (uint32_t *)&parsed_parameters.local_addr.sin.sin_addr;
			netns_addr_len = 4;
			break;
		}
		case AF_INET6: {
			memcpy(&flow_info.nfi_laddr.sin6, &parsed_parameters.local_addr.sin6, parsed_parameters.local_addr.sa.sa_len);
			netns_addr = (uint32_t *)&parsed_parameters.local_addr.sin6.sin6_addr;
			netns_addr_len = 16;
			break;
		}

		default: {
			NECPLOG(LOG_ERR, "necp_client_add listener invalid address family (%d)", parsed_parameters.local_addr.sa.sa_family);
			error = EINVAL;
			goto done;
		}
		}
		if ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
		    (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_REUSE_LOCAL)) {
			netns_flags |= NETNS_REUSEPORT;
		}
		if (parsed_parameters.local_addr.sin.sin_port == 0) {
			// No port requested: let netns pick an ephemeral one.
			error = netns_reserve_ephemeral(&client->port_reservation, netns_addr, netns_addr_len, parsed_parameters.ip_protocol,
			    &parsed_parameters.local_addr.sin.sin_port, netns_flags, &flow_info);
			if (error) {
				NECPLOG(LOG_ERR, "necp_client_add netns_reserve_ephemeral error (%d)", error);
				goto done;
			}

			// Update the parameter TLVs with the assigned port
			necp_client_update_local_port_parameters(client->parameters, (u_int32_t)client->parameters_length, parsed_parameters.local_addr.sin.sin_port);
		} else {
			error = netns_reserve(&client->port_reservation, netns_addr, netns_addr_len, parsed_parameters.ip_protocol,
			    parsed_parameters.local_addr.sin.sin_port, netns_flags, &flow_info);
			if (error) {
				NECPLOG(LOG_ERR, "necp_client_add netns_reserve error (%d)", error);
				goto done;
			}
		}
	}

	// If a parent UUID was supplied, inherit from the parent client and
	// snapshot its parameters/flow count for the demux validation below.
	struct necp_client *parent = NULL;
	uuid_t parent_client_id;
	uuid_clear(parent_client_id);
	struct necp_client_nexus_parameters parent_parameters = {};
	uint16_t num_flow_regs = 0;
	if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID) {
		// The parent "should" be found on fd_data without having to search across the whole necp_fd_list
		// It would be nice to do this a little further down where there's another instance of NECP_FD_LOCK
		// but the logic here depends on the parse parameters
		NECP_FD_LOCK(fd_data);
		parent = necp_client_fd_find_client_unlocked(fd_data, parsed_parameters.parent_uuid);
		if (parent != NULL) {
			necp_client_inherit_from_parent(client, parent);
			necp_client_copy_parameters_locked(client, &parent_parameters);
			uuid_copy(parent_client_id, parsed_parameters.parent_uuid);
			struct necp_client_flow_registration *flow_registration = NULL;
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &parent->flow_registrations) {
				num_flow_regs++;
			}
		}
		NECP_FD_UNLOCK(fd_data);
		if (parent == NULL) {
			NECPLOG0(LOG_ERR, "necp_client_add, no necp_client_inherit_from_parent as can't find parent on fd_data");
		}
	}
	// Demux-pattern children must exactly match their parent's UDP 5-tuple.
	// Any mismatch is non-fatal: the do/while breaks out and the client is
	// simply not marked validated_parent.
	if (parse_error == 0 && parent != NULL && parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN) {
		do {
			if (parsed_parameters.demux_patterns[0].len == 0) {
				NECPLOG0(LOG_INFO, "necp_client_add, child does not have a demux pattern");
				break;
			}

			if (uuid_is_null(parent_client_id)) {
				NECPLOG0(LOG_INFO, "necp_client_add, parent ID is null");
				break;
			}

			if (num_flow_regs > 1) {
				NECPLOG0(LOG_INFO, "necp_client_add, multiple parent flows not supported");
				break;
			}
			if (parsed_parameters.ip_protocol != IPPROTO_UDP) {
				NECPLOG(LOG_INFO, "necp_client_add, flow demux pattern not supported for %d protocol",
				    parsed_parameters.ip_protocol);
				break;
			}
			if (parsed_parameters.ip_protocol != parent_parameters.ip_protocol) {
				NECPLOG0(LOG_INFO, "necp_client_add, parent/child ip protocol mismatch");
				break;
			}
			if (parsed_parameters.local_addr.sa.sa_family != AF_INET && parsed_parameters.local_addr.sa.sa_family != AF_INET6) {
				NECPLOG(LOG_INFO, "necp_client_add, flow demux pattern not supported for %d family",
				    parsed_parameters.local_addr.sa.sa_family);
				break;
			}
			if (parsed_parameters.local_addr.sa.sa_family != parsed_parameters.remote_addr.sa.sa_family) {
				NECPLOG0(LOG_INFO, "necp_client_add, local/remote address family mismatch");
				break;
			}
			if (parsed_parameters.local_addr.sa.sa_family != parent_parameters.local_addr.sa.sa_family) {
				NECPLOG0(LOG_INFO, "necp_client_add, parent/child address family mismatch");
				break;
			}
			if (SOCKADDR_CMP(&parsed_parameters.local_addr.sa, &parent_parameters.local_addr.sa, parsed_parameters.local_addr.sa.sa_len)) {
				NECPLOG0(LOG_INFO, "necp_client_add, parent/child local address mismatch");
				break;
			}
			if (SOCKADDR_CMP(&parsed_parameters.remote_addr.sa, &parent_parameters.remote_addr.sa, parsed_parameters.remote_addr.sa.sa_len)) {
				NECPLOG0(LOG_INFO, "necp_client_add, parent/child remote address mismatch");
				break;
			}
			if (parsed_parameters.local_addr.sin.sin_port != parent_parameters.local_addr.sin.sin_port) {
				NECPLOG0(LOG_INFO, "necp_client_add, parent/child local port mismatch");
				break;
			}
			if (parsed_parameters.remote_addr.sin.sin_port != parent_parameters.remote_addr.sin.sin_port) {
				NECPLOG0(LOG_INFO, "necp_client_add, parent/child remote port mismatch");
				break;
			}
			client->validated_parent = 1;
			uuid_copy(client->parent_client_id, parent_client_id);
		} while (false);
	}

#endif /* SKYWALK */

	necp_client_update_observer_add(client);

	// Publish the client: insert into this fd's tree and the global tree.
	NECP_FD_LOCK(fd_data);
	RB_INSERT(_necp_client_tree, &fd_data->clients, client);
	OSIncrementAtomic(&necp_client_count);
	NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
	RB_INSERT(_necp_client_global_tree, &necp_client_global_tree, client);
	NECP_CLIENT_TREE_UNLOCK();

	// Prime the client result
	NECP_CLIENT_LOCK(client);
	(void)necp_update_client_result(current_proc(), fd_data, client, NULL);
	// Take a temporary reference so the client survives until the nstat
	// plumbing below completes, even if the fd is torn down concurrently.
	necp_client_retain_locked(client);
	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);
	// Now everything is set, it's safe to plumb this in to NetworkStatistics
	uint32_t ntstat_properties = 0;
	necp_find_conn_netstat_data(client, &ntstat_properties, NULL, NULL, NULL, NULL, NULL);

	client->nstat_context = nstat_provider_stats_open((nstat_provider_context)client,
	    NSTAT_PROVIDER_CONN_USERLAND, (u_int64_t)ntstat_properties, necp_request_conn_netstats, necp_find_conn_extension_info);
	necp_client_release(client);
done:
	// All error paths jump here before the client is inserted into any
	// tree, so freeing it directly is safe.
	if (error != 0 && client != NULL) {
		necp_client_free(client);
		client = NULL;
	}
	*retval = error;

	return error;
}
8137
// Handle NECP_CLIENT_ACTION_CLAIM: transfer an existing client, previously
// delegated to this process by unique PID, from whichever fd currently owns
// it onto fd_data. Only clients whose delegated_upid matches the caller and
// that have no flow registrations yet can be claimed.
//
// Returns 0 on success, EINVAL for bad arguments or a flow UUID, ENOENT when
// no matching client is found; the errno is mirrored into *retval.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_claim(struct proc *p, struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	uuid_t client_id = {};
	struct necp_client *client = NULL;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_claim copyin client_id error (%d)", error);
		goto done;
	}

	// Only whole-client UUIDs may be claimed, never per-flow IDs.
	if (necp_client_id_is_flow(client_id)) {
		NECPLOG0(LOG_ERR, "necp_client_claim cannot claim from flow UUID");
		error = EINVAL;
		goto done;
	}

	u_int64_t upid = proc_uniqueid(p);

	// Search every NECP fd for the client; lock order is fd list (shared),
	// then each fd, then the client found on it.
	NECP_FD_LIST_LOCK_SHARED();

	struct necp_fd_data *find_fd = NULL;
	LIST_FOREACH(find_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(find_fd);
		struct necp_client *find_client = necp_client_fd_find_client_and_lock(find_fd, client_id);
		if (find_client != NULL) {
			if (find_client->delegated_upid == upid &&
			    RB_EMPTY(&find_client->flow_registrations)) {
				// Matched the client to claim; remove from the old fd
				client = find_client;
				RB_REMOVE(_necp_client_tree, &find_fd->clients, client);
				// Hold a reference across the re-insertion below.
				necp_client_retain_locked(client);
			}
			NECP_CLIENT_UNLOCK(find_client);
		}
		NECP_FD_UNLOCK(find_fd);

		if (client != NULL) {
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();

	if (client == NULL) {
		error = ENOENT;
		goto done;
	}

	client->proc_pid = fd_data->proc_pid; // Transfer client to claiming pid
	client->agent_handle = (void *)fd_data;
	client->platform_binary = ((csproc_get_platform_binary(p) == 0) ? 0 : 1);

	NECP_CLIENT_LOG(client, "Claiming client");

	// Add matched client to our fd and re-run result
	NECP_FD_LOCK(fd_data);
	RB_INSERT(_necp_client_tree, &fd_data->clients, client);
	NECP_CLIENT_LOCK(client);
	(void)necp_update_client_result(current_proc(), fd_data, client, NULL);
	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);

	// Drop the reference taken during the search.
	necp_client_release(client);

done:
	*retval = error;

	return error;
}
8215
// Handle NECP_CLIENT_ACTION_REMOVE: tear down the client identified by
// uap->client_id on fd_data, detaching all of its flow registrations, closing
// its NetworkStatistics context (Skywalk), and removing it from the per-fd
// and global trees before destroying it.
//
// uap->buffer may optionally carry a struct ifnet_stats_per_flow; a copyin
// failure there is deliberately non-fatal (stats are zeroed instead).
// Returns 0 on success or ENOENT when the client is not found; mirrored into
// *retval.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_remove(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	uuid_t client_id = {};
	struct ifnet_stats_per_flow flow_ifnet_stats = {};
	const size_t buffer_size = uap->buffer_size;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_remove copyin client_id error (%d)", error);
		goto done;
	}

	// Optional final interface stats; only accepted at the exact size.
	if (uap->buffer != 0 && buffer_size == sizeof(flow_ifnet_stats)) {
		error = copyin(uap->buffer, &flow_ifnet_stats, buffer_size);
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_remove flow_ifnet_stats copyin error (%d)", error);
			// Not fatal; make sure to zero-out stats in case of partial copy
			memset(&flow_ifnet_stats, 0, sizeof(flow_ifnet_stats));
			error = 0;
		}
	} else if (uap->buffer != 0) {
		NECPLOG(LOG_ERR, "necp_client_remove unexpected parameters length (%zu)", buffer_size);
	}

	NECP_FD_LOCK(fd_data);

	pid_t pid = fd_data->proc_pid;
	struct necp_client *client = necp_client_fd_find_client_unlocked(fd_data, client_id);

	NECP_CLIENT_LOG(client, "Removing client");

	if (client != NULL) {
		// Remove any flow registrations that match
		struct necp_client_flow_registration *flow_registration = NULL;
		struct necp_client_flow_registration *temp_flow_registration = NULL;
		RB_FOREACH_SAFE(flow_registration, _necp_fd_flow_tree, &fd_data->flows, temp_flow_registration) {
			if (flow_registration->client == client) {
#if SKYWALK
				necp_destroy_flow_stats(fd_data, flow_registration, NULL, TRUE);
#endif /* SKYWALK */
				NECP_FLOW_TREE_LOCK_EXCLUSIVE();
				RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
				NECP_FLOW_TREE_UNLOCK();
				RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
			}
		}
#if SKYWALK
		if (client->nstat_context != NULL) {
			// Main path, we expect stats to be in existence at this point
			nstat_provider_stats_close(client->nstat_context);
			client->nstat_context = NULL;
		} else {
			NECPLOG0(LOG_ERR, "necp_client_remove ntstat shutdown finds nstat_context NULL");
		}
#endif /* SKYWALK */
		// Remove client from lists
		NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
		RB_REMOVE(_necp_client_global_tree, &necp_client_global_tree, client);
		NECP_CLIENT_TREE_UNLOCK();
		RB_REMOVE(_necp_client_tree, &fd_data->clients, client);
	}

#if SKYWALK
	// If the currently-active arena is idle (has no more flows referring to it), or if there are defunct
	// arenas lingering in the list, schedule a threadcall to do the clean up. The idle check is done
	// by checking if the reference count is 3: one held by this client (will be released below when we
	// destroy it) when it's non-NULL; the rest held by stats_arena_{active,list}.
	if ((fd_data->stats_arena_active != NULL && fd_data->stats_arena_active->nai_use_count == 3) ||
	    (fd_data->stats_arena_active == NULL && !LIST_EMPTY(&fd_data->stats_arena_list))) {
		uint64_t deadline = 0;
		uint64_t leeway = 0;
		clock_interval_to_deadline(necp_close_arenas_timeout_microseconds, NSEC_PER_USEC, &deadline);
		clock_interval_to_absolutetime_interval(necp_close_arenas_timeout_leeway_microseconds, NSEC_PER_USEC, &leeway);

		thread_call_enter_delayed_with_leeway(necp_close_empty_arenas_tcall, NULL,
		    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
	}
#endif /* SKYWALK */

	NECP_FD_UNLOCK(fd_data);

	// Destroy outside the fd lock; the client has already been unlinked.
	if (client != NULL) {
		ASSERT(error == 0);
		necp_destroy_client(client, pid, true);
	} else {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_remove invalid client_id (%d)", error);
	}
done:
	*retval = error;

	return error;
}
8316
8317 static struct necp_client_flow_registration *
necp_client_fd_find_flow(struct necp_fd_data * client_fd,uuid_t flow_id)8318 necp_client_fd_find_flow(struct necp_fd_data *client_fd, uuid_t flow_id)
8319 {
8320 NECP_FD_ASSERT_LOCKED(client_fd);
8321 struct necp_client_flow_registration *flow = NULL;
8322
8323 if (necp_client_id_is_flow(flow_id)) {
8324 struct necp_client_flow_registration find;
8325 uuid_copy(find.registration_id, flow_id);
8326 flow = RB_FIND(_necp_fd_flow_tree, &client_fd->flows, &find);
8327 }
8328
8329 return flow;
8330 }
8331
8332 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_remove_flow(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)8333 necp_client_remove_flow(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
8334 {
8335 int error = 0;
8336 uuid_t flow_id = {};
8337 struct ifnet_stats_per_flow flow_ifnet_stats = {};
8338 const size_t buffer_size = uap->buffer_size;
8339
8340 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
8341 error = EINVAL;
8342 NECPLOG(LOG_ERR, "necp_client_remove_flow invalid client_id (length %zu)", (size_t)uap->client_id_len);
8343 goto done;
8344 }
8345
8346 error = copyin(uap->client_id, flow_id, sizeof(uuid_t));
8347 if (error) {
8348 NECPLOG(LOG_ERR, "necp_client_remove_flow copyin client_id error (%d)", error);
8349 goto done;
8350 }
8351
8352 if (uap->buffer != 0 && buffer_size != 0) {
8353 error = copyin(uap->buffer, &flow_ifnet_stats, MIN(buffer_size, sizeof(flow_ifnet_stats)));
8354 if (error) {
8355 NECPLOG(LOG_ERR, "necp_client_remove flow_ifnet_stats copyin error (%d)", error);
8356 // Not fatal
8357 }
8358 } else if (uap->buffer != 0) {
8359 NECPLOG(LOG_ERR, "necp_client_remove unexpected parameters length (%zu)", buffer_size);
8360 }
8361
8362 NECP_FD_LOCK(fd_data);
8363 struct necp_client *client = NULL;
8364 struct necp_client_flow_registration *flow_registration = necp_client_fd_find_flow(fd_data, flow_id);
8365 if (flow_registration != NULL) {
8366 #if SKYWALK
8367 // Cleanup stats per flow
8368 necp_destroy_flow_stats(fd_data, flow_registration, &flow_ifnet_stats, TRUE);
8369 #endif /* SKYWALK */
8370 NECP_FLOW_TREE_LOCK_EXCLUSIVE();
8371 RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
8372 NECP_FLOW_TREE_UNLOCK();
8373 RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
8374
8375 client = flow_registration->client;
8376 if (client != NULL) {
8377 necp_client_retain(client);
8378 }
8379 }
8380 NECP_FD_UNLOCK(fd_data);
8381
8382 NECP_CLIENT_FLOW_LOG(client, flow_registration, "removing flow");
8383
8384 if (flow_registration != NULL && client != NULL) {
8385 NECP_CLIENT_LOCK(client);
8386 if (flow_registration->client == client) {
8387 bool abort = (flow_registration->aop_offload) ? true : false;
8388 necp_destroy_client_flow_registration(client, flow_registration, fd_data->proc_pid, abort);
8389 }
8390 necp_client_release_locked(client);
8391 NECP_CLIENT_UNLOCK(client);
8392 }
8393
8394 done:
8395 *retval = error;
8396 if (error != 0) {
8397 NECPLOG(LOG_ERR, "Remove flow error (%d)", error);
8398 }
8399
8400 return error;
8401 }
8402
// Don't inline the function since it includes necp_client_parsed_parameters on the stack
//
// Consult the TCP heuristics caches for this flow and fold the results into
// *flags: sets NECP_CLIENT_RESULT_FLAG_ECN_ENABLED when ECN should be tried,
// and NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED (with *tfo_cookie_len = 0)
// when TFO must not be attempted. When TFO is allowed, up to
// tfo_cookie_maxlen cookie bytes are written to tfo_cookie and the actual
// length to *tfo_cookie_len.
//
// Returns 0 on success, a parse errno, EINVAL for non-IP address families,
// or ENOENT when the client has no current route.
static __attribute__((noinline)) int
necp_client_check_tcp_heuristics(struct necp_client *client, struct necp_client_flow *flow,
    u_int32_t *flags, u_int8_t *__counted_by(tfo_cookie_maxlen) tfo_cookie, u_int8_t tfo_cookie_maxlen,
    u_int8_t *tfo_cookie_len)
{
	struct necp_client_parsed_parameters parsed_parameters;
	int error = 0;

	// Re-parse the client's TLVs to pick up the ECN/TFO parameter flags.
	error = necp_client_parse_parameters(client, client->parameters,
	    (u_int32_t)client->parameters_length,
	    &parsed_parameters);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_parse_parameters error (%d)", error);
		return error;
	}

	// Heuristics are keyed by IPv4/IPv6 addresses only.
	if ((flow->remote_addr.sa.sa_family != AF_INET &&
	    flow->remote_addr.sa.sa_family != AF_INET6) ||
	    (flow->local_addr.sa.sa_family != AF_INET &&
	    flow->local_addr.sa.sa_family != AF_INET6)) {
		return EINVAL;
	}

	// The route lock keeps current_route stable while we query per-ifp
	// heuristics below.
	NECP_CLIENT_ROUTE_LOCK(client);

	if (client->current_route == NULL) {
		error = ENOENT;
		goto do_unlock;
	}

	// Decide whether to consult the ECN heuristic: explicit per-client
	// enable/disable flags win; otherwise fall back to the global tcp_ecn
	// sysctl-style setting.
	bool check_ecn = false;
	do {
		if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE) ==
		    NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE) {
			check_ecn = true;
			break;
		}

		if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE) ==
		    NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE) {
			break;
		}

		if (tcp_ecn == 1) {
			check_ecn = true;
		}
	} while (false);

	if (check_ecn) {
		if (tcp_heuristic_do_ecn_with_address(client->current_route->rt_ifp,
		    (union sockaddr_in_4_6 *)&flow->local_addr)) {
			*flags |= NECP_CLIENT_RESULT_FLAG_ECN_ENABLED;
		}
	}

	// TFO is only attempted when explicitly requested AND the heuristics
	// for this address pair allow it; in all other cases it is blocked.
	if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE) ==
	    NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE) {
		if (!tcp_heuristic_do_tfo_with_address(client->current_route->rt_ifp,
		    (union sockaddr_in_4_6 *)&flow->local_addr,
		    (union sockaddr_in_4_6 *)&flow->remote_addr,
		    tfo_cookie, tfo_cookie_maxlen, tfo_cookie_len)) {
			*flags |= NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED;
			*tfo_cookie_len = 0;
		}
	} else {
		*flags |= NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED;
		*tfo_cookie_len = 0;
	}
do_unlock:
	NECP_CLIENT_ROUTE_UNLOCK(client);

	return error;
}
8477
8478 static size_t
necp_client_calculate_flow_tlv_size(struct necp_client_flow_registration * flow_registration)8479 necp_client_calculate_flow_tlv_size(struct necp_client_flow_registration *flow_registration)
8480 {
8481 size_t assigned_results_size = 0;
8482 struct necp_client_flow *flow = NULL;
8483 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
8484 if (flow->assigned || flow_registration->defunct || !necp_client_endpoint_is_unspecified((struct necp_client_endpoint *)&flow->remote_addr)) {
8485 size_t header_length = 0;
8486 if (flow->nexus) {
8487 header_length = sizeof(struct necp_client_nexus_flow_header);
8488 } else {
8489 header_length = sizeof(struct necp_client_flow_header);
8490 }
8491 assigned_results_size += (header_length + flow->assigned_results_length);
8492
8493 if (flow->has_protoctl_event) {
8494 assigned_results_size += sizeof(struct necp_client_flow_protoctl_event_header);
8495 }
8496 }
8497 }
8498 return assigned_results_size;
8499 }
8500
// Resolve the link-layer (Ethernet) address for reaching `remote` on the
// interface scoped by `index`. For a direct host route the route itself is
// used; otherwise the route is chased to its gateway route first. The MAC is
// only extracted when the target route has resolved link-layer info
// (RTF_HOST | RTF_LLINFO with an AF_LINK gateway of Ethernet length).
//
// Returns 0 and fills *remote_mac on success; ENOENT when no route or no
// link-layer address is available, or the route_to_gwroute() errno.
static errno_t
necp_client_destination_mac_address(struct sockaddr *remote, uint32_t index,
    struct ether_addr *remote_mac)
{
	struct rtentry *rt = NULL;
	struct rtentry *tgt_rt = NULL;
	struct rtentry *__single gwrt = NULL;
	errno_t err = 0;

	ASSERT(remote_mac != NULL);
	ASSERT(remote != NULL);

	rt = rtalloc1_scoped(remote, 0, 0, index);
	if (rt == NULL) {
		return ENOENT;
	}

	if (IS_DIRECT_HOSTROUTE(rt)) {
		// Destination is on-link: the host route itself has the info.
		tgt_rt = rt;
	} else {
		// Otherwise resolve the gateway route; on success gwrt is
		// returned locked and must be unlocked/freed below.
		err = route_to_gwroute(remote, rt, &gwrt);
		if (err != 0) {
			goto done;
		}

		ASSERT(gwrt != NULL);
		RT_LOCK_ASSERT_HELD(gwrt);
		tgt_rt = gwrt;
	}

	// Copy the MAC only if the route carries a fully-resolved Ethernet
	// link-layer gateway address.
	if ((tgt_rt->rt_flags & RTF_HOST) &&
	    (tgt_rt->rt_flags & RTF_LLINFO) &&
	    (tgt_rt->rt_gateway->sa_family == AF_LINK) &&
	    (SDL(tgt_rt->rt_gateway)->sdl_alen == ETHER_ADDR_LEN)) {
		struct sockaddr_dl *__bidi_indexable sdl =
		    (struct sockaddr_dl *__bidi_indexable)SDL(tgt_rt->rt_gateway);
		bcopy(LLADDR(sdl), remote_mac->octet, ETHER_ADDR_LEN);
	} else {
		err = ENOENT;
	}
done:
	if (gwrt != NULL) {
		RT_UNLOCK(gwrt);
		rtfree(gwrt);
		gwrt = NULL;
	}

	if (rt != NULL) {
		rtfree(rt);
		rt = NULL;
	}

	return err;
}
8555
// Build a TLV buffer containing the local and/or remote Ethernet MAC
// addresses for `flow`, for inclusion in the flow's result TLVs. Only applies
// to flows on Ethernet interfaces; each address that can be determined is
// written as one NECP_CLIENT_RESULT_{LOCAL,REMOTE}_ETHER_ADDR TLV.
//
// Returns a kalloc_data buffer of *buflen bytes that the caller owns and must
// free, or NULL (with *buflen = 0) when the interface is missing or
// non-Ethernet, neither address is available, or allocation fails.
static uint8_t *
__sized_by(*buflen)
necp_client_flow_mac_and_gateway(struct necp_client_flow *flow, size_t *buflen)
{
	u_int8_t * __indexable buffer = NULL;
	u_int8_t * __indexable cursor = NULL;
	size_t valsize = 0;

	ASSERT(flow != NULL);
	ASSERT(buflen != NULL);

	// Ensure *buflen is 0 on every early-return path.
	*buflen = 0;

	// Translate the flow's interface index to an ifnet under the head lock.
	ifnet_t ifp = NULL;
	ifnet_head_lock_shared();
	if (flow->interface_index != IFSCOPE_NONE && flow->interface_index <= if_index) {
		ifp = ifindex2ifnet[flow->interface_index];
	}
	ifnet_head_done();

	if (ifp == NULL) {
		NECPLOG0(LOG_ERR, "necp_client_flow_mac_and_gateway: ifp is NULL");
		return NULL;
	}

	if (!IFNET_IS_ETHERNET(ifp)) {
		return NULL;
	}

	// First pass: determine which addresses are available and size the
	// buffer accordingly (one TLV header + ether_addr per address).
	/* local MAC */
	struct ether_addr local_ether = {};
	bool local_ether_set = false;
	if (ifnet_lladdr_copy_bytes(ifp, local_ether.octet, ETHER_ADDR_LEN) == 0) {
		local_ether_set = true;
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct ether_addr);
	}

	/* remote MAC */
	struct ether_addr remote_ether = {};
	bool remote_ether_set = false;
	if (necp_client_destination_mac_address(SA(&flow->remote_addr),
	    flow->interface_index, &remote_ether) == 0) {
		remote_ether_set = true;
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct ether_addr);
	}

	if (valsize == 0) {
		return NULL;
	}

	buffer = kalloc_data(valsize, Z_WAITOK | Z_ZERO);
	if (buffer == NULL) {
		return NULL;
	}

	// Second pass: serialize the TLVs into the exactly-sized buffer.
	cursor = buffer;
	if (local_ether_set) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_LOCAL_ETHER_ADDR,
		    sizeof(struct ether_addr), (uint8_t *)(struct ether_addr * __bidi_indexable)&local_ether,
		    buffer, valsize);
	}
	if (remote_ether_set) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_REMOTE_ETHER_ADDR,
		    sizeof(struct ether_addr), (uint8_t *)(struct ether_addr * __bidi_indexable)&remote_ether,
		    buffer, valsize);
	}
	*buflen = valsize;
	return buffer;
}
8625
8626 static int
necp_client_fillout_flow_tlvs(struct necp_client * client,bool client_is_observed,struct necp_client_flow_registration * flow_registration,struct necp_client_action_args * uap,size_t * assigned_results_cursor)8627 necp_client_fillout_flow_tlvs(struct necp_client *client,
8628 bool client_is_observed,
8629 struct necp_client_flow_registration *flow_registration,
8630 struct necp_client_action_args *uap,
8631 size_t *assigned_results_cursor)
8632 {
8633 int error = 0;
8634 struct necp_client_flow *flow = NULL;
8635 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
8636 if (flow->assigned || flow_registration->defunct || !necp_client_endpoint_is_unspecified((struct necp_client_endpoint *)&flow->remote_addr)) {
8637 // Write TLV headers
8638 struct necp_client_nexus_flow_header header = {};
8639 u_int32_t length = 0;
8640 u_int32_t flags = 0;
8641 u_int8_t tfo_cookie_len = 0;
8642 u_int8_t type = 0;
8643 size_t buflen = 0;
8644 uint8_t *buffer = NULL;
8645
8646 type = NECP_CLIENT_RESULT_FLOW_ID;
8647 length = sizeof(header.flow_header.flow_id);
8648 header.flow_header.flow_id_tlv_header.type = type;
8649 header.flow_header.flow_id_tlv_header.length = length;
8650 uuid_copy(header.flow_header.flow_id, flow_registration->registration_id);
8651
8652 if (flow->nexus) {
8653 if (flow->check_tcp_heuristics) {
8654 u_int8_t tfo_cookie[NECP_TFO_COOKIE_LEN_MAX];
8655 tfo_cookie_len = NECP_TFO_COOKIE_LEN_MAX;
8656
8657 if (necp_client_check_tcp_heuristics(client, flow, &flags,
8658 tfo_cookie, tfo_cookie_len, &tfo_cookie_len) != 0) {
8659 tfo_cookie_len = 0;
8660 } else {
8661 flow->check_tcp_heuristics = FALSE;
8662
8663 if (tfo_cookie_len != 0) {
8664 type = NECP_CLIENT_RESULT_TFO_COOKIE;
8665 length = tfo_cookie_len;
8666 header.tfo_cookie_tlv_header.type = type;
8667 header.tfo_cookie_tlv_header.length = length;
8668 memcpy(&header.tfo_cookie_value, tfo_cookie, tfo_cookie_len);
8669 }
8670 }
8671 }
8672 }
8673
8674 size_t header_length = 0;
8675 if (flow->nexus) {
8676 if (tfo_cookie_len != 0) {
8677 header_length = sizeof(struct necp_client_nexus_flow_header) - (NECP_TFO_COOKIE_LEN_MAX - tfo_cookie_len);
8678 } else {
8679 header_length = sizeof(struct necp_client_nexus_flow_header) - sizeof(struct necp_tlv_header) - NECP_TFO_COOKIE_LEN_MAX;
8680 }
8681 } else {
8682 header_length = sizeof(struct necp_client_flow_header);
8683 }
8684
8685 type = NECP_CLIENT_RESULT_FLAGS;
8686 length = sizeof(header.flow_header.flags_value);
8687 header.flow_header.flags_tlv_header.type = type;
8688 header.flow_header.flags_tlv_header.length = length;
8689 if (flow->assigned) {
8690 flags |= NECP_CLIENT_RESULT_FLAG_FLOW_ASSIGNED;
8691 }
8692 if (flow->viable) {
8693 flags |= NECP_CLIENT_RESULT_FLAG_FLOW_VIABLE;
8694 }
8695 if (flow_registration->defunct) {
8696 flags |= NECP_CLIENT_RESULT_FLAG_DEFUNCT;
8697 }
8698 flags |= flow->necp_flow_flags;
8699 header.flow_header.flags_value = flags;
8700
8701 type = NECP_CLIENT_RESULT_INTERFACE;
8702 length = sizeof(header.flow_header.interface_value);
8703 header.flow_header.interface_tlv_header.type = type;
8704 header.flow_header.interface_tlv_header.length = length;
8705
8706 struct necp_client_result_interface interface_struct;
8707 interface_struct.generation = 0;
8708 interface_struct.index = flow->interface_index;
8709
8710 header.flow_header.interface_value = interface_struct;
8711 if (flow->nexus) {
8712 type = NECP_CLIENT_RESULT_NETAGENT;
8713 length = sizeof(header.agent_value);
8714 header.agent_tlv_header.type = type;
8715 header.agent_tlv_header.length = length;
8716
8717 struct necp_client_result_netagent agent_struct;
8718 uuid_copy(agent_struct.netagent_uuid, flow->u.nexus_agent);
8719 agent_struct.generation = netagent_get_generation(agent_struct.netagent_uuid);
8720
8721 header.agent_value = agent_struct;
8722 }
8723
8724 // Don't include outer TLV header in length field
8725 type = NECP_CLIENT_RESULT_FLOW;
8726 length = (header_length - sizeof(struct necp_tlv_header) + flow->assigned_results_length);
8727 if (flow->has_protoctl_event) {
8728 length += sizeof(struct necp_client_flow_protoctl_event_header);
8729 }
8730 if (flow->nexus && flow->aop_offload) {
8731 buffer = necp_client_flow_mac_and_gateway(flow, &buflen);
8732 length += buflen;
8733
8734 if (flow->aop_stat_index_valid) {
8735 length += sizeof(struct necp_client_flow_stats_index_header);
8736 }
8737 }
8738 header.flow_header.outer_header.type = type;
8739 header.flow_header.outer_header.length = length;
8740
8741 error = copyout(&header, uap->buffer + client->result_length + *assigned_results_cursor, header_length);
8742 if (error) {
8743 NECPLOG(LOG_ERR, "necp_client_copy assigned results tlv_header copyout error (%d)", error);
8744 return error;
8745 }
8746 *assigned_results_cursor += header_length;
8747
8748 if (flow->assigned_results && flow->assigned_results_length) {
8749 // Write inner TLVs
8750 error = copyout(flow->assigned_results, uap->buffer + client->result_length + *assigned_results_cursor,
8751 flow->assigned_results_length);
8752 if (error) {
8753 NECPLOG(LOG_ERR, "necp_client_copy assigned results copyout error (%d)", error);
8754 return error;
8755 }
8756 }
8757 *assigned_results_cursor += flow->assigned_results_length;
8758
8759 /* Read the protocol event and reset it */
8760 if (flow->has_protoctl_event) {
8761 struct necp_client_flow_protoctl_event_header protoctl_event_header = {};
8762
8763 type = NECP_CLIENT_RESULT_PROTO_CTL_EVENT;
8764 length = sizeof(protoctl_event_header.protoctl_event);
8765
8766 protoctl_event_header.protoctl_tlv_header.type = type;
8767 protoctl_event_header.protoctl_tlv_header.length = length;
8768 protoctl_event_header.protoctl_event = flow->protoctl_event;
8769
8770 error = copyout(&protoctl_event_header, uap->buffer + client->result_length + *assigned_results_cursor,
8771 sizeof(protoctl_event_header));
8772
8773 if (error) {
8774 NECPLOG(LOG_ERR, "necp_client_copy protocol control event results"
8775 " tlv_header copyout error (%d)", error);
8776 return error;
8777 }
8778 *assigned_results_cursor += sizeof(protoctl_event_header);
8779 flow->has_protoctl_event = FALSE;
8780 flow->protoctl_event.protoctl_event_code = 0;
8781 flow->protoctl_event.protoctl_event_val = 0;
8782 flow->protoctl_event.protoctl_event_tcp_seq_num = 0;
8783 }
8784
8785 if (flow->nexus && flow->aop_offload) {
8786 if (buffer != NULL) {
8787 ASSERT(buflen > 0);
8788 error = copyout(buffer, uap->buffer + client->result_length + *assigned_results_cursor,
8789 buflen);
8790 *assigned_results_cursor += buflen;
8791 kfree_data_counted_by(buffer, buflen);
8792 if (error) {
8793 NECPLOG(LOG_ERR, "necp_client_copy mac address results"
8794 " tlv_header copyout error (%d)", error);
8795 return error;
8796 }
8797 }
8798
8799 if (flow->aop_stat_index_valid) {
8800 struct necp_client_flow_stats_index_header flow_stats_header = {};
8801
8802 type = NECP_CLIENT_RESULT_FLOW_STATS_INDEX;
8803 length = sizeof(flow_stats_header.stats_index);
8804
8805 flow_stats_header.stats_index_tlv_header.type = type;
8806 flow_stats_header.stats_index_tlv_header.length = length;
8807 flow_stats_header.stats_index = flow->stats_index;
8808
8809 error = copyout(&flow_stats_header, uap->buffer +
8810 client->result_length + *assigned_results_cursor, sizeof(flow_stats_header));
8811 if (error) {
8812 NECPLOG(LOG_ERR, "necp_client_copy flow stats index "
8813 "tlv header copyout error (%d)", error);
8814 return error;
8815 }
8816 *assigned_results_cursor += sizeof(flow_stats_header);
8817 }
8818 }
8819 }
8820 }
8821 if (!client_is_observed) {
8822 flow_registration->flow_result_read = TRUE;
8823 }
8824 return 0;
8825 }
8826
/*
 * Copy a client's stored parameters or results out to the userspace buffer
 * described by uap.
 *
 * Must be called with the client lock held (NECP_CLIENT_ASSERT_LOCKED).
 *
 * Actions handled:
 *   NECP_CLIENT_ACTION_COPY_PARAMETERS             - copy the raw parameters blob.
 *   NECP_CLIENT_ACTION_COPY_RESULT                 - copy result + group members +
 *                                                    per-flow TLVs unconditionally.
 *   NECP_CLIENT_ACTION_COPY_UPDATED_RESULT(_FINAL) - same, but returns 0 bytes when
 *                                                    nothing is unread.
 *
 * client_id selects either the whole client or, when necp_client_id_is_flow()
 * is true, a single flow registration. client_is_observed suppresses marking
 * results as read so an observer does not consume another process's updates.
 *
 * On success *retval is the number of bytes written to uap->buffer.
 * Returns 0 or an errno (EINVAL when the buffer is too small).
 */
static int
necp_client_copy_internal(struct necp_client *client, uuid_t client_id, bool client_is_observed, struct necp_client_action_args *uap, int *retval)
{
	NECP_CLIENT_ASSERT_LOCKED(client);
	int error = 0;
	// Copy results out
	if (uap->action == NECP_CLIENT_ACTION_COPY_PARAMETERS) {
		if (uap->buffer_size < client->parameters_length) {
			return EINVAL;
		}
		error = copyout(client->parameters, uap->buffer, client->parameters_length);
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_copy parameters copyout error (%d)", error);
			return error;
		}
		*retval = client->parameters_length;
	} else if ((uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT || uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) &&
	    client->result_read && client->group_members_read && !necp_client_has_unread_flows(client)) {
		// Copy updates only, but nothing to read
		// Just return 0 for bytes read
		*retval = 0;
	} else if (uap->action == NECP_CLIENT_ACTION_COPY_RESULT ||
	    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT ||
	    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) {
		// Total size of everything that follows the stored result:
		// group members first, then one TLV group per flow registration.
		size_t assigned_results_size = client->assigned_group_members_length;

		bool some_flow_is_defunct = false;
		struct necp_client_flow_registration *single_flow_registration = NULL;
		if (necp_client_id_is_flow(client_id)) {
			// Caller asked for one specific flow registration only
			single_flow_registration = necp_client_find_flow(client, client_id);
			if (single_flow_registration != NULL) {
				assigned_results_size += necp_client_calculate_flow_tlv_size(single_flow_registration);
			}
		} else {
			// This request is for the client, so copy everything
			struct necp_client_flow_registration *flow_registration = NULL;
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
				if (flow_registration->defunct) {
					some_flow_is_defunct = true;
				}
				assigned_results_size += necp_client_calculate_flow_tlv_size(flow_registration);
			}
		}
		if (uap->buffer_size < (client->result_length + assigned_results_size)) {
			if (uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) {
				// Mark the client and all flows as read to prevent looping
				client->result_read = true;
				struct necp_client_flow_registration *flow_registration = NULL;
				RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
					flow_registration->flow_result_read = true;
				}
			}
			return EINVAL;
		}

		u_int32_t original_flags = 0;
		bool flags_updated = false;
		if (some_flow_is_defunct && client->legacy_client_is_flow) {
			// If our client expects the defunct flag in the client, add it now.
			// The stored result is patched in place, copied out below, and
			// then reverted so the stored state is left unchanged.
			u_int32_t client_flags = 0;
			u_int32_t value_size = 0;
			u_int8_t *flags_pointer = necp_buffer_get_tlv_value(client->result, client->result_length, 0, &value_size);
			if (flags_pointer != NULL && value_size == sizeof(client_flags)) {
				memcpy(&client_flags, flags_pointer, value_size);
				original_flags = client_flags;
				client_flags |= NECP_CLIENT_RESULT_FLAG_DEFUNCT;
				(void)necp_buffer_write_tlv_if_different(client->result, NECP_CLIENT_RESULT_FLAGS,
				    sizeof(client_flags), &client_flags, &flags_updated,
				    client->result, sizeof(client->result));
			}
		}

		error = copyout(client->result, uap->buffer, client->result_length);

		if (flags_updated) {
			// Revert stored flags
			(void)necp_buffer_write_tlv_if_different(client->result, NECP_CLIENT_RESULT_FLAGS,
			    sizeof(original_flags), &original_flags, &flags_updated,
			    client->result, sizeof(client->result));
		}

		if (error != 0) {
			NECPLOG(LOG_ERR, "necp_client_copy result copyout error (%d)", error);
			return error;
		}

		// Group members are written immediately after the result blob
		if (client->assigned_group_members != NULL && client->assigned_group_members_length > 0) {
			error = copyout(client->assigned_group_members, uap->buffer + client->result_length, client->assigned_group_members_length);
			if (error != 0) {
				NECPLOG(LOG_ERR, "necp_client_copy group members copyout error (%d)", error);
				return error;
			}
		}

		size_t assigned_results_cursor = client->assigned_group_members_length; // Start with an offset based on the group members
		if (necp_client_id_is_flow(client_id)) {
			if (single_flow_registration != NULL) {
				error = necp_client_fillout_flow_tlvs(client, client_is_observed, single_flow_registration, uap, &assigned_results_cursor);
				if (error != 0) {
					return error;
				}
			}
		} else {
			// This request is for the client, so copy everything
			struct necp_client_flow_registration *flow_registration = NULL;
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
				error = necp_client_fillout_flow_tlvs(client, client_is_observed, flow_registration, uap, &assigned_results_cursor);
				if (error != 0) {
					return error;
				}
			}
		}

		*retval = client->result_length + assigned_results_cursor;

		// Observers must not consume the "unread" state of the real owner
		if (!client_is_observed) {
			client->result_read = TRUE;
			client->group_members_read = TRUE;
		}
	}

	return 0;
}
8950
/*
 * Handle the NECP_CLIENT_ACTION_COPY_* client actions for an fd.
 *
 * Validates the action and buffer, then locates the target client:
 *  - A null (wildcard) client_id with a COPY_RESULT/COPY_UPDATED_RESULT*
 *    action selects the first client on this fd with unread state.  If no
 *    such client exists and an in-process flow divert was requested, a
 *    single request TLV is copied out instead.
 *  - A non-null client_id looks up that client on this fd; if not found
 *    and the fd is an observer (NECP_OPEN_FLAG_OBSERVER), the global
 *    client tree is searched as a fallback.
 *
 * The actual data copy is done by necp_client_copy_internal() with the
 * client lock held.  *retval receives the number of bytes written.
 */
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	uuid_clear(client_id);

	*retval = 0;

	if (uap->buffer_size == 0 || uap->buffer == 0) {
		return EINVAL;
	}

	if (uap->action != NECP_CLIENT_ACTION_COPY_PARAMETERS &&
	    uap->action != NECP_CLIENT_ACTION_COPY_RESULT &&
	    uap->action != NECP_CLIENT_ACTION_COPY_UPDATED_RESULT &&
	    uap->action != NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) {
		return EINVAL;
	}

	// client_id is optional; absent means wildcard (cleared above)
	if (uap->client_id) {
		if (uap->client_id_len != sizeof(uuid_t)) {
			NECPLOG(LOG_ERR, "Incorrect length (got %zu, expected %zu)", (size_t)uap->client_id_len, sizeof(uuid_t));
			return ERANGE;
		}

		error = copyin(uap->client_id, client_id, sizeof(uuid_t));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_copy client_id copyin error (%d)", error);
			return error;
		}
	}

	const bool is_wildcard = (bool)uuid_is_null(client_id);

	NECP_FD_LOCK(fd_data);

	bool send_in_process_flow_divert_message = false;
	if (is_wildcard) {
		if (uap->action == NECP_CLIENT_ACTION_COPY_RESULT ||
		    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT ||
		    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL) {
			// Wildcard: pick the first client on this fd with unread state
			struct necp_client *find_client = NULL;
			RB_FOREACH(find_client, _necp_client_tree, &fd_data->clients) {
				NECP_CLIENT_LOCK(find_client);
				if (!find_client->result_read || !find_client->group_members_read || necp_client_has_unread_flows(find_client)) {
					client = find_client;
					// Leave the client locked, and break
					break;
				}
				NECP_CLIENT_UNLOCK(find_client);
			}

			if (client == NULL && fd_data->request_in_process_flow_divert) {
				// No client found that needs update. Check for an event requesting in-process flow divert.
				send_in_process_flow_divert_message = true;
			}
		}
	} else {
		client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	}

	if (client != NULL) {
		if (!send_in_process_flow_divert_message) {
			// If client is set, it is locked
			error = necp_client_copy_internal(client, client_id, FALSE, uap, retval);
		}
		NECP_CLIENT_UNLOCK(client);
	}

	if (send_in_process_flow_divert_message) {
		// One-shot: clear the request before emitting the TLV
		fd_data->request_in_process_flow_divert = false;

		struct necp_tlv_header request_tlv = {
			.type = NECP_CLIENT_RESULT_REQUEST_IN_PROCESS_FLOW_DIVERT,
			.length = 0,
		};
		if (uap->buffer_size < sizeof(request_tlv)) {
			error = EINVAL;
		} else {
			error = copyout(&request_tlv, uap->buffer, sizeof(request_tlv));
			if (error) {
				NECPLOG(LOG_ERR, "necp_client_copy request flow divert TLV copyout error (%d)", error);
			} else {
				*retval = sizeof(request_tlv);
			}
		}
	}

	// Unlock our own fd before moving on or returning
	NECP_FD_UNLOCK(fd_data);

	if (client == NULL && !send_in_process_flow_divert_message) {
		if (fd_data->flags & NECP_OPEN_FLAG_OBSERVER) {
			// Observers are allowed to lookup clients on other fds

			// Lock tree
			NECP_CLIENT_TREE_LOCK_SHARED();

			bool found_client = FALSE;

			client = necp_find_client_and_lock(client_id);
			if (client != NULL) {
				// Matched, copy out data
				found_client = TRUE;
				// client_is_observed == TRUE: do not consume unread state
				error = necp_client_copy_internal(client, client_id, TRUE, uap, retval);
				NECP_CLIENT_UNLOCK(client);
			}

			// Unlock tree
			NECP_CLIENT_TREE_UNLOCK();

			// No client found, fail
			if (!found_client) {
				return ENOENT;
			}
		} else {
			// No client found, and not allowed to search other fds, fail
			return ENOENT;
		}
	}

	return error;
}
9076
9077 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_client_update(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)9078 necp_client_copy_client_update(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
9079 {
9080 int error = 0;
9081
9082 *retval = 0;
9083
9084 if (!(fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER)) {
9085 NECPLOG0(LOG_ERR, "NECP fd is not observer, cannot copy client update");
9086 return EINVAL;
9087 }
9088
9089 if (uap->client_id_len != sizeof(uuid_t) || uap->client_id == 0) {
9090 NECPLOG0(LOG_ERR, "Client id invalid, cannot copy client update");
9091 return EINVAL;
9092 }
9093
9094 if (uap->buffer_size == 0 || uap->buffer == 0) {
9095 NECPLOG0(LOG_ERR, "Buffer invalid, cannot copy client update");
9096 return EINVAL;
9097 }
9098
9099 NECP_FD_LOCK(fd_data);
9100 struct necp_client_update *client_update = TAILQ_FIRST(&fd_data->update_list);
9101 if (client_update != NULL) {
9102 TAILQ_REMOVE(&fd_data->update_list, client_update, chain);
9103 VERIFY(fd_data->update_count > 0);
9104 fd_data->update_count--;
9105 }
9106 NECP_FD_UNLOCK(fd_data);
9107
9108 if (client_update != NULL) {
9109 error = copyout(client_update->client_id, uap->client_id, sizeof(uuid_t));
9110 if (error) {
9111 NECPLOG(LOG_ERR, "Copy client update copyout client id error (%d)", error);
9112 } else {
9113 if (uap->buffer_size < client_update->update_length) {
9114 NECPLOG(LOG_ERR, "Buffer size cannot hold update (%zu < %zu)", (size_t)uap->buffer_size, client_update->update_length);
9115 error = EINVAL;
9116 } else {
9117 error = copyout(client_update->update, uap->buffer, client_update->update_length);
9118 if (error) {
9119 NECPLOG(LOG_ERR, "Copy client update copyout error (%d)", error);
9120 } else {
9121 *retval = client_update->update_length;
9122 }
9123 }
9124 }
9125
9126 necp_client_update_free(client_update);
9127 client_update = NULL;
9128 } else {
9129 error = ENOENT;
9130 }
9131
9132 return error;
9133 }
9134
9135 static int
necp_client_copy_parameters_locked(struct necp_client * client,struct necp_client_nexus_parameters * parameters)9136 necp_client_copy_parameters_locked(struct necp_client *client,
9137 struct necp_client_nexus_parameters *parameters)
9138 {
9139 VERIFY(parameters != NULL);
9140
9141 struct necp_client_parsed_parameters parsed_parameters = {};
9142 int error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, &parsed_parameters);
9143
9144 parameters->pid = client->proc_pid;
9145 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID) {
9146 parameters->epid = parsed_parameters.effective_pid;
9147 } else {
9148 parameters->epid = parameters->pid;
9149 }
9150 #if SKYWALK
9151 parameters->port_reservation = client->port_reservation;
9152 #endif /* !SKYWALK */
9153 memcpy(¶meters->local_addr, &parsed_parameters.local_addr, sizeof(parameters->local_addr));
9154 memcpy(¶meters->remote_addr, &parsed_parameters.remote_addr, sizeof(parameters->remote_addr));
9155 parameters->ip_protocol = parsed_parameters.ip_protocol;
9156 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL) {
9157 parameters->transport_protocol = parsed_parameters.transport_protocol;
9158 } else {
9159 parameters->transport_protocol = parsed_parameters.ip_protocol;
9160 }
9161 parameters->ethertype = parsed_parameters.ethertype;
9162 parameters->traffic_class = parsed_parameters.traffic_class;
9163 if (uuid_is_null(client->override_euuid)) {
9164 uuid_copy(parameters->euuid, parsed_parameters.effective_uuid);
9165 } else {
9166 uuid_copy(parameters->euuid, client->override_euuid);
9167 }
9168 parameters->is_listener = (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) ? 1 : 0;
9169 parameters->is_interpose = (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) ? 1 : 0;
9170 parameters->is_custom_ether = (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) ? 1 : 0;
9171 parameters->policy_id = client->policy_id;
9172 parameters->skip_policy_id = client->skip_policy_id;
9173
9174 // parse client result flag
9175 u_int32_t client_result_flags = 0;
9176 u_int32_t value_size = 0;
9177 u_int8_t *flags_pointer = NULL;
9178 flags_pointer = necp_buffer_get_tlv_value(client->result, client->result_length, 0, &value_size);
9179 if (flags_pointer && value_size == sizeof(client_result_flags)) {
9180 memcpy(&client_result_flags, flags_pointer, value_size);
9181 }
9182 parameters->allow_qos_marking = (client_result_flags & NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING) ? 1 : 0;
9183
9184 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE) {
9185 if (parsed_parameters.local_address_preference == NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE_DEFAULT) {
9186 parameters->override_address_selection = false;
9187 } else if (parsed_parameters.local_address_preference == NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE_TEMPORARY) {
9188 parameters->override_address_selection = true;
9189 parameters->use_stable_address = false;
9190 } else if (parsed_parameters.local_address_preference == NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE_STABLE) {
9191 parameters->override_address_selection = true;
9192 parameters->use_stable_address = true;
9193 }
9194 } else {
9195 parameters->override_address_selection = false;
9196 }
9197
9198 if ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
9199 (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_NO_WAKE_FROM_SLEEP)) {
9200 parameters->no_wake_from_sleep = true;
9201 }
9202
9203 if ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
9204 (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_REUSE_LOCAL)) {
9205 parameters->reuse_port = true;
9206 }
9207
9208 #if SKYWALK
9209 if (!parameters->is_listener) {
9210 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN) {
9211 if (parsed_parameters.demux_patterns[0].len == 0) {
9212 parameters->is_demuxable_parent = 1;
9213 } else {
9214 if (client->validated_parent) {
9215 ASSERT(!uuid_is_null(client->parent_client_id));
9216
9217 NECP_CLIENT_TREE_LOCK_SHARED();
9218 struct necp_client *parent = necp_find_client_and_lock(client->parent_client_id);
9219 if (parent != NULL) {
9220 struct necp_client_flow_registration *parent_flow_registration = NULL;
9221 RB_FOREACH(parent_flow_registration, _necp_client_flow_tree, &parent->flow_registrations) {
9222 uuid_copy(parameters->parent_flow_uuid, parent_flow_registration->registration_id);
9223 break;
9224 }
9225
9226 NECP_CLIENT_UNLOCK(parent);
9227 }
9228 NECP_CLIENT_TREE_UNLOCK();
9229
9230 if (parsed_parameters.demux_pattern_count > 0) {
9231 for (int i = 0; i < parsed_parameters.demux_pattern_count; i++) {
9232 memcpy(¶meters->demux_patterns[i], &parsed_parameters.demux_patterns[i], sizeof(struct necp_demux_pattern));
9233 }
9234 parameters->demux_pattern_count = parsed_parameters.demux_pattern_count;
9235 }
9236 }
9237 }
9238 }
9239
9240 if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_EXTENDED_FLAGS) {
9241 if (parsed_parameters.extended_flags & NECP_CLIENT_PARAMETER_EXTENDED_FLAG_AOP2_OFFLOAD) {
9242 parameters->use_aop_offload = true;
9243 }
9244 }
9245 }
9246 #endif // SKYWALK
9247
9248 return error;
9249 }
9250
9251 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_list(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)9252 necp_client_list(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
9253 {
9254 int error = 0;
9255 struct necp_client *find_client = NULL;
9256 size_t copy_buffer_size = 0;
9257 uuid_t *list = NULL;
9258 u_int32_t requested_client_count = 0;
9259 u_int32_t client_count = 0;
9260
9261 if (uap->buffer_size < sizeof(requested_client_count) || uap->buffer == 0) {
9262 error = EINVAL;
9263 goto done;
9264 }
9265
9266 if (!(fd_data->flags & NECP_OPEN_FLAG_OBSERVER)) {
9267 NECPLOG0(LOG_ERR, "Client does not hold necessary entitlement to list other NECP clients");
9268 error = EACCES;
9269 goto done;
9270 }
9271
9272 error = copyin(uap->buffer, &requested_client_count, sizeof(requested_client_count));
9273 if (error) {
9274 goto done;
9275 }
9276
9277 if (os_mul_overflow(sizeof(uuid_t), requested_client_count, ©_buffer_size)) {
9278 error = ERANGE;
9279 goto done;
9280 }
9281
9282 if (uap->buffer_size - sizeof(requested_client_count) != copy_buffer_size) {
9283 error = EINVAL;
9284 goto done;
9285 }
9286
9287 if (copy_buffer_size > NECP_MAX_CLIENT_LIST_SIZE) {
9288 error = EINVAL;
9289 goto done;
9290 }
9291
9292 if (requested_client_count > 0) {
9293 list = (uuid_t*)kalloc_data(copy_buffer_size, Z_WAITOK | Z_ZERO);
9294 if (list == NULL) {
9295 error = ENOMEM;
9296 goto done;
9297 }
9298 }
9299
9300 // Lock tree
9301 NECP_CLIENT_TREE_LOCK_SHARED();
9302
9303 find_client = NULL;
9304 RB_FOREACH(find_client, _necp_client_global_tree, &necp_client_global_tree) {
9305 NECP_CLIENT_LOCK(find_client);
9306 if (!uuid_is_null(find_client->client_id)) {
9307 if (client_count < requested_client_count) {
9308 uuid_copy(list[client_count], find_client->client_id);
9309 }
9310 client_count++;
9311 }
9312 NECP_CLIENT_UNLOCK(find_client);
9313 }
9314
9315 // Unlock tree
9316 NECP_CLIENT_TREE_UNLOCK();
9317
9318 error = copyout(&client_count, uap->buffer, sizeof(client_count));
9319 if (error) {
9320 NECPLOG(LOG_ERR, "necp_client_list buffer copyout error (%d)", error);
9321 goto done;
9322 }
9323
9324 if (requested_client_count > 0 &&
9325 client_count > 0 &&
9326 list != NULL) {
9327 error = copyout(list, uap->buffer + sizeof(client_count), copy_buffer_size);
9328 if (error) {
9329 NECPLOG(LOG_ERR, "necp_client_list client count copyout error (%d)", error);
9330 goto done;
9331 }
9332 }
9333 done:
9334 if (list != NULL) {
9335 kfree_data(list, copy_buffer_size);
9336 }
9337 *retval = error;
9338
9339 return error;
9340 }
9341
9342 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_add_flow(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)9343 necp_client_add_flow(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
9344 {
9345 int error = 0;
9346 struct necp_client *client = NULL;
9347 uuid_t client_id;
9348 struct necp_client_nexus_parameters parameters = {};
9349 struct proc *proc = PROC_NULL;
9350 struct necp_client_add_flow * __indexable add_request = NULL;
9351 struct necp_client_add_flow * __indexable allocated_add_request = NULL;
9352 struct necp_client_add_flow_default default_add_request = {};
9353 const size_t buffer_size = uap->buffer_size;
9354
9355 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
9356 error = EINVAL;
9357 NECPLOG(LOG_ERR, "necp_client_add_flow invalid client_id (length %zu)", (size_t)uap->client_id_len);
9358 goto done;
9359 }
9360
9361 if (uap->buffer == 0 || buffer_size < sizeof(struct necp_client_add_flow) ||
9362 buffer_size > sizeof(struct necp_client_add_flow_default) * 4) {
9363 error = EINVAL;
9364 NECPLOG(LOG_ERR, "necp_client_add_flow invalid buffer (length %zu)", buffer_size);
9365 goto done;
9366 }
9367
9368 error = copyin(uap->client_id, client_id, sizeof(uuid_t));
9369 if (error) {
9370 NECPLOG(LOG_ERR, "necp_client_add_flow copyin client_id error (%d)", error);
9371 goto done;
9372 }
9373
9374 if (buffer_size <= sizeof(struct necp_client_add_flow_default)) {
9375 // Fits in default size
9376 error = copyin(uap->buffer, &default_add_request, buffer_size);
9377 if (error) {
9378 NECPLOG(LOG_ERR, "necp_client_add_flow copyin default_add_request error (%d)", error);
9379 goto done;
9380 }
9381
9382 add_request = (struct necp_client_add_flow *)&default_add_request;
9383 } else {
9384 allocated_add_request = (struct necp_client_add_flow *)kalloc_data(buffer_size, Z_WAITOK | Z_ZERO);
9385 if (allocated_add_request == NULL) {
9386 error = ENOMEM;
9387 goto done;
9388 }
9389
9390 error = copyin(uap->buffer, allocated_add_request, buffer_size);
9391 if (error) {
9392 NECPLOG(LOG_ERR, "necp_client_add_flow copyin default_add_request error (%d)", error);
9393 goto done;
9394 }
9395
9396 add_request = allocated_add_request;
9397 }
9398
9399 NECP_FD_LOCK(fd_data);
9400 pid_t pid = fd_data->proc_pid;
9401 proc = proc_find(pid);
9402 if (proc == PROC_NULL) {
9403 NECP_FD_UNLOCK(fd_data);
9404 NECPLOG(LOG_ERR, "necp_client_add_flow process not found for pid %d error (%d)", pid, error);
9405 error = ESRCH;
9406 goto done;
9407 }
9408
9409 client = necp_client_fd_find_client_and_lock(fd_data, client_id);
9410 if (client == NULL) {
9411 error = ENOENT;
9412 NECP_FD_UNLOCK(fd_data);
9413 goto done;
9414 }
9415
9416 // Using ADD_FLOW indicates that the client supports multiple flows per client
9417 client->legacy_client_is_flow = false;
9418
9419 necp_client_retain_locked(client);
9420 necp_client_copy_parameters_locked(client, ¶meters);
9421
9422 struct necp_client_flow_registration *new_registration = necp_client_create_flow_registration(fd_data, client);
9423 if (new_registration == NULL) {
9424 error = ENOMEM;
9425 NECP_CLIENT_UNLOCK(client);
9426 NECP_FD_UNLOCK(fd_data);
9427 NECPLOG0(LOG_ERR, "Failed to allocate flow registration");
9428 goto done;
9429 }
9430
9431 new_registration->flags = add_request->flags;
9432
9433 // If NECP_CLIENT_FLOW_FLAGS_OPEN_FLOW_ON_BEHALF_OF_CLIENT is set, then set registration_id_to_add to the old
9434 // value in add_request->registration_id, otherwise use the new value in new_registration->registration_id.
9435 bool open_flow_on_behalf_of_client = (add_request->flags & NECP_CLIENT_FLOW_FLAGS_OPEN_FLOW_ON_BEHALF_OF_CLIENT);
9436 uuid_t registration_id_to_add = {};
9437 if (open_flow_on_behalf_of_client && !uuid_is_null(add_request->registration_id)) {
9438 uuid_copy(registration_id_to_add, add_request->registration_id);
9439 } else {
9440 uuid_copy(registration_id_to_add, new_registration->registration_id);
9441 }
9442
9443 // Copy new ID out to caller
9444 uuid_copy(add_request->registration_id, new_registration->registration_id);
9445 new_registration->aop_offload = parameters.use_aop_offload;
9446
9447 NECP_CLIENT_FLOW_LOG(client, new_registration, "adding flow");
9448
9449 size_t trailer_offset = (sizeof(struct necp_client_add_flow) +
9450 add_request->stats_request_count * sizeof(struct necp_client_flow_stats));
9451
9452 // Copy override address
9453 struct sockaddr * __single override_address = NULL;
9454 if (add_request->flags & NECP_CLIENT_FLOW_FLAGS_OVERRIDE_ADDRESS) {
9455 size_t offset_of_address = trailer_offset;
9456 if (buffer_size >= offset_of_address + sizeof(struct sockaddr_in)) {
9457 override_address = flow_req_get_address(add_request, offset_of_address);
9458 if (buffer_size >= offset_of_address + override_address->sa_len &&
9459 override_address->sa_len <= sizeof(parameters.remote_addr)) {
9460 SOCKADDR_COPY(override_address, ¶meters.remote_addr, override_address->sa_len);
9461 trailer_offset += override_address->sa_len;
9462
9463 // Clear out any local address if the remote address is overridden
9464 if (parameters.remote_addr.sa.sa_family == AF_INET) {
9465 parameters.local_addr.sin.sin_family = AF_INET;
9466 parameters.local_addr.sin.sin_len = sizeof(struct sockaddr_in);
9467 parameters.local_addr.sin.sin_addr.s_addr = 0;
9468 } else if (parameters.remote_addr.sa.sa_family == AF_INET6) {
9469 parameters.local_addr.sin6.sin6_family = AF_INET6;
9470 parameters.local_addr.sin6.sin6_len = sizeof(struct sockaddr_in6);
9471 memset((uint8_t *)¶meters.local_addr.sin6.sin6_addr, 0, sizeof(struct in6_addr));
9472 parameters.local_addr.sin6.sin6_scope_id = 0;
9473 }
9474 } else {
9475 override_address = NULL;
9476 }
9477 }
9478 }
9479
9480 // Copy override IP protocol
9481 if (add_request->flags & NECP_CLIENT_FLOW_FLAGS_OVERRIDE_IP_PROTOCOL) {
9482 size_t offset_of_ip_protocol = trailer_offset;
9483 if (buffer_size >= offset_of_ip_protocol + sizeof(uint8_t)) {
9484 uint8_t * __single ip_protocol_p = flow_req_get_proto(add_request, offset_of_ip_protocol);
9485 memcpy(¶meters.ip_protocol, ip_protocol_p, sizeof(uint8_t));
9486 }
9487 }
9488
9489 // If opening the flow on behalf of the client, then replace the pid and parameters.pid with the effective PID
9490 // so that the client's PID is used for this flow instead of the PID of the process making the requests.
9491 if (open_flow_on_behalf_of_client) {
9492 parameters.pid = parameters.epid;
9493 pid = parameters.epid;
9494 }
9495
9496 #if SKYWALK
9497 if (add_request->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS) {
9498 size_t assigned_results_length = 0;
9499 void * __sized_by(assigned_results_length) assigned_results = NULL;
9500 uint32_t interface_index = 0;
9501
9502 // Validate that the nexus UUID is assigned
9503 bool found_nexus = false;
9504 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
9505 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
9506 struct necp_client_interface_option *option = &client->interface_options[option_i];
9507 if (uuid_compare(option->nexus_agent, add_request->agent_uuid) == 0) {
9508 interface_index = option->interface_index;
9509 found_nexus = true;
9510 break;
9511 }
9512 } else {
9513 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
9514 if (uuid_compare(option->nexus_agent, add_request->agent_uuid) == 0) {
9515 interface_index = option->interface_index;
9516 found_nexus = true;
9517 break;
9518 }
9519 }
9520 }
9521
9522 if (!found_nexus) {
9523 NECPLOG0(LOG_ERR, "Requested nexus not found");
9524 } else {
9525 necp_client_add_nexus_flow_if_needed(new_registration, add_request->agent_uuid, interface_index, parameters.use_aop_offload);
9526
9527 error = netagent_client_message_with_params(add_request->agent_uuid,
9528 ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
9529 client->client_id :
9530 registration_id_to_add),
9531 pid, client->agent_handle,
9532 NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS,
9533 (struct necp_client_agent_parameters *)¶meters,
9534 &assigned_results, &assigned_results_length);
9535 if (error != 0) {
9536 VERIFY(assigned_results == NULL);
9537 VERIFY(assigned_results_length == 0);
9538 NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error);
9539 } else if (assigned_results != NULL) {
9540 if (!necp_assign_client_result_locked(proc, fd_data, client, new_registration, add_request->agent_uuid,
9541 assigned_results, assigned_results_length, false, false)) {
9542 kfree_data_sized_by(assigned_results, assigned_results_length);
9543 }
9544 } else if (override_address != NULL) {
9545 // Save the overridden address in the flow. Find the correct flow,
9546 // and assign just the address TLV. Don't set the assigned flag.
9547 struct necp_client_flow *flow = NULL;
9548 LIST_FOREACH(flow, &new_registration->flow_list, flow_chain) {
9549 if (flow->nexus &&
9550 uuid_compare(flow->u.nexus_agent, add_request->agent_uuid) == 0) {
9551 if (flow->assigned_results == NULL) {
9552 SOCKADDR_COPY(override_address, &flow->remote_addr, override_address->sa_len);
9553 uuid_t empty_uuid;
9554 uuid_clear(empty_uuid);
9555 size_t message_length;
9556 void *message = necp_create_nexus_assign_message(empty_uuid, 0, NULL, 0,
9557 (struct necp_client_endpoint *)&flow->local_addr,
9558 (struct necp_client_endpoint *)&flow->remote_addr,
9559 NULL, 0, NULL, 0, &message_length);
9560 flow->assigned_results = message;
9561 flow->assigned_results_length = message_length;
9562 }
9563 break;
9564 }
9565 }
9566 }
9567 }
9568 }
9569
9570 // Don't request stats if nexus creation fails
9571 if (error == 0 && add_request->stats_request_count > 0 && necp_arena_initialize(fd_data, true) == 0) {
9572 struct necp_client_flow_stats * __single stats_request = &(necp_client_get_flow_stats(add_request))[0];
9573 struct necp_stats_bufreq bufreq = {};
9574
9575 NECP_CLIENT_FLOW_LOG(client, new_registration, "Initializing stats");
9576
9577 bufreq.necp_stats_bufreq_id = NECP_CLIENT_STATISTICS_BUFREQ_ID;
9578 bufreq.necp_stats_bufreq_type = stats_request->stats_type;
9579 bufreq.necp_stats_bufreq_ver = stats_request->stats_version;
9580 bufreq.necp_stats_bufreq_size = stats_request->stats_size;
9581 bufreq.necp_stats_bufreq_uaddr = stats_request->stats_addr;
9582 (void)necp_stats_initialize(fd_data, client, new_registration, &bufreq);
9583 stats_request->stats_type = bufreq.necp_stats_bufreq_type;
9584 stats_request->stats_version = bufreq.necp_stats_bufreq_ver;
9585 stats_request->stats_size = bufreq.necp_stats_bufreq_size;
9586 stats_request->stats_addr = bufreq.necp_stats_bufreq_uaddr;
9587 }
9588
9589 if (error == 0 && parameters.use_aop_offload) {
9590 error = necp_aop_offload_stats_initialize(
9591 new_registration, add_request->agent_uuid);
9592 }
9593 #endif /* !SKYWALK */
9594
9595 if (error == 0 &&
9596 (add_request->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE ||
9597 add_request->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) {
9598 uint32_t interface_index = IFSCOPE_NONE;
9599 ifnet_head_lock_shared();
9600 struct ifnet *interface = NULL;
9601 TAILQ_FOREACH(interface, &ifnet_head, if_link) {
9602 ifnet_lock_shared(interface);
9603 if (interface->if_agentids != NULL) {
9604 for (u_int32_t i = 0; i < interface->if_agentcount; i++) {
9605 if (uuid_compare(interface->if_agentids[i], add_request->agent_uuid) == 0) {
9606 interface_index = interface->if_index;
9607 break;
9608 }
9609 }
9610 }
9611 ifnet_lock_done(interface);
9612 if (interface_index != IFSCOPE_NONE) {
9613 break;
9614 }
9615 }
9616 ifnet_head_done();
9617
9618 necp_client_add_nexus_flow_if_needed(new_registration, add_request->agent_uuid, interface_index, parameters.use_aop_offload);
9619
9620 size_t dummy_length = 0;
9621 void * __sized_by(dummy_length) dummy_results = NULL;
9622 error = netagent_client_message_with_params(add_request->agent_uuid,
9623 ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
9624 client->client_id :
9625 new_registration->registration_id),
9626 pid, client->agent_handle,
9627 NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT,
9628 (struct necp_client_agent_parameters *)¶meters,
9629 &dummy_results, &dummy_length);
9630 if (error != 0) {
9631 NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error);
9632 }
9633 }
9634
9635 if (error != 0) {
9636 // Encountered an error in adding the flow, destroy the flow registration
9637 #if SKYWALK
9638 necp_destroy_flow_stats(fd_data, new_registration, NULL, false);
9639 #endif /* SKYWALK */
9640 NECP_FLOW_TREE_LOCK_EXCLUSIVE();
9641 RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, new_registration);
9642 NECP_FLOW_TREE_UNLOCK();
9643 RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, new_registration);
9644 necp_destroy_client_flow_registration(client, new_registration, fd_data->proc_pid, true);
9645 new_registration = NULL;
9646 }
9647
9648 NECP_CLIENT_UNLOCK(client);
9649 NECP_FD_UNLOCK(fd_data);
9650
9651 necp_client_release(client);
9652
9653 if (error != 0) {
9654 goto done;
9655 }
9656
9657 // Copy the request back out to the caller with assigned fields
9658 error = copyout(add_request, uap->buffer, buffer_size);
9659 if (error != 0) {
9660 NECPLOG(LOG_ERR, "necp_client_add_flow copyout add_request error (%d)", error);
9661 }
9662
9663 done:
9664 *retval = error;
9665 if (error != 0) {
9666 NECPLOG(LOG_ERR, "Add flow error (%d)", error);
9667 }
9668
9669 if (allocated_add_request != NULL) {
9670 kfree_data(allocated_add_request, buffer_size);
9671 }
9672
9673 if (proc != PROC_NULL) {
9674 proc_rele(proc);
9675 }
9676 return error;
9677 }
9678
9679 #if SKYWALK
9680
/*
 * Legacy NECP action: request a nexus for a client that supports only a
 * single flow. Looks up the client's assigned nexus agent from its interface
 * options, creates a flow registration, sends REQUEST_NEXUS to the agent,
 * and (optionally) initializes a stats buffer if the caller passed a
 * necp_stats_bufreq in uap->buffer.
 *
 * Returns 0 on success; EINVAL for bad arguments, ESRCH if the owning
 * process is gone, ENOENT if the client is not found, ENETDOWN if no nexus
 * agent is assigned, ENOMEM if the flow registration cannot be allocated,
 * or the error from the agent message.
 */
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_request_nexus(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	struct necp_client_nexus_parameters parameters = {};
	struct proc *proc = PROC_NULL;
	const size_t buffer_size = uap->buffer_size;

	// The client ID must be present and exactly UUID-sized.
	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_request_nexus copyin client_id error (%d)", error);
		goto done;
	}

	NECP_FD_LOCK(fd_data);
	pid_t pid = fd_data->proc_pid;
	// Take a reference on the owning process; released at done:.
	proc = proc_find(pid);
	if (proc == PROC_NULL) {
		NECP_FD_UNLOCK(fd_data);
		NECPLOG(LOG_ERR, "necp_client_request_nexus process not found for pid %d error (%d)", pid, error);
		error = ESRCH;
		goto done;
	}

	// On success this returns with the client lock held (fd lock still held).
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client == NULL) {
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
		goto done;
	}

	// Using REQUEST_NEXUS indicates that the client only supports one flow per client
	client->legacy_client_is_flow = true;

	// Hold an extra client reference across the agent message; released
	// after unlocking below (or on the early-exit paths).
	necp_client_retain_locked(client);
	necp_client_copy_parameters_locked(client, &parameters);

	do {
		size_t assigned_results_length = 0;
		void * __sized_by(assigned_results_length) assigned_results = NULL;
		uuid_t nexus_uuid;
		uint32_t interface_index = 0;

		// Validate that the nexus UUID is assigned
		bool found_nexus = false;
		// Interface options are split between a static inline array and an
		// overflow ("extra") array; scan both for the first non-null nexus agent.
		for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
			if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
				struct necp_client_interface_option *option = &client->interface_options[option_i];
				if (!uuid_is_null(option->nexus_agent)) {
					uuid_copy(nexus_uuid, option->nexus_agent);
					interface_index = option->interface_index;
					found_nexus = true;
					break;
				}
			} else {
				struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
				if (!uuid_is_null(option->nexus_agent)) {
					uuid_copy(nexus_uuid, option->nexus_agent);
					interface_index = option->interface_index;
					found_nexus = true;
					break;
				}
			}
		}

		if (!found_nexus) {
			NECP_CLIENT_UNLOCK(client);
			NECP_FD_UNLOCK(fd_data);
			necp_client_release(client);
			// Break the loop
			error = ENETDOWN;
			goto done;
		}

		struct necp_client_flow_registration *new_registration = necp_client_create_flow_registration(fd_data, client);
		if (new_registration == NULL) {
			error = ENOMEM;
			NECP_CLIENT_UNLOCK(client);
			NECP_FD_UNLOCK(fd_data);
			necp_client_release(client);
			NECPLOG0(LOG_ERR, "Failed to allocate flow registration");
			goto done;
		}

		new_registration->flags = (NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS | NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID);

		necp_client_add_nexus_flow_if_needed(new_registration, nexus_uuid, interface_index, parameters.use_aop_offload);

		// Note: Any clients using "request_nexus" are not flow-registration aware.
		// Register the Client ID rather than the Registration ID with the nexus, since
		// the client will send traffic based on the client ID.
		error = netagent_client_message_with_params(nexus_uuid,
		    ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
		    client->client_id :
		    new_registration->registration_id),
		    pid, client->agent_handle,
		    NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS,
		    (struct necp_client_agent_parameters *)&parameters,
		    &assigned_results, &assigned_results_length);
		if (error) {
			NECP_CLIENT_UNLOCK(client);
			NECP_FD_UNLOCK(fd_data);
			necp_client_release(client);
			// The agent contract guarantees no results on failure.
			VERIFY(assigned_results == NULL);
			VERIFY(assigned_results_length == 0);
			NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error);
			goto done;
		}

		if (assigned_results != NULL) {
			// On success, ownership of assigned_results transfers to the
			// client; free it ourselves only if assignment fails.
			if (!necp_assign_client_result_locked(proc, fd_data, client, new_registration, nexus_uuid,
			    assigned_results, assigned_results_length, false, false)) {
				kfree_data_sized_by(assigned_results, assigned_results_length);
			}
		}

		// Optionally initialize flow statistics if the caller passed a
		// correctly-sized necp_stats_bufreq. Copy errors here are logged
		// but do not fail the overall request.
		if (uap->buffer != 0 && buffer_size == sizeof(struct necp_stats_bufreq) &&
		    necp_arena_initialize(fd_data, true) == 0) {
			struct necp_stats_bufreq bufreq = {};
			int copy_error = copyin(uap->buffer, &bufreq, buffer_size);
			if (copy_error) {
				NECPLOG(LOG_ERR, "necp_client_request_nexus copyin bufreq error (%d)", copy_error);
			} else {
				(void)necp_stats_initialize(fd_data, client, new_registration, &bufreq);
				copy_error = copyout(&bufreq, uap->buffer, buffer_size);
				if (copy_error != 0) {
					NECPLOG(LOG_ERR, "necp_client_request_nexus copyout bufreq error (%d)", copy_error);
				}
			}
		}
	} while (false);

	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);

	necp_client_release(client);

done:
	*retval = error;
	if (error != 0) {
		NECPLOG(LOG_ERR, "Request nexus error (%d)", error);
	}

	if (proc != PROC_NULL) {
		proc_rele(proc);
	}
	return error;
}
9836 #endif /* !SKYWALK */
9837
9838 static void
necp_client_add_assertion(struct necp_client * client,uuid_t netagent_uuid)9839 necp_client_add_assertion(struct necp_client *client, uuid_t netagent_uuid)
9840 {
9841 struct necp_client_assertion *new_assertion = NULL;
9842
9843 new_assertion = kalloc_type(struct necp_client_assertion,
9844 Z_WAITOK | Z_NOFAIL);
9845
9846 uuid_copy(new_assertion->asserted_netagent, netagent_uuid);
9847
9848 LIST_INSERT_HEAD(&client->assertion_list, new_assertion, assertion_chain);
9849 }
9850
9851 static bool
necp_client_remove_assertion(struct necp_client * client,uuid_t netagent_uuid)9852 necp_client_remove_assertion(struct necp_client *client, uuid_t netagent_uuid)
9853 {
9854 struct necp_client_assertion * __single found_assertion = NULL;
9855 struct necp_client_assertion *search_assertion = NULL;
9856 LIST_FOREACH(search_assertion, &client->assertion_list, assertion_chain) {
9857 if (uuid_compare(search_assertion->asserted_netagent, netagent_uuid) == 0) {
9858 found_assertion = search_assertion;
9859 break;
9860 }
9861 }
9862
9863 if (found_assertion == NULL) {
9864 NECPLOG0(LOG_ERR, "Netagent uuid not previously asserted");
9865 return false;
9866 }
9867
9868 LIST_REMOVE(found_assertion, assertion_chain);
9869 kfree_type(struct necp_client_assertion, found_assertion);
9870 return true;
9871 }
9872
/*
 * Handle an agent-action request for a client: parse a userspace TLV buffer
 * and deliver the corresponding messages to the named network agents.
 * Supported TLVs: trigger/assert/unassert an agent, add/remove group
 * members, and report an agent error.
 *
 * Returns 0 if at least one agent was acted on; ENOENT if no TLV resulted
 * in an agent action (or an unassert had no matching assertion); EINVAL for
 * bad arguments; otherwise the first agent-message error encountered.
 */
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_agent_action(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	bool acted_on_agent = FALSE;
	u_int8_t *parameters = NULL;
	const size_t buffer_size = uap->buffer_size;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
	    buffer_size == 0 || uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_agent_action invalid parameters");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_agent_action copyin client_id error (%d)", error);
		goto done;
	}

	// Bound the TLV buffer before allocating kernel memory for it.
	if (buffer_size > NECP_MAX_AGENT_ACTION_SIZE) {
		NECPLOG(LOG_ERR, "necp_client_agent_action invalid buffer size (>%u)", NECP_MAX_AGENT_ACTION_SIZE);
		error = EINVAL;
		goto done;
	}

	parameters = (u_int8_t *)kalloc_data(buffer_size, Z_WAITOK | Z_ZERO);
	if (parameters == NULL) {
		error = ENOMEM;
		goto done;
	}

	error = copyin(uap->buffer, parameters, buffer_size);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_agent_action parameters copyin error (%d)", error);
		goto done;
	}

	NECP_FD_LOCK(fd_data);
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client != NULL) {
		// Walk the TLV buffer; each iteration consumes one header + value.
		size_t offset = 0;
		while ((offset + sizeof(struct necp_tlv_header)) <= buffer_size) {
			u_int8_t type = necp_buffer_get_tlv_type(parameters, buffer_size, offset);
			u_int32_t length = necp_buffer_get_tlv_length(parameters, buffer_size, offset);

			if (length > (buffer_size - (offset + sizeof(struct necp_tlv_header)))) {
				// If the length is larger than what can fit in the remaining parameters size, bail
				NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
				break;
			}

			// All recognized TLVs start with an agent UUID in the value.
			if (length >= sizeof(uuid_t)) {
				u_int8_t * __indexable value = necp_buffer_get_tlv_value(parameters, buffer_size, offset, NULL);
				if (value == NULL) {
					NECPLOG0(LOG_ERR, "Invalid TLV value");
					break;
				}
				if (type == NECP_CLIENT_PARAMETER_TRIGGER_AGENT ||
				    type == NECP_CLIENT_PARAMETER_ASSERT_AGENT ||
				    type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) {
					uuid_t agent_uuid;
					uuid_copy(agent_uuid, value);
					// Map the TLV type to the corresponding agent message.
					u_int8_t netagent_message_type = 0;
					if (type == NECP_CLIENT_PARAMETER_TRIGGER_AGENT) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_TRIGGER;
					} else if (type == NECP_CLIENT_PARAMETER_ASSERT_AGENT) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT;
					} else if (type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
					}

					// Before unasserting, verify that the assertion was already taken
					if (type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) {
						if (!necp_client_remove_assertion(client, agent_uuid)) {
							error = ENOENT;
							break;
						}
					}

					struct necp_client_nexus_parameters parsed_parameters = {};
					necp_client_copy_parameters_locked(client, &parsed_parameters);
					size_t dummy_length = 0;
					void * __sized_by(dummy_length) dummy_results = NULL;

					error = netagent_client_message_with_params(agent_uuid,
					    client_id,
					    fd_data->proc_pid,
					    client->agent_handle,
					    netagent_message_type,
					    (struct necp_client_agent_parameters *)&parsed_parameters,
					    &dummy_results, &dummy_length);
					if (error == 0) {
						acted_on_agent = TRUE;
					} else {
						break;
					}

					// Only save the assertion if the action succeeded
					if (type == NECP_CLIENT_PARAMETER_ASSERT_AGENT) {
						necp_client_add_assertion(client, agent_uuid);
					}
				} else if (type == NECP_CLIENT_PARAMETER_AGENT_ADD_GROUP_MEMBERS ||
				    type == NECP_CLIENT_PARAMETER_AGENT_REMOVE_GROUP_MEMBERS) {
					uuid_t agent_uuid;
					uuid_copy(agent_uuid, value);
					u_int8_t netagent_message_type = 0;
					if (type == NECP_CLIENT_PARAMETER_AGENT_ADD_GROUP_MEMBERS) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_ADD_GROUP_MEMBERS;
					} else if (type == NECP_CLIENT_PARAMETER_AGENT_REMOVE_GROUP_MEMBERS) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_REMOVE_GROUP_MEMBERS;
					}

					// The member list follows the agent UUID in the TLV value.
					struct necp_client_group_members group_members = {};
					group_members.group_members_length = (length - sizeof(uuid_t));
					group_members.group_members = (value + sizeof(uuid_t));
					size_t dummy_length = 0;
					void * __sized_by(dummy_length) dummy_results = NULL;
					error = netagent_client_message_with_params(agent_uuid,
					    client_id,
					    fd_data->proc_pid,
					    client->agent_handle,
					    netagent_message_type,
					    (struct necp_client_agent_parameters *)&group_members,
					    &dummy_results, &dummy_length);
					if (error == 0) {
						acted_on_agent = TRUE;
					} else {
						break;
					}
				} else if (type == NECP_CLIENT_PARAMETER_REPORT_AGENT_ERROR) {
					uuid_t agent_uuid;
					uuid_copy(agent_uuid, value);
					struct necp_client_agent_parameters agent_params = {};
					// Optional error code follows the agent UUID, if present.
					if ((length - sizeof(uuid_t)) >= sizeof(agent_params.u.error.error)) {
						memcpy(&agent_params.u.error.error,
						    (value + sizeof(uuid_t)),
						    sizeof(agent_params.u.error.error));
					}
					// Force a report only for agents not yet recorded on this fd.
					bool agent_reported = false;
					for (int agent_i = 0; agent_i < NECP_FD_REPORTED_AGENT_COUNT; agent_i++) {
						if (uuid_compare(agent_uuid, fd_data->reported_agents.agent_uuid[agent_i]) == 0) {
							// Found a match, already reported
							agent_reported = true;
							break;
						}
					}
					agent_params.u.error.force_report = !agent_reported;
					if (!agent_reported) {
						// Save this agent as having been reported
						bool saved_agent_uuid = false;
						for (int agent_i = 0; agent_i < NECP_FD_REPORTED_AGENT_COUNT; agent_i++) {
							if (uuid_is_null(fd_data->reported_agents.agent_uuid[agent_i])) {
								uuid_copy(fd_data->reported_agents.agent_uuid[agent_i], agent_uuid);
								saved_agent_uuid = true;
								break;
							}
						}
						if (!saved_agent_uuid) {
							// Reported agent UUIDs full, move over and insert at the end
							for (int agent_i = 0; agent_i < NECP_FD_REPORTED_AGENT_COUNT; agent_i++) {
								if (agent_i + 1 < NECP_FD_REPORTED_AGENT_COUNT) {
									uuid_copy(fd_data->reported_agents.agent_uuid[agent_i], fd_data->reported_agents.agent_uuid[agent_i + 1]);
								} else {
									uuid_copy(fd_data->reported_agents.agent_uuid[agent_i], agent_uuid);
								}
							}
						}
					}
					size_t dummy_length = 0;
					void * __sized_by(dummy_length) dummy_results = NULL;
					error = netagent_client_message_with_params(agent_uuid,
					    client_id,
					    fd_data->proc_pid,
					    client->agent_handle,
					    NETAGENT_MESSAGE_TYPE_CLIENT_ERROR,
					    &agent_params,
					    &dummy_results, &dummy_length);
					if (error == 0) {
						acted_on_agent = TRUE;
					} else {
						break;
					}
				}
			}

			offset += sizeof(struct necp_tlv_header) + length;
		}

		NECP_CLIENT_UNLOCK(client);
	}
	NECP_FD_UNLOCK(fd_data);

	// No TLV produced an agent action (including client-not-found): ENOENT.
	if (!acted_on_agent &&
	    error == 0) {
		error = ENOENT;
	}
done:
	*retval = error;
	if (parameters != NULL) {
		kfree_data(parameters, buffer_size);
		parameters = NULL;
	}

	return error;
}
10082
10083 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_agent(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)10084 necp_client_copy_agent(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
10085 {
10086 int error = 0;
10087 uuid_t agent_uuid;
10088 const size_t buffer_size = uap->buffer_size;
10089
10090 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
10091 buffer_size == 0 || uap->buffer == 0) {
10092 NECPLOG0(LOG_ERR, "necp_client_copy_agent bad input");
10093 error = EINVAL;
10094 goto done;
10095 }
10096
10097 error = copyin(uap->client_id, agent_uuid, sizeof(uuid_t));
10098 if (error) {
10099 NECPLOG(LOG_ERR, "necp_client_copy_agent copyin agent_uuid error (%d)", error);
10100 goto done;
10101 }
10102
10103 error = netagent_copyout(agent_uuid, uap->buffer, buffer_size);
10104 if (error) {
10105 // netagent_copyout already logs appropriate errors
10106 goto done;
10107 }
10108 done:
10109 *retval = error;
10110
10111 return error;
10112 }
10113
10114 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_agent_use(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)10115 necp_client_agent_use(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
10116 {
10117 int error = 0;
10118 struct necp_client *client = NULL;
10119 uuid_t client_id;
10120 struct necp_agent_use_parameters parameters = {};
10121 const size_t buffer_size = uap->buffer_size;
10122
10123 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
10124 buffer_size != sizeof(parameters) || uap->buffer == 0) {
10125 error = EINVAL;
10126 goto done;
10127 }
10128
10129 error = copyin(uap->client_id, client_id, sizeof(uuid_t));
10130 if (error) {
10131 NECPLOG(LOG_ERR, "Copyin client_id error (%d)", error);
10132 goto done;
10133 }
10134
10135 error = copyin(uap->buffer, ¶meters, buffer_size);
10136 if (error) {
10137 NECPLOG(LOG_ERR, "Parameters copyin error (%d)", error);
10138 goto done;
10139 }
10140
10141 NECP_FD_LOCK(fd_data);
10142 client = necp_client_fd_find_client_and_lock(fd_data, client_id);
10143 if (client != NULL) {
10144 error = netagent_use(parameters.agent_uuid, ¶meters.out_use_count);
10145 NECP_CLIENT_UNLOCK(client);
10146 } else {
10147 error = ENOENT;
10148 }
10149
10150 NECP_FD_UNLOCK(fd_data);
10151
10152 if (error == 0) {
10153 error = copyout(¶meters, uap->buffer, buffer_size);
10154 if (error) {
10155 NECPLOG(LOG_ERR, "Parameters copyout error (%d)", error);
10156 goto done;
10157 }
10158 }
10159
10160 done:
10161 *retval = error;
10162
10163 return error;
10164 }
10165
10166 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_acquire_agent_token(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)10167 necp_client_acquire_agent_token(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
10168 {
10169 int error = 0;
10170 uuid_t agent_uuid = {};
10171 const size_t buffer_size = uap->buffer_size;
10172
10173 *retval = 0;
10174
10175 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
10176 buffer_size == 0 || uap->buffer == 0) {
10177 NECPLOG0(LOG_ERR, "necp_client_copy_agent bad input");
10178 error = EINVAL;
10179 goto done;
10180 }
10181
10182 error = copyin(uap->client_id, agent_uuid, sizeof(uuid_t));
10183 if (error) {
10184 NECPLOG(LOG_ERR, "necp_client_copy_agent copyin agent_uuid error (%d)", error);
10185 goto done;
10186 }
10187
10188 error = netagent_acquire_token(agent_uuid, uap->buffer, buffer_size, retval);
10189 done:
10190 return error;
10191 }
10192
/*
 * Copy a necp_interface_details snapshot for the interface index passed in
 * uap->client_id out to uap->buffer. Gathers name, indices, flags, MTU,
 * network signatures, IPv4 netmask/broadcast, radio type, and whether
 * default IPv4/IPv6 routes are scoped to the interface.
 *
 * Locking: takes rnh_lock (required for the scoped route lookups), then the
 * ifnet head lock, then the per-interface lock for the address walk.
 */
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	u_int32_t interface_index = 0;
	struct necp_interface_details interface_details = {};

	// client_id carries a u_int32_t interface index for this action.
	if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
	    uap->buffer_size < sizeof(interface_details) ||
	    uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_copy_interface bad input");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, &interface_index, sizeof(u_int32_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_copy_interface copyin interface_index error (%d)", error);
		goto done;
	}

	if (interface_index == 0) {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_copy_interface bad interface_index (%d)", interface_index);
		goto done;
	}

	// rnh_lock must be held before the ifnet head lock for the
	// rtalloc1_scoped_locked/rtfree_locked calls below.
	lck_mtx_lock(rnh_lock);
	ifnet_head_lock_shared();
	ifnet_t interface = NULL;
	if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
		interface = ifindex2ifnet[interface_index];
	}

	// If no interface matches, fall through and copy out zeroed details.
	if (interface != NULL) {
		if (interface->if_xname != NULL) {
			strlcpy((char *)&interface_details.name, interface->if_xname, sizeof(interface_details.name));
		}
		interface_details.index = interface->if_index;
		interface_details.generation = ifnet_get_generation(interface);
		if (interface->if_delegated.ifp != NULL) {
			interface_details.delegate_index = interface->if_delegated.ifp->if_index;
		}
		interface_details.functional_type = if_functional_type(interface, TRUE);
		// Translate interface state/flags into NECP interface flags.
		if (IFNET_IS_EXPENSIVE(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_EXPENSIVE;
		}
		if (IFNET_IS_CONSTRAINED(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_CONSTRAINED;
		}
		if (IFNET_IS_ULTRA_CONSTRAINED(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_ULTRA_CONSTRAINED;
		}
		if ((interface->if_eflags & IFEF_TXSTART) == IFEF_TXSTART) {
			interface_details.flags |= NECP_INTERFACE_FLAG_TXSTART;
		}
		if ((interface->if_eflags & IFEF_NOACKPRI) == IFEF_NOACKPRI) {
			interface_details.flags |= NECP_INTERFACE_FLAG_NOACKPRI;
		}
		if ((interface->if_eflags & IFEF_3CA) == IFEF_3CA) {
			interface_details.flags |= NECP_INTERFACE_FLAG_3CARRIERAGG;
		}
		if (IFNET_IS_LOW_POWER(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_IS_LOW_POWER;
		}
		if (interface->if_xflags & IFXF_MPK_LOG) {
			interface_details.flags |= NECP_INTERFACE_FLAG_MPK_LOG;
		}
		if (interface->if_flags & IFF_MULTICAST) {
			interface_details.flags |= NECP_INTERFACE_FLAG_SUPPORTS_MULTICAST;
		}
		if (IS_INTF_CLAT46(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_HAS_NAT64;
		}
		if (interface->if_xflags & IFXF_LOW_POWER_WAKE) {
			interface_details.flags |= NECP_INTERFACE_FLAG_LOW_POWER_WAKE;
		}
		interface_details.l4s_mode = interface->if_l4s_mode;
		interface_details.mtu = interface->if_mtu;
#if SKYWALK
		fsw_get_tso_capabilities(interface, &interface_details.tso_max_segment_size_v4,
		    &interface_details.tso_max_segment_size_v6);

		interface_details.hwcsum_flags = interface->if_hwassist & IFNET_CHECKSUMF;
#endif /* SKYWALK */

		// Fetch the IPv4 network signature; a failed lookup reports length 0.
		u_int8_t ipv4_signature_len = sizeof(interface_details.ipv4_signature.signature);
		u_int16_t ipv4_signature_flags;
		if (ifnet_get_netsignature(interface, AF_INET, &ipv4_signature_len, &ipv4_signature_flags,
		    (u_int8_t *)&interface_details.ipv4_signature) != 0) {
			ipv4_signature_len = 0;
		}
		interface_details.ipv4_signature.signature_len = ipv4_signature_len;

		// Check for default scoped routes for IPv4 and IPv6
		union necp_sockaddr_union default_address;
		struct rtentry *v4Route = NULL;
		memset(&default_address, 0, sizeof(default_address));
		default_address.sa.sa_family = AF_INET;
		default_address.sa.sa_len = sizeof(struct sockaddr_in);
		v4Route = rtalloc1_scoped_locked(SA(&default_address), 0, 0,
		    interface->if_index);
		if (v4Route != NULL) {
			// A CLAT46 egress interface does not count as natively IPv4-routable.
			if (v4Route->rt_ifp != NULL && !IS_INTF_CLAT46(v4Route->rt_ifp)) {
				interface_details.flags |= NECP_INTERFACE_FLAG_IPV4_ROUTABLE;
			}
			rtfree_locked(v4Route);
			v4Route = NULL;
		}

		struct rtentry *v6Route = NULL;
		memset(&default_address, 0, sizeof(default_address));
		default_address.sa.sa_family = AF_INET6;
		default_address.sa.sa_len = sizeof(struct sockaddr_in6);
		v6Route = rtalloc1_scoped_locked(SA(&default_address), 0, 0,
		    interface->if_index);
		if (v6Route != NULL) {
			if (v6Route->rt_ifp != NULL) {
				interface_details.flags |= NECP_INTERFACE_FLAG_IPV6_ROUTABLE;
			}
			rtfree_locked(v6Route);
			v6Route = NULL;
		}

		// Fetch the IPv6 network signature; a failed lookup reports length 0.
		u_int8_t ipv6_signature_len = sizeof(interface_details.ipv6_signature.signature);
		u_int16_t ipv6_signature_flags;
		if (ifnet_get_netsignature(interface, AF_INET6, &ipv6_signature_len, &ipv6_signature_flags,
		    (u_int8_t *)&interface_details.ipv6_signature) != 0) {
			ipv6_signature_len = 0;
		}
		interface_details.ipv6_signature.signature_len = ipv6_signature_len;

		// Walk the address list for the IPv4 netmask/broadcast. Note that a
		// later AF_INET address overwrites an earlier one.
		ifnet_lock_shared(interface);
		struct ifaddr * __single ifa = NULL;
		TAILQ_FOREACH(ifa, &interface->if_addrhead, ifa_link) {
			IFA_LOCK(ifa);
			if (ifa->ifa_addr->sa_family == AF_INET) {
				interface_details.flags |= NECP_INTERFACE_FLAG_HAS_NETMASK;
				interface_details.ipv4_netmask = (ifatoia(ifa))->ia_sockmask.sin_addr.s_addr;
				if (interface->if_flags & IFF_BROADCAST) {
					interface_details.flags |= NECP_INTERFACE_FLAG_HAS_BROADCAST;
					interface_details.ipv4_broadcast = (ifatoia(ifa))->ia_broadaddr.sin_addr.s_addr;
				}
			}
			IFA_UNLOCK(ifa);
		}

		// Fall back to the delegate's radio type if this interface has none.
		interface_details.radio_type = interface->if_radio_type;
		if (interface_details.radio_type == 0 && interface->if_delegated.ifp) {
			interface_details.radio_type = interface->if_delegated.ifp->if_radio_type;
		}
		ifnet_lock_done(interface);
	}

	ifnet_head_done();
	lck_mtx_unlock(rnh_lock);

	// If the client is using an older version of the struct, copy that length
	error = copyout(&interface_details, uap->buffer, sizeof(interface_details));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_copy_interface copyout error (%d)", error);
		goto done;
	}
done:
	*retval = error;

	return error;
}
10361
10362 #if SKYWALK
10363
// Sys-call action handler: given an interface index (copied in via
// uap->client_id) and a destination sockaddr (copied in via uap->buffer),
// perform a scoped route lookup and copy back, into the same buffer, the
// local (source) address that would be selected for that destination on
// that interface. Returns 0 or an errno; *retval mirrors the return value.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_get_interface_address(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
    int error = 0;
    u_int32_t interface_index = IFSCOPE_NONE;
    struct sockaddr_storage address = {};
    const size_t buffer_size = uap->buffer_size;

    // client_id must carry exactly a 4-byte interface index; the buffer
    // must hold between a sockaddr_in and a sockaddr_storage.
    if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
        buffer_size < sizeof(struct sockaddr_in) ||
        buffer_size > sizeof(struct sockaddr_storage) ||
        uap->buffer == 0) {
        NECPLOG0(LOG_ERR, "necp_client_get_interface_address bad input");
        error = EINVAL;
        goto done;
    }

    error = copyin(uap->client_id, &interface_index, sizeof(u_int32_t));
    if (error) {
        NECPLOG(LOG_ERR, "necp_client_get_interface_address copyin interface_index error (%d)", error);
        goto done;
    }

    if (interface_index == IFSCOPE_NONE) {
        error = ENOENT;
        NECPLOG(LOG_ERR, "necp_client_get_interface_address bad interface_index (%d)", interface_index);
        goto done;
    }

    // Copy in the destination address and validate family and embedded
    // sockaddr length against the supplied buffer size.
    error = copyin(uap->buffer, &address, buffer_size);
    if (error) {
        NECPLOG(LOG_ERR, "necp_client_get_interface_address copyin address error (%d)", error);
        goto done;
    }

    if (address.ss_family != AF_INET && address.ss_family != AF_INET6) {
        error = EINVAL;
        NECPLOG(LOG_ERR, "necp_client_get_interface_address invalid address family (%u)", address.ss_family);
        goto done;
    }

    if (address.ss_len != buffer_size) {
        error = EINVAL;
        NECPLOG(LOG_ERR, "necp_client_get_interface_address invalid address length (%u)", address.ss_len);
        goto done;
    }

    // Resolve the index to an ifnet under the interface-list lock.
    // NOTE(review): ifp is used after ifnet_head_done() without taking an
    // extra reference — presumably safe because ifnet slots are not
    // reclaimed while indexed here; confirm against other
    // ifindex2ifnet consumers.
    ifnet_head_lock_shared();
    ifnet_t ifp = NULL;
    if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
        ifp = ifindex2ifnet[interface_index];
    }
    ifnet_head_done();
    if (ifp == NULL) {
        error = ENOENT;
        NECPLOG0(LOG_ERR, "necp_client_get_interface_address no matching interface found");
        goto done;
    }

    // Scoped route lookup; on success rt carries a reference that must
    // be dropped with rtfree().
    struct rtentry *rt = rtalloc1_scoped(SA(&address), 0, 0, interface_index);
    if (rt == NULL) {
        error = EINVAL;
        NECPLOG0(LOG_ERR, "necp_client_get_interface_address route lookup failed");
        goto done;
    }

    // Select the local address a flow would use over (ifp, rt).
    uint32_t gencount = 0;
    struct sockaddr_storage local_address = {};
    error = flow_route_select_laddr((union sockaddr_in_4_6 *)&local_address,
        (union sockaddr_in_4_6 *)&address, ifp, rt, &gencount, 1);
    rtfree(rt);
    rt = NULL;

    if (error) {
        NECPLOG(LOG_ERR, "necp_client_get_interface_address local address selection failed (%d)", error);
        goto done;
    }

    // The selected address must fit in the caller-provided buffer.
    if (local_address.ss_len > buffer_size) {
        error = EMSGSIZE;
        NECPLOG(LOG_ERR, "necp_client_get_interface_address local address too long for buffer (%u)",
            local_address.ss_len);
        goto done;
    }

    error = copyout(&local_address, uap->buffer, local_address.ss_len);
    if (error) {
        NECPLOG(LOG_ERR, "necp_client_get_interface_address copyout error (%d)", error);
        goto done;
    }
done:
    *retval = error;

    return error;
}
10459
10460 extern const char *proc_name_address(void *p);
10461
10462 int
necp_stats_ctor(struct skmem_obj_info * oi,struct skmem_obj_info * oim,void * arg,uint32_t skmflag)10463 necp_stats_ctor(struct skmem_obj_info *oi, struct skmem_obj_info *oim,
10464 void *arg, uint32_t skmflag)
10465 {
10466 #pragma unused(arg, skmflag)
10467 struct necp_all_kstats * __single kstats = SKMEM_OBJ_ADDR(oi);
10468
10469 ASSERT(oim != NULL && SKMEM_OBJ_ADDR(oim) != NULL);
10470 ASSERT(SKMEM_OBJ_SIZE(oi) == SKMEM_OBJ_SIZE(oim));
10471
10472 kstats->necp_stats_ustats = SKMEM_OBJ_ADDR(oim);
10473
10474 return 0;
10475 }
10476
10477 int
necp_stats_dtor(void * addr,void * arg)10478 necp_stats_dtor(void *addr, void *arg)
10479 {
10480 #pragma unused(addr, arg)
10481 struct necp_all_kstats * __single kstats = addr;
10482
10483 kstats->necp_stats_ustats = NULL;
10484
10485 return 0;
10486 }
10487
10488 static void
necp_fd_insert_stats_arena(struct necp_fd_data * fd_data,struct necp_arena_info * nai)10489 necp_fd_insert_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai)
10490 {
10491 NECP_FD_ASSERT_LOCKED(fd_data);
10492 VERIFY(!(nai->nai_flags & NAIF_ATTACHED));
10493 VERIFY(nai->nai_chain.le_next == NULL && nai->nai_chain.le_prev == NULL);
10494
10495 LIST_INSERT_HEAD(&fd_data->stats_arena_list, nai, nai_chain);
10496 nai->nai_flags |= NAIF_ATTACHED;
10497 necp_arena_info_retain(nai); // for the list
10498 }
10499
10500 static void
necp_fd_remove_stats_arena(struct necp_fd_data * fd_data,struct necp_arena_info * nai)10501 necp_fd_remove_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai)
10502 {
10503 #pragma unused(fd_data)
10504 NECP_FD_ASSERT_LOCKED(fd_data);
10505 VERIFY(nai->nai_flags & NAIF_ATTACHED);
10506 VERIFY(nai->nai_use_count >= 1);
10507
10508 LIST_REMOVE(nai, nai_chain);
10509 nai->nai_flags &= ~NAIF_ATTACHED;
10510 nai->nai_chain.le_next = NULL;
10511 nai->nai_chain.le_prev = NULL;
10512 necp_arena_info_release(nai); // for the list
10513 }
10514
// Redirect (defunct) the memory mapping of the fd's currently-active
// stats arena for `proc`, detach it from the active slot, and return it
// so the caller can tear down its flows. Returns NULL when there was no
// active arena. Caller must hold the fd lock.
static struct necp_arena_info *
necp_fd_mredirect_stats_arena(struct necp_fd_data *fd_data, struct proc *proc)
{
    struct necp_arena_info *nai, *nai_ret = NULL;

    NECP_FD_ASSERT_LOCKED(fd_data);

    // Redirect currently-active stats arena and remove it from the active state;
    // upon process resumption, new flow request would trigger the creation of
    // another active arena.
    if ((nai = fd_data->stats_arena_active) != NULL) {
        boolean_t need_defunct = FALSE;

        // The active arena cannot already be redirected/defunct, and is
        // referenced by both fd_data and the arena list (hence >= 2).
        ASSERT(!(nai->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT)));
        VERIFY(nai->nai_use_count >= 2);
        ASSERT(nai->nai_arena != NULL);
        ASSERT(nai->nai_mmap.ami_mapref != NULL);

        int err = skmem_arena_mredirect(nai->nai_arena, &nai->nai_mmap, proc, &need_defunct);
        VERIFY(err == 0);
        // must be TRUE since we don't mmap the arena more than once
        VERIFY(need_defunct == TRUE);

        nai->nai_flags |= NAIF_REDIRECT;
        nai_ret = nai; // return to caller

        necp_arena_info_release(nai); // for fd_data
        fd_data->stats_arena_active = nai = NULL;
    }

#if (DEVELOPMENT || DEBUG)
    // make sure this list now contains nothing but redirected/defunct arenas
    LIST_FOREACH(nai, &fd_data->stats_arena_list, nai_chain) {
        ASSERT(nai->nai_use_count >= 1);
        ASSERT(nai->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT));
    }
#endif /* (DEVELOPMENT || DEBUG) */

    return nai_ret;
}
10555
10556 static void
necp_arena_info_retain(struct necp_arena_info * nai)10557 necp_arena_info_retain(struct necp_arena_info *nai)
10558 {
10559 nai->nai_use_count++;
10560 VERIFY(nai->nai_use_count != 0);
10561 }
10562
10563 static void
necp_arena_info_release(struct necp_arena_info * nai)10564 necp_arena_info_release(struct necp_arena_info *nai)
10565 {
10566 VERIFY(nai->nai_use_count > 0);
10567 if (--nai->nai_use_count == 0) {
10568 necp_arena_info_free(nai);
10569 }
10570 }
10571
10572 static struct necp_arena_info *
necp_arena_info_alloc(void)10573 necp_arena_info_alloc(void)
10574 {
10575 return zalloc_flags(necp_arena_info_zone, Z_WAITOK | Z_ZERO);
10576 }
10577
// Final teardown of an arena info once its use count reaches zero: unmap
// and release the underlying skmem arena (if any) and return the wrapper
// to its zone. Must not be called while still linked on an fd's list.
static void
necp_arena_info_free(struct necp_arena_info *nai)
{
    VERIFY(nai->nai_chain.le_next == NULL && nai->nai_chain.le_prev == NULL);
    VERIFY(nai->nai_use_count == 0);

    // NOTE: destroying the arena requires that all outstanding objects
    // that were allocated have been freed, else it will assert.
    if (nai->nai_arena != NULL) {
        skmem_arena_munmap(nai->nai_arena, &nai->nai_mmap);
        skmem_arena_release(nai->nai_arena);
        OSDecrementAtomic(&necp_arena_count);
        nai->nai_arena = NULL;
        nai->nai_roff = 0;
    }

    // The unmap above must have cleared all mapping state.
    ASSERT(nai->nai_arena == NULL);
    ASSERT(nai->nai_mmap.ami_mapref == NULL);
    ASSERT(nai->nai_mmap.ami_arena == NULL);
    ASSERT(nai->nai_mmap.ami_maptask == TASK_NULL);

    zfree(necp_arena_info_zone, nai);
}
10601
// Create a new per-process stats arena (mirrored ustats/kstats regions)
// sized for obj_cnt objects of obj_size bytes each, map it into process
// p, and install it as fd_data's active stats arena. Returns 0 or an
// errno; on failure the partially-built arena info is freed. Caller must
// hold the fd lock and must not already have an active arena.
static int
necp_arena_create(struct necp_fd_data *fd_data, size_t obj_size, size_t obj_cnt, struct proc *p)
{
    struct skmem_region_params srp_ustats = {};
    struct skmem_region_params srp_kstats = {};
    struct necp_arena_info *nai;
    char name[32];
    const char *__null_terminated name_ptr = NULL;
    int error = 0;

    NECP_FD_ASSERT_LOCKED(fd_data);
    ASSERT(fd_data->stats_arena_active == NULL);
    ASSERT(p != PROC_NULL);
    ASSERT(proc_pid(p) == fd_data->proc_pid);

    // inherit the default parameters for the stats region
    srp_ustats = *skmem_get_default(SKMEM_REGION_USTATS);
    srp_kstats = *skmem_get_default(SKMEM_REGION_KSTATS);

    // enable multi-segment mode
    srp_ustats.srp_cflags &= ~SKMEM_REGION_CR_MONOLITHIC;
    srp_kstats.srp_cflags &= ~SKMEM_REGION_CR_MONOLITHIC;

    // configure and adjust the region parameters
    srp_ustats.srp_r_obj_cnt = srp_kstats.srp_r_obj_cnt = obj_cnt;
    srp_ustats.srp_r_obj_size = srp_kstats.srp_r_obj_size = obj_size;
    skmem_region_params_config(&srp_ustats);
    skmem_region_params_config(&srp_kstats);

    nai = necp_arena_info_alloc();

    // Name the arena after the fd's arena generation, process name and pid.
    nai->nai_proc_pid = fd_data->proc_pid;
    name_ptr = tsnprintf(name, sizeof(name), "stats-%u.%s.%d", fd_data->stats_arena_gencnt, proc_name_address(p), fd_data->proc_pid);
    nai->nai_arena = skmem_arena_create_for_necp(name_ptr, &srp_ustats, &srp_kstats, &error);
    ASSERT(nai->nai_arena != NULL || error != 0);
    if (error != 0) {
        NECPLOG(LOG_ERR, "failed to create stats arena for pid %d\n", fd_data->proc_pid);
    } else {
        OSIncrementAtomic(&necp_arena_count);

        // Get region offsets from base of mmap span; the arena
        // doesn't need to be mmap'd at this point, since we simply
        // compute the relative offset.
        nai->nai_roff = skmem_arena_get_region_offset(nai->nai_arena, SKMEM_REGION_USTATS);

        // map to the task/process; upon success, the base address of the region
        // will be returned in nai_mmap.ami_mapaddr; this can be communicated to
        // the process.
        error = skmem_arena_mmap(nai->nai_arena, p, &nai->nai_mmap);
        if (error != 0) {
            NECPLOG(LOG_ERR, "failed to map stats arena for pid %d\n", fd_data->proc_pid);
        }
    }

    if (error == 0) {
        // Success: fd_data holds one reference, the arena list another.
        fd_data->stats_arena_active = nai;
        necp_arena_info_retain(nai); // for fd_data
        necp_fd_insert_stats_arena(fd_data, nai);
        ++fd_data->stats_arena_gencnt;
    } else {
        // Failure: nai was never attached anywhere; free it directly.
        necp_arena_info_free(nai);
    }

    return error;
}
10667
// Allocate one kstats object from the fd's active stats arena and zero
// both it and its user-visible (ustats) mirror. On success returns 0 and
// sets *stats_arena (borrowed pointer; no reference taken here),
// *kstats_kaddr (kernel address of the object) and *off (offset of the
// object relative to the arena's mmap base — valid for both mirrors).
// Returns ENOMEM when the cache cannot satisfy the allocation. Caller
// must hold the fd lock and have an active arena.
static int
necp_arena_stats_obj_alloc(struct necp_fd_data *fd_data,
    mach_vm_offset_t *off,
    struct necp_arena_info **stats_arena,
    void **kstats_kaddr,
    boolean_t cansleep)
{
    struct skmem_cache *kstats_cp = NULL;
    struct skmem_obj_info kstats_oi = {};
    uint32_t ustats_obj_sz = 0;
    void *__sized_by(ustats_obj_sz) ustats_obj = NULL;
    uint32_t kstats_obj_sz = 0;
    void *__sized_by(kstats_obj_sz) kstats_obj = NULL;
    void * __indexable kstats_obj_tmp = NULL;
    struct necp_all_kstats * __single kstats = NULL;

    ASSERT(off != NULL);
    ASSERT(stats_arena != NULL && *stats_arena == NULL);
    ASSERT(kstats_kaddr != NULL && *kstats_kaddr == NULL);

    NECP_FD_ASSERT_LOCKED(fd_data);
    ASSERT(fd_data->stats_arena_active != NULL);
    ASSERT(fd_data->stats_arena_active->nai_arena != NULL);

    // Allocate from the arena's kstats cache; may block iff cansleep.
    kstats_cp = skmem_arena_necp(fd_data->stats_arena_active->nai_arena)->arc_kstats_cache;
    if ((kstats_obj_tmp = skmem_cache_alloc(kstats_cp, (cansleep ? SKMEM_SLEEP : SKMEM_NOSLEEP))) == NULL) {
        return ENOMEM;
    }
    skmem_cache_get_obj_info(kstats_cp, kstats_obj_tmp, &kstats_oi, NULL);
    ASSERT(SKMEM_OBJ_SIZE(&kstats_oi) >= sizeof(struct necp_all_stats));
    kstats_obj = kstats_obj_tmp;
    kstats_obj_sz = SKMEM_OBJ_SIZE(&kstats_oi);

    // The ustats mirror was linked by necp_stats_ctor(); forge bounds for
    // it using the kstats object size, since the regions are mirrored.
    kstats = (struct necp_all_kstats*)kstats_obj;
    ustats_obj = __unsafe_forge_bidi_indexable(uint8_t *, kstats->necp_stats_ustats, kstats_obj_sz);
    ustats_obj_sz = kstats_obj_sz;

    // Hand back zeroed user and kernel stats to the caller.
    bzero(ustats_obj, ustats_obj_sz);
    bzero(&kstats->necp_stats_comm, sizeof(struct necp_all_stats));
    *stats_arena = fd_data->stats_arena_active;
    *kstats_kaddr = kstats_obj;
    // kstats and ustats are mirrored and have the same offset
    *off = fd_data->stats_arena_active->nai_roff + SKMEM_OBJ_ROFF(&kstats_oi);

    return 0;
}
10714
10715 static void
necp_arena_stats_obj_free(struct necp_fd_data * fd_data,struct necp_arena_info * stats_arena,void ** kstats_kaddr,mach_vm_address_t * ustats_uaddr)10716 necp_arena_stats_obj_free(struct necp_fd_data *fd_data, struct necp_arena_info *stats_arena, void **kstats_kaddr, mach_vm_address_t *ustats_uaddr)
10717 {
10718 #pragma unused(fd_data)
10719 NECP_FD_ASSERT_LOCKED(fd_data);
10720
10721 ASSERT(stats_arena != NULL);
10722 ASSERT(stats_arena->nai_arena != NULL);
10723 ASSERT(kstats_kaddr != NULL && *kstats_kaddr != NULL);
10724 ASSERT(ustats_uaddr != NULL);
10725
10726 skmem_cache_free(skmem_arena_necp(stats_arena->nai_arena)->arc_kstats_cache, *kstats_kaddr);
10727 *kstats_kaddr = NULL;
10728 *ustats_uaddr = 0;
10729 }
10730
10731 // This routine returns the KVA of the sysctls object, as well as the
10732 // offset of that object relative to the mmap base address for the
10733 // task/process.
10734 static void *
necp_arena_sysctls_obj(struct necp_fd_data * fd_data,mach_vm_offset_t * off,size_t * size)10735 necp_arena_sysctls_obj(struct necp_fd_data *fd_data, mach_vm_offset_t *off, size_t *size)
10736 {
10737 void * __single objaddr;
10738
10739 NECP_FD_ASSERT_LOCKED(fd_data);
10740 ASSERT(fd_data->sysctl_arena != NULL);
10741
10742 // kernel virtual address of the sysctls object
10743 objaddr = skmem_arena_system_sysctls_obj_addr(fd_data->sysctl_arena);
10744 ASSERT(objaddr != NULL);
10745
10746 // Return the relative offset of the sysctls object; there is
10747 // only 1 object in the entire sysctls region, and therefore the
10748 // object's offset is simply the region's offset in the arena.
10749 // (sysctl_mmap.ami_mapaddr + offset) is the address of this object
10750 // in the task/process.
10751 if (off != NULL) {
10752 *off = fd_data->system_sysctls_roff;
10753 }
10754
10755 if (size != NULL) {
10756 *size = skmem_arena_system_sysctls_obj_size(fd_data->sysctl_arena);
10757 ASSERT(*size != 0);
10758 }
10759
10760 return objaddr;
10761 }
10762
// Release fd_data's references on stats arenas. When closing, drop
// everything unconditionally; when reaping (closing == FALSE), drop only
// arenas that are otherwise idle (no client/flow references remaining).
// Caller must hold the fd lock.
static void
necp_stats_arenas_destroy(struct necp_fd_data *fd_data, boolean_t closing)
{
    struct necp_arena_info *nai, *nai_tmp;

    NECP_FD_ASSERT_LOCKED(fd_data);

    // If reaping (not closing), release reference only for idle active arena; the reference
    // count must be 2 by now, when it's not being referred to by any clients/flows.
    if ((nai = fd_data->stats_arena_active) != NULL && (closing || nai->nai_use_count == 2)) {
        VERIFY(nai->nai_use_count >= 2);
        necp_arena_info_release(nai); // for fd_data
        fd_data->stats_arena_active = NULL;
    }

    // clean up any defunct arenas left in the list
    LIST_FOREACH_SAFE(nai, &fd_data->stats_arena_list, nai_chain, nai_tmp) {
        // If reaping, release reference if the list holds the last one
        if (closing || nai->nai_use_count == 1) {
            VERIFY(nai->nai_use_count >= 1);
            // callee unchains nai (and may free it)
            necp_fd_remove_stats_arena(fd_data, nai);
        }
    }
}
10788
// Tear down the fd's sysctl arena, if any: unmap it from the process,
// release the skmem arena, and clear the cached region offset. Caller
// must hold the fd lock.
static void
necp_sysctl_arena_destroy(struct necp_fd_data *fd_data)
{
    NECP_FD_ASSERT_LOCKED(fd_data);

    // NOTE: destroying the arena requires that all outstanding objects
    // that were allocated have been freed, else it will assert.
    if (fd_data->sysctl_arena != NULL) {
        skmem_arena_munmap(fd_data->sysctl_arena, &fd_data->sysctl_mmap);
        skmem_arena_release(fd_data->sysctl_arena);
        OSDecrementAtomic(&necp_sysctl_arena_count);
        fd_data->sysctl_arena = NULL;
        fd_data->system_sysctls_roff = 0;
    }
}
10804
10805 static int
necp_arena_initialize(struct necp_fd_data * fd_data,bool locked)10806 necp_arena_initialize(struct necp_fd_data *fd_data, bool locked)
10807 {
10808 int error = 0;
10809 size_t stats_obj_size = MAX(sizeof(struct necp_all_stats), sizeof(struct necp_all_kstats));
10810
10811 if (!locked) {
10812 NECP_FD_LOCK(fd_data);
10813 }
10814 if (fd_data->stats_arena_active == NULL) {
10815 error = necp_arena_create(fd_data, stats_obj_size,
10816 NECP_MAX_PER_PROCESS_CLIENT_STATISTICS_STRUCTS,
10817 current_proc());
10818 }
10819 if (!locked) {
10820 NECP_FD_UNLOCK(fd_data);
10821 }
10822
10823 return error;
10824 }
10825
// Lazily create the per-process sysctl arena and map it into the current
// process. If `locked` is false the fd lock is taken/dropped here. No-op
// (returns 0) when the arena already exists. On mapping failure the
// half-built arena is destroyed before the error is returned.
static int
necp_sysctl_arena_initialize(struct necp_fd_data *fd_data, bool locked)
{
    int error = 0;

    if (!locked) {
        NECP_FD_LOCK(fd_data);
    }

    NECP_FD_ASSERT_LOCKED(fd_data);

    if (fd_data->sysctl_arena == NULL) {
        char name[32];
        const char *__null_terminated name_ptr = NULL;
        struct proc *p = current_proc();

        ASSERT(p != PROC_NULL);
        ASSERT(proc_pid(p) == fd_data->proc_pid);

        // Name the arena after the process name and pid.
        name_ptr = tsnprintf(name, sizeof(name), "sysctl.%s.%d", proc_name_address(p), fd_data->proc_pid);
        fd_data->sysctl_arena = skmem_arena_create_for_system(name_ptr, &error);
        ASSERT(fd_data->sysctl_arena != NULL || error != 0);
        if (error != 0) {
            NECPLOG(LOG_ERR, "failed to create arena for pid %d\n", fd_data->proc_pid);
        } else {
            OSIncrementAtomic(&necp_sysctl_arena_count);

            // Get region offsets from base of mmap span; the arena
            // doesn't need to be mmap'd at this point, since we simply
            // compute the relative offset.
            fd_data->system_sysctls_roff = skmem_arena_get_region_offset(fd_data->sysctl_arena, SKMEM_REGION_SYSCTLS);

            // map to the task/process; upon success, the base address of the region
            // will be returned in nai_mmap.ami_mapaddr; this can be communicated to
            // the process.
            error = skmem_arena_mmap(fd_data->sysctl_arena, p, &fd_data->sysctl_mmap);
            if (error != 0) {
                NECPLOG(LOG_ERR, "failed to map sysctl arena for pid %d\n", fd_data->proc_pid);
                necp_sysctl_arena_destroy(fd_data);
            }
        }
    }

    if (!locked) {
        NECP_FD_UNLOCK(fd_data);
    }

    return error;
}
10875
// Handle a stats-buffer request for a flow: validate the request (bufreq
// id, supported type/version pair, and exact size), allocate the per-flow
// stats object on first use, register the flow on the global
// stats-collection list, and report the user-space address of the stats
// buffer via bufreq->necp_stats_bufreq_uaddr (and the chosen type/version
// via out_header, when given). Repeat requests simply return the existing
// buffer. Caller must hold both the client and fd locks.
static int
necp_client_stats_bufreq(struct necp_fd_data *fd_data,
    struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    struct necp_stats_bufreq *bufreq,
    struct necp_stats_hdr *out_header)
{
    int error = 0;
    NECP_CLIENT_ASSERT_LOCKED(client);
    NECP_FD_ASSERT_LOCKED(fd_data);

    // Accept only current-version TCP/UDP/QUIC requests with the exact
    // necp_all_stats size.
    if ((bufreq->necp_stats_bufreq_id == NECP_CLIENT_STATISTICS_BUFREQ_ID) &&
        ((bufreq->necp_stats_bufreq_type == NECP_CLIENT_STATISTICS_TYPE_TCP &&
        bufreq->necp_stats_bufreq_ver == NECP_CLIENT_STATISTICS_TYPE_TCP_CURRENT_VER) ||
        (bufreq->necp_stats_bufreq_type == NECP_CLIENT_STATISTICS_TYPE_UDP &&
        bufreq->necp_stats_bufreq_ver == NECP_CLIENT_STATISTICS_TYPE_UDP_CURRENT_VER) ||
        (bufreq->necp_stats_bufreq_type == NECP_CLIENT_STATISTICS_TYPE_QUIC &&
        bufreq->necp_stats_bufreq_ver == NECP_CLIENT_STATISTICS_TYPE_QUIC_CURRENT_VER)) &&
        (bufreq->necp_stats_bufreq_size == sizeof(struct necp_all_stats))) {
        // There should be one and only one stats allocation per client.
        // If asked more than once, we just repeat ourselves.
        if (flow_registration->ustats_uaddr == 0) {
            mach_vm_offset_t off;
            ASSERT(flow_registration->stats_arena == NULL);
            ASSERT(flow_registration->kstats_kaddr == NULL);
            ASSERT(flow_registration->ustats_uaddr == 0);
            error = necp_arena_stats_obj_alloc(fd_data, &off, &flow_registration->stats_arena, &flow_registration->kstats_kaddr, FALSE);
            if (error == 0) {
                // upon success, hold a reference for the client; this is released when the client is removed/closed
                ASSERT(flow_registration->stats_arena != NULL);
                necp_arena_info_retain(flow_registration->stats_arena);

                // compute user address based on mapping info and object offset
                flow_registration->ustats_uaddr = flow_registration->stats_arena->nai_mmap.ami_mapaddr + off;

                // add to collect_stats list
                NECP_STATS_LIST_LOCK_EXCLUSIVE();
                necp_client_retain_locked(client); // Add a reference to the client
                LIST_INSERT_HEAD(&necp_collect_stats_flow_list, flow_registration, collect_stats_chain);
                NECP_STATS_LIST_UNLOCK();
                necp_schedule_collect_stats_clients(FALSE);
            } else {
                ASSERT(flow_registration->stats_arena == NULL);
                ASSERT(flow_registration->kstats_kaddr == NULL);
            }
        }
        if (flow_registration->ustats_uaddr != 0) {
            ASSERT(error == 0);
            ASSERT(flow_registration->stats_arena != NULL);
            ASSERT(flow_registration->kstats_kaddr != NULL);

            // Stamp the requested type/version into the user stats header.
            // The tcp_stats header is used for all types; presumably the
            // header layout is shared across the all_stats_u union.
            struct necp_all_kstats *kstats = (struct necp_all_kstats *)flow_registration->kstats_kaddr;
            kstats->necp_stats_ustats->all_stats_u.tcp_stats.necp_tcp_hdr.necp_stats_type = bufreq->necp_stats_bufreq_type;
            kstats->necp_stats_ustats->all_stats_u.tcp_stats.necp_tcp_hdr.necp_stats_ver = bufreq->necp_stats_bufreq_ver;

            if (out_header) {
                out_header->necp_stats_type = bufreq->necp_stats_bufreq_type;
                out_header->necp_stats_ver = bufreq->necp_stats_bufreq_ver;
            }

            bufreq->necp_stats_bufreq_uaddr = flow_registration->ustats_uaddr;
        }
    } else {
        error = EINVAL;
    }

    return error;
}
10944
// Open the ntstat (network statistics) userland provider session for a
// flow whose stats buffer has just been set up. Only the _VER_1 versions
// are accepted; QUIC additionally requires the flow to allow a nexus.
// Returns 0 on success, EIO when ntstat refuses the session, or ENOTSUP
// for unknown type/version combinations.
static int
necp_client_stats_initial(struct necp_client_flow_registration *flow_registration, uint32_t stats_type, uint32_t stats_ver)
{
    // An attempted create
    assert(flow_registration->stats_handler_context == NULL);
    assert(flow_registration->stats_arena);
    assert(flow_registration->ustats_uaddr);
    assert(flow_registration->kstats_kaddr);

    int error = 0;
    uint64_t ntstat_properties = necp_find_netstat_initial_properties(flow_registration->client);

    switch (stats_type) {
    case NECP_CLIENT_STATISTICS_TYPE_TCP: {
        if (stats_ver == NECP_CLIENT_STATISTICS_TYPE_TCP_VER_1) {
            flow_registration->stats_handler_context = ntstat_userland_stats_open((userland_stats_provider_context *)flow_registration,
                NSTAT_PROVIDER_TCP_USERLAND, ntstat_properties, necp_request_tcp_netstats, necp_find_extension_info);
            if (flow_registration->stats_handler_context == NULL) {
                error = EIO;
            }
        } else {
            error = ENOTSUP;
        }
        break;
    }
    case NECP_CLIENT_STATISTICS_TYPE_UDP: {
        if (stats_ver == NECP_CLIENT_STATISTICS_TYPE_UDP_VER_1) {
            flow_registration->stats_handler_context = ntstat_userland_stats_open((userland_stats_provider_context *)flow_registration,
                NSTAT_PROVIDER_UDP_USERLAND, ntstat_properties, necp_request_udp_netstats, necp_find_extension_info);
            if (flow_registration->stats_handler_context == NULL) {
                error = EIO;
            }
        } else {
            error = ENOTSUP;
        }
        break;
    }
    case NECP_CLIENT_STATISTICS_TYPE_QUIC: {
        // QUIC stats are only meaningful for nexus-backed flows.
        if (stats_ver == NECP_CLIENT_STATISTICS_TYPE_QUIC_VER_1 && flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS) {
            flow_registration->stats_handler_context = ntstat_userland_stats_open((userland_stats_provider_context *)flow_registration,
                NSTAT_PROVIDER_QUIC_USERLAND, ntstat_properties, necp_request_quic_netstats, necp_find_extension_info);
            if (flow_registration->stats_handler_context == NULL) {
                error = EIO;
            }
        } else {
            error = ENOTSUP;
        }
        break;
    }
    default: {
        error = ENOTSUP;
        break;
    }
    }
    return error;
}
11001
// Set up the stats region for a flow registration (via
// necp_client_stats_bufreq) and, on first setup, open the ntstat handler
// for the flow and emit the INIT event. Caller must hold the client and
// fd locks, and the fd must have a usable (non-redirected, non-defunct)
// active stats arena.
static int
necp_stats_initialize(struct necp_fd_data *fd_data,
    struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    struct necp_stats_bufreq *bufreq)
{
    int error = 0;
    struct necp_stats_hdr stats_hdr = {};

    NECP_CLIENT_ASSERT_LOCKED(client);
    NECP_FD_ASSERT_LOCKED(fd_data);
    VERIFY(fd_data->stats_arena_active != NULL);
    VERIFY(fd_data->stats_arena_active->nai_arena != NULL);
    VERIFY(!(fd_data->stats_arena_active->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT)));

    if (bufreq == NULL) {
        return EINVAL;
    }

    // Setup stats region
    error = necp_client_stats_bufreq(fd_data, client, flow_registration, bufreq, &stats_hdr);
    if (error) {
        return error;
    }
    // Notify ntstat about new flow
    if (flow_registration->stats_handler_context == NULL) {
        error = necp_client_stats_initial(flow_registration, stats_hdr.necp_stats_type, stats_hdr.necp_stats_ver);
        if (flow_registration->stats_handler_context != NULL) {
            ntstat_userland_stats_event(flow_registration->stats_handler_context, NECP_CLIENT_STATISTICS_EVENT_INIT);
        }
        NECP_CLIENT_FLOW_LOG(client, flow_registration, "Initialized stats <error %d>", error);
    }

    return error;
}
11037
11038 static int
necp_aop_offload_stats_initialize(struct necp_client_flow_registration * flow_registration,uuid_t netagent_uuid)11039 necp_aop_offload_stats_initialize(struct necp_client_flow_registration *flow_registration,
11040 uuid_t netagent_uuid)
11041 {
11042 int error = 0;
11043
11044 struct necp_client_flow *flow = NULL;
11045 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
11046 // Verify that the client nexus agent matches
11047 if (flow->nexus &&
11048 uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) {
11049 ASSERT(flow->flow_tag != 0);
11050 ASSERT(flow->aop_offload);
11051
11052 error = net_aop_setup_flow(flow->flow_tag,
11053 true, &flow->stats_index);
11054 if (error != 0) {
11055 NECPLOG(LOG_ERR, "failed to setup aop flow "
11056 "stats area, error %d", error);
11057 } else {
11058 flow->aop_stat_index_valid = true;
11059 }
11060 break;
11061 }
11062 }
11063
11064 return error;
11065 }
11066
11067 static void
necp_aop_offload_stats_destroy(struct necp_client_flow * flow)11068 necp_aop_offload_stats_destroy(struct necp_client_flow *flow)
11069 {
11070 int error = 0;
11071
11072 if (flow->flow_tag != 0 && flow->aop_stat_index_valid) {
11073 error = net_aop_setup_flow(flow->flow_tag,
11074 false, &flow->stats_index);
11075 if (error != 0) {
11076 NECPLOG(LOG_ERR, "failed to cleanup aop offload stats with error %d", error);
11077 }
11078 flow->aop_stat_index_valid = false;
11079 }
11080 return;
11081 }
11082
11083 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_map_sysctls(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11084 necp_client_map_sysctls(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11085 {
11086 int result = 0;
11087 if (!retval) {
11088 retval = &result;
11089 }
11090
11091 do {
11092 mach_vm_address_t uaddr = 0;
11093 if (uap->buffer_size != sizeof(uaddr)) {
11094 *retval = EINVAL;
11095 break;
11096 }
11097
11098 *retval = necp_sysctl_arena_initialize(fd_data, false);
11099 if (*retval != 0) {
11100 break;
11101 }
11102
11103 mach_vm_offset_t off = 0;
11104 void * __single location = NULL;
11105 NECP_FD_LOCK(fd_data);
11106 location = necp_arena_sysctls_obj(fd_data, &off, NULL);
11107 NECP_FD_UNLOCK(fd_data);
11108
11109 if (location == NULL) {
11110 *retval = ENOENT;
11111 break;
11112 }
11113
11114 uaddr = fd_data->sysctl_mmap.ami_mapaddr + off;
11115 *retval = copyout(&uaddr, uap->buffer, sizeof(uaddr));
11116 } while (false);
11117
11118 return *retval;
11119 }
11120
#endif /* SKYWALK */
11122
// Sys-call action handler: copy the current route's nstat counters for a
// client out to user space as a struct necp_stat_counts. RTT fields come
// either from the route's nstat counters or, when the
// necp_client_stats_use_route_metrics sysctl is set, from the route
// metrics themselves. All locks are dropped before the copyout.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_route_statistics(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
    int error = 0;
    struct necp_client *client = NULL;
    uuid_t client_id;

    // client_id must be a UUID and the buffer large enough for the counts.
    if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
        uap->buffer_size < sizeof(struct necp_stat_counts) || uap->buffer == 0) {
        NECPLOG0(LOG_ERR, "necp_client_copy_route_statistics bad input");
        error = EINVAL;
        goto done;
    }

    error = copyin(uap->client_id, client_id, sizeof(uuid_t));
    if (error) {
        NECPLOG(LOG_ERR, "necp_client_copy_route_statistics copyin client_id error (%d)", error);
        goto done;
    }

    // Lock
    NECP_FD_LOCK(fd_data);
    client = necp_client_fd_find_client_and_lock(fd_data, client_id);
    if (client != NULL) {
        NECP_CLIENT_ROUTE_LOCK(client);
        // Snapshot the counters under the route lock; a client without a
        // route (or without rt_stats) yields all-zero counts.
        struct necp_stat_counts route_stats = {};
        if (client->current_route != NULL && client->current_route->rt_stats != NULL) {
            struct nstat_counts *rt_stats = client->current_route->rt_stats;
            route_stats.necp_stat_rxpackets = os_atomic_load(&rt_stats->nstat_rxpackets, relaxed);
            route_stats.necp_stat_rxbytes = os_atomic_load(&rt_stats->nstat_rxbytes, relaxed);
            route_stats.necp_stat_txpackets = os_atomic_load(&rt_stats->nstat_txpackets, relaxed);
            route_stats.necp_stat_txbytes = os_atomic_load(&rt_stats->nstat_txbytes, relaxed);
            route_stats.necp_stat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
            route_stats.necp_stat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
            route_stats.necp_stat_txretransmit = rt_stats->nstat_txretransmit;
            route_stats.necp_stat_connectattempts = rt_stats->nstat_connectattempts;
            route_stats.necp_stat_connectsuccesses = rt_stats->nstat_connectsuccesses;
            if (__probable(necp_client_stats_use_route_metrics == 0)) {
                route_stats.necp_stat_min_rtt = rt_stats->nstat_min_rtt;
                route_stats.necp_stat_avg_rtt = rt_stats->nstat_avg_rtt;
                route_stats.necp_stat_var_rtt = rt_stats->nstat_var_rtt;
            } else {
                route_stats.necp_stat_min_rtt = client->current_route->rtt_min;
                route_stats.necp_stat_avg_rtt = client->current_route->rt_rmx.rmx_rtt;
                route_stats.necp_stat_var_rtt = client->current_route->rt_rmx.rmx_rttvar;
            }
            route_stats.necp_stat_route_flags = client->current_route->rt_flags;
        }

        // Unlock before copying out
        NECP_CLIENT_ROUTE_UNLOCK(client);
        NECP_CLIENT_UNLOCK(client);
        NECP_FD_UNLOCK(fd_data);

        error = copyout(&route_stats, uap->buffer, sizeof(route_stats));
        if (error) {
            NECPLOG(LOG_ERR, "necp_client_copy_route_statistics copyout error (%d)", error);
        }
    } else {
        // Unlock
        NECP_FD_UNLOCK(fd_data);
        error = ENOENT;
    }


done:
    *retval = error;
    return error;
}
11192
/*
 * NECP_CLIENT_ACTION_UPDATE_CACHE handler.
 *
 * Copies a client-supplied TCP heuristics cache entry (ECN or TFO) in from
 * userspace and applies it to the kernel's TCP heuristics state for the
 * interface and addresses of the client's first flow.
 *
 * uap->client_id / client_id_len: client UUID (also used to find the flow).
 * uap->buffer / buffer_size: a necp_cache_buffer describing the payload.
 *
 * Returns 0 on success; EINVAL or ENOENT on failure. The result is also
 * stored in *retval.
 */
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_update_cache(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;

	// The input must be exactly one UUID
	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_update_cache copyin client_id error (%d)", error);
		goto done;
	}

	// Look up the client under the fd lock; on success the client lock is held
	NECP_FD_LOCK(fd_data);
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client == NULL) {
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
		goto done;
	}

	struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
	if (flow_registration == NULL) {
		NECP_CLIENT_UNLOCK(client);
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
		goto done;
	}

	NECP_CLIENT_ROUTE_LOCK(client);
	// This needs to be changed when TFO/ECN is supported by multiple flows
	struct necp_client_flow *flow = LIST_FIRST(&flow_registration->flow_list);
	// The flow must have IPv4/IPv6 addresses on both ends before heuristics apply
	if (flow == NULL ||
	    (flow->remote_addr.sa.sa_family != AF_INET &&
	    flow->remote_addr.sa.sa_family != AF_INET6) ||
	    (flow->local_addr.sa.sa_family != AF_INET &&
	    flow->local_addr.sa.sa_family != AF_INET6)) {
		error = EINVAL;
		NECPLOG(LOG_ERR, "necp_client_update_cache no flow error (%d)", error);
		goto done_unlock;
	}

	necp_cache_buffer cache_buffer;
	memset(&cache_buffer, 0, sizeof(cache_buffer));

	if (uap->buffer_size != sizeof(necp_cache_buffer) ||
	    uap->buffer == USER_ADDR_NULL) {
		error = EINVAL;
		goto done_unlock;
	}

	error = copyin(uap->buffer, &cache_buffer, sizeof(cache_buffer));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_update_cache copyin cache buffer error (%d)", error);
		goto done_unlock;
	}

	if (cache_buffer.necp_cache_buf_type == NECP_CLIENT_CACHE_TYPE_ECN &&
	    cache_buffer.necp_cache_buf_ver == NECP_CLIENT_CACHE_TYPE_ECN_VER_1) {
		// ECN cache entry: validate the nested buffer, then copy it in
		if (cache_buffer.necp_cache_buf_size != sizeof(necp_tcp_ecn_cache) ||
		    cache_buffer.necp_cache_buf_addr == USER_ADDR_NULL) {
			error = EINVAL;
			goto done_unlock;
		}

		necp_tcp_ecn_cache ecn_cache_buffer;
		memset(&ecn_cache_buffer, 0, sizeof(ecn_cache_buffer));

		error = copyin(cache_buffer.necp_cache_buf_addr, &ecn_cache_buffer, sizeof(necp_tcp_ecn_cache));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_update_cache copyin ecn cache buffer error (%d)", error);
			goto done_unlock;
		}

		if (client->current_route != NULL && client->current_route->rt_ifp != NULL) {
			// Only platform binaries are trusted to report heuristics success
			if (!client->platform_binary) {
				ecn_cache_buffer.necp_tcp_ecn_heuristics_success = 0;
			}
			tcp_heuristics_ecn_update(&ecn_cache_buffer, client->current_route->rt_ifp,
			    (union sockaddr_in_4_6 *)&flow->local_addr);
		}
	} else if (cache_buffer.necp_cache_buf_type == NECP_CLIENT_CACHE_TYPE_TFO &&
	    cache_buffer.necp_cache_buf_ver == NECP_CLIENT_CACHE_TYPE_TFO_VER_1) {
		// TFO cache entry: validate the nested buffer, then copy it in
		if (cache_buffer.necp_cache_buf_size != sizeof(necp_tcp_tfo_cache) ||
		    cache_buffer.necp_cache_buf_addr == USER_ADDR_NULL) {
			error = EINVAL;
			goto done_unlock;
		}

		necp_tcp_tfo_cache tfo_cache_buffer;
		memset(&tfo_cache_buffer, 0, sizeof(tfo_cache_buffer));

		error = copyin(cache_buffer.necp_cache_buf_addr, &tfo_cache_buffer, sizeof(necp_tcp_tfo_cache));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_update_cache copyin tfo cache buffer error (%d)", error);
			goto done_unlock;
		}

		if (client->current_route != NULL && client->current_route->rt_ifp != NULL) {
			// Only platform binaries are trusted to report heuristics success
			if (!client->platform_binary) {
				tfo_cache_buffer.necp_tcp_tfo_heuristics_success = 0;
			}
			tcp_heuristics_tfo_update(&tfo_cache_buffer, client->current_route->rt_ifp,
			    (union sockaddr_in_4_6 *)&flow->local_addr,
			    (union sockaddr_in_4_6 *)&flow->remote_addr);
		}
	} else {
		// Unknown cache type/version combination
		error = EINVAL;
	}
done_unlock:
	NECP_CLIENT_ROUTE_UNLOCK(client);
	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);
done:
	*retval = error;
	return error;
}
11315
// Most results will fit into this size
// Stack-sized staging buffer for necp_client_sign(); larger inputs fall back
// to a heap allocation.
struct necp_client_signable_default {
	uuid_t client_id;       // client identifier covered by the signature
	u_int32_t sign_type;    // NECP_CLIENT_SIGN_TYPE_* value
	u_int8_t signable_data[NECP_CLIENT_ACTION_SIGN_DEFAULT_DATA_LENGTH]; // inline payload
} __attribute__((__packed__));
11322
11323 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_sign(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11324 necp_client_sign(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11325 {
11326 int error = 0;
11327 u_int8_t tag[NECP_CLIENT_ACTION_SIGN_TAG_LENGTH] = {};
11328 struct necp_client_signable * __indexable signable = NULL;
11329 struct necp_client_signable * __indexable allocated_signable = NULL;
11330 struct necp_client_signable_default default_signable = {};
11331 size_t tag_size = sizeof(tag);
11332
11333 const size_t signable_length = uap->client_id_len;
11334 const size_t return_tag_length = uap->buffer_size;
11335
11336 *retval = 0;
11337
11338 const bool has_resolver_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_VALIDATED_RESOLVER, 0) == 0);
11339 if (!has_resolver_entitlement) {
11340 NECPLOG0(LOG_ERR, "Process does not hold the necessary entitlement to sign resolver answers");
11341 error = EPERM;
11342 goto done;
11343 }
11344
11345 if (uap->client_id == 0 || signable_length < sizeof(*signable) || signable_length > NECP_CLIENT_ACTION_SIGN_MAX_TOTAL_LENGTH) {
11346 error = EINVAL;
11347 goto done;
11348 }
11349
11350 if (uap->buffer == 0 || return_tag_length != NECP_CLIENT_ACTION_SIGN_TAG_LENGTH) {
11351 error = EINVAL;
11352 goto done;
11353 }
11354
11355 if (signable_length <= sizeof(default_signable)) {
11356 signable = (struct necp_client_signable *)&default_signable;
11357 } else {
11358 if ((allocated_signable = (struct necp_client_signable *)kalloc_data(signable_length, Z_WAITOK | Z_ZERO)) == NULL) {
11359 NECPLOG(LOG_ERR, "necp_client_sign allocate signable %zu failed", signable_length);
11360 error = ENOMEM;
11361 goto done;
11362 }
11363 signable = allocated_signable;
11364 }
11365
11366 error = copyin(uap->client_id, signable, signable_length);
11367 if (error) {
11368 NECPLOG(LOG_ERR, "necp_client_sign copyin signable error (%d)", error);
11369 goto done;
11370 }
11371
11372 size_t data_length = 0;
11373 switch (signable->sign_type) {
11374 case NECP_CLIENT_SIGN_TYPE_RESOLVER_ANSWER:
11375 case NECP_CLIENT_SIGN_TYPE_SYSTEM_RESOLVER_ANSWER: {
11376 data_length = (sizeof(struct necp_client_host_resolver_answer) - sizeof(struct necp_client_signable));
11377 if (signable_length < (sizeof(struct necp_client_signable) + data_length)) {
11378 error = EINVAL;
11379 goto done;
11380 }
11381 struct necp_client_host_resolver_answer * __single signable_struct = (struct necp_client_host_resolver_answer *)signable;
11382 if (signable_struct->hostname_length > NECP_CLIENT_ACTION_SIGN_MAX_STRING_LENGTH ||
11383 signable_length != (sizeof(struct necp_client_signable) + data_length + signable_struct->hostname_length)) {
11384 error = EINVAL;
11385 goto done;
11386 }
11387 data_length += signable_struct->hostname_length;
11388 break;
11389 }
11390 case NECP_CLIENT_SIGN_TYPE_BROWSE_RESULT:
11391 case NECP_CLIENT_SIGN_TYPE_SYSTEM_BROWSE_RESULT: {
11392 data_length = (sizeof(struct necp_client_browse_result) - sizeof(struct necp_client_signable));
11393 if (signable_length < (sizeof(struct necp_client_signable) + data_length)) {
11394 error = EINVAL;
11395 goto done;
11396 }
11397 struct necp_client_browse_result *signable_struct = (struct necp_client_browse_result *)signable;
11398 if (signable_struct->service_length > NECP_CLIENT_ACTION_SIGN_MAX_STRING_LENGTH ||
11399 signable_length != (sizeof(struct necp_client_signable) + data_length + signable_struct->service_length)) {
11400 error = EINVAL;
11401 goto done;
11402 }
11403 data_length += signable_struct->service_length;
11404 break;
11405 }
11406 case NECP_CLIENT_SIGN_TYPE_SERVICE_RESOLVER_ANSWER:
11407 case NECP_CLIENT_SIGN_TYPE_SYSTEM_SERVICE_RESOLVER_ANSWER: {
11408 data_length = (sizeof(struct necp_client_service_resolver_answer) - sizeof(struct necp_client_signable));
11409 if (signable_length < (sizeof(struct necp_client_signable) + data_length)) {
11410 error = EINVAL;
11411 goto done;
11412 }
11413 struct necp_client_service_resolver_answer * __single signable_struct = (struct necp_client_service_resolver_answer *)signable;
11414 if (signable_struct->service_length > NECP_CLIENT_ACTION_SIGN_MAX_STRING_LENGTH ||
11415 signable_struct->hostname_length > NECP_CLIENT_ACTION_SIGN_MAX_STRING_LENGTH ||
11416 signable_length != (sizeof(struct necp_client_signable) + data_length + signable_struct->service_length + signable_struct->hostname_length)) {
11417 error = EINVAL;
11418 goto done;
11419 }
11420 data_length += signable_struct->service_length;
11421 data_length += signable_struct->hostname_length;
11422 break;
11423 }
11424 default: {
11425 NECPLOG(LOG_ERR, "necp_client_sign unknown signable type (%u)", signable->sign_type);
11426 error = EINVAL;
11427 goto done;
11428 }
11429 }
11430
11431 error = necp_sign_resolver_answer(signable->client_id, signable->sign_type,
11432 signable_get_data(signable, data_length), data_length,
11433 tag, &tag_size);
11434 if (tag_size != sizeof(tag)) {
11435 NECPLOG(LOG_ERR, "necp_client_sign unexpected tag size %zu", tag_size);
11436 error = EINVAL;
11437 goto done;
11438 }
11439 error = copyout(tag, uap->buffer, tag_size);
11440 if (error) {
11441 NECPLOG(LOG_ERR, "necp_client_sign copyout error (%d)", error);
11442 goto done;
11443 }
11444
11445 done:
11446 if (allocated_signable != NULL) {
11447 kfree_data(allocated_signable, signable_length);
11448 allocated_signable = NULL;
11449 }
11450 *retval = error;
11451 return error;
11452 }
11453
// Most results will fit into this size
// Stack-sized staging buffer for necp_client_validate(): a signature followed
// by the default-sized signable; larger inputs fall back to the heap.
struct necp_client_validatable_default {
	struct necp_client_signature signature; // tag to validate against
	struct necp_client_signable_default signable; // signed content
} __attribute__((__packed__));
11459
11460 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_validate(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11461 necp_client_validate(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11462 {
11463 int error = 0;
11464 struct necp_client_validatable *validatable = NULL;
11465 struct necp_client_validatable * __single allocated_validatable = NULL;
11466 struct necp_client_validatable_default default_validatable = {};
11467
11468 const size_t validatable_length = uap->client_id_len;
11469
11470 *retval = 0;
11471
11472 const bool has_resolver_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_VALIDATED_RESOLVER, 0) == 0);
11473 if (!has_resolver_entitlement) {
11474 NECPLOG0(LOG_ERR, "Process does not hold the necessary entitlement to directly validate resolver answers");
11475 error = EPERM;
11476 goto done;
11477 }
11478
11479 if (uap->client_id == 0 || validatable_length < sizeof(*validatable) ||
11480 validatable_length > (NECP_CLIENT_ACTION_SIGN_MAX_TOTAL_LENGTH + NECP_CLIENT_ACTION_SIGN_TAG_LENGTH)) {
11481 error = EINVAL;
11482 goto done;
11483 }
11484
11485 if (validatable_length <= sizeof(default_validatable)) {
11486 validatable = (struct necp_client_validatable *)&default_validatable;
11487 } else {
11488 if ((allocated_validatable = (struct necp_client_validatable *)kalloc_data(validatable_length, Z_WAITOK | Z_ZERO)) == NULL) {
11489 NECPLOG(LOG_ERR, "necp_client_validate allocate struct %zu failed", validatable_length);
11490 error = ENOMEM;
11491 goto done;
11492 }
11493 validatable = allocated_validatable;
11494 }
11495
11496 error = copyin(uap->client_id, validatable, validatable_length);
11497 if (error) {
11498 NECPLOG(LOG_ERR, "necp_client_validate copyin error (%d)", error);
11499 goto done;
11500 }
11501
11502 size_t signable_data_len = validatable_length - sizeof(struct necp_client_validatable);
11503 const bool validated = necp_validate_resolver_answer(validatable->signable.client_id, validatable->signable.sign_type,
11504 signable_get_data(&validatable->signable, signable_data_len), signable_data_len,
11505 validatable->signature.signed_tag, sizeof(validatable->signature.signed_tag));
11506 if (!validated) {
11507 // Return EAUTH to indicate that the signature failed
11508 error = EAUTH;
11509 }
11510
11511 done:
11512 if (allocated_validatable != NULL) {
11513 kfree_data(allocated_validatable, validatable_length);
11514 allocated_validatable = NULL;
11515 }
11516 *retval = error;
11517 return error;
11518 }
11519
11520 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_get_signed_client_id(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11521 necp_client_get_signed_client_id(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11522 {
11523 int error = 0;
11524 *retval = 0;
11525 u_int32_t request_type = 0;
11526 struct necp_client_signed_client_id_uuid client_id = { 0 };
11527 const size_t buffer_size = uap->buffer_size;
11528 u_int8_t tag[NECP_CLIENT_ACTION_SIGN_TAG_LENGTH] = {};
11529 size_t tag_size = sizeof(tag);
11530 proc_t proc = current_proc();
11531 if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
11532 buffer_size < sizeof(struct necp_client_signed_client_id_uuid) ||
11533 uap->buffer == 0) {
11534 NECPLOG0(LOG_ERR, "necp_client_get_signed_client_id bad input");
11535 error = EINVAL;
11536 goto done;
11537 }
11538
11539 error = copyin(uap->client_id, &request_type, sizeof(u_int32_t));
11540 if (error) {
11541 NECPLOG(LOG_ERR, "necp_client_get_signed_client_id copyin request_type error (%d)", error);
11542 goto done;
11543 }
11544
11545 if (request_type != NECP_CLIENT_SIGNED_CLIENT_ID_TYPE_UUID) {
11546 error = ENOENT;
11547 NECPLOG(LOG_ERR, "necp_client_get_signed_client_id bad request_type (%d)", request_type);
11548 goto done;
11549 }
11550
11551 uuid_t application_uuid;
11552 uuid_clear(application_uuid);
11553 proc_getexecutableuuid(proc, application_uuid, sizeof(application_uuid));
11554
11555 error = necp_sign_application_id(application_uuid,
11556 NECP_CLIENT_SIGNED_CLIENT_ID_TYPE_UUID,
11557 tag, &tag_size);
11558 if (tag_size != sizeof(tag)) {
11559 NECPLOG(LOG_ERR, "necp_client_get_signed_client_id unexpected tag size %zu", tag_size);
11560 error = EINVAL;
11561 goto done;
11562 }
11563 uuid_copy(client_id.client_id, application_uuid);
11564 client_id.signature_length = tag_size;
11565 memcpy(client_id.signature_data, tag, tag_size);
11566
11567 error = copyout(&client_id, uap->buffer, sizeof(client_id));
11568 if (error != 0) {
11569 NECPLOG(LOG_ERR, "necp_client_get_signed_client_id copyout error (%d)", error);
11570 goto done;
11571 }
11572
11573 done:
11574 *retval = error;
11575 return error;
11576 }
11577
11578 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_set_signed_client_id(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11579 necp_client_set_signed_client_id(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11580 {
11581 int error = 0;
11582 *retval = 0;
11583 u_int32_t request_type = 0;
11584 struct necp_client_signed_client_id_uuid client_id = { 0 };
11585 const size_t buffer_size = uap->buffer_size;
11586
11587 // Only allow entitled processes to set the client ID.
11588 proc_t proc = current_proc();
11589 task_t __single task = proc_task(proc);
11590 bool has_delegation_entitlement = task != NULL && IOTaskHasEntitlement(task, kCSWebBrowserNetworkEntitlement);
11591 if (!has_delegation_entitlement) {
11592 has_delegation_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0);
11593 }
11594 if (!has_delegation_entitlement) {
11595 NECPLOG0(LOG_ERR, "necp_client_set_signed_client_id client lacks the necessary entitlement");
11596 error = EAUTH;
11597 goto done;
11598 }
11599
11600 if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
11601 buffer_size < sizeof(struct necp_client_signed_client_id_uuid) ||
11602 uap->buffer == 0) {
11603 NECPLOG0(LOG_ERR, "necp_client_set_signed_client_id bad input");
11604 error = EINVAL;
11605 goto done;
11606 }
11607
11608 error = copyin(uap->client_id, &request_type, sizeof(u_int32_t));
11609 if (error) {
11610 NECPLOG(LOG_ERR, "necp_client_set_signed_client_id copyin request_type error (%d)", error);
11611 goto done;
11612 }
11613
11614 if (request_type != NECP_CLIENT_SIGNED_CLIENT_ID_TYPE_UUID) {
11615 error = ENOENT;
11616 NECPLOG(LOG_ERR, "necp_client_set_signed_client_id bad request_type (%d)", request_type);
11617 goto done;
11618 }
11619
11620 error = copyin(uap->buffer, &client_id, sizeof(struct necp_client_signed_client_id_uuid));
11621 if (error) {
11622 NECPLOG(LOG_ERR, "necp_client_set_signed_client_id copyin request error (%d)", error);
11623 goto done;
11624 }
11625
11626 const bool validated = necp_validate_application_id(client_id.client_id,
11627 NECP_CLIENT_SIGNED_CLIENT_ID_TYPE_UUID,
11628 client_id.signature_data, sizeof(client_id.signature_data));
11629 if (!validated) {
11630 // Return EAUTH to indicate that the signature failed
11631 error = EAUTH;
11632 NECPLOG(LOG_ERR, "necp_client_set_signed_client_id signature validation failed (%d)", error);
11633 goto done;
11634 }
11635
11636 proc_setresponsibleuuid(proc, client_id.client_id, sizeof(client_id.client_id));
11637
11638 done:
11639 *retval = error;
11640 return error;
11641 }
11642
11643 static int
necp_client_copy_flow_stats(struct necp_client_flow_registration * flow_registration,struct necp_flow_statistics * flow_stats)11644 necp_client_copy_flow_stats(struct necp_client_flow_registration *flow_registration,
11645 struct necp_flow_statistics *flow_stats)
11646 {
11647 struct aop_flow_stats aop_flow_stats = {};
11648 int error = 0;
11649
11650 struct necp_client_flow *flow = LIST_FIRST(&flow_registration->flow_list);
11651 if (flow == NULL || !flow->aop_offload || !flow->aop_stat_index_valid) {
11652 NECPLOG0(LOG_ERR, "necp_client_copy_flow_stats only supported for aop flows");
11653 return EINVAL;
11654 }
11655 error = net_aop_get_flow_stats(flow->stats_index, &aop_flow_stats);
11656 if (error != 0) {
11657 NECPLOG(LOG_ERR, "net_aop_get_flow_stats failed (%d)", error);
11658 return error;
11659 }
11660
11661 if (flow_stats->transport_proto == IPPROTO_TCP) {
11662 struct tcp_info *tcpi = &flow_stats->transport.tcpi;
11663 struct tcp_info *a_tcpi = &aop_flow_stats.transport.tcp_stats.tcp_info;
11664 memcpy(tcpi, a_tcpi, sizeof(*tcpi));
11665 }
11666
11667 return 0;
11668 }
11669
11670 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_get_flow_statistics(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)11671 necp_client_get_flow_statistics(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
11672 {
11673 int error = 0;
11674 uuid_t flow_id = {};
11675 struct necp_flow_statistics flow_stats = {};
11676
11677 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
11678 error = EINVAL;
11679 NECPLOG(LOG_ERR, "necp_client_remove_flow invalid client_id (length %zu)", (size_t)uap->client_id_len);
11680 goto done;
11681 }
11682
11683 error = copyin(uap->client_id, flow_id, sizeof(uuid_t));
11684 if (error) {
11685 NECPLOG(LOG_ERR, "necp_client_get_flow_statistics copyin client_id error (%d)", error);
11686 goto done;
11687 }
11688
11689 if (uap->buffer_size < sizeof(flow_stats) || uap->buffer == 0) {
11690 error = EINVAL;
11691 goto done;
11692 }
11693
11694 error = copyin(uap->buffer, &flow_stats, sizeof(flow_stats));
11695 if (error) {
11696 NECPLOG(LOG_ERR, "necp_client_get_flow_statistics copyin protocol error (%d)", error);
11697 goto done;
11698 }
11699
11700 if (flow_stats.transport_proto != IPPROTO_TCP) {
11701 NECPLOG(LOG_ERR, "necp_client_get_flow_statistics, transport proto %u not supported",
11702 flow_stats.transport_proto);
11703 error = ENOTSUP;
11704 goto done;
11705 }
11706
11707 NECP_FD_LOCK(fd_data);
11708 struct necp_client *client = NULL;
11709 struct necp_client_flow_registration *flow_registration = necp_client_fd_find_flow(fd_data, flow_id);
11710 if (flow_registration != NULL) {
11711 client = flow_registration->client;
11712 if (client != NULL) {
11713 necp_client_retain(client);
11714 }
11715 }
11716 NECP_FD_UNLOCK(fd_data);
11717
11718 if (flow_registration != NULL && client != NULL) {
11719 NECP_CLIENT_LOCK(client);
11720 if (flow_registration->client == client) {
11721 error = necp_client_copy_flow_stats(flow_registration, &flow_stats);
11722 if (error == 0) {
11723 error = copyout(&flow_stats, uap->buffer, sizeof(flow_stats));
11724 if (error != 0) {
11725 NECPLOG(LOG_ERR, "necp_client_get_flow_statistics copyout failed (%d)", error);
11726 }
11727 }
11728 }
11729
11730 necp_client_release_locked(client);
11731 NECP_CLIENT_UNLOCK(client);
11732 }
11733
11734 done:
11735 *retval = error;
11736 if (error != 0) {
11737 NECPLOG(LOG_ERR, "get flow statistics error (%d)", error);
11738 }
11739
11740 return error;
11741 }
11742
/*
 * necp_client_action system call entry point.
 *
 * Resolves the NECP file descriptor, performs the MAC policy check where
 * configured, and dispatches uap->action to the matching handler. Each
 * handler sets *retval itself; the dispatch result is returned directly.
 */
int
necp_client_action(struct proc *p, struct necp_client_action_args *uap, int *retval)
{
	struct fileproc * __single fp;
	int error = 0;
	int return_value = 0;
	struct necp_fd_data * __single fd_data = NULL;

	// Takes a reference on fp that must be dropped via fp_drop() below
	error = necp_find_fd_data(p, uap->necp_fd, &fp, &fd_data);
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_client_action find fd error (%d)", error);
		return error;
	}

	u_int32_t action = uap->action;

#if CONFIG_MACF
	error = mac_necp_check_client_action(p, fp->fp_glob, action);
	if (error) {
		return_value = error;
		goto done;
	}
#endif /* CONFIG_MACF */

	switch (action) {
	case NECP_CLIENT_ACTION_ADD: {
		return_value = necp_client_add(p, fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_CLAIM: {
		return_value = necp_client_claim(p, fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_REMOVE: {
		return_value = necp_client_remove(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_PARAMETERS:
	case NECP_CLIENT_ACTION_COPY_RESULT:
	case NECP_CLIENT_ACTION_COPY_UPDATED_RESULT:
	case NECP_CLIENT_ACTION_COPY_UPDATED_RESULT_FINAL: {
		return_value = necp_client_copy(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_LIST: {
		return_value = necp_client_list(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_ADD_FLOW: {
		return_value = necp_client_add_flow(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_REMOVE_FLOW: {
		return_value = necp_client_remove_flow(fd_data, uap, retval);
		break;
	}
#if SKYWALK
	case NECP_CLIENT_ACTION_REQUEST_NEXUS_INSTANCE: {
		return_value = necp_client_request_nexus(fd_data, uap, retval);
		break;
	}
#endif /* SKYWALK */
	case NECP_CLIENT_ACTION_AGENT: {
		return_value = necp_client_agent_action(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_AGENT: {
		return_value = necp_client_copy_agent(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_AGENT_USE: {
		return_value = necp_client_agent_use(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_ACQUIRE_AGENT_TOKEN: {
		return_value = necp_client_acquire_agent_token(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_INTERFACE: {
		return_value = necp_client_copy_interface(fd_data, uap, retval);
		break;
	}
#if SKYWALK
	case NECP_CLIENT_ACTION_GET_INTERFACE_ADDRESS: {
		return_value = necp_client_get_interface_address(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_SET_STATISTICS: {
		// Legacy action, no longer supported
		return_value = ENOTSUP;
		break;
	}
	case NECP_CLIENT_ACTION_MAP_SYSCTLS: {
		return_value = necp_client_map_sysctls(fd_data, uap, retval);
		break;
	}
#endif /* SKYWALK */
	case NECP_CLIENT_ACTION_COPY_ROUTE_STATISTICS: {
		return_value = necp_client_copy_route_statistics(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_UPDATE_CACHE: {
		return_value = necp_client_update_cache(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_CLIENT_UPDATE: {
		return_value = necp_client_copy_client_update(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_SIGN: {
		return_value = necp_client_sign(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_VALIDATE: {
		return_value = necp_client_validate(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_GET_SIGNED_CLIENT_ID: {
		return_value = necp_client_get_signed_client_id(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_SET_SIGNED_CLIENT_ID: {
		return_value = necp_client_set_signed_client_id(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_GET_FLOW_STATISTICS: {
		return_value = necp_client_get_flow_statistics(fd_data, uap, retval);
		break;
	}
	default: {
		NECPLOG(LOG_ERR, "necp_client_action unknown action (%u)", action);
		return_value = EINVAL;
		break;
	}
	}

done:
	fp_drop(p, uap->necp_fd, fp, 0);
	return return_value;
}
11882
11883 #define NECP_MAX_MATCH_POLICY_PARAMETER_SIZE 1024
11884
/*
 * necp_match_policy system call entry point.
 *
 * Copies in a TLV parameter blob (bounded by
 * NECP_MAX_MATCH_POLICY_PARAMETER_SIZE), runs the policy match for the
 * calling process, and copies the aggregate result back out to
 * uap->returned_result.
 *
 * Returns 0 on success; EINVAL on bad input, ENOMEM on allocation failure,
 * or the error from copyin/copyout/the internal match.
 */
int
necp_match_policy(struct proc *p, struct necp_match_policy_args *uap, int32_t *retval)
{
#pragma unused(retval)
	size_t buffer_size = 0;
	u_int8_t * __sized_by(buffer_size) parameters = NULL;
	struct necp_aggregate_result returned_result;
	int error = 0;

	if (uap == NULL) {
		error = EINVAL;
		goto done;
	}

	if (uap->parameters == 0 || uap->parameters_size == 0 || uap->parameters_size > NECP_MAX_MATCH_POLICY_PARAMETER_SIZE || uap->returned_result == 0) {
		error = EINVAL;
		goto done;
	}

	// Pointer and its __sized_by length are assigned back-to-back for
	// bounds-safety; buffer_size stays valid for the cleanup below
	parameters = (u_int8_t *)kalloc_data(uap->parameters_size, Z_WAITOK | Z_ZERO);
	buffer_size = uap->parameters_size;
	if (parameters == NULL) {
		error = ENOMEM;
		goto done;
	}
	// Copy parameters in
	error = copyin(uap->parameters, parameters, buffer_size);
	if (error) {
		goto done;
	}

	error = necp_application_find_policy_match_internal(p, parameters, buffer_size,
	    &returned_result, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, false, false, NULL);
	if (error) {
		goto done;
	}

	// Copy return value back
	error = copyout(&returned_result, uap->returned_result, sizeof(struct necp_aggregate_result));
	if (error) {
		goto done;
	}
done:
	if (parameters != NULL) {
		kfree_data_sized_by(parameters, buffer_size);
	}
	return error;
}
11933
11934 /// Socket operations
11935
11936 static errno_t
necp_set_socket_attribute(u_int8_t * __sized_by (buffer_length)buffer,size_t buffer_length,u_int8_t type,char * __null_terminated * buffer_p,bool * single_tlv)11937 necp_set_socket_attribute(u_int8_t * __sized_by(buffer_length)buffer, size_t buffer_length, u_int8_t type, char *__null_terminated *buffer_p, bool *single_tlv)
11938 {
11939 int error = 0;
11940 int cursor = 0;
11941 size_t string_size = 0;
11942 size_t local_string_length = 0;
11943 char * __sized_by(local_string_length) local_string = NULL;
11944 u_int8_t * __indexable value = NULL;
11945 char * __indexable buffer_to_free = NULL;
11946
11947 cursor = necp_buffer_find_tlv(buffer, buffer_length, 0, type, NULL, 0);
11948 if (cursor < 0) {
11949 // This will clear out the parameter
11950 goto done;
11951 }
11952
11953 string_size = necp_buffer_get_tlv_length(buffer, buffer_length, cursor);
11954 if (single_tlv != NULL && (buffer_length == sizeof(struct necp_tlv_header) + string_size)) {
11955 *single_tlv = true;
11956 }
11957 if (string_size == 0 || string_size > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
11958 // This will clear out the parameter
11959 goto done;
11960 }
11961
11962 local_string = (char *)kalloc_data(string_size + 1, Z_WAITOK | Z_ZERO);
11963 local_string_length = string_size + 1;
11964 if (local_string == NULL) {
11965 NECPLOG(LOG_ERR, "Failed to allocate a socket attribute buffer (size %zu)", string_size);
11966 goto fail;
11967 }
11968
11969 value = necp_buffer_get_tlv_value(buffer, buffer_length, cursor, NULL);
11970 if (value == NULL) {
11971 NECPLOG0(LOG_ERR, "Failed to get socket attribute");
11972 goto fail;
11973 }
11974
11975 memcpy(local_string, value, string_size);
11976 local_string[string_size] = 0;
11977
11978 done:
11979 if (*buffer_p != NULL) {
11980 buffer_to_free = __unsafe_null_terminated_to_indexable(*buffer_p);
11981 }
11982
11983 // Protect switching of buffer pointer
11984 necp_lock_socket_attributes();
11985 if (local_string != NULL) {
11986 *buffer_p = __unsafe_null_terminated_from_indexable(local_string, &local_string[string_size]);
11987 } else {
11988 *buffer_p = NULL;
11989 }
11990 necp_unlock_socket_attributes();
11991
11992 if (buffer_to_free != NULL) {
11993 kfree_data_addr(buffer_to_free);
11994 }
11995 return 0;
11996 fail:
11997 if (local_string != NULL) {
11998 kfree_data_sized_by(local_string, local_string_length);
11999 }
12000 return error;
12001 }
12002
/*
 * Setsockopt handler that parses a TLV buffer from userspace and updates the
 * socket's NECP attribute strings (domain context, domain, domain owner,
 * tracker domain, account).
 *
 * A missing TLV clears the corresponding attribute; if the buffer contains
 * only the DOMAIN_CONTEXT TLV, the other attributes are intentionally left
 * untouched. Returns 0 on success or the first error encountered.
 */
errno_t
necp_set_socket_attributes(struct inp_necp_attributes *attributes, struct sockopt *sopt)
{
	int error = 0;
	u_int8_t *buffer = NULL;
	bool single_tlv = false;
	size_t valsize = sopt->sopt_valsize;
	// Bound the input: at most 4 TLVs of maximum string length
	if (valsize == 0 ||
	    valsize > ((sizeof(struct necp_tlv_header) + NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) * 4)) {
		goto done;
	}

	buffer = (u_int8_t *)kalloc_data(valsize, Z_WAITOK | Z_ZERO);
	if (buffer == NULL) {
		goto done;
	}

	error = sooptcopyin(sopt, buffer, valsize, 0);
	if (error) {
		goto done;
	}

	// If NECP_TLV_ATTRIBUTE_DOMAIN_CONTEXT is being set/cleared separately from the other attributes,
	// do not clear other attributes.
	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN_CONTEXT, &attributes->inp_domain_context, &single_tlv);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set domain context TLV for socket attributes");
		goto done;
	}
	if (single_tlv == true) {
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN, &attributes->inp_domain, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set domain TLV for socket attributes");
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN_OWNER, &attributes->inp_domain_owner, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set domain owner TLV for socket attributes");
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_TRACKER_DOMAIN, &attributes->inp_tracker_domain, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set tracker domain TLV for socket attributes");
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_ACCOUNT, &attributes->inp_account, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set account TLV for socket attributes");
		goto done;
	}

done:
	NECP_SOCKET_ATTRIBUTE_LOG("NECP ATTRIBUTES SOCKET - domain <%s> owner <%s> context <%s> tracker domain <%s> account <%s>",
	    attributes->inp_domain,
	    attributes->inp_domain_owner,
	    attributes->inp_domain_context,
	    attributes->inp_tracker_domain,
	    attributes->inp_account);

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Set on socket: Domain %s, Domain owner %s, Domain context %s, Tracker domain %s, Account %s",
		    attributes->inp_domain,
		    attributes->inp_domain_owner,
		    attributes->inp_domain_context,
		    attributes->inp_tracker_domain,
		    attributes->inp_account);
	}

	if (buffer != NULL) {
		kfree_data(buffer, valsize);
	}

	return error;
}
12083
/*
 * necp_get_socket_attributes
 * Serialize the socket's NECP attribute strings (domain, domain owner,
 * domain context, tracker domain, account) into a TLV buffer and copy it out
 * through getsockopt. Returns 0 (with nothing copied out when no attributes
 * are set, or on allocation failure) or an error from sooptcopyout().
 *
 * NOTE(review): the attribute strings are read without holding
 * necp_lock_socket_attributes(); presumably callers serialize against
 * concurrent attribute updates — confirm.
 */
errno_t
necp_get_socket_attributes(struct inp_necp_attributes *attributes, struct sockopt *sopt)
{
	int error = 0;
	size_t valsize = 0;
	u_int8_t *buffer = NULL;
	u_int8_t * __indexable cursor = NULL;

	// First pass: size one TLV (header + string bytes, no NUL terminator)
	// for each attribute that is present.
	if (attributes->inp_domain != NULL) {
		valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_domain);
	}
	if (attributes->inp_domain_owner != NULL) {
		valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_domain_owner);
	}
	if (attributes->inp_domain_context != NULL) {
		valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_domain_context);
	}
	if (attributes->inp_tracker_domain != NULL) {
		valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_tracker_domain);
	}
	if (attributes->inp_account != NULL) {
		valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_account);
	}
	if (valsize == 0) {
		// Nothing to report; return success with an empty result.
		goto done;
	}

	buffer = (u_int8_t *)kalloc_data(valsize, Z_WAITOK | Z_ZERO);
	if (buffer == NULL) {
		goto done;
	}

	// Second pass: write each present attribute's TLV, bounded by the
	// buffer sized above.
	cursor = buffer;
	if (attributes->inp_domain != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN, strlen(attributes->inp_domain), __terminated_by_to_indexable(attributes->inp_domain),
		    buffer, valsize);
	}

	if (attributes->inp_domain_owner != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN_OWNER, strlen(attributes->inp_domain_owner), __terminated_by_to_indexable(attributes->inp_domain_owner),
		    buffer, valsize);
	}

	if (attributes->inp_domain_context != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN_CONTEXT, strlen(attributes->inp_domain_context), __terminated_by_to_indexable(attributes->inp_domain_context),
		    buffer, valsize);
	}

	if (attributes->inp_tracker_domain != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_TRACKER_DOMAIN, strlen(attributes->inp_tracker_domain), __terminated_by_to_indexable(attributes->inp_tracker_domain),
		    buffer, valsize);
	}

	if (attributes->inp_account != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_ACCOUNT, strlen(attributes->inp_account), __terminated_by_to_indexable(attributes->inp_account),
		    buffer, valsize);
	}

	error = sooptcopyout(sopt, buffer, valsize);
	if (error) {
		goto done;
	}
done:
	if (buffer != NULL) {
		kfree_data(buffer, valsize);
	}

	return error;
}
12153
12154 int
necp_set_socket_resolver_signature(struct inpcb * inp,struct sockopt * sopt)12155 necp_set_socket_resolver_signature(struct inpcb *inp, struct sockopt *sopt)
12156 {
12157 const size_t valsize = sopt->sopt_valsize;
12158 if (valsize > NECP_CLIENT_ACTION_SIGN_MAX_TOTAL_LENGTH + NECP_CLIENT_ACTION_SIGN_TAG_LENGTH) {
12159 return EINVAL;
12160 }
12161
12162 necp_lock_socket_attributes();
12163 if (inp->inp_resolver_signature != NULL) {
12164 kfree_data_sized_by(inp->inp_resolver_signature, inp->inp_resolver_signature_length);
12165 }
12166
12167 int error = 0;
12168 if (valsize > 0) {
12169 inp->inp_resolver_signature = kalloc_data(valsize, Z_WAITOK | Z_ZERO);
12170 inp->inp_resolver_signature_length = valsize;
12171 if ((error = sooptcopyin(sopt, inp->inp_resolver_signature, valsize,
12172 valsize)) != 0) {
12173 // Free the signature buffer if the copyin failed
12174 kfree_data_sized_by(inp->inp_resolver_signature, inp->inp_resolver_signature_length);
12175 }
12176 }
12177 necp_unlock_socket_attributes();
12178
12179 return error;
12180 }
12181
12182 int
necp_get_socket_resolver_signature(struct inpcb * inp,struct sockopt * sopt)12183 necp_get_socket_resolver_signature(struct inpcb *inp, struct sockopt *sopt)
12184 {
12185 int error = 0;
12186 necp_lock_socket_attributes();
12187 if (inp->inp_resolver_signature == NULL ||
12188 inp->inp_resolver_signature_length == 0) {
12189 error = ENOENT;
12190 } else {
12191 error = sooptcopyout(sopt, inp->inp_resolver_signature,
12192 inp->inp_resolver_signature_length);
12193 }
12194 necp_unlock_socket_attributes();
12195 return error;
12196 }
12197
12198 bool
necp_socket_has_resolver_signature(struct inpcb * inp)12199 necp_socket_has_resolver_signature(struct inpcb *inp)
12200 {
12201 necp_lock_socket_attributes();
12202 bool has_signature = (inp->inp_resolver_signature != NULL && inp->inp_resolver_signature_length != 0);
12203 necp_unlock_socket_attributes();
12204 return has_signature;
12205 }
12206
/*
 * necp_socket_resolver_signature_matches_address
 * Check whether the socket's installed resolver signature is a validated
 * system-resolver answer whose answered address matches `address`: same
 * family, same sockaddr length, matching port (a zero port in the answer
 * acts as a wildcard), and matching IPv4/IPv6 address bytes. Only when the
 * address fields match is the cryptographic signature itself validated.
 */
bool
necp_socket_resolver_signature_matches_address(struct inpcb *inp, union necp_sockaddr_union *address)
{
	bool matches_address = false;
	necp_lock_socket_attributes();
	if (inp->inp_resolver_signature != NULL && inp->inp_resolver_signature_length > 0 && address->sa.sa_len > 0) {
		struct necp_client_validatable *validatable = (struct necp_client_validatable *)inp->inp_resolver_signature;
		// Only system-resolver answers are eligible, and the blob must
		// extend past the validatable header to carry any payload.
		if (inp->inp_resolver_signature_length > sizeof(struct necp_client_validatable) &&
		    validatable->signable.sign_type == NECP_CLIENT_SIGN_TYPE_SYSTEM_RESOLVER_ANSWER) {
			// Bytes of signable payload beyond the validatable header.
			size_t data_length = inp->inp_resolver_signature_length - sizeof(struct necp_client_validatable);
			if (data_length >= (sizeof(struct necp_client_host_resolver_answer) - sizeof(struct necp_client_signable))) {
				struct necp_client_host_resolver_answer * __single answer_struct = (struct necp_client_host_resolver_answer *)&validatable->signable;
				// Local copy of the answered address for the IPv6 memcmp below.
				struct sockaddr_in6 sin6 = answer_struct->address_answer.sin6;
				// The payload must account exactly for the trailing hostname,
				// and the answered address must match the queried one.
				if (data_length == (sizeof(struct necp_client_host_resolver_answer) + answer_struct->hostname_length - sizeof(struct necp_client_signable)) &&
				    answer_struct->address_answer.sa.sa_family == address->sa.sa_family &&
				    answer_struct->address_answer.sa.sa_len == address->sa.sa_len &&
				    (answer_struct->address_answer.sin.sin_port == 0 ||
				    answer_struct->address_answer.sin.sin_port == address->sin.sin_port) &&
				    ((answer_struct->address_answer.sa.sa_family == AF_INET &&
				    answer_struct->address_answer.sin.sin_addr.s_addr == address->sin.sin_addr.s_addr) ||
				    (answer_struct->address_answer.sa.sa_family == AF_INET6 &&
				    memcmp(&sin6.sin6_addr, &address->sin6.sin6_addr, sizeof(struct in6_addr)) == 0))) {
					// Address matches
					const bool validated = necp_validate_resolver_answer(validatable->signable.client_id,
					    validatable->signable.sign_type,
					    signable_get_data(&validatable->signable, data_length), data_length,
					    validatable->signature.signed_tag, sizeof(validatable->signature.signed_tag));
					if (validated) {
						// Answer is validated
						matches_address = true;
					}
				}
			}
		}
	}
	necp_unlock_socket_attributes();
	return matches_address;
}
12245
12246 /*
12247 * necp_set_socket_domain_attributes
12248 * Called from soconnectlock/soconnectxlock to directly set the tracker domain and owner for
12249 * a newly marked tracker socket.
12250 */
errno_t
necp_set_socket_domain_attributes(struct socket *so, const char *domain __null_terminated, const char *domain_owner __null_terminated)
{
	int error = 0;
	struct inpcb * __single inp = NULL;
	size_t valsize = 0;
	size_t buffer_size = 0;
	u_int8_t * __sized_by(buffer_size) buffer = NULL;
	char * __indexable buffer_to_free = NULL;

	// Only plain IPv4/IPv6 sockets carry NECP socket attributes.
	if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
		error = EINVAL;
		goto fail;
	}

	// Set domain (required)

	valsize = strlen(domain);
	if (valsize == 0 || valsize > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
		error = EINVAL;
		goto fail;
	}

	// Stage a NUL-terminated copy of the domain string.
	buffer = (u_int8_t *)kalloc_data(valsize + 1, Z_WAITOK | Z_ZERO);
	buffer_size = valsize + 1;
	if (buffer == NULL) {
		error = ENOMEM;
		goto fail;
	}
	strlcpy((char *)buffer, domain, buffer_size);
	buffer[valsize] = 0;

	inp = sotoinpcb(so);
	// Do not overwrite a previously set domain if tracker domain is different.
	if (inp->inp_necp_attributes.inp_domain != NULL) {
		if (strlen(inp->inp_necp_attributes.inp_domain) != strlen(domain) ||
		    strcmp(inp->inp_necp_attributes.inp_domain, domain) != 0) {
			// New domain differs from the existing one: install the staged
			// copy as the tracker domain and free any previous tracker
			// domain after the swap.
			buffer_to_free = (inp->inp_necp_attributes.inp_tracker_domain != NULL) ? __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_tracker_domain) : NULL;
			// Protect switching of buffer pointer
			necp_lock_socket_attributes();
			inp->inp_necp_attributes.inp_tracker_domain = __unsafe_null_terminated_from_indexable((char *)buffer, (char *)&buffer[valsize]);
			necp_unlock_socket_attributes();
			if (buffer_to_free != NULL) {
				kfree_data_addr(buffer_to_free);
			}
		} else {
			// Same domain already recorded: the staged copy is redundant.
			kfree_data_sized_by(buffer, buffer_size);
		}
	} else {
		// No domain recorded yet: install the staged copy directly.
		// Protect switching of buffer pointer
		necp_lock_socket_attributes();
		inp->inp_necp_attributes.inp_domain = __unsafe_null_terminated_from_indexable((char *)buffer, (char *)&buffer[valsize]);
		necp_unlock_socket_attributes();
	}
	// Ownership of the staged buffer was transferred or released above.
	buffer = NULL;
	buffer_size = 0;

	// set domain_owner (required only for tracker)
	if (!(so->so_flags1 & SOF1_KNOWN_TRACKER)) {
		goto done;
	}

	valsize = strlen(domain_owner);
	if (valsize == 0 || valsize > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
		error = EINVAL;
		goto fail;
	}

	// Stage a NUL-terminated copy of the domain-owner string.
	buffer = (u_int8_t *)kalloc_data(valsize + 1, Z_WAITOK | Z_ZERO);
	buffer_size = valsize + 1;
	if (buffer == NULL) {
		error = ENOMEM;
		goto fail;
	}
	strlcpy((char *)buffer, domain_owner, buffer_size);
	buffer[valsize] = 0;

	inp = sotoinpcb(so);

	// Swap in the new owner string, freeing the old one after the swap.
	buffer_to_free = (inp->inp_necp_attributes.inp_domain_owner != NULL) ? __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_domain_owner) : NULL;
	// Protect switching of buffer pointer
	necp_lock_socket_attributes();
	inp->inp_necp_attributes.inp_domain_owner = __unsafe_null_terminated_from_indexable((char *)buffer, (char *)&buffer[valsize]);
	necp_unlock_socket_attributes();
	buffer = NULL;
	buffer_size = 0;

	if (buffer_to_free != NULL) {
		kfree_data_addr(buffer_to_free);
	}

done:
	NECP_SOCKET_PARAMS_LOG(so, "NECP ATTRIBUTES SOCKET - domain <%s> owner <%s> context <%s> tracker domain <%s> account <%s> "
	    "<so flags - is_tracker %X non-app-initiated %X app-approved-domain %X",
	    inp->inp_necp_attributes.inp_domain,
	    inp->inp_necp_attributes.inp_domain_owner,
	    inp->inp_necp_attributes.inp_domain_context,
	    inp->inp_necp_attributes.inp_tracker_domain,
	    inp->inp_necp_attributes.inp_account,
	    so->so_flags1 & SOF1_KNOWN_TRACKER,
	    so->so_flags1 & SOF1_TRACKER_NON_APP_INITIATED,
	    so->so_flags1 & SOF1_APPROVED_APP_DOMAIN);

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Set on socket: Domain <%s> Domain owner <%s> Domain context <%s> Tracker domain <%s> Account <%s> ",
		    inp->inp_necp_attributes.inp_domain,
		    inp->inp_necp_attributes.inp_domain_owner,
		    inp->inp_necp_attributes.inp_domain_context,
		    inp->inp_necp_attributes.inp_tracker_domain,
		    inp->inp_necp_attributes.inp_account);
	}
fail:
	// On early failure the staged buffer (if any) is still owned here.
	if (buffer != NULL) {
		kfree_data_sized_by(buffer, buffer_size);
	}
	return error;
}
12368
/*
 * necp_create_nexus_assign_message
 * Serialize the nexus-assignment parameters into a newly allocated TLV
 * message. Each parameter is encoded only when present (non-null UUID,
 * valid flow-advisory index, non-empty key, non-NULL endpoint/ether
 * address/stats pointer, non-zero flow id).
 *
 * Returns the allocated buffer (caller owns it; freed with kfree_data())
 * and stores its size in *message_length, or returns NULL with
 * *message_length set to 0 when there is nothing to encode or the
 * allocation fails.
 */
void *
__sized_by(*message_length)
necp_create_nexus_assign_message(uuid_t nexus_instance, nexus_port_t nexus_port, void * __sized_by(key_length) key, uint32_t key_length,
    struct necp_client_endpoint *local_endpoint, struct necp_client_endpoint *remote_endpoint, struct ether_addr *local_ether_addr,
    u_int32_t flow_adv_index, void *flow_stats, uint32_t flow_id, size_t *message_length)
{
	u_int8_t * __indexable buffer = NULL;
	u_int8_t * __indexable cursor = NULL;
	size_t valsize = 0;
	bool has_nexus_assignment = FALSE;

	// First pass: compute the total size of every TLV to be emitted. This
	// must stay in lockstep with the write pass below.
	if (!uuid_is_null(nexus_instance)) {
		has_nexus_assignment = TRUE;
		valsize += sizeof(struct necp_tlv_header) + sizeof(uuid_t);
		valsize += sizeof(struct necp_tlv_header) + sizeof(nexus_port_t);
	}
	if (flow_adv_index != NECP_FLOWADV_IDX_INVALID) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(u_int32_t);
	}
	if (key != NULL && key_length > 0) {
		valsize += sizeof(struct necp_tlv_header) + key_length;
	}
	if (local_endpoint != NULL) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct necp_client_endpoint);
	}
	if (remote_endpoint != NULL) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct necp_client_endpoint);
	}
	if (local_ether_addr != NULL) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct ether_addr);
	}
	if (flow_stats != NULL) {
		// The stats TLV carries the pointer value itself, not the stats.
		valsize += sizeof(struct necp_tlv_header) + sizeof(void *);
	}
	if (flow_id != 0) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(u_int32_t);
	}
	if (valsize == 0) {
		*message_length = 0;
		return NULL;
	}

	buffer = kalloc_data(valsize, Z_WAITOK | Z_ZERO);
	if (buffer == NULL) {
		*message_length = 0;
		return NULL;
	}

	// Second pass: write the TLVs in the same order they were sized.
	cursor = buffer;
	if (has_nexus_assignment) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_INSTANCE, sizeof(uuid_t), nexus_instance, buffer, valsize);
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_PORT, sizeof(nexus_port_t), &nexus_port, buffer, valsize);
	}
	if (flow_adv_index != NECP_FLOWADV_IDX_INVALID) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_PORT_FLOW_INDEX, sizeof(u_int32_t), &flow_adv_index, buffer, valsize);
	}
	if (key != NULL && key_length > 0) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_PARAMETER_NEXUS_KEY, key_length, key, buffer, valsize);
	}
	if (local_endpoint != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_LOCAL_ENDPOINT, sizeof(struct necp_client_endpoint), (uint8_t *)(struct necp_client_endpoint * __bidi_indexable)local_endpoint, buffer, valsize);
	}
	if (remote_endpoint != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_REMOTE_ENDPOINT, sizeof(struct necp_client_endpoint), (uint8_t *)(struct necp_client_endpoint * __bidi_indexable)remote_endpoint, buffer, valsize);
	}
	if (local_ether_addr != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_LOCAL_ETHER_ADDR, sizeof(struct ether_addr), (uint8_t *)(struct ether_addr * __bidi_indexable)local_ether_addr, buffer, valsize);
	}
	if (flow_stats != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_FLOW_STATS, sizeof(void *), &flow_stats, buffer, valsize);
	}
	if (flow_id != 0) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_UNIQUE_FLOW_TAG, sizeof(u_int32_t), &flow_id, buffer, valsize);
	}

	*message_length = valsize;

	return buffer;
}
12448
12449 void
necp_inpcb_remove_cb(struct inpcb * inp)12450 necp_inpcb_remove_cb(struct inpcb *inp)
12451 {
12452 if (!uuid_is_null(inp->necp_client_uuid)) {
12453 necp_client_unregister_socket_flow(inp->necp_client_uuid, inp);
12454 uuid_clear(inp->necp_client_uuid);
12455 }
12456 }
12457
12458 void
necp_inpcb_dispose(struct inpcb * inp)12459 necp_inpcb_dispose(struct inpcb *inp)
12460 {
12461 char * __indexable buffer = NULL;
12462
12463 necp_inpcb_remove_cb(inp); // Clear out socket registrations if not yet done
12464 if (inp->inp_necp_attributes.inp_domain != NULL) {
12465 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_domain);
12466 kfree_data_addr(buffer);
12467 inp->inp_necp_attributes.inp_domain = NULL;
12468 }
12469 if (inp->inp_necp_attributes.inp_account != NULL) {
12470 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_account);
12471 kfree_data_addr(buffer);
12472 inp->inp_necp_attributes.inp_account = NULL;
12473 }
12474 if (inp->inp_necp_attributes.inp_domain_owner != NULL) {
12475 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_domain_owner);
12476 kfree_data_addr(buffer);
12477 inp->inp_necp_attributes.inp_domain_owner = NULL;
12478 }
12479 if (inp->inp_necp_attributes.inp_domain_context != NULL) {
12480 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_domain_context);
12481 kfree_data_addr(buffer);
12482 inp->inp_necp_attributes.inp_domain_context = NULL;
12483 }
12484 if (inp->inp_necp_attributes.inp_tracker_domain != NULL) {
12485 buffer = __unsafe_null_terminated_to_indexable(inp->inp_necp_attributes.inp_tracker_domain);
12486 kfree_data_addr(buffer);
12487 inp->inp_necp_attributes.inp_tracker_domain = NULL;
12488 }
12489 if (inp->inp_resolver_signature != NULL) {
12490 kfree_data_sized_by(inp->inp_resolver_signature, inp->inp_resolver_signature_length);
12491 }
12492 }
12493
12494 void
necp_mppcb_dispose(struct mppcb * mpp)12495 necp_mppcb_dispose(struct mppcb *mpp)
12496 {
12497 char * __indexable buffer = NULL;
12498
12499 if (!uuid_is_null(mpp->necp_client_uuid)) {
12500 necp_client_unregister_multipath_cb(mpp->necp_client_uuid, mpp);
12501 uuid_clear(mpp->necp_client_uuid);
12502 }
12503
12504 if (mpp->inp_necp_attributes.inp_domain != NULL) {
12505 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_domain);
12506 kfree_data_addr(buffer);
12507 mpp->inp_necp_attributes.inp_domain = NULL;
12508 }
12509 if (mpp->inp_necp_attributes.inp_account != NULL) {
12510 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_account);
12511 kfree_data_addr(buffer);
12512 mpp->inp_necp_attributes.inp_account = NULL;
12513 }
12514 if (mpp->inp_necp_attributes.inp_domain_owner != NULL) {
12515 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_domain_owner);
12516 kfree_data_addr(buffer);
12517 mpp->inp_necp_attributes.inp_domain_owner = NULL;
12518 }
12519 if (mpp->inp_necp_attributes.inp_tracker_domain != NULL) {
12520 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_tracker_domain);
12521 kfree_data_addr(buffer);
12522 mpp->inp_necp_attributes.inp_tracker_domain = NULL;
12523 }
12524 if (mpp->inp_necp_attributes.inp_domain_context != NULL) {
12525 buffer = __unsafe_null_terminated_to_indexable(mpp->inp_necp_attributes.inp_domain_context);
12526 kfree_data_addr(buffer);
12527 mpp->inp_necp_attributes.inp_domain_context = NULL;
12528 }
12529 }
12530
12531 /// Module init
12532
/*
 * necp_client_init
 * One-time module initialization: allocate the deferred-work thread calls
 * (client updates, and with Skywalk also stats collection and empty-arena
 * reaping) and initialize the global fd/observer/stats lists and the
 * client/flow red-black trees.
 */
void
necp_client_init(void)
{
	necp_client_update_tcall = thread_call_allocate_with_options(necp_update_all_clients_callout, NULL,
	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
	VERIFY(necp_client_update_tcall != NULL);
#if SKYWALK

	necp_client_collect_stats_tcall = thread_call_allocate_with_options(necp_collect_stats_client_callout, NULL,
	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
	VERIFY(necp_client_collect_stats_tcall != NULL);

	necp_close_empty_arenas_tcall = thread_call_allocate_with_options(necp_close_empty_arenas_callout, NULL,
	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
	VERIFY(necp_close_empty_arenas_tcall != NULL);
#endif /* SKYWALK */

	LIST_INIT(&necp_fd_list);
	LIST_INIT(&necp_fd_observer_list);
	LIST_INIT(&necp_collect_stats_flow_list);

	RB_INIT(&necp_client_global_tree);
	RB_INIT(&necp_client_flow_global_tree);
}
12557
12558 #if SKYWALK
/*
 * necp_client_get_proc_pid_from_arena_info
 * Map a shared-memory arena mapping back to the pid of the owning process.
 * NECP-type arenas embed the mapping inside a necp_arena_info; system-type
 * (sysctl) arenas embed it directly inside the necp_fd_data.
 */
pid_t
necp_client_get_proc_pid_from_arena_info(struct skmem_arena_mmap_info *arena_info)
{
	ASSERT((arena_info->ami_arena->ar_type == SKMEM_ARENA_TYPE_NECP) || (arena_info->ami_arena->ar_type == SKMEM_ARENA_TYPE_SYSTEM));

	if (arena_info->ami_arena->ar_type == SKMEM_ARENA_TYPE_NECP) {
		// Recover the containing necp_arena_info from its nai_mmap member.
		struct necp_arena_info * __single nai = __unsafe_forge_single(struct necp_arena_info *, container_of(arena_info, struct necp_arena_info, nai_mmap));
		return nai->nai_proc_pid;
	} else {
		// Recover the containing necp_fd_data from its sysctl_mmap member.
		struct necp_fd_data * __single fd_data = __unsafe_forge_single(struct necp_fd_data *, container_of(arena_info, struct necp_fd_data, sysctl_mmap));
		return fd_data->proc_pid;
	}
}
#endif /* SKYWALK */
12573