1 /*
2 * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30
31 #include <kern/thread_call.h>
32 #include <kern/zalloc.h>
33
34 #include <net/if.h>
35 #include <net/if_var.h>
36 #include <net/net_api_stats.h>
37 #include <net/necp.h>
38 #include <net/network_agent.h>
39 #include <net/ntstat.h>
40
41 #include <netinet/in_pcb.h>
42 #include <netinet/in_var.h>
43 #include <netinet/ip.h>
44 #include <netinet/ip6.h>
45 #include <netinet/mp_pcb.h>
46 #include <netinet/tcp_cc.h>
47 #include <netinet/tcp_fsm.h>
48 #include <netinet/tcp_cache.h>
49 #include <netinet6/in6_var.h>
50
51 #include <sys/domain.h>
52 #include <sys/file_internal.h>
53 #include <sys/kauth.h>
54 #include <sys/kernel.h>
55 #include <sys/malloc.h>
56 #include <sys/poll.h>
57 #include <sys/priv.h>
58 #include <sys/protosw.h>
59 #include <sys/queue.h>
60 #include <sys/socket.h>
61 #include <sys/socketvar.h>
62 #include <sys/sysproto.h>
63 #include <sys/systm.h>
64 #include <sys/types.h>
65 #include <sys/codesign.h>
66 #include <libkern/section_keywords.h>
67
68 #include <os/refcnt.h>
69
70 #if SKYWALK
71 #include <skywalk/os_skywalk_private.h>
72 #include <skywalk/nexus/flowswitch/flow/flow_var.h>
73 #endif /* SKYWALK */
74
75 #if CONFIG_MACF
76 #include <security/mac_framework.h>
77 #endif
78
79 /*
80 * NECP Client Architecture
81 * ------------------------------------------------
82 * See <net/necp.c> for a discussion on NECP database architecture.
83 *
84 * Each client of NECP provides a set of parameters for a connection or network state
85 * evaluation, on which NECP policy evaluation is run. This produces a policy result
86 * which can be accessed by the originating process, along with events for when policies
87 * results have changed.
88 *
89 * ------------------------------------------------
90 * NECP Client FD
91 * ------------------------------------------------
92 * A process opens an NECP file descriptor using necp_open(). This is a very simple
93 * file descriptor, upon which the process may do the following operations:
94 * - necp_client_action(...), to add/remove/query clients
95 * - kqueue, to watch for readable events
96 * - close(), to close the client session and release all clients
97 *
98 * Client objects are allocated structures that hang off of the file descriptor. Each
99 * client contains:
100 * - Client ID, a UUID that references the client across the system
101 * - Parameters, a buffer of TLVs that describe the client's connection parameters,
102 * such as the remote and local endpoints, interface requirements, etc.
103 * - Result, a buffer of TLVs containing the current policy evaluation for the client.
104 * This result will be updated whenever a network change occurs that impacts the
105 * policy result for that client.
106 *
107 * +--------------+
108 * | NECP fd |
109 * +--------------+
110 * ||
111 * ==================================
112 * || || ||
113 * +--------------+ +--------------+ +--------------+
114 * | Client ID | | Client ID | | Client ID |
115 * | ---- | | ---- | | ---- |
116 * | Parameters | | Parameters | | Parameters |
117 * | ---- | | ---- | | ---- |
118 * | Result | | Result | | Result |
119 * +--------------+ +--------------+ +--------------+
120 *
121 * ------------------------------------------------
122 * Client Actions
123 * ------------------------------------------------
124 * - Add. Input parameters as a buffer of TLVs, and output a client ID. Allocates a
125 * new client structure on the file descriptor.
126 * - Remove. Input a client ID. Removes a client structure from the file descriptor.
127 * - Copy Parameters. Input a client ID, and output parameter TLVs.
128 * - Copy Result. Input a client ID, and output result TLVs. Alternatively, input empty
129 * client ID and get next unread client result.
130 * - Copy List. List all client IDs.
131 *
132 * ------------------------------------------------
133 * Client Policy Evaluation
134 * ------------------------------------------------
135 * Policies are evaluated for clients upon client creation, and upon update events,
136 * which are network/agent/policy changes coalesced by a timer.
137 *
138 * The policy evaluation goes through the following steps:
139 * 1. Parse client parameters.
140 * 2. Select a scoped interface if applicable. This involves using require/prohibit
141 * parameters, along with the local address, to select the most appropriate interface
142 * if not explicitly set by the client parameters.
 * 3. Run NECP application-level policy evaluation
144 * 4. Set policy result into client result buffer.
145 *
146 * ------------------------------------------------
147 * Client Observers
148 * ------------------------------------------------
149 * If necp_open() is called with the NECP_OPEN_FLAG_OBSERVER flag, and the process
150 * passes the necessary privilege check, the fd is allowed to use necp_client_action()
151 * to copy client state attached to the file descriptors of other processes, and to
152 * list all client IDs on the system.
153 */
154
155 extern u_int32_t necp_debug;
156
157 static int necpop_select(struct fileproc *, int, void *, vfs_context_t);
158 static int necpop_close(struct fileglob *, vfs_context_t);
159 static int necpop_kqfilter(struct fileproc *, struct knote *, struct kevent_qos_s *);
160
161 // Timer functions
162 static int necp_timeout_microseconds = 1000 * 100; // 100ms
163 static int necp_timeout_leeway_microseconds = 1000 * 500; // 500ms
164 #if SKYWALK
165 static int necp_collect_stats_timeout_microseconds = 1000 * 1000 * 1; // 1s
166 static int necp_collect_stats_timeout_leeway_microseconds = 1000 * 500; // 500ms
167 static int necp_close_arenas_timeout_microseconds = 1000 * 1000 * 10; // 10s
168 static int necp_close_arenas_timeout_leeway_microseconds = 1000 * 1000 * 1; // 1s
169 #endif /* SKYWALK */
170
171 static int necp_client_fd_count = 0;
172 static int necp_observer_fd_count = 0;
173 static int necp_client_count = 0;
174 static int necp_socket_flow_count = 0;
175 static int necp_if_flow_count = 0;
176 static int necp_observer_message_limit = 256;
177
178 /*
179 * NECP client tracing control -
180 *
181 * necp_client_tracing_level : 1 for client trace, 2 for flow trace, 3 for parameter details
182 * necp_client_tracing_pid : match client with pid
183 */
184 static int necp_client_tracing_level = 0;
185 static int necp_client_tracing_pid = 0;
186
187 #define NECP_CLIENT_TRACE_LEVEL_CLIENT 1
188 #define NECP_CLIENT_TRACE_LEVEL_FLOW 2
189 #define NECP_CLIENT_TRACE_LEVEL_PARAMS 3
190
// True if `pid` matches the pid tracing filter.
// The argument is parenthesized so that expression arguments
// (e.g. a ternary) expand with the intended precedence.
#define NECP_CLIENT_TRACE_PID_MATCHED(pid) \
	((pid) == necp_client_tracing_pid)

// Evaluates to the current tracing level when tracing is enabled at `level`
// (and, if a pid filter is set, the client's pid matches), else 0.
// NOTE: expects a variable named `client` to be in scope at the call site.
#define NECP_ENABLE_CLIENT_TRACE(level) \
	((necp_client_tracing_level >= (level) && \
	(!necp_client_tracing_pid || NECP_CLIENT_TRACE_PID_MATCHED(client->proc_pid))) ? necp_client_tracing_level : 0)
197
198 #define NECP_CLIENT_LOG(client, fmt, ...) \
199 if (client && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_CLIENT)) { \
200 uuid_string_t client_uuid_str = { }; \
201 uuid_unparse_lower(client->client_id, client_uuid_str); \
202 NECPLOG(LOG_NOTICE, "NECP_CLIENT_LOG <pid %d %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, ##__VA_ARGS__); \
203 }
204
205 #define NECP_CLIENT_FLOW_LOG(client, flow, fmt, ...) \
206 if (client && flow && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) { \
207 uuid_string_t client_uuid_str = { }; \
208 uuid_unparse_lower(client->client_id, client_uuid_str); \
209 uuid_string_t flow_uuid_str = { }; \
210 uuid_unparse_lower(flow->registration_id, flow_uuid_str); \
211 NECPLOG(LOG_NOTICE, "NECP CLIENT FLOW TRACE <pid %d %s> <flow %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, flow_uuid_str, ##__VA_ARGS__); \
212 }
213
214 #define NECP_CLIENT_PARAMS_LOG(client, fmt, ...) \
215 if (client && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) { \
216 uuid_string_t client_uuid_str = { }; \
217 uuid_unparse_lower(client->client_id, client_uuid_str); \
218 NECPLOG(LOG_NOTICE, "NECP_CLIENT_PARAMS_LOG <pid %d %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, ##__VA_ARGS__); \
219 }
220
// The pid attributed to a socket: the effective (delegated) pid when the
// socket carries a delegate, otherwise the pid that last used the socket.
// The argument is parenthesized for safe macro expansion.
#define NECP_SOCKET_PID(so) \
	(((so)->so_flags & SOF_DELEGATED) ? (so)->e_pid : (so)->last_pid)

// Socket analogue of NECP_ENABLE_CLIENT_TRACE: evaluates to the tracing
// level when enabled at `level` (and the pid filter, if set, matches the
// socket's pid), else 0.
// NOTE: expects a variable named `so` to be in scope at the call site.
#define NECP_ENABLE_SOCKET_TRACE(level) \
	((necp_client_tracing_level >= (level) && \
	(!necp_client_tracing_pid || NECP_CLIENT_TRACE_PID_MATCHED(NECP_SOCKET_PID(so)))) ? necp_client_tracing_level : 0)
227
228 #define NECP_SOCKET_PARAMS_LOG(so, fmt, ...) \
229 if (so && NECP_ENABLE_SOCKET_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) { \
230 NECPLOG(LOG_NOTICE, "NECP_SOCKET_PARAMS_LOG <pid %d>: " fmt "\n", NECP_SOCKET_PID(so), ##__VA_ARGS__); \
231 }
232
233 #define NECP_SOCKET_ATTRIBUTE_LOG(fmt, ...) \
234 if (necp_client_tracing_level >= NECP_CLIENT_TRACE_LEVEL_PARAMS) { \
235 NECPLOG(LOG_NOTICE, "NECP_SOCKET_ATTRIBUTE_LOG: " fmt "\n", ##__VA_ARGS__); \
236 }
237
238 #define NECP_CLIENT_TRACKER_LOG(pid, fmt, ...) \
239 if (pid) { \
240 NECPLOG(LOG_NOTICE, "NECP_CLIENT_TRACKER_LOG <pid %d>: " fmt "\n", pid, ##__VA_ARGS__); \
241 }
242
243 #if SKYWALK
244 static int necp_arena_count = 0;
245 static int necp_sysctl_arena_count = 0;
246 static int necp_nexus_flow_count = 0;
247
248 /* userspace stats sanity check range, same unit as TCP (see TCP_RTT_SCALE) */
249 static uint32_t necp_client_stats_rtt_floor = 1; // 32us
250 static uint32_t necp_client_stats_rtt_ceiling = 1920000; // 60s
251 const static struct sk_stats_flow ntstat_sk_stats_zero;
252 #endif /* SKYWALK */
253
254 os_refgrp_decl(static, necp_client_refgrp, "NECPClientRefGroup", NULL);
255
256 SYSCTL_INT(_net_necp, NECPCTL_CLIENT_FD_COUNT, client_fd_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_client_fd_count, 0, "");
257 SYSCTL_INT(_net_necp, NECPCTL_OBSERVER_FD_COUNT, observer_fd_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_observer_fd_count, 0, "");
258 SYSCTL_INT(_net_necp, NECPCTL_CLIENT_COUNT, client_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_client_count, 0, "");
259 SYSCTL_INT(_net_necp, NECPCTL_SOCKET_FLOW_COUNT, socket_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_socket_flow_count, 0, "");
260 SYSCTL_INT(_net_necp, NECPCTL_IF_FLOW_COUNT, if_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_if_flow_count, 0, "");
261 SYSCTL_INT(_net_necp, NECPCTL_OBSERVER_MESSAGE_LIMIT, observer_message_limit, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_observer_message_limit, 256, "");
262 SYSCTL_INT(_net_necp, NECPCTL_CLIENT_TRACING_LEVEL, necp_client_tracing_level, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_client_tracing_level, 0, "");
263 SYSCTL_INT(_net_necp, NECPCTL_CLIENT_TRACING_PID, necp_client_tracing_pid, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_client_tracing_pid, 0, "");
264
265 #if SKYWALK
266 SYSCTL_INT(_net_necp, NECPCTL_ARENA_COUNT, arena_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_arena_count, 0, "");
267 SYSCTL_INT(_net_necp, NECPCTL_SYSCTL_ARENA_COUNT, sysctl_arena_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_sysctl_arena_count, 0, "");
268 SYSCTL_INT(_net_necp, NECPCTL_NEXUS_FLOW_COUNT, nexus_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_nexus_flow_count, 0, "");
269 #if (DEVELOPMENT || DEBUG)
270 SYSCTL_UINT(_net_necp, OID_AUTO, collect_stats_interval_us, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_collect_stats_timeout_microseconds, 0, "");
271 SYSCTL_UINT(_net_necp, OID_AUTO, necp_client_stats_rtt_floor, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_rtt_floor, 0, "");
272 SYSCTL_UINT(_net_necp, OID_AUTO, necp_client_stats_rtt_ceiling, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_rtt_ceiling, 0, "");
273 #endif /* (DEVELOPMENT || DEBUG) */
274 #endif /* SKYWALK */
275
276 #define NECP_MAX_CLIENT_LIST_SIZE 1024 * 1024 // 1MB
277 #define NECP_MAX_AGENT_ACTION_SIZE 256
278
279 extern int tvtohz(struct timeval *);
280 extern unsigned int get_maxmtu(struct rtentry *);
281
282 // Parsed parameters
283 #define NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR 0x00001
284 #define NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR 0x00002
285 #define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF 0x00004
286 #define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF 0x00008
287 #define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE 0x00010
288 #define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE 0x00020
289 #define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT 0x00040
290 #define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT 0x00080
291 #define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT 0x00100
292 #define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT 0x00200
293 #define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE 0x00400
294 #define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE 0x00800
295 #define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE 0x01000
296 #define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE 0x02000
297 #define NECP_PARSED_PARAMETERS_FIELD_FLAGS 0x04000
298 #define NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL 0x08000
299 #define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID 0x10000
300 #define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID 0x20000
301 #define NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS 0x40000
302 #define NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT 0x80000
303 #define NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID 0x100000
304 #define NECP_PARSED_PARAMETERS_FIELD_ETHERTYPE 0x200000
305 #define NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL 0x400000
306 #define NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE 0x800000
307 #define NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER 0x1000000
308 #define NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID 0x2000000
309
310
311 #define NECP_MAX_INTERFACE_PARAMETERS 16
312 #define NECP_MAX_AGENT_PARAMETERS 4
/*
 * Parsed form of a client's parameter TLV buffer.
 * valid_fields is a bitmask of the NECP_PARSED_PARAMETERS_FIELD_* values
 * above, recording which of the fields below were supplied by the client.
 */
struct necp_client_parsed_parameters {
	u_int32_t valid_fields;                 // NECP_PARSED_PARAMETERS_FIELD_* bitmask
	u_int32_t flags;
	u_int64_t delegated_upid;
	union necp_sockaddr_union local_addr;   // Requested local endpoint
	union necp_sockaddr_union remote_addr;  // Requested remote endpoint
	u_int32_t required_interface_index;     // Non-zero when scoped to an interface
	char prohibited_interfaces[NECP_MAX_INTERFACE_PARAMETERS][IFXNAMSIZ];
	u_int8_t required_interface_type;
	u_int8_t local_address_preference;
	u_int8_t prohibited_interface_types[NECP_MAX_INTERFACE_PARAMETERS];
	// Netagent requirements, capped at NECP_MAX_AGENT_PARAMETERS each
	struct necp_client_parameter_netagent_type required_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	struct necp_client_parameter_netagent_type prohibited_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	struct necp_client_parameter_netagent_type preferred_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	struct necp_client_parameter_netagent_type avoided_netagent_types[NECP_MAX_AGENT_PARAMETERS];
	uuid_t required_netagents[NECP_MAX_AGENT_PARAMETERS];
	uuid_t prohibited_netagents[NECP_MAX_AGENT_PARAMETERS];
	uuid_t preferred_netagents[NECP_MAX_AGENT_PARAMETERS];
	uuid_t avoided_netagents[NECP_MAX_AGENT_PARAMETERS];
	u_int8_t ip_protocol;
	u_int8_t transport_protocol;
	u_int16_t ethertype;
	pid_t effective_pid;
	uuid_t effective_uuid;
	uuid_t parent_uuid;
	u_int32_t traffic_class;
};
340
341 static bool
342 necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_parameters,
343 u_int *return_ifindex, bool *validate_agents);
344
345 static bool
346 necp_ifnet_matches_local_address(struct ifnet *ifp, struct sockaddr *sa);
347
348 static bool
349 necp_ifnet_matches_parameters(struct ifnet *ifp,
350 struct necp_client_parsed_parameters *parsed_parameters,
351 u_int32_t override_flags,
352 u_int32_t *preferred_count,
353 bool secondary_interface,
354 bool require_scoped_field);
355
// File operations backing an NECP file descriptor (DTYPE_NETPOLICY).
// Only select, close, and kqfilter are supported; read/write/ioctl/drain
// are rejected via the fo_no_* stubs.
static const struct fileops necp_fd_ops = {
	.fo_type = DTYPE_NETPOLICY,
	.fo_read = fo_no_read,
	.fo_write = fo_no_write,
	.fo_ioctl = fo_no_ioctl,
	.fo_select = necpop_select,
	.fo_close = necpop_close,
	.fo_drain = fo_no_drain,
	.fo_kqfilter = necpop_kqfilter,
};
366
// One netagent assertion held by a client, linked on the client's
// assertion_list (see necp_client_add_assertion/necp_client_remove_assertion).
struct necp_client_assertion {
	LIST_ENTRY(necp_client_assertion) assertion_chain;
	uuid_t asserted_netagent;       // UUID of the asserted netagent
};
371
// Packed wire layout for a flow result: an outer TLV wrapping the
// flow-id, flags, and interface TLVs. Written directly into result buffers,
// hence __packed__.
struct necp_client_flow_header {
	struct necp_tlv_header outer_header;
	struct necp_tlv_header flow_id_tlv_header;
	uuid_t flow_id;
	struct necp_tlv_header flags_tlv_header;
	u_int32_t flags_value;
	struct necp_tlv_header interface_tlv_header;
	struct necp_client_result_interface interface_value;
} __attribute__((__packed__));
381
// Packed TLV layout for a protocol-control event attached to a flow result.
struct necp_client_flow_protoctl_event_header {
	struct necp_tlv_header protoctl_tlv_header;
	struct necp_client_flow_protoctl_event protoctl_event;
} __attribute__((__packed__));
386
// Packed wire layout for a nexus flow result: the common flow header
// followed by the netagent TLV and a TFO cookie TLV.
struct necp_client_nexus_flow_header {
	struct necp_client_flow_header flow_header;
	struct necp_tlv_header agent_tlv_header;
	struct necp_client_result_netagent agent_value;
	struct necp_tlv_header tfo_cookie_tlv_header;
	u_int8_t tfo_cookie_value[NECP_TFO_COOKIE_LEN_MAX];
} __attribute__((__packed__));
394
395 #if SKYWALK
396 struct necp_arena_info;
397 #endif
398
/*
 * One data path backing a flow registration; kept on the registration's
 * flow_list. A flow is either a nexus flow or attached to a socket
 * (see the `nexus` bit and union below).
 */
struct necp_client_flow {
	LIST_ENTRY(necp_client_flow) flow_chain;
	unsigned invalid : 1;
	unsigned nexus : 1; // If true, flow is a nexus; if false, flow is attached to socket
	unsigned socket : 1;
	unsigned viable : 1;
	unsigned assigned : 1;
	unsigned has_protoctl_event : 1;
	unsigned check_tcp_heuristics : 1;
	unsigned _reserved : 1;
	union {
		uuid_t nexus_agent;             // Valid when `nexus` is set
		struct {                        // Valid for socket-attached flows
			void *socket_handle;
			necp_client_flow_cb cb;
		};
	} u;
	uint32_t interface_index;
	u_short delegated_interface_index;
	uint16_t interface_flags;
	uint32_t necp_flow_flags;
	struct necp_client_flow_protoctl_event protoctl_event; // Valid when has_protoctl_event is set
	union necp_sockaddr_union local_addr;
	union necp_sockaddr_union remote_addr;

	size_t assigned_results_length;         // Byte length of assigned_results
	u_int8_t *assigned_results;             // Assigned result TLVs (heap-allocated)
};
427
/*
 * A registered flow for a client. Each registration is linked into the
 * per-fd tree (fd_link), the global flow tree (global_link), the owning
 * client's tree (client_link), and the stats collection list.
 */
struct necp_client_flow_registration {
	RB_ENTRY(necp_client_flow_registration) fd_link;
	RB_ENTRY(necp_client_flow_registration) global_link;
	RB_ENTRY(necp_client_flow_registration) client_link;
	LIST_ENTRY(necp_client_flow_registration) collect_stats_chain;
	uuid_t registration_id;         // Identifies this registration across the system
	u_int32_t flags;
	unsigned flow_result_read : 1;
	unsigned defunct : 1;
	void *interface_handle;
	necp_client_flow_cb interface_cb;
	struct necp_client *client;     // Back-pointer to the owning client
	LIST_HEAD(_necp_registration_flow_list, necp_client_flow) flow_list;
#if SKYWALK
	struct necp_arena_info *stats_arena; /* arena where the stats objects came from */
	void * kstats_kaddr; /* kernel snapshot of untrusted userspace stats, for calculating delta */
	mach_vm_address_t ustats_uaddr; /* userspace stats (untrusted) */
	nstat_userland_context stats_handler_context;
	struct flow_stats *nexus_stats; /* shared stats objects between necp_client and skywalk */
#endif /* SKYWALK */
	u_int64_t last_interface_details __attribute__((aligned(sizeof(u_int64_t))));
};
450
451 static int necp_client_flow_id_cmp(struct necp_client_flow_registration *flow0, struct necp_client_flow_registration *flow1);
452
453 RB_HEAD(_necp_client_flow_tree, necp_client_flow_registration);
454 RB_PROTOTYPE_PREV(_necp_client_flow_tree, necp_client_flow_registration, client_link, necp_client_flow_id_cmp);
455 RB_GENERATE_PREV(_necp_client_flow_tree, necp_client_flow_registration, client_link, necp_client_flow_id_cmp);
456
457 #define NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT 4
458 #define NECP_CLIENT_MAX_INTERFACE_OPTIONS 16
459
460 #define NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT (NECP_CLIENT_MAX_INTERFACE_OPTIONS - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT)
461
/*
 * A single NECP client: the parameters supplied by the process and the
 * current policy-evaluation result for them (see the file-header comment).
 * Clients live in a per-fd tree (link) and the global tree (global_link),
 * are reference-counted, and are protected by their own mutex (`lock`);
 * the cached route has a separate mutex (`route_lock`) — see the
 * NECP_CLIENT_* lock macros below.
 */
struct necp_client {
	RB_ENTRY(necp_client) link;
	RB_ENTRY(necp_client) global_link;

	decl_lck_mtx_data(, lock);              // Protects the client's state
	decl_lck_mtx_data(, route_lock);        // Protects current_route
	os_refcnt_t reference_count;            // See necp_client_retain/release

	uuid_t client_id;                       // System-wide client identifier
	unsigned result_read : 1;
	unsigned group_members_read : 1;
	unsigned allow_multiple_flows : 1;
	unsigned legacy_client_is_flow : 1;

	unsigned platform_binary : 1;

	size_t result_length;                   // Valid byte length of result[]
	u_int8_t result[NECP_MAX_CLIENT_RESULT_SIZE];   // Policy result TLVs

	necp_policy_id policy_id;

	u_int8_t ip_protocol;
	int proc_pid;                           // Pid of the owning process

	u_int64_t delegated_upid;

	struct _necp_client_flow_tree flow_registrations;
	LIST_HEAD(_necp_client_assertion_list, necp_client_assertion) assertion_list;

	size_t assigned_group_members_length;
	u_int8_t *assigned_group_members;

	struct rtentry *current_route;          // Cached route; take route_lock

	struct necp_client_interface_option interface_options[NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
	struct necp_client_interface_option *extra_interface_options;
	u_int8_t interface_option_count; // Number in interface_options + extra_interface_options

	struct necp_client_result_netagent failed_trigger_agent;

	void *agent_handle;

	uuid_t override_euuid;

#if SKYWALK
	netns_token port_reservation;
	nstat_context nstat_context;
	uuid_t latest_flow_registration_id;
	struct necp_client *original_parameters_source;
#endif /* SKYWALK */

	size_t parameters_length;               // Byte length of parameters
	u_int8_t *parameters;                   // Client parameter TLVs (heap-allocated)
};
516
517 #define NECP_CLIENT_LOCK(_c) lck_mtx_lock(&_c->lock)
518 #define NECP_CLIENT_UNLOCK(_c) lck_mtx_unlock(&_c->lock)
519 #define NECP_CLIENT_ASSERT_LOCKED(_c) LCK_MTX_ASSERT(&_c->lock, LCK_MTX_ASSERT_OWNED)
520 #define NECP_CLIENT_ASSERT_UNLOCKED(_c) LCK_MTX_ASSERT(&_c->lock, LCK_MTX_ASSERT_NOTOWNED)
521
522 #define NECP_CLIENT_ROUTE_LOCK(_c) lck_mtx_lock(&_c->route_lock)
523 #define NECP_CLIENT_ROUTE_UNLOCK(_c) lck_mtx_unlock(&_c->route_lock)
524
525 static void necp_client_retain_locked(struct necp_client *client);
526 static void necp_client_retain(struct necp_client *client);
527
528 static bool necp_client_release_locked(struct necp_client *client);
529 static bool necp_client_release(struct necp_client *client);
530
531 static void
532 necp_client_add_assertion(struct necp_client *client, uuid_t netagent_uuid);
533
534 static bool
535 necp_client_remove_assertion(struct necp_client *client, uuid_t netagent_uuid);
536
537 LIST_HEAD(_necp_flow_registration_list, necp_client_flow_registration);
538 static struct _necp_flow_registration_list necp_collect_stats_flow_list;
539
// Snapshot of the state needed to defunct a single flow, collected on a
// _necp_flow_defunct_list so the work can happen outside client locks.
struct necp_flow_defunct {
	LIST_ENTRY(necp_flow_defunct) chain;

	uuid_t flow_id;
	uuid_t nexus_agent;
	void *agent_handle;
	int proc_pid;
	u_int32_t flags;
	struct necp_client_agent_parameters close_parameters;
	bool has_close_parameters;      // True when close_parameters is populated
};
551
552 LIST_HEAD(_necp_flow_defunct_list, necp_flow_defunct);
553
554 static int necp_client_id_cmp(struct necp_client *client0, struct necp_client *client1);
555
556 RB_HEAD(_necp_client_tree, necp_client);
557 RB_PROTOTYPE_PREV(_necp_client_tree, necp_client, link, necp_client_id_cmp);
558 RB_GENERATE_PREV(_necp_client_tree, necp_client, link, necp_client_id_cmp);
559
560 RB_HEAD(_necp_client_global_tree, necp_client);
561 RB_PROTOTYPE_PREV(_necp_client_global_tree, necp_client, global_link, necp_client_id_cmp);
562 RB_GENERATE_PREV(_necp_client_global_tree, necp_client, global_link, necp_client_id_cmp);
563
564 RB_HEAD(_necp_fd_flow_tree, necp_client_flow_registration);
565 RB_PROTOTYPE_PREV(_necp_fd_flow_tree, necp_client_flow_registration, fd_link, necp_client_flow_id_cmp);
566 RB_GENERATE_PREV(_necp_fd_flow_tree, necp_client_flow_registration, fd_link, necp_client_flow_id_cmp);
567
568 RB_HEAD(_necp_client_flow_global_tree, necp_client_flow_registration);
569 RB_PROTOTYPE_PREV(_necp_client_flow_global_tree, necp_client_flow_registration, global_link, necp_client_flow_id_cmp);
570 RB_GENERATE_PREV(_necp_client_flow_global_tree, necp_client_flow_registration, global_link, necp_client_flow_id_cmp);
571
572 static struct _necp_client_global_tree necp_client_global_tree;
573 static struct _necp_client_flow_global_tree necp_client_flow_global_tree;
574
// One queued update message for an observer fd (see update_list in
// struct necp_fd_data).
struct necp_client_update {
	TAILQ_ENTRY(necp_client_update) chain;

	uuid_t client_id;               // Client the update refers to

	size_t update_length;           // Byte length of *update
	struct necp_client_observer_update *update;     // Heap-allocated payload
};
583
#if SKYWALK
// Per-process stats arena bookkeeping: the skmem arena, its mmap state,
// NAIF_* flags (below), and a use count (see necp_arena_info_retain/release).
struct necp_arena_info {
	LIST_ENTRY(necp_arena_info) nai_chain;
	u_int32_t nai_flags;            // NAIF_* flags
	pid_t nai_proc_pid;             // Pid of the process the arena belongs to
	struct skmem_arena *nai_arena;
	struct skmem_arena_mmap_info nai_mmap;
	mach_vm_offset_t nai_roff;
	u_int32_t nai_use_count;
};
#endif /* SKYWALK */
595
596 #define NAIF_ATTACHED 0x1 // arena is attached to list
597 #define NAIF_REDIRECT 0x2 // arena mmap has been redirected
598 #define NAIF_DEFUNCT 0x4 // arena is now defunct
599
/*
 * Per-fd state for an NECP file descriptor (created by necp_open()):
 * the clients and flow registrations owned by the fd, queued observer
 * updates, and select/kqueue wait state. Protected by fd_lock (see the
 * NECP_FD_* macros below).
 */
struct necp_fd_data {
	u_int8_t necp_fd_type;
	LIST_ENTRY(necp_fd_data) chain;         // On necp_fd_list or necp_fd_observer_list
	struct _necp_client_tree clients;       // Clients owned by this fd
	struct _necp_fd_flow_tree flows;        // Flow registrations owned by this fd
	TAILQ_HEAD(_necp_client_update_list, necp_client_update) update_list;
	int update_count;                       // Entries on update_list
	int flags;
	unsigned background : 1;

	int proc_pid;                           // Pid of the opening process
	decl_lck_mtx_data(, fd_lock);
	struct selinfo si;                      // select/kqueue wait info
#if SKYWALK
	// Arenas and their mmap info for per-process stats. Stats objects are allocated from an active arena
	// that is not redirected/defunct. The stats_arena_active keeps track of such an arena, and it also
	// holds a reference count on the object. Each flow allocating a stats object also holds a reference
	// the necp_arena_info (where the object got allocated from). During defunct, we redirect the mapping
	// of the arena such that any attempt to access (read/write) will result in getting zero-filled pages.
	// We then go thru all of the flows for the process and free the stats objects associated with them,
	// followed by destroying the skmem region(s) associated with the arena. The stats_arena_list keeps
	// track of all current and defunct stats arenas; there could be more than one arena created for the
	// process as the arena destruction happens when its reference count drops to 0.
	struct necp_arena_info *stats_arena_active;
	LIST_HEAD(_necp_arena_info_list, necp_arena_info) stats_arena_list;
	u_int32_t stats_arena_gencnt;

	struct skmem_arena *sysctl_arena;
	struct skmem_arena_mmap_info sysctl_mmap;
	mach_vm_offset_t system_sysctls_roff;
#endif /* SKYWALK */
};
633
634 #define NECP_FD_LOCK(_f) lck_mtx_lock(&_f->fd_lock)
635 #define NECP_FD_UNLOCK(_f) lck_mtx_unlock(&_f->fd_lock)
636 #define NECP_FD_ASSERT_LOCKED(_f) LCK_MTX_ASSERT(&_f->fd_lock, LCK_MTX_ASSERT_OWNED)
637 #define NECP_FD_ASSERT_UNLOCKED(_f) LCK_MTX_ASSERT(&_f->fd_lock, LCK_MTX_ASSERT_NOTOWNED)
638
639 static LIST_HEAD(_necp_fd_list, necp_fd_data) necp_fd_list;
640 static LIST_HEAD(_necp_fd_observer_list, necp_fd_data) necp_fd_observer_list;
641
642 static ZONE_DECLARE(necp_client_fd_zone, "necp.clientfd",
643 sizeof(struct necp_fd_data), ZC_NONE);
644
645 #define NECP_FLOW_ZONE_NAME "necp.flow"
646 #define NECP_FLOW_REGISTRATION_ZONE_NAME "necp.flowregistration"
647
648 static unsigned int necp_flow_size; /* size of necp_client_flow */
649 static struct mcache *necp_flow_cache; /* cache for necp_client_flow */
650
651 static unsigned int necp_flow_registration_size; /* size of necp_client_flow_registration */
652 static struct mcache *necp_flow_registration_cache; /* cache for necp_client_flow_registration */
653
654 #if SKYWALK
655 static ZONE_DECLARE(necp_arena_info_zone, "necp.arenainfo",
656 sizeof(struct necp_arena_info), ZC_ZFREE_CLEARMEM);
#endif /* SKYWALK */
658
659 static LCK_ATTR_DECLARE(necp_fd_mtx_attr, 0, 0);
660 static LCK_GRP_DECLARE(necp_fd_mtx_grp, "necp_fd");
661
662 static LCK_RW_DECLARE_ATTR(necp_fd_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
663 static LCK_RW_DECLARE_ATTR(necp_observer_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
664 static LCK_RW_DECLARE_ATTR(necp_client_tree_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
665 static LCK_RW_DECLARE_ATTR(necp_flow_tree_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
666 static LCK_RW_DECLARE_ATTR(necp_collect_stats_list_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
667
668
669 #define NECP_STATS_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_collect_stats_list_lock)
670 #define NECP_STATS_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_collect_stats_list_lock)
671 #define NECP_STATS_LIST_UNLOCK() lck_rw_done(&necp_collect_stats_list_lock)
672
673 #define NECP_CLIENT_TREE_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_client_tree_lock)
674 #define NECP_CLIENT_TREE_LOCK_SHARED() lck_rw_lock_shared(&necp_client_tree_lock)
675 #define NECP_CLIENT_TREE_UNLOCK() lck_rw_done(&necp_client_tree_lock)
676 #define NECP_CLIENT_TREE_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_client_tree_lock, LCK_RW_ASSERT_HELD)
677
678 #define NECP_FLOW_TREE_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_flow_tree_lock)
679 #define NECP_FLOW_TREE_LOCK_SHARED() lck_rw_lock_shared(&necp_flow_tree_lock)
680 #define NECP_FLOW_TREE_UNLOCK() lck_rw_done(&necp_flow_tree_lock)
681 #define NECP_FLOW_TREE_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_flow_tree_lock, LCK_RW_ASSERT_HELD)
682
683 #define NECP_FD_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_fd_lock)
684 #define NECP_FD_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_fd_lock)
685 #define NECP_FD_LIST_UNLOCK() lck_rw_done(&necp_fd_lock)
686
687 #define NECP_OBSERVER_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_observer_lock)
688 #define NECP_OBSERVER_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_observer_lock)
689 #define NECP_OBSERVER_LIST_UNLOCK() lck_rw_done(&necp_observer_lock)
690
691 // Locking Notes
692
693 // Take NECP_FD_LIST_LOCK when accessing or modifying the necp_fd_list
694 // Take NECP_CLIENT_TREE_LOCK when accessing or modifying the necp_client_global_tree
695 // Take NECP_FLOW_TREE_LOCK when accessing or modifying the necp_client_flow_global_tree
696 // Take NECP_STATS_LIST_LOCK when accessing or modifying the necp_collect_stats_flow_list
697 // Take NECP_FD_LOCK when accessing or modifying an necp_fd_data entry
698 // Take NECP_CLIENT_LOCK when accessing or modifying a single necp_client
699 // Take NECP_CLIENT_ROUTE_LOCK when accessing or modifying a client's route
700
701 // Precedence, where 1 is the first lock that must be taken
702 // 1. NECP_FD_LIST_LOCK
703 // 2. NECP_FD_LOCK (any)
704 // 3. NECP_CLIENT_TREE_LOCK
705 // 4. NECP_CLIENT_LOCK (any)
706 // 5. NECP_FLOW_TREE_LOCK
707 // 6. NECP_STATS_LIST_LOCK
708 // 7. NECP_CLIENT_ROUTE_LOCK (any)
709
710 static thread_call_t necp_client_update_tcall;
711
712 #if SKYWALK
713 static thread_call_t necp_client_collect_stats_tcall;
714 static thread_call_t necp_close_empty_arenas_tcall;
715
716 static void necp_fd_insert_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai);
717 static void necp_fd_remove_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai);
718 static struct necp_arena_info *necp_fd_mredirect_stats_arena(struct necp_fd_data *fd_data, struct proc *proc);
719
720 static void necp_arena_info_retain(struct necp_arena_info *nai);
721 static void necp_arena_info_release(struct necp_arena_info *nai);
722 static struct necp_arena_info *necp_arena_info_alloc(void);
723 static void necp_arena_info_free(struct necp_arena_info *nai);
724
725 static int necp_arena_initialize(struct necp_fd_data *fd_data, bool locked);
726 static int necp_stats_initialize(struct necp_fd_data *fd_data, struct necp_client *client,
727 struct necp_client_flow_registration *flow_registration, struct necp_stats_bufreq *bufreq);
728 static int necp_arena_create(struct necp_fd_data *fd_data, size_t obj_size, size_t obj_cnt, struct proc *p);
729 static int necp_arena_stats_obj_alloc(struct necp_fd_data *fd_data, mach_vm_offset_t *off, struct necp_arena_info **stats_arena, void **kstats_kaddr, boolean_t cansleep);
730 static void necp_arena_stats_obj_free(struct necp_fd_data *fd_data, struct necp_arena_info *stats_arena, void **kstats_kaddr, mach_vm_address_t *ustats_uaddr);
731 static void necp_stats_arenas_destroy(struct necp_fd_data *fd_data, boolean_t closing);
732
733 static int necp_sysctl_arena_initialize(struct necp_fd_data *fd_data, bool locked);
734 static void necp_sysctl_arena_destroy(struct necp_fd_data *fd_data);
735 static void *necp_arena_sysctls_obj(struct necp_fd_data *fd_data, mach_vm_offset_t *off, size_t *size);
736 #endif /* !SKYWALK */
737
738 /// NECP file descriptor functions
739
740 static void
necp_fd_notify(struct necp_fd_data * fd_data,bool locked)741 necp_fd_notify(struct necp_fd_data *fd_data, bool locked)
742 {
743 struct selinfo *si = &fd_data->si;
744
745 if (!locked) {
746 NECP_FD_LOCK(fd_data);
747 }
748
749 selwakeup(si);
750
751 // use a non-zero hint to tell the notification from the
752 // call done in kqueue_scan() which uses 0
753 KNOTE(&si->si_note, 1); // notification
754
755 if (!locked) {
756 NECP_FD_UNLOCK(fd_data);
757 }
758 }
759
760 static inline bool
necp_client_has_unread_flows(struct necp_client * client)761 necp_client_has_unread_flows(struct necp_client *client)
762 {
763 NECP_CLIENT_ASSERT_LOCKED(client);
764 struct necp_client_flow_registration *flow_registration = NULL;
765 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
766 if (!flow_registration->flow_result_read) {
767 return true;
768 }
769 }
770 return false;
771 }
772
773 static int
necp_fd_poll(struct necp_fd_data * fd_data,int events,void * wql,struct proc * p,int is_kevent)774 necp_fd_poll(struct necp_fd_data *fd_data, int events, void *wql, struct proc *p, int is_kevent)
775 {
776 #pragma unused(wql, p, is_kevent)
777 u_int revents = 0;
778
779 u_int want_rx = events & (POLLIN | POLLRDNORM);
780 if (want_rx) {
781 if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
782 // Push-mode observers are readable when they have a new update
783 if (!TAILQ_EMPTY(&fd_data->update_list)) {
784 revents |= want_rx;
785 }
786 } else {
787 // Standard fds are readable when some client is unread
788 struct necp_client *client = NULL;
789 bool has_unread_clients = FALSE;
790 RB_FOREACH(client, _necp_client_tree, &fd_data->clients) {
791 NECP_CLIENT_LOCK(client);
792 if (!client->result_read || !client->group_members_read || necp_client_has_unread_flows(client)) {
793 has_unread_clients = TRUE;
794 }
795 NECP_CLIENT_UNLOCK(client);
796 if (has_unread_clients) {
797 break;
798 }
799 }
800
801 if (has_unread_clients) {
802 revents |= want_rx;
803 }
804 }
805 }
806
807 return revents;
808 }
809
810 static inline void
necp_generate_client_id(uuid_t client_id,bool is_flow)811 necp_generate_client_id(uuid_t client_id, bool is_flow)
812 {
813 uuid_generate_random(client_id);
814
815 if (is_flow) {
816 client_id[9] |= 0x01;
817 } else {
818 client_id[9] &= ~0x01;
819 }
820 }
821
822 static inline bool
necp_client_id_is_flow(uuid_t client_id)823 necp_client_id_is_flow(uuid_t client_id)
824 {
825 return client_id[9] & 0x01;
826 }
827
828 static struct necp_client *
necp_find_client_and_lock(uuid_t client_id)829 necp_find_client_and_lock(uuid_t client_id)
830 {
831 NECP_CLIENT_TREE_ASSERT_LOCKED();
832
833 struct necp_client *client = NULL;
834
835 if (necp_client_id_is_flow(client_id)) {
836 NECP_FLOW_TREE_LOCK_SHARED();
837 struct necp_client_flow_registration find;
838 uuid_copy(find.registration_id, client_id);
839 struct necp_client_flow_registration *flow = RB_FIND(_necp_client_flow_global_tree, &necp_client_flow_global_tree, &find);
840 if (flow != NULL) {
841 client = flow->client;
842 }
843 NECP_FLOW_TREE_UNLOCK();
844 } else {
845 struct necp_client find;
846 uuid_copy(find.client_id, client_id);
847 client = RB_FIND(_necp_client_global_tree, &necp_client_global_tree, &find);
848 }
849
850 if (client != NULL) {
851 NECP_CLIENT_LOCK(client);
852 }
853
854 return client;
855 }
856
857 static struct necp_client_flow_registration *
necp_client_find_flow(struct necp_client * client,uuid_t flow_id)858 necp_client_find_flow(struct necp_client *client, uuid_t flow_id)
859 {
860 NECP_CLIENT_ASSERT_LOCKED(client);
861 struct necp_client_flow_registration *flow = NULL;
862
863 if (necp_client_id_is_flow(flow_id)) {
864 struct necp_client_flow_registration find;
865 uuid_copy(find.registration_id, flow_id);
866 flow = RB_FIND(_necp_client_flow_tree, &client->flow_registrations, &find);
867 } else {
868 flow = RB_ROOT(&client->flow_registrations);
869 }
870
871 return flow;
872 }
873
874 static struct necp_client *
necp_client_fd_find_client_unlocked(struct necp_fd_data * client_fd,uuid_t client_id)875 necp_client_fd_find_client_unlocked(struct necp_fd_data *client_fd, uuid_t client_id)
876 {
877 NECP_FD_ASSERT_LOCKED(client_fd);
878 struct necp_client *client = NULL;
879
880 if (necp_client_id_is_flow(client_id)) {
881 struct necp_client_flow_registration find;
882 uuid_copy(find.registration_id, client_id);
883 struct necp_client_flow_registration *flow = RB_FIND(_necp_fd_flow_tree, &client_fd->flows, &find);
884 if (flow != NULL) {
885 client = flow->client;
886 }
887 } else {
888 struct necp_client find;
889 uuid_copy(find.client_id, client_id);
890 client = RB_FIND(_necp_client_tree, &client_fd->clients, &find);
891 }
892
893 return client;
894 }
895
896 static struct necp_client *
necp_client_fd_find_client_and_lock(struct necp_fd_data * client_fd,uuid_t client_id)897 necp_client_fd_find_client_and_lock(struct necp_fd_data *client_fd, uuid_t client_id)
898 {
899 struct necp_client *client = necp_client_fd_find_client_unlocked(client_fd, client_id);
900 if (client != NULL) {
901 NECP_CLIENT_LOCK(client);
902 }
903
904 return client;
905 }
906
907 static inline int
necp_client_id_cmp(struct necp_client * client0,struct necp_client * client1)908 necp_client_id_cmp(struct necp_client *client0, struct necp_client *client1)
909 {
910 return uuid_compare(client0->client_id, client1->client_id);
911 }
912
913 static inline int
necp_client_flow_id_cmp(struct necp_client_flow_registration * flow0,struct necp_client_flow_registration * flow1)914 necp_client_flow_id_cmp(struct necp_client_flow_registration *flow0, struct necp_client_flow_registration *flow1)
915 {
916 return uuid_compare(flow0->registration_id, flow1->registration_id);
917 }
918
919 static int
necpop_select(struct fileproc * fp,int which,void * wql,vfs_context_t ctx)920 necpop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
921 {
922 #pragma unused(fp, which, wql, ctx)
923 return 0;
924 struct necp_fd_data *fd_data = NULL;
925 int revents = 0;
926 int events = 0;
927 proc_t procp;
928
929 fd_data = (struct necp_fd_data *)fp_get_data(fp);
930 if (fd_data == NULL) {
931 return 0;
932 }
933
934 procp = vfs_context_proc(ctx);
935
936 switch (which) {
937 case FREAD: {
938 events = POLLIN;
939 break;
940 }
941
942 default: {
943 return 1;
944 }
945 }
946
947 NECP_FD_LOCK(fd_data);
948 revents = necp_fd_poll(fd_data, events, wql, procp, 0);
949 NECP_FD_UNLOCK(fd_data);
950
951 return (events & revents) ? 1 : 0;
952 }
953
954 static void
necp_fd_knrdetach(struct knote * kn)955 necp_fd_knrdetach(struct knote *kn)
956 {
957 struct necp_fd_data *fd_data = (struct necp_fd_data *)kn->kn_hook;
958 struct selinfo *si = &fd_data->si;
959
960 NECP_FD_LOCK(fd_data);
961 KNOTE_DETACH(&si->si_note, kn);
962 NECP_FD_UNLOCK(fd_data);
963 }
964
static int
necp_fd_knread(struct knote *kn, long hint)
{
#pragma unused(kn, hint)
	// Always report ready here; the real readiness check happens in
	// necp_fd_knrprocess()/necp_fd_knrtouch() via necp_fd_poll()
	return 1;
}
971
972 static int
necp_fd_knrprocess(struct knote * kn,struct kevent_qos_s * kev)973 necp_fd_knrprocess(struct knote *kn, struct kevent_qos_s *kev)
974 {
975 struct necp_fd_data *fd_data;
976 int revents;
977 int res;
978
979 fd_data = (struct necp_fd_data *)kn->kn_hook;
980
981 NECP_FD_LOCK(fd_data);
982 revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
983 res = ((revents & POLLIN) != 0);
984 if (res) {
985 knote_fill_kevent(kn, kev, 0);
986 }
987 NECP_FD_UNLOCK(fd_data);
988 return res;
989 }
990
991 static int
necp_fd_knrtouch(struct knote * kn,struct kevent_qos_s * kev)992 necp_fd_knrtouch(struct knote *kn, struct kevent_qos_s *kev)
993 {
994 #pragma unused(kev)
995 struct necp_fd_data *fd_data;
996 int revents;
997
998 fd_data = (struct necp_fd_data *)kn->kn_hook;
999
1000 NECP_FD_LOCK(fd_data);
1001 revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
1002 NECP_FD_UNLOCK(fd_data);
1003
1004 return (revents & POLLIN) != 0;
1005 }
1006
// Read-filter (EVFILT_READ) operations for NECP fds; attached by
// necpop_kqfilter() which sets kn_filtid to EVFILTID_NECP_FD.
SECURITY_READ_ONLY_EARLY(struct filterops) necp_fd_rfiltops = {
	.f_isfd = 1,
	.f_detach = necp_fd_knrdetach,
	.f_event = necp_fd_knread,
	.f_touch = necp_fd_knrtouch,
	.f_process = necp_fd_knrprocess,
};
1014
1015 static int
necpop_kqfilter(struct fileproc * fp,struct knote * kn,__unused struct kevent_qos_s * kev)1016 necpop_kqfilter(struct fileproc *fp, struct knote *kn,
1017 __unused struct kevent_qos_s *kev)
1018 {
1019 struct necp_fd_data *fd_data = NULL;
1020 int revents;
1021
1022 if (kn->kn_filter != EVFILT_READ) {
1023 NECPLOG(LOG_ERR, "bad filter request %d", kn->kn_filter);
1024 knote_set_error(kn, EINVAL);
1025 return 0;
1026 }
1027
1028 fd_data = (struct necp_fd_data *)fp_get_data(fp);
1029 if (fd_data == NULL) {
1030 NECPLOG0(LOG_ERR, "No channel for kqfilter");
1031 knote_set_error(kn, ENOENT);
1032 return 0;
1033 }
1034
1035 NECP_FD_LOCK(fd_data);
1036 kn->kn_filtid = EVFILTID_NECP_FD;
1037 kn->kn_hook = fd_data;
1038 KNOTE_ATTACH(&fd_data->si.si_note, kn);
1039
1040 revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
1041
1042 NECP_FD_UNLOCK(fd_data);
1043
1044 return (revents & POLLIN) != 0;
1045 }
1046
// Packing layout for (interface_index, interface_flags) into one 64-bit
// value: index in bits [31:0], flags in bits [47:32].
#define INTERFACE_FLAGS_SHIFT   32
#define INTERFACE_FLAGS_MASK    0xffff
#define INTERFACE_INDEX_SHIFT   0
#define INTERFACE_INDEX_MASK    0xffffffff

static uint64_t
combine_interface_details(uint32_t interface_index, uint16_t interface_flags)
{
	// Pack flags and index into a single word for atomic 64-bit publication
	const uint64_t flags_part = ((uint64_t)interface_flags & INTERFACE_FLAGS_MASK) << INTERFACE_FLAGS_SHIFT;
	const uint64_t index_part = ((uint64_t)interface_index & INTERFACE_INDEX_MASK) << INTERFACE_INDEX_SHIFT;
	return flags_part | index_part;
}
1058
1059 #if SKYWALK
1060
1061 static void
split_interface_details(uint64_t combined_details,uint32_t * interface_index,uint16_t * interface_flags)1062 split_interface_details(uint64_t combined_details, uint32_t *interface_index, uint16_t *interface_flags)
1063 {
1064 *interface_index = (combined_details >> INTERFACE_INDEX_SHIFT) & INTERFACE_INDEX_MASK;
1065 *interface_flags = (combined_details >> INTERFACE_FLAGS_SHIFT) & INTERFACE_FLAGS_MASK;
1066 }
1067
1068 static void
necp_flow_save_current_interface_details(struct necp_client_flow_registration * flow_registration)1069 necp_flow_save_current_interface_details(struct necp_client_flow_registration *flow_registration)
1070 {
1071 struct necp_client_flow *flow = NULL;
1072 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
1073 if (flow->nexus) {
1074 uint64_t combined_details = combine_interface_details(flow->interface_index, flow->interface_flags);
1075 atomic_set_64(&flow_registration->last_interface_details, combined_details);
1076 break;
1077 }
1078 }
1079 }
1080
// Fold a flow's userspace-reported per-flow stats (untrusted) into the
// kernel's per-interface accounting, under the ifnet head lock.
static void
necp_client_collect_interface_stats(struct necp_client_flow_registration *flow_registration, struct ifnet_stats_per_flow *ifs)
{
	struct necp_client_flow *flow = NULL;

	// A zero packet count in either direction means there is nothing to
	// fold (or the stats were never published).
	if (ifs == NULL || ifs->txpackets == 0 || ifs->rxpackets == 0) {
		return; // App might have crashed without publishing ifs
	}

	// Do malicious stats detection here

	// Fold userspace stats into (trusted) kernel stats (stored in ifp).
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		uint32_t if_idx = flow->interface_index;
		ifnet_t ifp = NULL;
		ifnet_head_lock_shared();
		// Bounds-check the index against the live interface table
		if (if_idx != IFSCOPE_NONE && if_idx <= (uint32_t)if_index) {
			ifp = ifindex2ifnet[if_idx];
			// NOTE(review): ifindex2ifnet[] entries may be NULL for
			// detached interfaces; presumably ifnet_update_stats_per_flow()
			// tolerates a NULL ifp — confirm.
			ifnet_update_stats_per_flow(ifs, ifp);
		}
		ifnet_head_done();

		// Currently there is only one flow that uses the shared necp
		// stats region, so this loop should exit after updating an ifp
		break;
	}
}
1108
// Fold the (untrusted) userspace-shared TCP stats snapshot into trusted
// kernel accounting: compute per-field deltas against the previously
// committed copy, sanity-check RTT, and push the deltas into the client's
// current route via nstat_route_update().
static void
necp_client_collect_stats(struct necp_client_flow_registration *flow_registration)
{
	struct necp_all_kstats *kstats = (struct necp_all_kstats *)flow_registration->kstats_kaddr;
	if (kstats == NULL) {
		return;
	}

	// Grab userspace stats delta (untrusted).
	// curr_tcpstats appears to point at the userspace-shared region
	// (ustats); prev_tcpstats is the kernel-side committed copy from the
	// last collection pass.
	struct necp_tcp_stats *curr_tcpstats = (struct necp_tcp_stats *)kstats->necp_stats_ustats;
	struct necp_tcp_stats *prev_tcpstats = (struct necp_tcp_stats *)&kstats->necp_stats_comm;
	// diff_n_update(field): declares a local d_<field> holding
	// current - previous (u_int32_t arithmetic, so wraparound is
	// well-defined), then commits the delta into the kernel copy.
	// The d_* locals are used below, after the #undef.
#define diff_n_update(field) \
	u_int32_t d_##field = (curr_tcpstats->necp_tcp_counts.necp_stat_##field - prev_tcpstats->necp_tcp_counts.necp_stat_##field); \
	prev_tcpstats->necp_tcp_counts.necp_stat_##field += d_##field;
	diff_n_update(rxpackets);
	diff_n_update(txpackets);
	if (d_rxpackets == 0 && d_txpackets == 0) {
		return; // no activity since last collection, stop here
	}
	diff_n_update(rxbytes);
	diff_n_update(txbytes);
	diff_n_update(rxduplicatebytes);
	diff_n_update(rxoutoforderbytes);
	diff_n_update(txretransmit);
	diff_n_update(connectattempts);
	diff_n_update(connectsuccesses);
	// RTT values are copied verbatim (not diffed): commit to the kernel
	// copy and keep locals for the route update below.
	uint32_t rtt = prev_tcpstats->necp_tcp_counts.necp_stat_avg_rtt = curr_tcpstats->necp_tcp_counts.necp_stat_avg_rtt;
	uint32_t rtt_var = prev_tcpstats->necp_tcp_counts.necp_stat_var_rtt = curr_tcpstats->necp_tcp_counts.necp_stat_var_rtt;
#undef diff_n_update

	// Do malicious stats detection with the deltas here.
	// RTT check (not necessarily attacks, might just be not measured since we report stats async periodically).
	if (rtt < necp_client_stats_rtt_floor || rtt > necp_client_stats_rtt_ceiling) {
		rtt = rtt_var = 0; // nstat_route_update to skip 0 rtt
	}

	// Fold userspace stats into (trusted) kernel stats (stored in route).
	NECP_CLIENT_ROUTE_LOCK(flow_registration->client);
	struct rtentry *route = flow_registration->client->current_route;
	if (route != NULL) {
		nstat_route_update(route, d_connectattempts, d_connectsuccesses, d_rxpackets, d_rxbytes, d_rxduplicatebytes,
		    d_rxoutoforderbytes, d_txpackets, d_txbytes, d_txretransmit, rtt, rtt_var);
	}
	NECP_CLIENT_ROUTE_UNLOCK(flow_registration->client);
}
1154
// This is called from various places; "closing" here implies the client being closed/removed if true, otherwise being
// defunct. In the former, we expect the caller to not hold the lock; for the latter it must have acquired it.
// Performs a final stats collection, removes the registration from the
// global collection list, and tears down the shared stats mapping.
static void
necp_destroy_flow_stats(struct necp_fd_data *fd_data,
    struct necp_client_flow_registration *flow_registration,
    struct ifnet_stats_per_flow *flow_ifnet_stats,
    boolean_t closing)
{
	NECP_FD_ASSERT_LOCKED(fd_data);

	struct necp_client *client = flow_registration->client;

	// Honor the dual locking contract described above: only the close
	// path takes (and later releases) the client lock itself.
	if (closing) {
		NECP_CLIENT_ASSERT_UNLOCKED(client);
		NECP_CLIENT_LOCK(client);
	} else {
		NECP_CLIENT_ASSERT_LOCKED(client);
	}

	// the interface stats are independent of the flow stats, hence we check here
	if (flow_ifnet_stats != NULL) {
		necp_client_collect_interface_stats(flow_registration, flow_ifnet_stats);
	}

	if (flow_registration->kstats_kaddr != NULL) {
		// Final collection pass, then unlink from the stats list while
		// holding the stats list lock exclusively.
		NECP_STATS_LIST_LOCK_EXCLUSIVE();
		necp_client_collect_stats(flow_registration);
		const bool destroyed = necp_client_release_locked(client); // Drop the reference held by the stats list
		// The caller's own reference must keep the client alive here
		ASSERT(!destroyed);
		(void)destroyed;
		LIST_REMOVE(flow_registration, collect_stats_chain);
		NECP_STATS_LIST_UNLOCK();
		if (flow_registration->stats_handler_context != NULL) {
			ntstat_userland_stats_close(flow_registration->stats_handler_context);
			flow_registration->stats_handler_context = NULL;
		}
		// Free the stats object; clears both kernel and user addresses
		necp_arena_stats_obj_free(fd_data, flow_registration->stats_arena, &flow_registration->kstats_kaddr, &flow_registration->ustats_uaddr);
		ASSERT(flow_registration->kstats_kaddr == NULL);
		ASSERT(flow_registration->ustats_uaddr == 0);
	}

	if (flow_registration->nexus_stats != NULL) {
		flow_stats_release(flow_registration->nexus_stats);
		flow_registration->nexus_stats = NULL;
	}

	if (closing) {
		NECP_CLIENT_UNLOCK(client);
	}
}
1205
1206 static void
necp_schedule_collect_stats_clients(bool recur)1207 necp_schedule_collect_stats_clients(bool recur)
1208 {
1209 if (necp_client_collect_stats_tcall == NULL ||
1210 (!recur && thread_call_isactive(necp_client_collect_stats_tcall))) {
1211 return;
1212 }
1213
1214 uint64_t deadline = 0;
1215 uint64_t leeway = 0;
1216 clock_interval_to_deadline(necp_collect_stats_timeout_microseconds, NSEC_PER_USEC, &deadline);
1217 clock_interval_to_absolutetime_interval(necp_collect_stats_timeout_leeway_microseconds, NSEC_PER_USEC, &leeway);
1218
1219 thread_call_enter_delayed_with_leeway(necp_client_collect_stats_tcall, NULL,
1220 deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
1221 }
1222
1223 static void
necp_collect_stats_client_callout(__unused thread_call_param_t dummy,__unused thread_call_param_t arg)1224 necp_collect_stats_client_callout(__unused thread_call_param_t dummy,
1225 __unused thread_call_param_t arg)
1226 {
1227 struct necp_client_flow_registration *flow_registration;
1228
1229 net_update_uptime();
1230 NECP_STATS_LIST_LOCK_SHARED();
1231 if (LIST_EMPTY(&necp_collect_stats_flow_list)) {
1232 NECP_STATS_LIST_UNLOCK();
1233 return;
1234 }
1235 LIST_FOREACH(flow_registration, &necp_collect_stats_flow_list, collect_stats_chain) {
1236 // Collecting stats should be cheap (atomic increments)
1237 // Values like flow_registration->kstats_kaddr are guaranteed to be valid
1238 // as long as the flow_registration is in the stats list
1239 necp_client_collect_stats(flow_registration);
1240 }
1241 NECP_STATS_LIST_UNLOCK();
1242
1243 necp_schedule_collect_stats_clients(TRUE); // recurring collection
1244 }
1245
1246 #endif /* !SKYWALK */
1247
// Mark a flow registration defunct if it has at least one nexus flow with
// a live nexus agent. When defunct_list is non-NULL, record per-flow close
// info (agent UUID, flow/client ID, pid, flags) so the caller can message
// the agents after dropping the client lock.
static void
necp_defunct_flow_registration(struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    struct _necp_flow_defunct_list *defunct_list)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	if (!flow_registration->defunct) {
		bool needs_defunct = false;
		struct necp_client_flow *search_flow = NULL;
		LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
			if (search_flow->nexus &&
			    !uuid_is_null(search_flow->u.nexus_agent)) {
				// Save defunct values for the nexus
				if (defunct_list != NULL) {
					// Sleeping alloc won't fail; copy only what's necessary
					struct necp_flow_defunct *flow_defunct = kalloc_type(struct necp_flow_defunct,
					    Z_WAITOK | Z_ZERO);
					uuid_copy(flow_defunct->nexus_agent, search_flow->u.nexus_agent);
					// USE_CLIENT_ID flows are identified to the agent by the
					// client's ID rather than the registration's ID.
					uuid_copy(flow_defunct->flow_id, ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
					    client->client_id :
					    flow_registration->registration_id));
					flow_defunct->proc_pid = client->proc_pid;
					flow_defunct->agent_handle = client->agent_handle;
					flow_defunct->flags = flow_registration->flags;
#if SKYWALK
					// If stats were shared, carry the QUIC session resumption
					// token along as close parameters.
					if (flow_registration->kstats_kaddr != NULL) {
						struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
						struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
						if (quicstats != NULL) {
							memcpy(flow_defunct->close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(flow_defunct->close_parameters.u.close_token));
							flow_defunct->has_close_parameters = true;
						}
					}
#endif /* SKYWALK */
					// Add to the list provided by caller
					LIST_INSERT_HEAD(defunct_list, flow_defunct, chain);
				}

				needs_defunct = true;
			}
		}

		if (needs_defunct) {
#if SKYWALK
			// Close the stats early
			if (flow_registration->stats_handler_context != NULL) {
				ntstat_userland_stats_event(flow_registration->stats_handler_context,
				    NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT);
			}
#endif /* SKYWALK */

			// Only set defunct if there was some assigned flow
			flow_registration->defunct = true;
		}
	}
}
1305
1306 static void
necp_defunct_client_for_policy(struct necp_client * client,struct _necp_flow_defunct_list * defunct_list)1307 necp_defunct_client_for_policy(struct necp_client *client,
1308 struct _necp_flow_defunct_list *defunct_list)
1309 {
1310 NECP_CLIENT_ASSERT_LOCKED(client);
1311
1312 struct necp_client_flow_registration *flow_registration = NULL;
1313 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
1314 necp_defunct_flow_registration(client, flow_registration, defunct_list);
1315 }
1316 }
1317
1318 static void
necp_client_free(struct necp_client * client)1319 necp_client_free(struct necp_client *client)
1320 {
1321 NECP_CLIENT_ASSERT_UNLOCKED(client);
1322
1323 kfree_data(client->extra_interface_options,
1324 sizeof(struct necp_client_interface_option) * NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT);
1325 client->extra_interface_options = NULL;
1326
1327 kfree_data(client->parameters, client->parameters_length);
1328 client->parameters = NULL;
1329
1330 lck_mtx_destroy(&client->route_lock, &necp_fd_mtx_grp);
1331 lck_mtx_destroy(&client->lock, &necp_fd_mtx_grp);
1332
1333 kfree_type(struct necp_client, client);
1334 }
1335
1336 static void
necp_client_retain_locked(struct necp_client * client)1337 necp_client_retain_locked(struct necp_client *client)
1338 {
1339 NECP_CLIENT_ASSERT_LOCKED(client);
1340
1341 os_ref_retain_locked(&client->reference_count);
1342 }
1343
static void
necp_client_retain(struct necp_client *client)
{
	// Lock-taking wrapper around necp_client_retain_locked()
	NECP_CLIENT_LOCK(client);
	necp_client_retain_locked(client);
	NECP_CLIENT_UNLOCK(client);
}
1351
1352 static bool
necp_client_release_locked(struct necp_client * client)1353 necp_client_release_locked(struct necp_client *client)
1354 {
1355 NECP_CLIENT_ASSERT_LOCKED(client);
1356
1357 os_ref_count_t count = os_ref_release_locked(&client->reference_count);
1358 if (count == 0) {
1359 NECP_CLIENT_UNLOCK(client);
1360 necp_client_free(client);
1361 }
1362
1363 return count == 0;
1364 }
1365
static bool
necp_client_release(struct necp_client *client)
{
	// Lock-taking wrapper around necp_client_release_locked(); returns
	// true when the last reference was dropped (client is gone).
	NECP_CLIENT_LOCK(client);
	const bool last_ref = necp_client_release_locked(client);
	if (!last_ref) {
		// On the last release the locked helper already unlocked
		NECP_CLIENT_UNLOCK(client);
	}
	return last_ref;
}
1378
1379 static struct necp_client_update *
necp_client_update_alloc(const void * data,size_t length)1380 necp_client_update_alloc(const void *data, size_t length)
1381 {
1382 struct necp_client_update *client_update;
1383 struct necp_client_observer_update *buffer;
1384 size_t alloc_size;
1385
1386 if (os_add_overflow(length, sizeof(*buffer), &alloc_size)) {
1387 return NULL;
1388 }
1389 buffer = kalloc_data(alloc_size, Z_WAITOK);
1390 if (buffer == NULL) {
1391 return NULL;
1392 }
1393
1394 client_update = kalloc_type(struct necp_client_update,
1395 Z_WAITOK | Z_ZERO | Z_NOFAIL);
1396 client_update->update_length = alloc_size;
1397 client_update->update = buffer;
1398 memcpy(buffer->tlv_buffer, data, length);
1399 return client_update;
1400 }
1401
1402 static void
necp_client_update_free(struct necp_client_update * client_update)1403 necp_client_update_free(struct necp_client_update *client_update)
1404 {
1405 kfree_data(client_update->update, client_update->update_length);
1406 kfree_type(struct necp_client_update, client_update);
1407 }
1408
1409 static void
necp_client_update_observer_add_internal(struct necp_fd_data * observer_fd,struct necp_client * client)1410 necp_client_update_observer_add_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1411 {
1412 struct necp_client_update *client_update;
1413
1414 NECP_FD_LOCK(observer_fd);
1415
1416 if (observer_fd->update_count >= necp_observer_message_limit) {
1417 NECP_FD_UNLOCK(observer_fd);
1418 return;
1419 }
1420
1421 client_update = necp_client_update_alloc(client->parameters, client->parameters_length);
1422 if (client_update != NULL) {
1423 uuid_copy(client_update->client_id, client->client_id);
1424 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_PARAMETERS;
1425 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1426 observer_fd->update_count++;
1427
1428 necp_fd_notify(observer_fd, true);
1429 }
1430
1431 NECP_FD_UNLOCK(observer_fd);
1432 }
1433
1434 static void
necp_client_update_observer_update_internal(struct necp_fd_data * observer_fd,struct necp_client * client)1435 necp_client_update_observer_update_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1436 {
1437 NECP_FD_LOCK(observer_fd);
1438
1439 if (observer_fd->update_count >= necp_observer_message_limit) {
1440 NECP_FD_UNLOCK(observer_fd);
1441 return;
1442 }
1443
1444 struct necp_client_update *client_update = necp_client_update_alloc(client->result, client->result_length);
1445 if (client_update != NULL) {
1446 uuid_copy(client_update->client_id, client->client_id);
1447 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_RESULT;
1448 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1449 observer_fd->update_count++;
1450
1451 necp_fd_notify(observer_fd, true);
1452 }
1453
1454 NECP_FD_UNLOCK(observer_fd);
1455 }
1456
1457 static void
necp_client_update_observer_remove_internal(struct necp_fd_data * observer_fd,struct necp_client * client)1458 necp_client_update_observer_remove_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1459 {
1460 NECP_FD_LOCK(observer_fd);
1461
1462 if (observer_fd->update_count >= necp_observer_message_limit) {
1463 NECP_FD_UNLOCK(observer_fd);
1464 return;
1465 }
1466
1467 struct necp_client_update *client_update = necp_client_update_alloc(NULL, 0);
1468 if (client_update != NULL) {
1469 uuid_copy(client_update->client_id, client->client_id);
1470 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_REMOVE;
1471 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1472 observer_fd->update_count++;
1473
1474 necp_fd_notify(observer_fd, true);
1475 }
1476
1477 NECP_FD_UNLOCK(observer_fd);
1478 }
1479
1480 static void
necp_client_update_observer_add(struct necp_client * client)1481 necp_client_update_observer_add(struct necp_client *client)
1482 {
1483 NECP_OBSERVER_LIST_LOCK_SHARED();
1484
1485 if (LIST_EMPTY(&necp_fd_observer_list)) {
1486 // No observers, bail
1487 NECP_OBSERVER_LIST_UNLOCK();
1488 return;
1489 }
1490
1491 struct necp_fd_data *observer_fd = NULL;
1492 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1493 necp_client_update_observer_add_internal(observer_fd, client);
1494 }
1495
1496 NECP_OBSERVER_LIST_UNLOCK();
1497 }
1498
1499 static void
necp_client_update_observer_update(struct necp_client * client)1500 necp_client_update_observer_update(struct necp_client *client)
1501 {
1502 NECP_OBSERVER_LIST_LOCK_SHARED();
1503
1504 if (LIST_EMPTY(&necp_fd_observer_list)) {
1505 // No observers, bail
1506 NECP_OBSERVER_LIST_UNLOCK();
1507 return;
1508 }
1509
1510 struct necp_fd_data *observer_fd = NULL;
1511 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1512 necp_client_update_observer_update_internal(observer_fd, client);
1513 }
1514
1515 NECP_OBSERVER_LIST_UNLOCK();
1516 }
1517
1518 static void
necp_client_update_observer_remove(struct necp_client * client)1519 necp_client_update_observer_remove(struct necp_client *client)
1520 {
1521 NECP_OBSERVER_LIST_LOCK_SHARED();
1522
1523 if (LIST_EMPTY(&necp_fd_observer_list)) {
1524 // No observers, bail
1525 NECP_OBSERVER_LIST_UNLOCK();
1526 return;
1527 }
1528
1529 struct necp_fd_data *observer_fd = NULL;
1530 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1531 necp_client_update_observer_remove_internal(observer_fd, client);
1532 }
1533
1534 NECP_OBSERVER_LIST_UNLOCK();
1535 }
1536
// Tear down one flow registration on a locked client: message (or unassert)
// the nexus agent for each live nexus flow unless the registration is
// defunct, free each flow's resources, then unlink and free the
// registration itself. "abort" selects ABORT_NEXUS over CLOSE_NEXUS.
static void
necp_destroy_client_flow_registration(struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    pid_t pid, bool abort)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	bool has_close_parameters = false;
	struct necp_client_agent_parameters close_parameters = {};
	memset(close_parameters.u.close_token, 0, sizeof(close_parameters.u.close_token));
#if SKYWALK
	// If QUIC stats were shared, capture the session resumption token so
	// it can accompany the close message sent to the agent below.
	if (flow_registration->kstats_kaddr != NULL) {
		struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
		struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
		if (quicstats != NULL &&
		    quicstats->necp_quic_udp_stats.necp_udp_hdr.necp_stats_type == NECP_CLIENT_STATISTICS_TYPE_QUIC) {
			memcpy(close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(close_parameters.u.close_token));
			has_close_parameters = true;
		}
	}

	// Release reference held on the stats arena
	if (flow_registration->stats_arena != NULL) {
		necp_arena_info_release(flow_registration->stats_arena);
		flow_registration->stats_arena = NULL;
	}
#endif /* SKYWALK */

	struct necp_client_flow *search_flow = NULL;
	struct necp_client_flow *temp_flow = NULL;
	LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
		if (search_flow->nexus &&
		    !uuid_is_null(search_flow->u.nexus_agent)) {
			// Don't unregister for defunct flows
			if (!flow_registration->defunct) {
				u_int8_t message_type = (abort ? NETAGENT_MESSAGE_TYPE_ABORT_NEXUS :
				    NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS);
				// Browse/resolve flows without ALLOW_NEXUS were asserted
				// rather than opened, so unassert instead of closing
				if (((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) ||
				    (flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) &&
				    !(flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) {
					message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
				}
				// USE_CLIENT_ID flows are known to the agent by the
				// client's ID rather than the registration's ID
				int netagent_error = netagent_client_message_with_params(search_flow->u.nexus_agent,
				    ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
				    client->client_id :
				    flow_registration->registration_id),
				    pid, client->agent_handle,
				    message_type,
				    has_close_parameters ? &close_parameters : NULL,
				    NULL, 0);
				// ENOENT just means the agent is already gone
				if (netagent_error != 0 && netagent_error != ENOENT) {
					NECPLOG(LOG_ERR, "necp_client_remove close nexus error (%d) MESSAGE TYPE %u", netagent_error, message_type);
				}
			}
			uuid_clear(search_flow->u.nexus_agent);
		}
		if (search_flow->assigned_results != NULL) {
			kfree_data(search_flow->assigned_results, search_flow->assigned_results_length);
			search_flow->assigned_results = NULL;
		}
		LIST_REMOVE(search_flow, flow_chain);
		// Keep the global per-kind flow counters in sync
#if SKYWALK
		if (search_flow->nexus) {
			OSDecrementAtomic(&necp_nexus_flow_count);
		} else
#endif /* SKYWALK */
		if (search_flow->socket) {
			OSDecrementAtomic(&necp_socket_flow_count);
		} else {
			OSDecrementAtomic(&necp_if_flow_count);
		}
		mcache_free(necp_flow_cache, search_flow);
	}

	// Unlink from the client's registration tree and free the registration
	RB_REMOVE(_necp_client_flow_tree, &client->flow_registrations, flow_registration);
	flow_registration->client = NULL;

	mcache_free(necp_flow_registration_cache, flow_registration);
}
1616
// Fully destroy a client: close statistics/agent bookkeeping, free its cached
// route, tear down all flow registrations and agent assertions, then drop the
// (expected) final reference.
// Caller must NOT hold the client lock; it is taken inside.
static void
necp_destroy_client(struct necp_client *client, pid_t pid, bool abort)
{
	NECP_CLIENT_ASSERT_UNLOCKED(client);

#if SKYWALK
	if (client->nstat_context != NULL) {
		// This is a catch-all that should be rarely used.
		nstat_provider_stats_close(client->nstat_context);
		client->nstat_context = NULL;
	}
	if (client->original_parameters_source != NULL) {
		necp_client_release(client->original_parameters_source);
		client->original_parameters_source = NULL;
	}
#endif /* SKYWALK */
	necp_client_update_observer_remove(client);

	NECP_CLIENT_LOCK(client);

	// Free route
	NECP_CLIENT_ROUTE_LOCK(client);
	if (client->current_route != NULL) {
		rtfree(client->current_route);
		client->current_route = NULL;
	}
	NECP_CLIENT_ROUTE_UNLOCK(client);

	// Remove flow assignments
	struct necp_client_flow_registration *flow_registration = NULL;
	struct necp_client_flow_registration *temp_flow_registration = NULL;
	RB_FOREACH_SAFE(flow_registration, _necp_client_flow_tree, &client->flow_registrations, temp_flow_registration) {
		necp_destroy_client_flow_registration(client, flow_registration, pid, abort);
	}

#if SKYWALK
	// Remove port reservation
	if (NETNS_TOKEN_VALID(&client->port_reservation)) {
		netns_release(&client->port_reservation);
	}
#endif /* SKYWALK */

	// Remove agent assertions
	struct necp_client_assertion *search_assertion = NULL;
	struct necp_client_assertion *temp_assertion = NULL;
	LIST_FOREACH_SAFE(search_assertion, &client->assertion_list, assertion_chain, temp_assertion) {
		int netagent_error = netagent_client_message(search_assertion->asserted_netagent, client->client_id, pid,
		    client->agent_handle, NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT);
		if (netagent_error != 0) {
			// ENOENT only means the agent already went away
			NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR),
			    "necp_client_remove unassert agent error (%d)", netagent_error);
		}
		LIST_REMOVE(search_assertion, assertion_chain);
		kfree_type(struct necp_client_assertion, search_assertion);
	}

	// necp_client_release_locked is expected to free the client (and its
	// lock) when it drops the last reference, hence no unlock on that path.
	if (!necp_client_release_locked(client)) {
		NECP_CLIENT_UNLOCK(client);
	}

	OSDecrementAtomic(&necp_client_count);
}
1679
1680 static bool
1681 necp_defunct_client_fd_locked_inner(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, bool destroy_stats);
1682
1683 static void
necp_process_defunct_list(struct _necp_flow_defunct_list * defunct_list)1684 necp_process_defunct_list(struct _necp_flow_defunct_list *defunct_list)
1685 {
1686 if (!LIST_EMPTY(defunct_list)) {
1687 struct necp_flow_defunct *flow_defunct = NULL;
1688 struct necp_flow_defunct *temp_flow_defunct = NULL;
1689
1690 // For each newly defunct client, send a message to the nexus to remove the flow
1691 LIST_FOREACH_SAFE(flow_defunct, defunct_list, chain, temp_flow_defunct) {
1692 if (!uuid_is_null(flow_defunct->nexus_agent)) {
1693 u_int8_t message_type = NETAGENT_MESSAGE_TYPE_ABORT_NEXUS;
1694 if (((flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) ||
1695 (flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) &&
1696 !(flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) {
1697 message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
1698 }
1699 int netagent_error = netagent_client_message_with_params(flow_defunct->nexus_agent,
1700 flow_defunct->flow_id,
1701 flow_defunct->proc_pid,
1702 flow_defunct->agent_handle,
1703 message_type,
1704 flow_defunct->has_close_parameters ? &flow_defunct->close_parameters : NULL,
1705 NULL, 0);
1706 if (netagent_error != 0) {
1707 char namebuf[MAXCOMLEN + 1];
1708 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
1709 proc_name(flow_defunct->proc_pid, namebuf, sizeof(namebuf));
1710 NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR), "necp_update_client abort nexus error (%d) for pid %d %s", netagent_error, flow_defunct->proc_pid, namebuf);
1711 }
1712 }
1713 LIST_REMOVE(flow_defunct, chain);
1714 kfree_type(struct necp_flow_defunct, flow_defunct);
1715 }
1716 }
1717 ASSERT(LIST_EMPTY(defunct_list));
1718 }
1719
// fileops close handler for a NECP client fd.
// Detaches the fd from its global list, defuncts its flows, unlinks all flow
// registrations and clients from the global trees while the fd lock is held,
// flushes pending updates, and frees the fd state. Clients are parked on a
// local tree and destroyed only after every lock is dropped, because
// necp_destroy_client requires the client to be unlocked.
static int
necpop_close(struct fileglob *fg, vfs_context_t ctx)
{
#pragma unused(ctx)
	struct necp_fd_data *fd_data = NULL;
	int error = 0;

	fd_data = (struct necp_fd_data *)fg_get_data(fg);
	fg_set_data(fg, NULL);

	if (fd_data != NULL) {
		// Clients collected here are destroyed after the fd lock is dropped
		struct _necp_client_tree clients_to_close;
		RB_INIT(&clients_to_close);

		// Remove from list quickly
		if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
			NECP_OBSERVER_LIST_LOCK_EXCLUSIVE();
			LIST_REMOVE(fd_data, chain);
			NECP_OBSERVER_LIST_UNLOCK();
		} else {
			NECP_FD_LIST_LOCK_EXCLUSIVE();
			LIST_REMOVE(fd_data, chain);
			NECP_FD_LIST_UNLOCK();
		}

		NECP_FD_LOCK(fd_data);
		pid_t pid = fd_data->proc_pid;

		// Defunct events are gathered here and delivered at the end, unlocked
		struct _necp_flow_defunct_list defunct_list;
		LIST_INIT(&defunct_list);

		(void)necp_defunct_client_fd_locked_inner(fd_data, &defunct_list, false);

		// Unlink every flow registration from the global and per-fd trees
		struct necp_client_flow_registration *flow_registration = NULL;
		struct necp_client_flow_registration *temp_flow_registration = NULL;
		RB_FOREACH_SAFE(flow_registration, _necp_fd_flow_tree, &fd_data->flows, temp_flow_registration) {
#if SKYWALK
			necp_destroy_flow_stats(fd_data, flow_registration, NULL, TRUE);
#endif /* SKYWALK */
			NECP_FLOW_TREE_LOCK_EXCLUSIVE();
			RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
			NECP_FLOW_TREE_UNLOCK();
			RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
		}

		// Move clients off the fd onto the local tree for deferred destruction
		struct necp_client *client = NULL;
		struct necp_client *temp_client = NULL;
		RB_FOREACH_SAFE(client, _necp_client_tree, &fd_data->clients, temp_client) {
			NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
			RB_REMOVE(_necp_client_global_tree, &necp_client_global_tree, client);
			NECP_CLIENT_TREE_UNLOCK();
			RB_REMOVE(_necp_client_tree, &fd_data->clients, client);
			RB_INSERT(_necp_client_tree, &clients_to_close, client);
		}

		struct necp_client_update *client_update = NULL;
		struct necp_client_update *temp_update = NULL;
		TAILQ_FOREACH_SAFE(client_update, &fd_data->update_list, chain, temp_update) {
			// Flush pending updates
			TAILQ_REMOVE(&fd_data->update_list, client_update, chain);
			necp_client_update_free(client_update);
		}
		fd_data->update_count = 0;

#if SKYWALK
		// Cleanup stats arena(s); indicate that we're closing
		necp_stats_arenas_destroy(fd_data, TRUE);
		ASSERT(fd_data->stats_arena_active == NULL);
		ASSERT(LIST_EMPTY(&fd_data->stats_arena_list));

		// Cleanup systctl arena
		necp_sysctl_arena_destroy(fd_data);
		ASSERT(fd_data->sysctl_arena == NULL);
#endif /* SKYWALK */

		NECP_FD_UNLOCK(fd_data);

		// Wake and clear any select()/kevent waiters before freeing
		selthreadclear(&fd_data->si);

		lck_mtx_destroy(&fd_data->fd_lock, &necp_fd_mtx_grp);

		if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
			OSDecrementAtomic(&necp_observer_fd_count);
		} else {
			OSDecrementAtomic(&necp_client_fd_count);
		}

		zfree(necp_client_fd_zone, fd_data);
		fd_data = NULL;

		// Now that no locks are held, destroy the collected clients
		RB_FOREACH_SAFE(client, _necp_client_tree, &clients_to_close, temp_client) {
			RB_REMOVE(_necp_client_tree, &clients_to_close, client);
			necp_destroy_client(client, pid, true);
		}

		// ...and deliver the defunct messages gathered above
		necp_process_defunct_list(&defunct_list);
	}

	return error;
}
1820
1821 /// NECP client utilities
1822
1823 static inline bool
necp_address_is_wildcard(const union necp_sockaddr_union * const addr)1824 necp_address_is_wildcard(const union necp_sockaddr_union * const addr)
1825 {
1826 return (addr->sa.sa_family == AF_INET && addr->sin.sin_addr.s_addr == INADDR_ANY) ||
1827 (addr->sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&addr->sin6.sin6_addr));
1828 }
1829
1830 static int
necp_find_fd_data(struct proc * p,int fd,struct fileproc ** fpp,struct necp_fd_data ** fd_data)1831 necp_find_fd_data(struct proc *p, int fd,
1832 struct fileproc **fpp, struct necp_fd_data **fd_data)
1833 {
1834 struct fileproc *fp;
1835 int error = fp_get_ftype(p, fd, DTYPE_NETPOLICY, ENODEV, &fp);
1836
1837 if (error == 0) {
1838 *fd_data = (struct necp_fd_data *)fp_get_data(fp);
1839 *fpp = fp;
1840
1841 if ((*fd_data)->necp_fd_type != necp_fd_type_client) {
1842 // Not a client fd, ignore
1843 fp_drop(p, fd, fp, 0);
1844 error = EINVAL;
1845 }
1846 }
1847 return error;
1848 }
1849
1850 static void
necp_client_add_nexus_flow(struct necp_client_flow_registration * flow_registration,uuid_t nexus_agent,uint32_t interface_index,uint16_t interface_flags)1851 necp_client_add_nexus_flow(struct necp_client_flow_registration *flow_registration,
1852 uuid_t nexus_agent,
1853 uint32_t interface_index,
1854 uint16_t interface_flags)
1855 {
1856 struct necp_client_flow *new_flow = mcache_alloc(necp_flow_cache, MCR_SLEEP);
1857 if (new_flow == NULL) {
1858 NECPLOG0(LOG_ERR, "Failed to allocate nexus flow");
1859 return;
1860 }
1861
1862 memset(new_flow, 0, sizeof(*new_flow));
1863
1864 new_flow->nexus = TRUE;
1865 uuid_copy(new_flow->u.nexus_agent, nexus_agent);
1866 new_flow->interface_index = interface_index;
1867 new_flow->interface_flags = interface_flags;
1868 new_flow->check_tcp_heuristics = TRUE;
1869
1870 #if SKYWALK
1871 OSIncrementAtomic(&necp_nexus_flow_count);
1872 #endif /* SKYWALK */
1873
1874 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
1875
1876 #if SKYWALK
1877 necp_flow_save_current_interface_details(flow_registration);
1878 #endif /* SKYWALK */
1879 }
1880
1881 static void
necp_client_add_nexus_flow_if_needed(struct necp_client_flow_registration * flow_registration,uuid_t nexus_agent,uint32_t interface_index)1882 necp_client_add_nexus_flow_if_needed(struct necp_client_flow_registration *flow_registration,
1883 uuid_t nexus_agent,
1884 uint32_t interface_index)
1885 {
1886 struct necp_client_flow *flow = NULL;
1887 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
1888 if (flow->nexus &&
1889 uuid_compare(flow->u.nexus_agent, nexus_agent) == 0) {
1890 return;
1891 }
1892 }
1893
1894 uint16_t interface_flags = 0;
1895 ifnet_t ifp = NULL;
1896 ifnet_head_lock_shared();
1897 if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
1898 ifp = ifindex2ifnet[interface_index];
1899 if (ifp != NULL) {
1900 ifnet_lock_shared(ifp);
1901 interface_flags = nstat_ifnet_to_flags(ifp);
1902 ifnet_lock_done(ifp);
1903 }
1904 }
1905 ifnet_head_done();
1906 necp_client_add_nexus_flow(flow_registration, nexus_agent, interface_index, interface_flags);
1907 }
1908
1909 static struct necp_client_flow *
necp_client_add_interface_flow(struct necp_client_flow_registration * flow_registration,uint32_t interface_index)1910 necp_client_add_interface_flow(struct necp_client_flow_registration *flow_registration,
1911 uint32_t interface_index)
1912 {
1913 struct necp_client_flow *new_flow = mcache_alloc(necp_flow_cache, MCR_SLEEP);
1914 if (new_flow == NULL) {
1915 NECPLOG0(LOG_ERR, "Failed to allocate interface flow");
1916 return NULL;
1917 }
1918
1919 memset(new_flow, 0, sizeof(*new_flow));
1920
1921 // Neither nexus nor socket
1922 new_flow->interface_index = interface_index;
1923 new_flow->u.socket_handle = flow_registration->interface_handle;
1924 new_flow->u.cb = flow_registration->interface_cb;
1925
1926 OSIncrementAtomic(&necp_if_flow_count);
1927
1928 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
1929
1930 return new_flow;
1931 }
1932
1933 static struct necp_client_flow *
necp_client_add_interface_flow_if_needed(struct necp_client * client,struct necp_client_flow_registration * flow_registration,uint32_t interface_index)1934 necp_client_add_interface_flow_if_needed(struct necp_client *client,
1935 struct necp_client_flow_registration *flow_registration,
1936 uint32_t interface_index)
1937 {
1938 if (!client->allow_multiple_flows ||
1939 interface_index == IFSCOPE_NONE) {
1940 // Interface not set, or client not allowed to use this mode
1941 return NULL;
1942 }
1943
1944 struct necp_client_flow *flow = NULL;
1945 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
1946 if (!flow->nexus && !flow->socket && flow->interface_index == interface_index) {
1947 // Already have the flow
1948 flow->invalid = FALSE;
1949 flow->u.socket_handle = flow_registration->interface_handle;
1950 flow->u.cb = flow_registration->interface_cb;
1951 return NULL;
1952 }
1953 }
1954 return necp_client_add_interface_flow(flow_registration, interface_index);
1955 }
1956
1957 static void
necp_client_add_interface_option_if_needed(struct necp_client * client,uint32_t interface_index,uint32_t interface_generation,uuid_t * nexus_agent,bool network_provider)1958 necp_client_add_interface_option_if_needed(struct necp_client *client,
1959 uint32_t interface_index,
1960 uint32_t interface_generation,
1961 uuid_t *nexus_agent,
1962 bool network_provider)
1963 {
1964 if ((interface_index == IFSCOPE_NONE && !network_provider) ||
1965 (client->interface_option_count != 0 && !client->allow_multiple_flows)) {
1966 // Interface not set, or client not allowed to use this mode
1967 return;
1968 }
1969
1970 if (client->interface_option_count >= NECP_CLIENT_MAX_INTERFACE_OPTIONS) {
1971 // Cannot take any more interface options
1972 return;
1973 }
1974
1975 // Check if already present
1976 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
1977 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
1978 struct necp_client_interface_option *option = &client->interface_options[option_i];
1979 if (option->interface_index == interface_index) {
1980 if (nexus_agent == NULL) {
1981 return;
1982 }
1983 if (uuid_compare(option->nexus_agent, *nexus_agent) == 0) {
1984 return;
1985 }
1986 if (uuid_is_null(option->nexus_agent)) {
1987 uuid_copy(option->nexus_agent, *nexus_agent);
1988 return;
1989 }
1990 // If we get to this point, this is a new nexus flow
1991 }
1992 } else {
1993 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
1994 if (option->interface_index == interface_index) {
1995 if (nexus_agent == NULL) {
1996 return;
1997 }
1998 if (uuid_compare(option->nexus_agent, *nexus_agent) == 0) {
1999 return;
2000 }
2001 if (uuid_is_null(option->nexus_agent)) {
2002 uuid_copy(option->nexus_agent, *nexus_agent);
2003 return;
2004 }
2005 // If we get to this point, this is a new nexus flow
2006 }
2007 }
2008 }
2009
2010 // Add a new entry
2011 if (client->interface_option_count < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2012 // Add to static
2013 struct necp_client_interface_option *option = &client->interface_options[client->interface_option_count];
2014 option->interface_index = interface_index;
2015 option->interface_generation = interface_generation;
2016 if (nexus_agent != NULL) {
2017 uuid_copy(option->nexus_agent, *nexus_agent);
2018 } else {
2019 uuid_clear(option->nexus_agent);
2020 }
2021 client->interface_option_count++;
2022 } else {
2023 // Add to extra
2024 if (client->extra_interface_options == NULL) {
2025 client->extra_interface_options = (struct necp_client_interface_option *)kalloc_data(
2026 sizeof(struct necp_client_interface_option) * NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT, Z_WAITOK | Z_ZERO);
2027 }
2028 if (client->extra_interface_options != NULL) {
2029 struct necp_client_interface_option *option = &client->extra_interface_options[client->interface_option_count - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2030 option->interface_index = interface_index;
2031 option->interface_generation = interface_generation;
2032 if (nexus_agent != NULL) {
2033 uuid_copy(option->nexus_agent, *nexus_agent);
2034 } else {
2035 uuid_clear(option->nexus_agent);
2036 }
2037 client->interface_option_count++;
2038 }
2039 }
2040 }
2041
2042 static bool
necp_client_flow_is_viable(proc_t proc,struct necp_client * client,struct necp_client_flow * flow)2043 necp_client_flow_is_viable(proc_t proc, struct necp_client *client,
2044 struct necp_client_flow *flow)
2045 {
2046 struct necp_aggregate_result result;
2047 bool ignore_address = (client->allow_multiple_flows && !flow->nexus && !flow->socket);
2048
2049 flow->necp_flow_flags = 0;
2050 int error = necp_application_find_policy_match_internal(proc, client->parameters,
2051 (u_int32_t)client->parameters_length,
2052 &result, &flow->necp_flow_flags, NULL,
2053 flow->interface_index,
2054 &flow->local_addr, &flow->remote_addr, NULL, NULL,
2055 NULL, ignore_address, true, NULL);
2056
2057 // Check for blocking agents
2058 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
2059 if (uuid_is_null(result.netagents[i])) {
2060 // Passed end of valid agents
2061 break;
2062 }
2063 if (result.netagent_use_flags[i] & NECP_AGENT_USE_FLAG_REMOVE) {
2064 // A removed agent, ignore
2065 continue;
2066 }
2067 u_int32_t flags = netagent_get_flags(result.netagents[i]);
2068 if ((flags & NETAGENT_FLAG_REGISTERED) &&
2069 !(flags & NETAGENT_FLAG_VOLUNTARY) &&
2070 !(flags & NETAGENT_FLAG_ACTIVE) &&
2071 !(flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY)) {
2072 // A required agent is not active, cause the flow to be marked non-viable
2073 return false;
2074 }
2075 }
2076
2077 if (flow->interface_index != IFSCOPE_NONE) {
2078 ifnet_head_lock_shared();
2079
2080 struct ifnet *ifp = ifindex2ifnet[flow->interface_index];
2081 if (ifp && ifp->if_delegated.ifp != IFSCOPE_NONE) {
2082 flow->delegated_interface_index = ifp->if_delegated.ifp->if_index;
2083 }
2084
2085 ifnet_head_done();
2086 }
2087
2088 return error == 0 &&
2089 result.routed_interface_index != IFSCOPE_NONE &&
2090 result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP;
2091 }
2092
2093 static void
necp_flow_add_interface_flows(proc_t proc,struct necp_client * client,struct necp_client_flow_registration * flow_registration,bool send_initial)2094 necp_flow_add_interface_flows(proc_t proc,
2095 struct necp_client *client,
2096 struct necp_client_flow_registration *flow_registration,
2097 bool send_initial)
2098 {
2099 // Traverse all interfaces and add a tracking flow if needed
2100 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
2101 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2102 struct necp_client_interface_option *option = &client->interface_options[option_i];
2103 struct necp_client_flow *flow = necp_client_add_interface_flow_if_needed(client, flow_registration, option->interface_index);
2104 if (flow != NULL && send_initial) {
2105 flow->viable = necp_client_flow_is_viable(proc, client, flow);
2106 if (flow->viable && flow->u.cb) {
2107 bool viable = flow->viable;
2108 flow->u.cb(flow_registration->interface_handle, NECP_CLIENT_CBACTION_INITIAL, flow->interface_index, flow->necp_flow_flags, &viable);
2109 flow->viable = viable;
2110 }
2111 }
2112 } else {
2113 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2114 struct necp_client_flow *flow = necp_client_add_interface_flow_if_needed(client, flow_registration, option->interface_index);
2115 if (flow != NULL && send_initial) {
2116 flow->viable = necp_client_flow_is_viable(proc, client, flow);
2117 if (flow->viable && flow->u.cb) {
2118 bool viable = flow->viable;
2119 flow->u.cb(flow_registration->interface_handle, NECP_CLIENT_CBACTION_INITIAL, flow->interface_index, flow->necp_flow_flags, &viable);
2120 flow->viable = viable;
2121 }
2122 }
2123 }
2124 }
2125 }
2126
// Re-evaluate viability for every flow of every registration on the client:
// fires VIABLE/NONVIABLE callbacks on transitions, marks registrations
// defunct when their assigned nexus agent disappeared (SKYWALK), and
// garbage-collects non-viable interface flows that never had data assigned.
// Returns true when any flow state changed, so the caller can notify the
// client. Caller must hold the client lock.
static bool
necp_client_update_flows(proc_t proc,
    struct necp_client *client,
    struct _necp_flow_defunct_list *defunct_list)
{
	NECP_CLIENT_ASSERT_LOCKED(client);

	bool any_client_updated = FALSE;
	struct necp_client_flow *flow = NULL;
	struct necp_client_flow *temp_flow = NULL;
	struct necp_client_flow_registration *flow_registration = NULL;
	RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
		if (flow_registration->interface_cb != NULL) {
			// Add any interface flows that are not already tracked
			necp_flow_add_interface_flows(proc, client, flow_registration, false);
		}

		LIST_FOREACH_SAFE(flow, &flow_registration->flow_list, flow_chain, temp_flow) {
			bool client_updated = FALSE;

			// Check policy result for flow
			u_short old_delegated_ifindex = flow->delegated_interface_index;

			int old_flags = flow->necp_flow_flags;
			// Side effects: refreshes flow->necp_flow_flags and
			// flow->delegated_interface_index
			bool viable = necp_client_flow_is_viable(proc, client, flow);

			// TODO: Defunct nexus flows that are blocked by policy

			if (flow->viable != viable) {
				flow->viable = viable;
				client_updated = TRUE;
			}

			if ((old_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE) !=
			    (flow->necp_flow_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE)) {
				client_updated = TRUE;
			}

			if (flow->delegated_interface_index != old_delegated_ifindex) {
				client_updated = TRUE;
			}

			// Callbacks exist only for socket and plain interface flows;
			// nexus flows use u.nexus_agent instead of u.cb
			if (flow->viable && client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) {
				bool flow_viable = flow->viable;
				flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_VIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable);
				flow->viable = flow_viable;
			}

			if (!flow->viable || flow->invalid) {
				if (client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) {
					bool flow_viable = flow->viable;
					flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_NONVIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable);
					flow->viable = flow_viable;
				}
				// The callback might change the viable-flag of the
				// flow depending on its policy. Thus, we need to
				// check the flags again after the callback.
			}

#if SKYWALK
			if (defunct_list != NULL) {
				if (flow->invalid && flow->nexus && flow->assigned && !uuid_is_null(flow->u.nexus_agent)) {
					// This is a nexus flow that was assigned, but not found on path
					u_int32_t flags = netagent_get_flags(flow->u.nexus_agent);
					if (!(flags & NETAGENT_FLAG_REGISTERED)) {
						// The agent is no longer registered! Mark defunct.
						necp_defunct_flow_registration(client, flow_registration, defunct_list);
						client_updated = TRUE;
					}
				}
			}
#else /* !SKYWALK */
			(void)defunct_list;
#endif /* !SKYWALK */

			// Handle flows that no longer match
			if (!flow->viable || flow->invalid) {
				// Drop them as long as they aren't assigned data
				if (!flow->nexus && !flow->assigned) {
					if (flow->assigned_results != NULL) {
						kfree_data(flow->assigned_results, flow->assigned_results_length);
						flow->assigned_results = NULL;
						client_updated = TRUE;
					}
					LIST_REMOVE(flow, flow_chain);
#if SKYWALK
					// NOTE(review): flow->nexus is known false here (guarded
					// above), so this branch is unreachable; kept to mirror
					// the other flow-free paths
					if (flow->nexus) {
						OSDecrementAtomic(&necp_nexus_flow_count);
					} else
#endif /* SKYWALK */
					if (flow->socket) {
						OSDecrementAtomic(&necp_socket_flow_count);
					} else {
						OSDecrementAtomic(&necp_if_flow_count);
					}
					mcache_free(necp_flow_cache, flow);
				}
			}

			any_client_updated |= client_updated;
		}
#if SKYWALK
		necp_flow_save_current_interface_details(flow_registration);
#endif /* SKYWALK */
	}

	return any_client_updated;
}
2235
2236 static void
necp_client_mark_all_nonsocket_flows_as_invalid(struct necp_client * client)2237 necp_client_mark_all_nonsocket_flows_as_invalid(struct necp_client *client)
2238 {
2239 struct necp_client_flow_registration *flow_registration = NULL;
2240 struct necp_client_flow *flow = NULL;
2241 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
2242 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
2243 if (!flow->socket) { // Socket flows are not marked as invalid
2244 flow->invalid = TRUE;
2245 }
2246 }
2247 }
2248
2249 // Reset option count every update
2250 client->interface_option_count = 0;
2251 }
2252
// Returns true when the given agent UUID is explicitly requested by the
// client parameters, either directly (required/preferred agent UUID lists)
// or by domain/type match (required/preferred agent type lists).
// Used to decide whether a SPECIFIC_USE_ONLY agent applies to a client.
static inline bool
necp_netagent_is_requested(const struct necp_client_parsed_parameters *parameters,
    uuid_t *netagent_uuid)
{
	// Specific use agents only apply when requested
	bool requested = false;
	if (parameters != NULL) {
		// Check required agent UUIDs
		// (lists are packed; a null UUID marks the end)
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parameters->required_netagents[i])) {
				break;
			}
			if (uuid_compare(parameters->required_netagents[i], *netagent_uuid) == 0) {
				requested = true;
				break;
			}
		}

		if (!requested) {
			// Check required agent types
			// (the agent's own domain/type is resolved lazily, at most once)
			bool fetched_type = false;
			char netagent_domain[NETAGENT_DOMAINSIZE];
			char netagent_type[NETAGENT_TYPESIZE];
			memset(&netagent_domain, 0, NETAGENT_DOMAINSIZE);
			memset(&netagent_type, 0, NETAGENT_TYPESIZE);

			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strlen(parameters->required_netagent_types[i].netagent_domain) == 0 ||
				    strlen(parameters->required_netagent_types[i].netagent_type) == 0) {
					break;
				}

				if (!fetched_type) {
					if (netagent_get_agent_domain_and_type(*netagent_uuid, netagent_domain, netagent_type)) {
						fetched_type = TRUE;
					} else {
						break;
					}
				}

				// An empty domain or type in the parameter acts as a wildcard
				if ((strlen(parameters->required_netagent_types[i].netagent_domain) == 0 ||
				    strncmp(netagent_domain, parameters->required_netagent_types[i].netagent_domain, NETAGENT_DOMAINSIZE) == 0) &&
				    (strlen(parameters->required_netagent_types[i].netagent_type) == 0 ||
				    strncmp(netagent_type, parameters->required_netagent_types[i].netagent_type, NETAGENT_TYPESIZE) == 0)) {
					requested = true;
					break;
				}
			}
		}

		// Check preferred agent UUIDs
		// (note: runs even when already requested; harmless since it can
		// only re-set requested to true)
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parameters->preferred_netagents[i])) {
				break;
			}
			if (uuid_compare(parameters->preferred_netagents[i], *netagent_uuid) == 0) {
				requested = true;
				break;
			}
		}

		if (!requested) {
			// Check preferred agent types (same matching rules as above)
			bool fetched_type = false;
			char netagent_domain[NETAGENT_DOMAINSIZE];
			char netagent_type[NETAGENT_TYPESIZE];
			memset(&netagent_domain, 0, NETAGENT_DOMAINSIZE);
			memset(&netagent_type, 0, NETAGENT_TYPESIZE);

			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strlen(parameters->preferred_netagent_types[i].netagent_domain) == 0 ||
				    strlen(parameters->preferred_netagent_types[i].netagent_type) == 0) {
					break;
				}

				if (!fetched_type) {
					if (netagent_get_agent_domain_and_type(*netagent_uuid, netagent_domain, netagent_type)) {
						fetched_type = TRUE;
					} else {
						break;
					}
				}

				if ((strlen(parameters->preferred_netagent_types[i].netagent_domain) == 0 ||
				    strncmp(netagent_domain, parameters->preferred_netagent_types[i].netagent_domain, NETAGENT_DOMAINSIZE) == 0) &&
				    (strlen(parameters->preferred_netagent_types[i].netagent_type) == 0 ||
				    strncmp(netagent_type, parameters->preferred_netagent_types[i].netagent_type, NETAGENT_TYPESIZE) == 0)) {
					requested = true;
					break;
				}
			}
		}
	}

	return requested;
}
2349
// Decide whether a netagent should be reported to (and usable by) a client.
// Filters out unregistered agents, nexus providers the caller disallows,
// nexus types that don't match the client's requested parameter flags, and
// agents whose trigger previously failed at the same generation.
// SPECIFIC_USE_ONLY agents apply only when explicitly requested.
// On SKYWALK, applicable nexus agents are also recorded as interface options
// as a side effect (which in turn creates flows).
static bool
necp_netagent_applies_to_client(struct necp_client *client,
    const struct necp_client_parsed_parameters *parameters,
    uuid_t *netagent_uuid, bool allow_nexus,
    uint32_t interface_index, uint32_t interface_generation)
{
#pragma unused(interface_index, interface_generation)
	bool applies = FALSE;
	u_int32_t flags = netagent_get_flags(*netagent_uuid);
	if (!(flags & NETAGENT_FLAG_REGISTERED)) {
		// Unregistered agents never apply
		return applies;
	}

	const bool is_nexus_agent = ((flags & NETAGENT_FLAG_NEXUS_PROVIDER) ||
	    (flags & NETAGENT_FLAG_NEXUS_LISTENER) ||
	    (flags & NETAGENT_FLAG_CUSTOM_ETHER_NEXUS) ||
	    (flags & NETAGENT_FLAG_CUSTOM_IP_NEXUS) ||
	    (flags & NETAGENT_FLAG_INTERPOSE_NEXUS));
	if (is_nexus_agent) {
		if (!allow_nexus) {
			// Hide nexus providers unless allowed
			// Direct interfaces and direct policies are allowed to use a nexus
			// Delegate interfaces or re-scoped interfaces are not allowed
			return applies;
		}

		if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) &&
		    !(flags & NETAGENT_FLAG_CUSTOM_ETHER_NEXUS)) {
			// Client requested a custom ether nexus, but this nexus isn't one
			return applies;
		}

		if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP) &&
		    !(flags & NETAGENT_FLAG_CUSTOM_IP_NEXUS)) {
			// Client requested a custom IP nexus, but this nexus isn't one
			return applies;
		}

		if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) &&
		    !(flags & NETAGENT_FLAG_INTERPOSE_NEXUS)) {
			// Client requested an interpose nexus, but this nexus isn't one
			return applies;
		}

		if (!(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) &&
		    !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP) &&
		    !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) &&
		    !(flags & NETAGENT_FLAG_NEXUS_PROVIDER)) {
			// Client requested default parameters, but this nexus isn't generic
			return applies;
		}
	}

	if (uuid_compare(client->failed_trigger_agent.netagent_uuid, *netagent_uuid) == 0) {
		if (client->failed_trigger_agent.generation == netagent_get_generation(*netagent_uuid)) {
			// If this agent was triggered, and failed, and hasn't changed, keep hiding it
			return applies;
		} else {
			// Mismatch generation, clear out old trigger
			uuid_clear(client->failed_trigger_agent.netagent_uuid);
			client->failed_trigger_agent.generation = 0;
		}
	}

	if (flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY) {
		// Specific use agents only apply when requested
		applies = necp_netagent_is_requested(parameters, netagent_uuid);
	} else {
		applies = TRUE;
	}

#if SKYWALK
	// Add nexus agent if it is a nexus, and either is not a listener, or the nexus supports listeners
	if (applies && is_nexus_agent &&
	    !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) && // Don't add for browse paths
	    ((flags & NETAGENT_FLAG_NEXUS_LISTENER) || !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER))) {
		necp_client_add_interface_option_if_needed(client, interface_index,
		    interface_generation, netagent_uuid,
		    (flags & NETAGENT_FLAG_NETWORK_PROVIDER));
	}
#endif /* SKYWALK */

	return applies;
}
2435
2436 static void
necp_client_add_agent_interface_options(struct necp_client * client,const struct necp_client_parsed_parameters * parsed_parameters,ifnet_t ifp)2437 necp_client_add_agent_interface_options(struct necp_client *client,
2438 const struct necp_client_parsed_parameters *parsed_parameters,
2439 ifnet_t ifp)
2440 {
2441 if (ifp != NULL && ifp->if_agentids != NULL) {
2442 for (u_int32_t i = 0; i < ifp->if_agentcount; i++) {
2443 if (uuid_is_null(ifp->if_agentids[i])) {
2444 continue;
2445 }
2446 // Relies on the side effect that nexus agents that apply will create flows
2447 (void)necp_netagent_applies_to_client(client, parsed_parameters, &ifp->if_agentids[i], TRUE,
2448 ifp->if_index, ifnet_get_generation(ifp));
2449 }
2450 }
2451 }
2452
2453 static void
necp_client_add_browse_interface_options(struct necp_client * client,const struct necp_client_parsed_parameters * parsed_parameters,ifnet_t ifp)2454 necp_client_add_browse_interface_options(struct necp_client *client,
2455 const struct necp_client_parsed_parameters *parsed_parameters,
2456 ifnet_t ifp)
2457 {
2458 if (ifp != NULL && ifp->if_agentids != NULL) {
2459 for (u_int32_t i = 0; i < ifp->if_agentcount; i++) {
2460 if (uuid_is_null(ifp->if_agentids[i])) {
2461 continue;
2462 }
2463
2464 u_int32_t flags = netagent_get_flags(ifp->if_agentids[i]);
2465 if ((flags & NETAGENT_FLAG_REGISTERED) &&
2466 (flags & NETAGENT_FLAG_ACTIVE) &&
2467 (flags & NETAGENT_FLAG_SUPPORTS_BROWSE) &&
2468 (!(flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY) ||
2469 necp_netagent_is_requested(parsed_parameters, &ifp->if_agentids[i]))) {
2470 necp_client_add_interface_option_if_needed(client, ifp->if_index, ifnet_get_generation(ifp), &ifp->if_agentids[i], (flags & NETAGENT_FLAG_NETWORK_PROVIDER));
2471
2472 // Finding one is enough
2473 break;
2474 }
2475 }
2476 }
2477 }
2478
2479 static inline bool
necp_client_address_is_valid(struct sockaddr * address)2480 necp_client_address_is_valid(struct sockaddr *address)
2481 {
2482 if (address->sa_family == AF_INET) {
2483 return address->sa_len == sizeof(struct sockaddr_in);
2484 } else if (address->sa_family == AF_INET6) {
2485 return address->sa_len == sizeof(struct sockaddr_in6);
2486 } else {
2487 return FALSE;
2488 }
2489 }
2490
2491 static inline bool
necp_client_endpoint_is_unspecified(struct necp_client_endpoint * endpoint)2492 necp_client_endpoint_is_unspecified(struct necp_client_endpoint *endpoint)
2493 {
2494 if (necp_client_address_is_valid(&endpoint->u.sa)) {
2495 if (endpoint->u.sa.sa_family == AF_INET) {
2496 return endpoint->u.sin.sin_addr.s_addr == INADDR_ANY;
2497 } else if (endpoint->u.sa.sa_family == AF_INET6) {
2498 return IN6_IS_ADDR_UNSPECIFIED(&endpoint->u.sin6.sin6_addr);
2499 } else {
2500 return TRUE;
2501 }
2502 } else {
2503 return TRUE;
2504 }
2505 }
2506
2507 #if SKYWALK
/*
 * Rewrite the local port in place inside a client's serialized parameter
 * TLV buffer. Walks every TLV; for each LOCAL_ADDRESS or LOCAL_ENDPOINT TLV
 * carrying a valid IPv4/IPv6 sockaddr, overwrites its port field with
 * local_port. All other TLVs are left untouched.
 *
 * NOTE(review): local_port is stored verbatim into sin_port/sin6_port;
 * presumably it is already in network byte order — confirm with callers.
 */
static void
necp_client_update_local_port_parameters(u_int8_t *parameters,
    u_int32_t parameters_size,
    uint16_t local_port)
{
	size_t offset = 0;
	// Keep walking while a complete TLV header still fits in the buffer
	while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);

		if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
			// If the length is larger than what can fit in the remaining parameters size, bail
			NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
			break;
		}

		if (length > 0) {
			u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
			if (value != NULL) {
				switch (type) {
				case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						// Only patch well-formed sockaddrs (family and sa_len checked)
						if (necp_client_address_is_valid(&address_struct->address.sa)) {
							if (address_struct->address.sa.sa_family == AF_INET) {
								address_struct->address.sin.sin_port = local_port;
							} else if (address_struct->address.sa.sa_family == AF_INET6) {
								address_struct->address.sin6.sin6_port = local_port;
							}
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: {
					if (length >= sizeof(struct necp_client_endpoint)) {
						struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
						if (necp_client_address_is_valid(&endpoint->u.sa)) {
							if (endpoint->u.sa.sa_family == AF_INET) {
								endpoint->u.sin.sin_port = local_port;
							} else if (endpoint->u.sa.sa_family == AF_INET6) {
								endpoint->u.sin6.sin6_port = local_port;
							}
						}
					}
					break;
				}
				default: {
					break;
				}
				}
			}
		}

		// Advance past this TLV's header and payload
		offset += sizeof(struct necp_tlv_header) + length;
	}
}
#endif /* SKYWALK */
2565
2566 #define NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH 253
2567
2568 static void
necp_client_trace_parameter_parsing(struct necp_client * client,u_int8_t type,u_int8_t * value,u_int32_t length)2569 necp_client_trace_parameter_parsing(struct necp_client *client, u_int8_t type, u_int8_t *value, u_int32_t length)
2570 {
2571 uint64_t num = 0;
2572 uint16_t shortBuf;
2573 uint32_t intBuf;
2574 char buffer[NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH + 1];
2575
2576 if (value != NULL && length > 0) {
2577 switch (length) {
2578 case 1:
2579 num = *value;
2580 break;
2581 case 2:
2582 memcpy(&shortBuf, value, sizeof(shortBuf));
2583 num = shortBuf;
2584 break;
2585 case 4:
2586 memcpy(&intBuf, value, sizeof(intBuf));
2587 num = intBuf;
2588 break;
2589 case 8:
2590 memcpy(&num, value, sizeof(num));
2591 break;
2592 default:
2593 num = 0;
2594 break;
2595 }
2596 int len = NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH < length ? NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH : length;
2597 memcpy(buffer, value, len);
2598 buffer[len] = 0;
2599 NECP_CLIENT_PARAMS_LOG(client, "Parsing param - type %d length %d value <%llu (%llX)> %s", type, length, num, num, buffer);
2600 } else {
2601 NECP_CLIENT_PARAMS_LOG(client, "Parsing param - type %d length %d", type, length);
2602 }
2603 }
2604
/*
 * Log the fully-parsed parameters of a client for debugging.
 * Renders the local/remote addresses as presentation strings, logs the
 * scalar fields and tracker-related flags, then logs every populated entry
 * of each interface/agent list. Purely diagnostic; modifies no state.
 */
static void
necp_client_trace_parsed_parameters(struct necp_client *client, struct necp_client_parsed_parameters *parsed_parameters)
{
	int i;
	// 64 bytes comfortably fits any IPv4/IPv6 presentation string
	char local_buffer[64] = { };
	char remote_buffer[64] = { };
	uuid_string_t uuid_str = { };
	uuid_unparse_lower(parsed_parameters->effective_uuid, uuid_str);

	// Render the local address; left empty if family/length is unexpected
	switch (parsed_parameters->local_addr.sa.sa_family) {
	case AF_INET:
		if (parsed_parameters->local_addr.sa.sa_len == sizeof(struct sockaddr_in)) {
			struct sockaddr_in *addr = &parsed_parameters->local_addr.sin;
			inet_ntop(AF_INET, &(addr->sin_addr), local_buffer, sizeof(local_buffer));
		}
		break;
	case AF_INET6:
		if (parsed_parameters->local_addr.sa.sa_len == sizeof(struct sockaddr_in6)) {
			struct sockaddr_in6 *addr6 = &parsed_parameters->local_addr.sin6;
			inet_ntop(AF_INET6, &(addr6->sin6_addr), local_buffer, sizeof(local_buffer));
		}
		break;
	default:
		break;
	}

	// Render the remote address the same way
	switch (parsed_parameters->remote_addr.sa.sa_family) {
	case AF_INET:
		if (parsed_parameters->remote_addr.sa.sa_len == sizeof(struct sockaddr_in)) {
			struct sockaddr_in *addr = &parsed_parameters->remote_addr.sin;
			inet_ntop(AF_INET, &(addr->sin_addr), remote_buffer, sizeof(remote_buffer));
		}
		break;
	case AF_INET6:
		if (parsed_parameters->remote_addr.sa.sa_len == sizeof(struct sockaddr_in6)) {
			struct sockaddr_in6 *addr6 = &parsed_parameters->remote_addr.sin6;
			inet_ntop(AF_INET6, &(addr6->sin6_addr), remote_buffer, sizeof(remote_buffer));
		}
		break;
	default:
		break;
	}

	NECP_CLIENT_PARAMS_LOG(client, "Parsed params - valid_fields %X flags %X delegated_upid %llu local_addr %s remote_addr %s "
	    "required_interface_index %u required_interface_type %d local_address_preference %d "
	    "ip_protocol %d transport_protocol %d ethertype %d effective_pid %d effective_uuid %s traffic_class %d",
	    parsed_parameters->valid_fields,
	    parsed_parameters->flags,
	    parsed_parameters->delegated_upid,
	    local_buffer, remote_buffer,
	    parsed_parameters->required_interface_index,
	    parsed_parameters->required_interface_type,
	    parsed_parameters->local_address_preference,
	    parsed_parameters->ip_protocol,
	    parsed_parameters->transport_protocol,
	    parsed_parameters->ethertype,
	    parsed_parameters->effective_pid,
	    uuid_str,
	    parsed_parameters->traffic_class);

	NECP_CLIENT_PARAMS_LOG(client, "Parsed params - tracker flags <known-tracker %X> <non-app-initiated %X> <silent %X> <app-approved %X>",
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_KNOWN_TRACKER,
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_NON_APP_INITIATED,
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_SILENT,
	    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_APPROVED_APP_DOMAIN);

	// Each list below stops at the first empty slot (lists are filled densely)
	for (i = 0; i < NECP_MAX_INTERFACE_PARAMETERS && parsed_parameters->prohibited_interfaces[i][0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_interfaces[%d] <%s>", i, parsed_parameters->prohibited_interfaces[i]);
	}

	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->required_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed required_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->required_netagent_types[i].netagent_domain,
		    parsed_parameters->required_netagent_types[i].netagent_type);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->prohibited_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->prohibited_netagent_types[i].netagent_domain,
		    parsed_parameters->prohibited_netagent_types[i].netagent_type);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->preferred_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed preferred_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->preferred_netagent_types[i].netagent_domain,
		    parsed_parameters->preferred_netagent_types[i].netagent_type);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->avoided_netagent_types[i].netagent_domain[0]; i++) {
		NECP_CLIENT_PARAMS_LOG(client, "Parsed avoided_netagent_types[%d] <%s> <%s>", i,
		    parsed_parameters->avoided_netagent_types[i].netagent_domain,
		    parsed_parameters->avoided_netagent_types[i].netagent_type);
	}

	// UUID lists stop at the first null UUID
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->required_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->required_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed required_netagents[%d] <%s>", i, uuid_str);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->prohibited_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->prohibited_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_netagents[%d] <%s>", i, uuid_str);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->preferred_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->preferred_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed preferred_netagents[%d] <%s>", i, uuid_str);
	}
	for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(parsed_parameters->avoided_netagents[i]); i++) {
		uuid_unparse_lower(parsed_parameters->avoided_netagents[i], uuid_str);
		NECP_CLIENT_PARAMS_LOG(client, "Parsed avoided_netagents[%d] <%s>", i, uuid_str);
	}
}
2713
/*
 * Parse a client's serialized parameter TLV buffer into parsed_parameters.
 * Walks every TLV, recognizing interface, address/endpoint, agent, protocol,
 * flag, and identity parameters; repeatable parameters are collected into
 * fixed-size arrays capped at NECP_MAX_INTERFACE_PARAMETERS /
 * NECP_MAX_AGENT_PARAMETERS (extras are silently dropped). Unknown TLV types
 * are ignored. TLVs whose declared length overruns the buffer end the parse.
 *
 * Returns 0 on success, EINVAL if parsed_parameters is NULL, or EAUTH when a
 * resolver tag is present but fails validation against the hostname and
 * remote address.
 */
static int
necp_client_parse_parameters(struct necp_client *client, u_int8_t *parameters,
    u_int32_t parameters_size,
    struct necp_client_parsed_parameters *parsed_parameters)
{
	int error = 0;
	size_t offset = 0;

	// Counts of repeatable parameters accepted so far (each list is capped)
	u_int32_t num_prohibited_interfaces = 0;
	u_int32_t num_prohibited_interface_types = 0;
	u_int32_t num_required_agents = 0;
	u_int32_t num_prohibited_agents = 0;
	u_int32_t num_preferred_agents = 0;
	u_int32_t num_avoided_agents = 0;
	u_int32_t num_required_agent_types = 0;
	u_int32_t num_prohibited_agent_types = 0;
	u_int32_t num_preferred_agent_types = 0;
	u_int32_t num_avoided_agent_types = 0;
	// Resolver tag, hostname, and parent ID are validated together after the loop
	u_int8_t *resolver_tag = NULL;
	u_int32_t resolver_tag_length = 0;
	u_int8_t *client_hostname = NULL;
	u_int32_t hostname_length = 0;
	uuid_t parent_id = {};

	if (parsed_parameters == NULL) {
		return EINVAL;
	}

	memset(parsed_parameters, 0, sizeof(struct necp_client_parsed_parameters));

	// Iterate while a complete TLV header still fits in the buffer
	while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);

		if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
			// If the length is larger than what can fit in the remaining parameters size, bail
			NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
			break;
		}

		if (length > 0) {
			u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
			if (value != NULL) {
				switch (type) {
				case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
					if (length <= IFXNAMSIZ && length > 0) {
						ifnet_t bound_interface = NULL;
						char interface_name[IFXNAMSIZ];
						memcpy(interface_name, value, length);
						interface_name[length - 1] = 0; // Make sure the string is NULL terminated
						// A bound interface becomes a required interface for scoping
						if (ifnet_find_by_name(interface_name, &bound_interface) == 0) {
							parsed_parameters->required_interface_index = bound_interface->if_index;
							parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF;
							ifnet_release(bound_interface);
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						if (necp_client_address_is_valid(&address_struct->address.sa)) {
							memcpy(&parsed_parameters->local_addr, &address_struct->address, sizeof(address_struct->address));
							// Wildcard addresses set only the port field, not the address field
							if (!necp_address_is_wildcard(&parsed_parameters->local_addr)) {
								parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR;
							}
							if ((parsed_parameters->local_addr.sa.sa_family == AF_INET && parsed_parameters->local_addr.sin.sin_port) ||
							    (parsed_parameters->local_addr.sa.sa_family == AF_INET6 && parsed_parameters->local_addr.sin6.sin6_port)) {
								parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT;
							}
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: {
					if (length >= sizeof(struct necp_client_endpoint)) {
						struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
						if (necp_client_address_is_valid(&endpoint->u.sa)) {
							memcpy(&parsed_parameters->local_addr, &endpoint->u.sa, sizeof(union necp_sockaddr_union));
							if (!necp_address_is_wildcard(&parsed_parameters->local_addr)) {
								parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR;
							}
							if ((parsed_parameters->local_addr.sa.sa_family == AF_INET && parsed_parameters->local_addr.sin.sin_port) ||
							    (parsed_parameters->local_addr.sa.sa_family == AF_INET6 && parsed_parameters->local_addr.sin6.sin6_port)) {
								parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT;
							}
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						if (necp_client_address_is_valid(&address_struct->address.sa)) {
							memcpy(&parsed_parameters->remote_addr, &address_struct->address, sizeof(address_struct->address));
							parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR;
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_REMOTE_ENDPOINT: {
					if (length >= sizeof(struct necp_client_endpoint)) {
						struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
						if (necp_client_address_is_valid(&endpoint->u.sa)) {
							memcpy(&parsed_parameters->remote_addr, &endpoint->u.sa, sizeof(union necp_sockaddr_union));
							parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR;
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_PROHIBIT_INTERFACE: {
					if (num_prohibited_interfaces >= NECP_MAX_INTERFACE_PARAMETERS) {
						break;
					}
					if (length <= IFXNAMSIZ && length > 0) {
						memcpy(parsed_parameters->prohibited_interfaces[num_prohibited_interfaces], value, length);
						parsed_parameters->prohibited_interfaces[num_prohibited_interfaces][length - 1] = 0; // Make sure the string is NULL terminated
						num_prohibited_interfaces++;
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_REQUIRE_IF_TYPE: {
					// Only the first required interface type is honored
					if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) {
						break;
					}
					if (length >= sizeof(u_int8_t)) {
						memcpy(&parsed_parameters->required_interface_type, value, sizeof(u_int8_t));
						if (parsed_parameters->required_interface_type) {
							parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE;
						}
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_PROHIBIT_IF_TYPE: {
					if (num_prohibited_interface_types >= NECP_MAX_INTERFACE_PARAMETERS) {
						break;
					}
					if (length >= sizeof(u_int8_t)) {
						memcpy(&parsed_parameters->prohibited_interface_types[num_prohibited_interface_types], value, sizeof(u_int8_t));
						num_prohibited_interface_types++;
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_REQUIRE_AGENT: {
					if (num_required_agents >= NECP_MAX_AGENT_PARAMETERS) {
						break;
					}
					if (length >= sizeof(uuid_t)) {
						memcpy(&parsed_parameters->required_netagents[num_required_agents], value, sizeof(uuid_t));
						num_required_agents++;
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_PROHIBIT_AGENT: {
					if (num_prohibited_agents >= NECP_MAX_AGENT_PARAMETERS) {
						break;
					}
					if (length >= sizeof(uuid_t)) {
						memcpy(&parsed_parameters->prohibited_netagents[num_prohibited_agents], value, sizeof(uuid_t));
						num_prohibited_agents++;
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_PREFER_AGENT: {
					if (num_preferred_agents >= NECP_MAX_AGENT_PARAMETERS) {
						break;
					}
					if (length >= sizeof(uuid_t)) {
						memcpy(&parsed_parameters->preferred_netagents[num_preferred_agents], value, sizeof(uuid_t));
						num_preferred_agents++;
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_AVOID_AGENT: {
					if (num_avoided_agents >= NECP_MAX_AGENT_PARAMETERS) {
						break;
					}
					if (length >= sizeof(uuid_t)) {
						memcpy(&parsed_parameters->avoided_netagents[num_avoided_agents], value, sizeof(uuid_t));
						num_avoided_agents++;
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE: {
					if (num_required_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
						break;
					}
					if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
						memcpy(&parsed_parameters->required_netagent_types[num_required_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
						num_required_agent_types++;
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_PROHIBIT_AGENT_TYPE: {
					if (num_prohibited_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
						break;
					}
					if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
						memcpy(&parsed_parameters->prohibited_netagent_types[num_prohibited_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
						num_prohibited_agent_types++;
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_PREFER_AGENT_TYPE: {
					if (num_preferred_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
						break;
					}
					if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
						memcpy(&parsed_parameters->preferred_netagent_types[num_preferred_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
						num_preferred_agent_types++;
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_AVOID_AGENT_TYPE: {
					if (num_avoided_agent_types >= NECP_MAX_AGENT_PARAMETERS) {
						break;
					}
					if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
						memcpy(&parsed_parameters->avoided_netagent_types[num_avoided_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
						num_avoided_agent_types++;
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_FLAGS: {
					if (length >= sizeof(u_int32_t)) {
						memcpy(&parsed_parameters->flags, value, sizeof(parsed_parameters->flags));
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_FLAGS;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
					// Accept both a 16-bit encoding (truncated to 8 bits) and
					// the native 8-bit encoding of the IP protocol number
					if (length == sizeof(u_int16_t)) {
						u_int16_t large_ip_protocol = 0;
						memcpy(&large_ip_protocol, value, sizeof(large_ip_protocol));
						parsed_parameters->ip_protocol = (u_int8_t)large_ip_protocol;
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL;
					} else if (length >= sizeof(parsed_parameters->ip_protocol)) {
						memcpy(&parsed_parameters->ip_protocol, value, sizeof(parsed_parameters->ip_protocol));
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_TRANSPORT_PROTOCOL: {
					if (length >= sizeof(parsed_parameters->transport_protocol)) {
						memcpy(&parsed_parameters->transport_protocol, value, sizeof(parsed_parameters->transport_protocol));
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_PID: {
					if (length >= sizeof(parsed_parameters->effective_pid)) {
						memcpy(&parsed_parameters->effective_pid, value, sizeof(parsed_parameters->effective_pid));
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_DELEGATED_UPID: {
					if (length >= sizeof(parsed_parameters->delegated_upid)) {
						memcpy(&parsed_parameters->delegated_upid, value, sizeof(parsed_parameters->delegated_upid));
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_ETHERTYPE: {
					if (length >= sizeof(parsed_parameters->ethertype)) {
						memcpy(&parsed_parameters->ethertype, value, sizeof(parsed_parameters->ethertype));
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_ETHERTYPE;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_APPLICATION: {
					if (length >= sizeof(parsed_parameters->effective_uuid)) {
						memcpy(&parsed_parameters->effective_uuid, value, sizeof(parsed_parameters->effective_uuid));
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
					if (length >= sizeof(parsed_parameters->traffic_class)) {
						memcpy(&parsed_parameters->traffic_class, value, sizeof(parsed_parameters->traffic_class));
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_RESOLVER_TAG: {
					// Saved for post-loop validation; points into the caller's buffer
					if (length > 0) {
						resolver_tag = (u_int8_t *)value;
						resolver_tag_length = length;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_DOMAIN: {
					// Saved for post-loop validation; points into the caller's buffer
					if (length > 0) {
						client_hostname = (u_int8_t *)value;
						hostname_length = length;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_PARENT_ID: {
					if (length == sizeof(parent_id)) {
						uuid_copy(parent_id, value);
						memcpy(&parsed_parameters->parent_uuid, value, sizeof(parsed_parameters->parent_uuid));
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE: {
					if (length >= sizeof(parsed_parameters->local_address_preference)) {
						memcpy(&parsed_parameters->local_address_preference, value, sizeof(parsed_parameters->local_address_preference));
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE;
					}
					break;
				}
				case NECP_CLIENT_PARAMETER_ATTRIBUTED_BUNDLE_IDENTIFIER: {
					// Presence is tracked, but the identifier itself is not stored here
					if (length > 0) {
						parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER;
					}
					break;
				}
				default: {
					break;
				}
				}
			}

			if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) {
				necp_client_trace_parameter_parsing(client, type, value, length);
			}
		}

		// Advance past this TLV's header and payload
		offset += sizeof(struct necp_tlv_header) + length;
	}

	// If a resolver tag was supplied, verify it signs the (hostname, address) pair
	if (resolver_tag != NULL) {
		union necp_sockaddr_union remote_addr;
		memcpy(&remote_addr, &parsed_parameters->remote_addr, sizeof(remote_addr));
		// The port is not covered by the resolver signature
		remote_addr.sin.sin_port = 0;
		const bool validated = necp_validate_resolver_answer(parent_id,
		    client_hostname, hostname_length,
		    (u_int8_t *)&remote_addr, sizeof(remote_addr),
		    resolver_tag, resolver_tag_length);
		if (!validated) {
			error = EAUTH;
			// NOTE(review): client_hostname comes from a TLV payload and may
			// not be NUL-terminated; %s here assumes termination — confirm.
			NECPLOG(LOG_ERR, "Failed to validate answer for hostname %s", client_hostname);
		}
	}

	// Log if it is a known tracker
	if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_KNOWN_TRACKER && client) {
		NECP_CLIENT_TRACKER_LOG(client->proc_pid, "Parsing tracker flags - known-tracker %X non-app-initiated %X silent %X approved-app-domain %X",
		    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_KNOWN_TRACKER,
		    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_NON_APP_INITIATED,
		    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_SILENT,
		    parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_APPROVED_APP_DOMAIN);
	}

	if (NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) {
		necp_client_trace_parsed_parameters(client, parsed_parameters);
	}

	return error;
}
3086
/*
 * Parse a serialized result TLV buffer, extracting the assigned local and
 * remote endpoints into the caller's sockaddr unions (when non-NULL and the
 * TLV carries a valid IPv4/IPv6 address). Under SKYWALK, a
 * NEXUS_FLOW_STATS TLV carries a refcounted flow_stats pointer: the
 * reference is transferred to *flow_stats when the caller wants it,
 * otherwise it is released; the TLV payload is zeroed either way so the
 * reference cannot be taken twice. Always returns 0 today.
 */
static int
necp_client_parse_result(u_int8_t *result,
    u_int32_t result_size,
    union necp_sockaddr_union *local_address,
    union necp_sockaddr_union *remote_address,
    void **flow_stats)
{
// Silences the unused warning on non-SKYWALK builds, where flow_stats is untouched
#pragma unused(flow_stats)
	int error = 0;
	size_t offset = 0;

	// Iterate while a complete TLV header still fits in the buffer
	while ((offset + sizeof(struct necp_tlv_header)) <= result_size) {
		u_int8_t type = necp_buffer_get_tlv_type(result, offset);
		u_int32_t length = necp_buffer_get_tlv_length(result, offset);

		// Unlike the parameter parser, an oversized TLV here skips the value
		// but still advances the offset below
		if (length > 0 && (offset + sizeof(struct necp_tlv_header) + length) <= result_size) {
			u_int8_t *value = necp_buffer_get_tlv_value(result, offset, NULL);
			if (value != NULL) {
				switch (type) {
				case NECP_CLIENT_RESULT_LOCAL_ENDPOINT: {
					if (length >= sizeof(struct necp_client_endpoint)) {
						struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
						if (local_address != NULL && necp_client_address_is_valid(&endpoint->u.sa)) {
							// sa_len was validated against the family above
							memcpy(local_address, &endpoint->u.sa, endpoint->u.sa.sa_len);
						}
					}
					break;
				}
				case NECP_CLIENT_RESULT_REMOTE_ENDPOINT: {
					if (length >= sizeof(struct necp_client_endpoint)) {
						struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
						if (remote_address != NULL && necp_client_address_is_valid(&endpoint->u.sa)) {
							memcpy(remote_address, &endpoint->u.sa, endpoint->u.sa.sa_len);
						}
					}
					break;
				}
#if SKYWALK
				case NECP_CLIENT_RESULT_NEXUS_FLOW_STATS: {
					// this TLV contains flow_stats pointer which is refcnt'ed.
					if (length >= sizeof(struct sk_stats_flow *)) {
						struct flow_stats *fs = *(void **)(void *)value;
						if (flow_stats != NULL) {
							// transfer the refcnt to flow_stats pointer
							*flow_stats = fs;
						} else {
							// otherwise, release the refcnt
							VERIFY(fs != NULL);
							flow_stats_release(fs);
						}
						memset(value, 0, sizeof(struct flow_stats*)); // nullify TLV always
					}
					break;
				}
#endif /* SKYWALK */
				default: {
					break;
				}
				}
			}
		}

		// Advance past this TLV's header and payload
		offset += sizeof(struct necp_tlv_header) + length;
	}

	return error;
}
3154
/*
 * Allocate and initialize a new flow registration for a client, linking it
 * into the client's registration tree, the owning fd's flow tree, and the
 * global flow tree (the latter under the exclusive flow-tree lock). Both the
 * fd and the client must be locked by the caller. Returns NULL on allocation
 * failure. The new registration starts with flow_result_read set, meaning
 * there is nothing pending for the client to read yet.
 */
static struct necp_client_flow_registration *
necp_client_create_flow_registration(struct necp_fd_data *fd_data, struct necp_client *client)
{
	NECP_FD_ASSERT_LOCKED(fd_data);
	NECP_CLIENT_ASSERT_LOCKED(client);

	struct necp_client_flow_registration *new_registration = mcache_alloc(necp_flow_registration_cache, MCR_SLEEP);
	if (new_registration == NULL) {
		return NULL;
	}

	memset(new_registration, 0, sizeof(*new_registration));

	// Interface details are unknown until the flow is assigned to an interface
	new_registration->last_interface_details = combine_interface_details(IFSCOPE_NONE, NSTAT_IFNET_IS_UNKNOWN_TYPE);

	necp_generate_client_id(new_registration->registration_id, true);
	LIST_INIT(&new_registration->flow_list);

	// Add registration to client list
	RB_INSERT(_necp_client_flow_tree, &client->flow_registrations, new_registration);

	// Add registration to fd list
	RB_INSERT(_necp_fd_flow_tree, &fd_data->flows, new_registration);

	// Add registration to global tree for lookup
	NECP_FLOW_TREE_LOCK_EXCLUSIVE();
	RB_INSERT(_necp_client_flow_global_tree, &necp_client_flow_global_tree, new_registration);
	NECP_FLOW_TREE_UNLOCK();

	new_registration->client = client;

#if SKYWALK
	{
		// The uuid caching here is something of a hack, but saves a dynamic lookup with attendant lock hierarchy issues
		uint64_t stats_event_type = (uuid_is_null(client->latest_flow_registration_id)) ? NSTAT_EVENT_SRC_FLOW_UUID_ASSIGNED : NSTAT_EVENT_SRC_FLOW_UUID_CHANGED;
		uuid_copy(client->latest_flow_registration_id, new_registration->registration_id);

		// With the flow uuid known, push a new statistics update to ensure the uuid gets known by any clients before the flow can close
		if (client->nstat_context != NULL) {
			nstat_provider_stats_event(client->nstat_context, stats_event_type);
		}
	}
#endif /* SKYWALK */

	// Start out assuming there is nothing to read from the flow
	new_registration->flow_result_read = true;

	return new_registration;
}
3204
3205 static void
necp_client_add_socket_flow(struct necp_client_flow_registration * flow_registration,struct inpcb * inp)3206 necp_client_add_socket_flow(struct necp_client_flow_registration *flow_registration,
3207 struct inpcb *inp)
3208 {
3209 struct necp_client_flow *new_flow = mcache_alloc(necp_flow_cache, MCR_SLEEP);
3210 if (new_flow == NULL) {
3211 NECPLOG0(LOG_ERR, "Failed to allocate socket flow");
3212 return;
3213 }
3214
3215 memset(new_flow, 0, sizeof(*new_flow));
3216
3217 new_flow->socket = TRUE;
3218 new_flow->u.socket_handle = inp;
3219 new_flow->u.cb = inp->necp_cb;
3220
3221 OSIncrementAtomic(&necp_socket_flow_count);
3222
3223 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
3224 }
3225
// Attach a socket (inpcb) to the NECP client identified by 'client_id'.
// For listeners, only validates the client (and, under Skywalk, its netns
// port reservation); otherwise adds a socket flow to the client's flow
// registration, creating a registration first if the client has none.
// A 'pid' of 0 matches a client owned by any process.
// Returns 0 on success, ENOENT if no matching client was found, EINVAL for
// an invalid listener port reservation, or ENOMEM on registration failure.
static int
necp_client_register_socket_inner(pid_t pid, uuid_t client_id, struct inpcb *inp, bool is_listener)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			if (!pid || client->proc_pid == pid) {
				if (is_listener) {
					found_client = TRUE;
#if SKYWALK
					// Check netns token for registration
					if (!NETNS_TOKEN_VALID(&client->port_reservation)) {
						error = EINVAL;
					}
#endif /* SKYWALK */
				} else {
					// Find client flow and assign from socket
					struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
					if (flow_registration != NULL) {
						// Found the right client and flow registration, add a new flow
						found_client = TRUE;
						necp_client_add_socket_flow(flow_registration, inp);
					} else if (RB_EMPTY(&client->flow_registrations) && !necp_client_id_is_flow(client_id)) {
						// No flows yet on this client, add a new registration
						flow_registration = necp_client_create_flow_registration(client_fd, client);
						if (flow_registration == NULL) {
							error = ENOMEM;
						} else {
							// Add a new flow
							found_client = TRUE;
							necp_client_add_socket_flow(flow_registration, inp);
						}
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	} else {
		// Count the sockets that have the NECP client UUID set
		struct socket *so = inp->inp_socket;
		if (!(so->so_flags1 & SOF1_HAS_NECP_CLIENT_UUID)) {
			so->so_flags1 |= SOF1_HAS_NECP_CLIENT_UUID;
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_necp_clientuuid_total);
		}
	}

	return error;
}
3291
// Register a (non-listener) socket flow with the client; see
// necp_client_register_socket_inner for semantics and return values.
int
necp_client_register_socket_flow(pid_t pid, uuid_t client_id, struct inpcb *inp)
{
	return necp_client_register_socket_inner(pid, client_id, inp, false);
}
3297
// Register a listener socket with the client (validation only — no flow is
// added); see necp_client_register_socket_inner for semantics.
int
necp_client_register_socket_listener(pid_t pid, uuid_t client_id, struct inpcb *inp)
{
	return necp_client_register_socket_inner(pid, client_id, inp, true);
}
3303
#if SKYWALK
// Fetch the netns flow info recorded under the client's port reservation.
// Returns ENOENT if no client matches 'client_id', EINVAL if the client's
// reservation token is not valid, otherwise whatever
// netns_get_flow_info() returns.
int
necp_client_get_netns_flow_info(uuid_t client_id, struct ns_flow_info *flow_info)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			found_client = TRUE;
			if (!NETNS_TOKEN_VALID(&client->port_reservation)) {
				error = EINVAL;
			} else {
				error = netns_get_flow_info(&client->port_reservation, flow_info);
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	}

	return error;
}
#endif /* SKYWALK */
3341
// Attach a multipath PCB to a flow registration and create tracking flows
// for all matching interfaces on behalf of the owning process.
static void
necp_client_add_multipath_interface_flows(struct necp_client_flow_registration *flow_registration,
    struct necp_client *client,
    struct mppcb *mpp)
{
	flow_registration->interface_handle = mpp;
	flow_registration->interface_cb = mpp->necp_cb;

	// NOTE(review): the handle/cb above are installed even when the owning
	// process can no longer be found, in which case no per-interface flows
	// are added — confirm this partial setup is intended.
	proc_t proc = proc_find(client->proc_pid);
	if (proc == PROC_NULL) {
		return;
	}

	// Traverse all interfaces and add a tracking flow if needed
	necp_flow_add_interface_flows(proc, client, flow_registration, true);

	proc_rele(proc);
	proc = PROC_NULL;
}
3361
// Register a multipath PCB callback against the client identified by
// 'client_id', creating a flow registration if the client has none yet.
// A 'pid' of 0 matches a client owned by any process.
// Returns 0 on success, ENOMEM if a new registration could not be
// allocated, or ENOENT if no matching client/flow was found.
int
necp_client_register_multipath_cb(pid_t pid, uuid_t client_id, struct mppcb *mpp)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			if (!pid || client->proc_pid == pid) {
				struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
				if (flow_registration != NULL) {
					// Found the right client and flow registration, add a new flow
					found_client = TRUE;
					necp_client_add_multipath_interface_flows(flow_registration, client, mpp);
				} else if (RB_EMPTY(&client->flow_registrations) && !necp_client_id_is_flow(client_id)) {
					// No flows yet on this client, add a new registration
					flow_registration = necp_client_create_flow_registration(client_fd, client);
					if (flow_registration == NULL) {
						error = ENOMEM;
					} else {
						// Add a new flow
						found_client = TRUE;
						necp_client_add_multipath_interface_flows(flow_registration, client, mpp);
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	// Preserve ENOMEM from a failed registration over ENOENT
	if (!found_client && error == 0) {
		error = ENOENT;
	}

	return error;
}
3409
3410 #define NETAGENT_DOMAIN_RADIO_MANAGER "WirelessRadioManager"
3411 #define NETAGENT_TYPE_RADIO_MANAGER "WirelessRadioManager:BB Manager"
3412
3413 static int
necp_client_lookup_bb_radio_manager(struct necp_client * client,uuid_t netagent_uuid)3414 necp_client_lookup_bb_radio_manager(struct necp_client *client,
3415 uuid_t netagent_uuid)
3416 {
3417 char netagent_domain[NETAGENT_DOMAINSIZE];
3418 char netagent_type[NETAGENT_TYPESIZE];
3419 struct necp_aggregate_result result;
3420 proc_t proc;
3421 int error;
3422
3423 proc = proc_find(client->proc_pid);
3424 if (proc == PROC_NULL) {
3425 return ESRCH;
3426 }
3427
3428 error = necp_application_find_policy_match_internal(proc, client->parameters, (u_int32_t)client->parameters_length,
3429 &result, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, true, true, NULL);
3430
3431 proc_rele(proc);
3432 proc = PROC_NULL;
3433
3434 if (error) {
3435 return error;
3436 }
3437
3438 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
3439 if (uuid_is_null(result.netagents[i])) {
3440 // Passed end of valid agents
3441 break;
3442 }
3443
3444 memset(&netagent_domain, 0, NETAGENT_DOMAINSIZE);
3445 memset(&netagent_type, 0, NETAGENT_TYPESIZE);
3446 if (netagent_get_agent_domain_and_type(result.netagents[i], netagent_domain, netagent_type) == FALSE) {
3447 continue;
3448 }
3449
3450 if (strncmp(netagent_domain, NETAGENT_DOMAIN_RADIO_MANAGER, NETAGENT_DOMAINSIZE) != 0) {
3451 continue;
3452 }
3453
3454 if (strncmp(netagent_type, NETAGENT_TYPE_RADIO_MANAGER, NETAGENT_TYPESIZE) != 0) {
3455 continue;
3456 }
3457
3458 uuid_copy(netagent_uuid, result.netagents[i]);
3459
3460 break;
3461 }
3462
3463 return 0;
3464 }
3465
// Assert (or unassert) the BB radio manager agent on behalf of 'client'.
// When unasserting, the client must already hold the assertion, otherwise
// EINVAL is returned. The assertion is recorded on the client only after
// the agent message succeeds, keeping bookkeeping and agent state in sync.
static int
necp_client_assert_bb_radio_manager_common(struct necp_client *client, bool assert)
{
	uuid_t netagent_uuid;
	uint8_t assert_type;
	int error;

	error = necp_client_lookup_bb_radio_manager(client, netagent_uuid);
	if (error) {
		NECPLOG0(LOG_ERR, "BB radio manager agent not found");
		return error;
	}

	// Before unasserting, verify that the assertion was already taken
	if (assert == FALSE) {
		assert_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;

		if (!necp_client_remove_assertion(client, netagent_uuid)) {
			return EINVAL;
		}
	} else {
		assert_type = NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT;
	}

	error = netagent_client_message(netagent_uuid, client->client_id, client->proc_pid, client->agent_handle, assert_type);
	if (error) {
		NECPLOG0(LOG_ERR, "netagent_client_message failed");
		return error;
	}

	// Only save the assertion if the action succeeded
	if (assert == TRUE) {
		necp_client_add_assertion(client, netagent_uuid);
	}

	return 0;
}
3503
3504 int
necp_client_assert_bb_radio_manager(uuid_t client_id,bool assert)3505 necp_client_assert_bb_radio_manager(uuid_t client_id, bool assert)
3506 {
3507 struct necp_client *client;
3508 int error = 0;
3509
3510 NECP_CLIENT_TREE_LOCK_SHARED();
3511
3512 client = necp_find_client_and_lock(client_id);
3513
3514 if (client) {
3515 // Found the right client!
3516 error = necp_client_assert_bb_radio_manager_common(client, assert);
3517
3518 NECP_CLIENT_UNLOCK(client);
3519 } else {
3520 NECPLOG0(LOG_ERR, "Couldn't find client");
3521 error = ENOENT;
3522 }
3523
3524 NECP_CLIENT_TREE_UNLOCK();
3525
3526 return error;
3527 }
3528
// Detach the socket identified by 'handle' from the client's flow
// registration: frees any assigned results, unlinks and frees each matching
// socket flow, and wakes the fd if anything changed.
// Returns ENOENT if no matching client/flow registration was found.
static int
necp_client_unregister_socket_flow(uuid_t client_id, void *handle)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(client_fd);

		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;

				// Remove flow assignment
				struct necp_client_flow *search_flow = NULL;
				struct necp_client_flow *temp_flow = NULL;
				LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
					if (search_flow->socket && search_flow->u.socket_handle == handle) {
						if (search_flow->assigned_results != NULL) {
							kfree_data(search_flow->assigned_results, search_flow->assigned_results_length);
							search_flow->assigned_results = NULL;
						}
						client_updated = TRUE;
						// Force readers to pick up the removal
						flow_registration->flow_result_read = FALSE;
						LIST_REMOVE(search_flow, flow_chain);
						OSDecrementAtomic(&necp_socket_flow_count);
						mcache_free(necp_flow_cache, search_flow);
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}

		// Notify while still holding the fd lock, after dropping the client lock
		if (client_updated) {
			necp_fd_notify(client_fd, true);
		}
		NECP_FD_UNLOCK(client_fd);

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	}

	return error;
}
3586
// Detach a multipath PCB ('handle') from the client's flow registration:
// clears the handle/callback on each matching non-socket, non-nexus flow
// and on the registration itself. The flows remain on the list with their
// handles nulled. Returns ENOENT if no matching client/flow was found.
static int
necp_client_unregister_multipath_cb(uuid_t client_id, void *handle)
{
	int error = 0;
	bool found_client = FALSE;

	NECP_CLIENT_TREE_LOCK_SHARED();

	struct necp_client *client = necp_find_client_and_lock(client_id);
	if (client != NULL) {
		struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
		if (flow_registration != NULL) {
			// Found the right client and flow!
			found_client = TRUE;

			// Remove flow assignment
			struct necp_client_flow *search_flow = NULL;
			struct necp_client_flow *temp_flow = NULL;
			LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
				if (!search_flow->socket && !search_flow->nexus &&
				    search_flow->u.socket_handle == handle) {
					search_flow->u.socket_handle = NULL;
					search_flow->u.cb = NULL;
				}
			}

			flow_registration->interface_handle = NULL;
			flow_registration->interface_cb = NULL;
		}

		NECP_CLIENT_UNLOCK(client);
	}

	NECP_CLIENT_TREE_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	}

	return error;
}
3628
// Populate the client's flow state from an already-connected socket:
// copies the bound interface, local/remote addresses (IPv4 or IPv6) from
// the inpcb into the matching socket flow, recomputes viability, and
// builds a nexus-assign result message for the flow. A 'pid' of 0 matches
// any owning fd. Returns ENOENT if no client matched, EINVAL if the client
// was found but no matching socket flow was updated, or ENOMEM if a new
// flow registration could not be created.
int
necp_client_assign_from_socket(pid_t pid, uuid_t client_id, struct inpcb *inp)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		if (pid && client_fd->proc_pid != pid) {
			continue;
		}

		// Hold a ref on the owning process for the viability check below
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);

		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration == NULL && RB_EMPTY(&client->flow_registrations) && !necp_client_id_is_flow(client_id)) {
				// No flows yet on this client, add a new registration
				flow_registration = necp_client_create_flow_registration(client_fd, client);
				if (flow_registration == NULL) {
					error = ENOMEM;
				}
			}
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;

				struct necp_client_flow *flow = NULL;
				LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
					if (flow->socket && flow->u.socket_handle == inp) {
						// Release prior results and route
						if (flow->assigned_results != NULL) {
							kfree_data(flow->assigned_results, flow->assigned_results_length);
							flow->assigned_results = NULL;
						}

						// Prefer an explicitly bound interface over the last output interface
						ifnet_t ifp = NULL;
						if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp) {
							ifp = inp->inp_boundifp;
						} else {
							ifp = inp->inp_last_outifp;
						}

						if (ifp != NULL) {
							flow->interface_index = ifp->if_index;
						} else {
							flow->interface_index = IFSCOPE_NONE;
						}

						// Copy local/remote endpoints from the PCB
						if (inp->inp_vflag & INP_IPV4) {
							flow->local_addr.sin.sin_family = AF_INET;
							flow->local_addr.sin.sin_len = sizeof(struct sockaddr_in);
							flow->local_addr.sin.sin_port = inp->inp_lport;
							memcpy(&flow->local_addr.sin.sin_addr, &inp->inp_laddr, sizeof(struct in_addr));

							flow->remote_addr.sin.sin_family = AF_INET;
							flow->remote_addr.sin.sin_len = sizeof(struct sockaddr_in);
							flow->remote_addr.sin.sin_port = inp->inp_fport;
							memcpy(&flow->remote_addr.sin.sin_addr, &inp->inp_faddr, sizeof(struct in_addr));
						} else if (inp->inp_vflag & INP_IPV6) {
							in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope, &flow->local_addr.sin6, sizeof(flow->local_addr));
							in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope, &flow->remote_addr.sin6, sizeof(flow->remote_addr));
						}

						flow->viable = necp_client_flow_is_viable(proc, client, flow);

						// No nexus agent for a socket flow; use a null UUID
						uuid_t empty_uuid;
						uuid_clear(empty_uuid);
						flow->assigned = TRUE;
						flow->assigned_results = necp_create_nexus_assign_message(empty_uuid, 0, NULL, 0,
						    (struct necp_client_endpoint *)&flow->local_addr,
						    (struct necp_client_endpoint *)&flow->remote_addr,
						    NULL, 0, NULL, &flow->assigned_results_length);
						flow_registration->flow_result_read = FALSE;
						client_updated = TRUE;
						break;
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		if (client_updated) {
			necp_fd_notify(client_fd, true);
		}
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (error == 0) {
		if (!found_client) {
			error = ENOENT;
		} else if (!client_updated) {
			error = EINVAL;
		}
	}

	return error;
}
3743
// Decide whether a listener socket may receive on 'interface'.
// Non-listeners, sockets without an external-port NECP client, or missing
// arguments pass immediately (outbound traffic was already policy-checked).
// For listeners, the client's parsed parameters are matched against the
// interface; failure to parse parameters results in allowing the receive.
bool
necp_socket_is_allowed_to_recv_on_interface(struct inpcb *inp, ifnet_t interface)
{
	if (interface == NULL ||
	    inp == NULL ||
	    !(inp->inp_flags2 & INP2_EXTERNAL_PORT) ||
	    uuid_is_null(inp->necp_client_uuid)) {
		// If there's no interface or client ID to check,
		// or if this is not a listener, pass.
		// Outbound connections will have already been
		// validated for policy.
		return TRUE;
	}

	// Only filter out listener sockets (no remote address specified)
	if ((inp->inp_vflag & INP_IPV4) &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		return TRUE;
	}
	if ((inp->inp_vflag & INP_IPV6) &&
	    !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) {
		return TRUE;
	}

	bool allowed = TRUE;

	NECP_CLIENT_TREE_LOCK_SHARED();

	struct necp_client *client = necp_find_client_and_lock(inp->necp_client_uuid);
	if (client != NULL) {
		struct necp_client_parsed_parameters *parsed_parameters = NULL;

		// Z_NOFAIL: allocation cannot return NULL, no need to check
		parsed_parameters = kalloc_type(struct necp_client_parsed_parameters,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		int error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, parsed_parameters);
		if (error == 0) {
			if (!necp_ifnet_matches_parameters(interface, parsed_parameters, 0, NULL, true, false)) {
				allowed = FALSE;
			}
		}
		kfree_type(struct necp_client_parsed_parameters, parsed_parameters);

		NECP_CLIENT_UNLOCK(client);
	}

	NECP_CLIENT_TREE_UNLOCK();

	return allowed;
}
3793
// Record a protocol-control event (e.g. an ICMP-derived TCP event) on the
// client's flow: matches either a nexus flow whose agent equals
// 'netagent_uuid' or any socket flow, stores the event code/value/TCP
// sequence number, and wakes the fd. Returns ENOENT if no client/flow
// matched, EINVAL if a flow registration was found but no flow matched.
int
necp_update_flow_protoctl_event(uuid_t netagent_uuid, uuid_t client_id,
    uint32_t protoctl_event_code, uint32_t protoctl_event_val,
    uint32_t protoctl_event_tcp_seq_number)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();
	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		// Hold the owning process across the per-fd work
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);

		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;

				struct necp_client_flow *flow = NULL;
				LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
					// Verify that the client nexus agent matches
					if ((flow->nexus && uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) ||
					    flow->socket) {
						flow->has_protoctl_event = TRUE;
						flow->protoctl_event.protoctl_event_code = protoctl_event_code;
						flow->protoctl_event.protoctl_event_val = protoctl_event_val;
						flow->protoctl_event.protoctl_event_tcp_seq_num = protoctl_event_tcp_seq_number;
						flow_registration->flow_result_read = FALSE;
						client_updated = TRUE;
						break;
					}
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}

		if (client_updated) {
			necp_fd_notify(client_fd, true);
		}

		NECP_FD_UNLOCK(client_fd);
		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}
	NECP_FD_LIST_UNLOCK();

	if (!found_client) {
		error = ENOENT;
	} else if (!client_updated) {
		error = EINVAL;
	}
	return error;
}
3860
// Store nexus-assigned results on the flow whose agent matches
// 'netagent_uuid'. Ownership of 'assigned_results' transfers to the flow
// only on success (returns true); if no flow matched (returns false) the
// caller must free 'assigned_results'. Optionally wakes the fd.
// Caller must hold both the fd lock and the client lock.
static bool
necp_assign_client_result_locked(struct proc *proc,
    struct necp_fd_data *client_fd,
    struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    uuid_t netagent_uuid,
    u_int8_t *assigned_results,
    size_t assigned_results_length,
    bool notify_fd)
{
	bool client_updated = FALSE;

	NECP_FD_ASSERT_LOCKED(client_fd);
	NECP_CLIENT_ASSERT_LOCKED(client);

	struct necp_client_flow *flow = NULL;
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		// Verify that the client nexus agent matches
		if (flow->nexus &&
		    uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) {
			// Release prior results and route
			if (flow->assigned_results != NULL) {
				kfree_data(flow->assigned_results, flow->assigned_results_length);
				flow->assigned_results = NULL;
			}

			// Parse out endpoints (and, with Skywalk, the flow stats ref)
			void *nexus_stats = NULL;
			if (assigned_results != NULL && assigned_results_length > 0) {
				int error = necp_client_parse_result(assigned_results, (u_int32_t)assigned_results_length,
				    &flow->local_addr, &flow->remote_addr, &nexus_stats);
				VERIFY(error == 0);
			}

			flow->viable = necp_client_flow_is_viable(proc, client, flow);

			flow->assigned = TRUE;
			flow->assigned_results = assigned_results;
			flow->assigned_results_length = assigned_results_length;
			flow_registration->flow_result_read = FALSE;
#if SKYWALK
			if (nexus_stats != NULL) {
				// Swap in the new stats ref, releasing any previous one
				if (flow_registration->nexus_stats != NULL) {
					flow_stats_release(flow_registration->nexus_stats);
				}
				flow_registration->nexus_stats = nexus_stats;
			}
#endif /* SKYWALK */
			client_updated = TRUE;
			break;
		}
	}

	if (client_updated && notify_fd) {
		necp_fd_notify(client_fd, true);
	}

	// if not updated, client must free assigned_results
	return client_updated;
}
3920
// Locate the client/flow for 'client_id' and hand 'assigned_results' to the
// flow whose nexus agent matches 'netagent_uuid'. On success the results
// are owned by the flow; on any non-zero return (ENOENT: no client, EINVAL:
// no matching flow) the caller retains ownership and must free them.
int
necp_assign_client_result(uuid_t netagent_uuid, uuid_t client_id,
    u_int8_t *assigned_results, size_t assigned_results_length)
{
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = FALSE;
	bool client_updated = FALSE;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		// Hold the owning process for the viability check downstream
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
			if (flow_registration != NULL) {
				// Found the right client and flow!
				found_client = TRUE;
				if (necp_assign_client_result_locked(proc, client_fd, client, flow_registration, netagent_uuid,
				    assigned_results, assigned_results_length, true)) {
					client_updated = TRUE;
				}
			}

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();

	// upon error, client must free assigned_results
	if (!found_client) {
		error = ENOENT;
	} else if (!client_updated) {
		error = EINVAL;
	}

	return error;
}
3974
// Replace the client's stored group-membership blob with
// 'assigned_group_members' (ownership transfers to the client on success;
// the previous blob is freed) and wake the fd. On any non-zero return
// (ENOENT: no client, EINVAL: not updated) the caller retains ownership of
// the buffer and must free it. 'netagent_uuid' is currently unused.
int
necp_assign_client_group_members(uuid_t netagent_uuid, uuid_t client_id,
    u_int8_t *assigned_group_members, size_t assigned_group_members_length)
{
#pragma unused(netagent_uuid)
	int error = 0;
	struct necp_fd_data *client_fd = NULL;
	bool found_client = false;
	bool client_updated = false;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		proc_t proc = proc_find(client_fd->proc_pid);
		if (proc == PROC_NULL) {
			continue;
		}

		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			found_client = true;
			// Release prior results
			if (client->assigned_group_members != NULL) {
				kfree_data(client->assigned_group_members, client->assigned_group_members_length);
				client->assigned_group_members = NULL;
			}

			// Save new results
			client->assigned_group_members = assigned_group_members;
			client->assigned_group_members_length = assigned_group_members_length;
			client->group_members_read = false;

			client_updated = true;
			necp_fd_notify(client_fd, true);

			NECP_CLIENT_UNLOCK(client);
		}
		NECP_FD_UNLOCK(client_fd);

		proc_rele(proc);
		proc = PROC_NULL;

		if (found_client) {
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();

	// upon error, caller must free assigned_group_members
	if (!found_client) {
		error = ENOENT;
	} else if (!client_updated) {
		error = EINVAL;
	}

	return error;
}
4034
4035 /// Client updating
4036
4037 static bool
necp_update_parsed_parameters(struct necp_client_parsed_parameters * parsed_parameters,struct necp_aggregate_result * result)4038 necp_update_parsed_parameters(struct necp_client_parsed_parameters *parsed_parameters,
4039 struct necp_aggregate_result *result)
4040 {
4041 if (parsed_parameters == NULL ||
4042 result == NULL) {
4043 return false;
4044 }
4045
4046 bool updated = false;
4047 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
4048 if (uuid_is_null(result->netagents[i])) {
4049 // Passed end of valid agents
4050 break;
4051 }
4052
4053 if (!(result->netagent_use_flags[i] & NECP_AGENT_USE_FLAG_SCOPE)) {
4054 // Not a scoped agent, ignore
4055 continue;
4056 }
4057
4058 // This is a scoped agent. Add it to the required agents.
4059 if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT) {
4060 // Already some required agents, add this at the end
4061 for (int j = 0; j < NECP_MAX_AGENT_PARAMETERS; j++) {
4062 if (uuid_compare(parsed_parameters->required_netagents[j], result->netagents[i]) == 0) {
4063 // Already required, break
4064 break;
4065 }
4066 if (uuid_is_null(parsed_parameters->required_netagents[j])) {
4067 // Add here
4068 memcpy(&parsed_parameters->required_netagents[j], result->netagents[i], sizeof(uuid_t));
4069 updated = true;
4070 break;
4071 }
4072 }
4073 } else {
4074 // No required agents yet, add this one
4075 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT;
4076 memcpy(&parsed_parameters->required_netagents[0], result->netagents[i], sizeof(uuid_t));
4077 updated = true;
4078 }
4079
4080 // Remove requirements for agents of the same type
4081 if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
4082 char remove_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
4083 char remove_agent_type[NETAGENT_TYPESIZE] = { 0 };
4084 if (netagent_get_agent_domain_and_type(result->netagents[i], remove_agent_domain, remove_agent_type)) {
4085 for (int j = 0; j < NECP_MAX_AGENT_PARAMETERS; j++) {
4086 if (strlen(parsed_parameters->required_netagent_types[j].netagent_domain) == 0 &&
4087 strlen(parsed_parameters->required_netagent_types[j].netagent_type) == 0) {
4088 break;
4089 }
4090
4091 if (strncmp(parsed_parameters->required_netagent_types[j].netagent_domain, remove_agent_domain, NETAGENT_DOMAINSIZE) == 0 &&
4092 strncmp(parsed_parameters->required_netagent_types[j].netagent_type, remove_agent_type, NETAGENT_TYPESIZE) == 0) {
4093 updated = true;
4094
4095 if (j == NECP_MAX_AGENT_PARAMETERS - 1) {
4096 // Last field, just clear and break
4097 memset(&parsed_parameters->required_netagent_types[NECP_MAX_AGENT_PARAMETERS - 1], 0, sizeof(struct necp_client_parameter_netagent_type));
4098 break;
4099 } else {
4100 // Move the parameters down, clear the last entry
4101 memmove(&parsed_parameters->required_netagent_types[j],
4102 &parsed_parameters->required_netagent_types[j + 1],
4103 sizeof(struct necp_client_parameter_netagent_type) * (NECP_MAX_AGENT_PARAMETERS - (j + 1)));
4104 memset(&parsed_parameters->required_netagent_types[NECP_MAX_AGENT_PARAMETERS - 1], 0, sizeof(struct necp_client_parameter_netagent_type));
4105 // Continue, don't increment but look at the new shifted item instead
4106 continue;
4107 }
4108 }
4109
4110 // Increment j to look at the next agent type parameter
4111 j++;
4112 }
4113 }
4114 }
4115 }
4116
4117 if (updated &&
4118 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4119 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) == 0) {
4120 // A required interface index was added after the fact. Clear it.
4121 parsed_parameters->required_interface_index = IFSCOPE_NONE;
4122 }
4123
4124
4125 return updated;
4126 }
4127
4128 static inline bool
necp_agent_types_match(const char * agent_domain1,const char * agent_type1,const char * agent_domain2,const char * agent_type2)4129 necp_agent_types_match(const char *agent_domain1, const char *agent_type1,
4130 const char *agent_domain2, const char *agent_type2)
4131 {
4132 return (strlen(agent_domain1) == 0 ||
4133 strncmp(agent_domain2, agent_domain1, NETAGENT_DOMAINSIZE) == 0) &&
4134 (strlen(agent_type1) == 0 ||
4135 strncmp(agent_type2, agent_type1, NETAGENT_TYPESIZE) == 0);
4136 }
4137
/*
 * Run the policy match for a client and populate the aggregate result.
 *
 * Returns FALSE only when the internal policy match itself errors out;
 * the caller should abandon the update in that case.  Returns TRUE
 * otherwise — including the "soft failure" cases (no matching interface,
 * or a required agent missing from the result), which zero out *result
 * so that everything downstream fails cleanly.
 *
 * On the success path, any route handed back by the policy match is
 * stored into client->current_route under the client route lock,
 * replacing (and freeing) any previous route.  On every other path the
 * route is freed here before returning.
 */
static inline bool
necp_calculate_client_result(proc_t proc,
    struct necp_client *client,
    struct necp_client_parsed_parameters *parsed_parameters,
    struct necp_aggregate_result *result,
    u_int32_t *flags,
    u_int32_t *reason,
    struct necp_client_endpoint *v4_gateway,
    struct necp_client_endpoint *v6_gateway,
    uuid_t *override_euuid)
{
	struct rtentry *route = NULL;

	// Check parameters to find best interface
	bool validate_agents = false;
	u_int matching_if_index = 0;
	if (necp_find_matching_interface_index(parsed_parameters, &matching_if_index, &validate_agents)) {
		if (matching_if_index != 0) {
			// Pin the parameters to the interface that matched.
			parsed_parameters->required_interface_index = matching_if_index;
		}
		// Interface found or not needed, match policy.
		memset(result, 0, sizeof(*result));
		int error = necp_application_find_policy_match_internal(proc, client->parameters,
		    (u_int32_t)client->parameters_length,
		    result, flags, reason, matching_if_index,
		    NULL, NULL,
		    v4_gateway, v6_gateway,
		    &route, false, true,
		    override_euuid);
		if (error != 0) {
			// Policy match failed; release any route it returned.
			if (route != NULL) {
				rtfree(route);
			}
			return FALSE;
		}

		if (validate_agents) {
			// The interface was chosen based on agent requirements;
			// confirm the policy result actually carries each required
			// agent (by UUID, then by domain/type), skipping agents
			// that are flagged for removal.
			bool requirement_failed = FALSE;
			if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT) {
				for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
					if (uuid_is_null(parsed_parameters->required_netagents[i])) {
						// A null UUID terminates the required-agent list.
						break;
					}

					bool requirement_found = FALSE;
					for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
						if (uuid_is_null(result->netagents[j])) {
							break;
						}

						if (result->netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) {
							// A removed agent, ignore
							continue;
						}

						if (uuid_compare(parsed_parameters->required_netagents[i], result->netagents[j]) == 0) {
							requirement_found = TRUE;
							break;
						}
					}

					if (!requirement_found) {
						requirement_failed = TRUE;
						break;
					}
				}
			}

			if (!requirement_failed && parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
				for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
					if (strlen(parsed_parameters->required_netagent_types[i].netagent_domain) == 0 &&
					    strlen(parsed_parameters->required_netagent_types[i].netagent_type) == 0) {
						// Empty domain and type terminate the list.
						break;
					}

					bool requirement_found = FALSE;
					for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
						if (uuid_is_null(result->netagents[j])) {
							break;
						}

						if (result->netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) {
							// A removed agent, ignore
							continue;
						}

						char policy_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
						char policy_agent_type[NETAGENT_TYPESIZE] = { 0 };

						if (netagent_get_agent_domain_and_type(result->netagents[j], policy_agent_domain, policy_agent_type)) {
							if (necp_agent_types_match(parsed_parameters->required_netagent_types[i].netagent_domain,
							    parsed_parameters->required_netagent_types[i].netagent_type,
							    policy_agent_domain, policy_agent_type)) {
								requirement_found = TRUE;
								break;
							}
						}
					}

					if (!requirement_found) {
						requirement_failed = TRUE;
						break;
					}
				}
			}

			if (requirement_failed) {
				// Agent requirement failed. Clear out the whole result, make everything fail.
				memset(result, 0, sizeof(*result));
				if (route != NULL) {
					rtfree(route);
				}
				return TRUE;
			}
		}

		// Reset current route: free the old one and take ownership of
		// the new route (which may be NULL) under the route lock.
		NECP_CLIENT_ROUTE_LOCK(client);
		if (client->current_route != NULL) {
			rtfree(client->current_route);
		}
		client->current_route = route;
		NECP_CLIENT_ROUTE_UNLOCK(client);
	} else {
		// Interface not found. Clear out the whole result, make everything fail.
		memset(result, 0, sizeof(*result));
	}

	return TRUE;
}
4268
// Union of the parameter fields that constrain a client to a specific
// interface: an explicit interface, an interface type, or required
// agent / agent-type requirements.
#define NECP_PARSED_PARAMETERS_REQUIRED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF | \
	                                        NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
	                                        NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
	                                        NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE)
4273
4274 static bool
necp_update_client_result(proc_t proc,struct necp_fd_data * client_fd,struct necp_client * client,struct _necp_flow_defunct_list * defunct_list)4275 necp_update_client_result(proc_t proc,
4276 struct necp_fd_data *client_fd,
4277 struct necp_client *client,
4278 struct _necp_flow_defunct_list *defunct_list)
4279 {
4280 struct necp_client_result_netagent netagent;
4281 struct necp_aggregate_result result;
4282 struct necp_client_parsed_parameters *parsed_parameters = NULL;
4283 u_int32_t flags = 0;
4284 u_int32_t reason = 0;
4285
4286 NECP_CLIENT_ASSERT_LOCKED(client);
4287
4288 parsed_parameters = kalloc_type(struct necp_client_parsed_parameters,
4289 Z_WAITOK | Z_ZERO | Z_NOFAIL);
4290
4291 // Nexus flows will be brought back if they are still valid
4292 necp_client_mark_all_nonsocket_flows_as_invalid(client);
4293
4294 int error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, parsed_parameters);
4295 if (error != 0) {
4296 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4297 return FALSE;
4298 }
4299
4300 // Update saved IP protocol
4301 client->ip_protocol = parsed_parameters->ip_protocol;
4302
4303 // Calculate the policy result
4304 struct necp_client_endpoint v4_gateway = {};
4305 struct necp_client_endpoint v6_gateway = {};
4306 uuid_t override_euuid;
4307 uuid_clear(override_euuid);
4308 if (!necp_calculate_client_result(proc, client, parsed_parameters, &result, &flags, &reason, &v4_gateway, &v6_gateway, &override_euuid)) {
4309 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4310 return FALSE;
4311 }
4312
4313 if (necp_update_parsed_parameters(parsed_parameters, &result)) {
4314 // Changed the parameters based on result, try again (only once)
4315 if (!necp_calculate_client_result(proc, client, parsed_parameters, &result, &flags, &reason, &v4_gateway, &v6_gateway, &override_euuid)) {
4316 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4317 return FALSE;
4318 }
4319 }
4320
4321 if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) &&
4322 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4323 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) == 0) {
4324 // Listener should not apply required interface index if
4325 parsed_parameters->required_interface_index = IFSCOPE_NONE;
4326 }
4327
4328 // Save the last policy id on the client
4329 client->policy_id = result.policy_id;
4330 uuid_copy(client->override_euuid, override_euuid);
4331
4332 if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_MULTIPATH) ||
4333 (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) ||
4334 ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) &&
4335 result.routing_result != NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED)) {
4336 client->allow_multiple_flows = TRUE;
4337 } else {
4338 client->allow_multiple_flows = FALSE;
4339 }
4340
4341 // If the original request was scoped, and the policy result matches, make sure the result is scoped
4342 if ((result.routing_result == NECP_KERNEL_POLICY_RESULT_NONE ||
4343 result.routing_result == NECP_KERNEL_POLICY_RESULT_PASS) &&
4344 result.routed_interface_index != IFSCOPE_NONE &&
4345 parsed_parameters->required_interface_index == result.routed_interface_index) {
4346 result.routing_result = NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED;
4347 result.routing_result_parameter.scoped_interface_index = result.routed_interface_index;
4348 }
4349
4350 if (defunct_list != NULL &&
4351 result.routing_result == NECP_KERNEL_POLICY_RESULT_DROP) {
4352 // If we are forced to drop the client, defunct it if it has flows
4353 necp_defunct_client_for_policy(client, defunct_list);
4354 }
4355
4356 // Recalculate flags
4357 if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) {
4358 // Listeners are valid as long as they aren't dropped
4359 if (result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP) {
4360 flags |= NECP_CLIENT_RESULT_FLAG_SATISFIED;
4361 }
4362 } else if (result.routed_interface_index != 0) {
4363 // Clients without flows determine viability based on having some routable interface
4364 flags |= NECP_CLIENT_RESULT_FLAG_SATISFIED;
4365 }
4366
4367 bool updated = FALSE;
4368 u_int8_t *cursor = client->result;
4369 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FLAGS, sizeof(flags), &flags, &updated, client->result, sizeof(client->result));
4370 if (reason != 0) {
4371 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_REASON, sizeof(reason), &reason, &updated, client->result, sizeof(client->result));
4372 }
4373 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_CLIENT_ID, sizeof(uuid_t), client->client_id, &updated,
4374 client->result, sizeof(client->result));
4375 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_POLICY_RESULT, sizeof(result.routing_result), &result.routing_result, &updated,
4376 client->result, sizeof(client->result));
4377 if (result.routing_result_parameter.tunnel_interface_index != 0) {
4378 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_POLICY_RESULT_PARAMETER,
4379 sizeof(result.routing_result_parameter), &result.routing_result_parameter, &updated,
4380 client->result, sizeof(client->result));
4381 }
4382 if (result.filter_control_unit != 0) {
4383 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FILTER_CONTROL_UNIT,
4384 sizeof(result.filter_control_unit), &result.filter_control_unit, &updated,
4385 client->result, sizeof(client->result));
4386 }
4387 if (result.flow_divert_aggregate_unit != 0) {
4388 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FLOW_DIVERT_AGGREGATE_UNIT,
4389 sizeof(result.flow_divert_aggregate_unit), &result.flow_divert_aggregate_unit, &updated,
4390 client->result, sizeof(client->result));
4391 }
4392 if (result.routed_interface_index != 0) {
4393 u_int routed_interface_index = result.routed_interface_index;
4394 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
4395 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_REQUIRED_FIELDS) &&
4396 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4397 parsed_parameters->required_interface_index != result.routed_interface_index) {
4398 routed_interface_index = parsed_parameters->required_interface_index;
4399 }
4400
4401 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_INDEX,
4402 sizeof(routed_interface_index), &routed_interface_index, &updated,
4403 client->result, sizeof(client->result));
4404 }
4405 if (client_fd && client_fd->flags & NECP_OPEN_FLAG_BACKGROUND) {
4406 u_int32_t effective_traffic_class = SO_TC_BK_SYS;
4407 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_EFFECTIVE_TRAFFIC_CLASS,
4408 sizeof(effective_traffic_class), &effective_traffic_class, &updated,
4409 client->result, sizeof(client->result));
4410 }
4411
4412 if (client_fd->background) {
4413 bool has_assigned_flow = FALSE;
4414 struct necp_client_flow_registration *flow_registration = NULL;
4415 struct necp_client_flow *search_flow = NULL;
4416 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
4417 LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
4418 if (search_flow->assigned) {
4419 has_assigned_flow = TRUE;
4420 break;
4421 }
4422 }
4423 }
4424
4425 if (has_assigned_flow) {
4426 u_int32_t background = client_fd->background;
4427 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_TRAFFIC_MGMT_BG,
4428 sizeof(background), &background, &updated,
4429 client->result, sizeof(client->result));
4430 }
4431 }
4432
4433 bool write_v4_gateway = !necp_client_endpoint_is_unspecified(&v4_gateway);
4434 bool write_v6_gateway = !necp_client_endpoint_is_unspecified(&v6_gateway);
4435
4436 NECP_CLIENT_ROUTE_LOCK(client);
4437 if (client->current_route != NULL) {
4438 const u_int32_t route_mtu = get_maxmtu(client->current_route);
4439 if (route_mtu != 0) {
4440 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_EFFECTIVE_MTU,
4441 sizeof(route_mtu), &route_mtu, &updated,
4442 client->result, sizeof(client->result));
4443 }
4444 bool has_remote_addr = parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR;
4445 if (has_remote_addr && client->current_route->rt_gateway != NULL) {
4446 if (client->current_route->rt_gateway->sa_family == AF_INET) {
4447 write_v6_gateway = false;
4448 } else if (client->current_route->rt_gateway->sa_family == AF_INET6) {
4449 write_v4_gateway = false;
4450 }
4451 }
4452 }
4453 NECP_CLIENT_ROUTE_UNLOCK(client);
4454
4455 if (write_v4_gateway) {
4456 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_GATEWAY,
4457 sizeof(struct necp_client_endpoint), &v4_gateway, &updated,
4458 client->result, sizeof(client->result));
4459 }
4460
4461 if (write_v6_gateway) {
4462 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_GATEWAY,
4463 sizeof(struct necp_client_endpoint), &v6_gateway, &updated,
4464 client->result, sizeof(client->result));
4465 }
4466
4467 for (int i = 0; i < NAT64_MAX_NUM_PREFIXES; i++) {
4468 if (result.nat64_prefixes[i].prefix_len != 0) {
4469 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NAT64,
4470 sizeof(result.nat64_prefixes), result.nat64_prefixes, &updated,
4471 client->result, sizeof(client->result));
4472 break;
4473 }
4474 }
4475
4476 if (result.mss_recommended != 0) {
4477 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_RECOMMENDED_MSS,
4478 sizeof(result.mss_recommended), &result.mss_recommended, &updated,
4479 client->result, sizeof(client->result));
4480 }
4481
4482 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
4483 if (uuid_is_null(result.netagents[i])) {
4484 break;
4485 }
4486 if (result.netagent_use_flags[i] & NECP_AGENT_USE_FLAG_REMOVE) {
4487 // A removed agent, ignore
4488 continue;
4489 }
4490 uuid_copy(netagent.netagent_uuid, result.netagents[i]);
4491 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
4492 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, TRUE, 0, 0)) {
4493 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
4494 client->result, sizeof(client->result));
4495 }
4496 }
4497
4498 ifnet_head_lock_shared();
4499 ifnet_t direct_interface = NULL;
4500 ifnet_t delegate_interface = NULL;
4501 ifnet_t original_scoped_interface = NULL;
4502
4503 if (result.routed_interface_index != IFSCOPE_NONE && result.routed_interface_index <= (u_int32_t)if_index) {
4504 direct_interface = ifindex2ifnet[result.routed_interface_index];
4505 } else if (parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4506 parsed_parameters->required_interface_index <= (u_int32_t)if_index) {
4507 // If the request was scoped, but the route didn't match, still grab the agents
4508 direct_interface = ifindex2ifnet[parsed_parameters->required_interface_index];
4509 } else if (result.routed_interface_index == IFSCOPE_NONE &&
4510 result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED &&
4511 result.routing_result_parameter.scoped_interface_index != IFSCOPE_NONE) {
4512 direct_interface = ifindex2ifnet[result.routing_result_parameter.scoped_interface_index];
4513 }
4514 if (direct_interface != NULL) {
4515 delegate_interface = direct_interface->if_delegated.ifp;
4516 }
4517 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
4518 (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_REQUIRED_FIELDS) &&
4519 parsed_parameters->required_interface_index != IFSCOPE_NONE &&
4520 parsed_parameters->required_interface_index != result.routing_result_parameter.tunnel_interface_index &&
4521 parsed_parameters->required_interface_index <= (u_int32_t)if_index) {
4522 original_scoped_interface = ifindex2ifnet[parsed_parameters->required_interface_index];
4523 }
4524 // Add interfaces
4525 if (original_scoped_interface != NULL) {
4526 struct necp_client_result_interface interface_struct;
4527 interface_struct.index = original_scoped_interface->if_index;
4528 interface_struct.generation = ifnet_get_generation(original_scoped_interface);
4529 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
4530 client->result, sizeof(client->result));
4531 }
4532 if (direct_interface != NULL) {
4533 struct necp_client_result_interface interface_struct;
4534 interface_struct.index = direct_interface->if_index;
4535 interface_struct.generation = ifnet_get_generation(direct_interface);
4536 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
4537 client->result, sizeof(client->result));
4538
4539 // Set the delta time since interface up/down
4540 struct timeval updown_delta = {};
4541 if (ifnet_updown_delta(direct_interface, &updown_delta) == 0) {
4542 u_int32_t delta = updown_delta.tv_sec;
4543 bool ignore_updated = FALSE;
4544 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_TIME_DELTA,
4545 sizeof(delta), &delta, &ignore_updated,
4546 client->result, sizeof(client->result));
4547 }
4548 }
4549 if (delegate_interface != NULL) {
4550 struct necp_client_result_interface interface_struct;
4551 interface_struct.index = delegate_interface->if_index;
4552 interface_struct.generation = ifnet_get_generation(delegate_interface);
4553 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
4554 client->result, sizeof(client->result));
4555 }
4556
4557 // Update multipath/listener interface flows
4558 if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_MULTIPATH) {
4559 // Add the interface option for the routed interface first
4560 if (direct_interface != NULL) {
4561 // Add nexus agent
4562 necp_client_add_agent_interface_options(client, parsed_parameters, direct_interface);
4563
4564 // Add interface option in case it is not a nexus
4565 necp_client_add_interface_option_if_needed(client, direct_interface->if_index,
4566 ifnet_get_generation(direct_interface), NULL, false);
4567 }
4568 // Get other multipath interface options from ordered list
4569 struct ifnet *multi_interface = NULL;
4570 TAILQ_FOREACH(multi_interface, &ifnet_ordered_head, if_ordered_link) {
4571 if (multi_interface != direct_interface &&
4572 necp_ifnet_matches_parameters(multi_interface, parsed_parameters, 0, NULL, true, false)) {
4573 // Add nexus agents for multipath
4574 necp_client_add_agent_interface_options(client, parsed_parameters, multi_interface);
4575
4576 // Add multipath interface flows for kernel MPTCP
4577 necp_client_add_interface_option_if_needed(client, multi_interface->if_index,
4578 ifnet_get_generation(multi_interface), NULL, false);
4579 }
4580 }
4581 } else if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) {
4582 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
4583 if (direct_interface != NULL) {
4584 // If scoped, only listen on that interface
4585 // Add nexus agents for listeners
4586 necp_client_add_agent_interface_options(client, parsed_parameters, direct_interface);
4587
4588 // Add interface option in case it is not a nexus
4589 necp_client_add_interface_option_if_needed(client, direct_interface->if_index,
4590 ifnet_get_generation(direct_interface), NULL, false);
4591 }
4592 } else {
4593 // Get listener interface options from global list
4594 struct ifnet *listen_interface = NULL;
4595 TAILQ_FOREACH(listen_interface, &ifnet_head, if_link) {
4596 if ((listen_interface->if_flags & (IFF_UP | IFF_RUNNING)) &&
4597 necp_ifnet_matches_parameters(listen_interface, parsed_parameters, 0, NULL, true, false)) {
4598 // Add nexus agents for listeners
4599 necp_client_add_agent_interface_options(client, parsed_parameters, listen_interface);
4600 }
4601 }
4602 }
4603 } else if (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) {
4604 if (result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
4605 if (direct_interface != NULL) {
4606 // Add browse option if it has an agent
4607 necp_client_add_browse_interface_options(client, parsed_parameters, direct_interface);
4608 }
4609 } else {
4610 // Get browse interface options from global list
4611 struct ifnet *browse_interface = NULL;
4612 TAILQ_FOREACH(browse_interface, &ifnet_head, if_link) {
4613 if (necp_ifnet_matches_parameters(browse_interface, parsed_parameters, 0, NULL, true, false)) {
4614 necp_client_add_browse_interface_options(client, parsed_parameters, browse_interface);
4615 }
4616 }
4617 }
4618 }
4619
4620 struct necp_client_result_estimated_throughput throughput = {
4621 .up = 0,
4622 .down = 0,
4623 };
4624
4625 // Add agents
4626 if (original_scoped_interface != NULL) {
4627 ifnet_lock_shared(original_scoped_interface);
4628 if (original_scoped_interface->if_agentids != NULL) {
4629 for (u_int32_t i = 0; i < original_scoped_interface->if_agentcount; i++) {
4630 if (uuid_is_null(original_scoped_interface->if_agentids[i])) {
4631 continue;
4632 }
4633 bool skip_agent = false;
4634 for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
4635 if (uuid_is_null(result.netagents[j])) {
4636 break;
4637 }
4638 if ((result.netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) &&
4639 uuid_compare(original_scoped_interface->if_agentids[i], result.netagents[j]) == 0) {
4640 skip_agent = true;
4641 break;
4642 }
4643 }
4644 if (skip_agent) {
4645 continue;
4646 }
4647 uuid_copy(netagent.netagent_uuid, original_scoped_interface->if_agentids[i]);
4648 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
4649 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, FALSE,
4650 original_scoped_interface->if_index, ifnet_get_generation(original_scoped_interface))) {
4651 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
4652 client->result, sizeof(client->result));
4653 }
4654 }
4655 }
4656 ifnet_lock_done(original_scoped_interface);
4657 }
4658 if (direct_interface != NULL) {
4659 ifnet_lock_shared(direct_interface);
4660 throughput.up = direct_interface->if_estimated_up_bucket;
4661 throughput.down = direct_interface->if_estimated_down_bucket;
4662 if (direct_interface->if_agentids != NULL) {
4663 for (u_int32_t i = 0; i < direct_interface->if_agentcount; i++) {
4664 if (uuid_is_null(direct_interface->if_agentids[i])) {
4665 continue;
4666 }
4667 bool skip_agent = false;
4668 for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
4669 if (uuid_is_null(result.netagents[j])) {
4670 break;
4671 }
4672 if ((result.netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) &&
4673 uuid_compare(direct_interface->if_agentids[i], result.netagents[j]) == 0) {
4674 skip_agent = true;
4675 break;
4676 }
4677 }
4678 if (skip_agent) {
4679 continue;
4680 }
4681 uuid_copy(netagent.netagent_uuid, direct_interface->if_agentids[i]);
4682 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
4683 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, TRUE,
4684 direct_interface->if_index, ifnet_get_generation(direct_interface))) {
4685 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
4686 client->result, sizeof(client->result));
4687 }
4688 }
4689 }
4690 ifnet_lock_done(direct_interface);
4691 }
4692 if (delegate_interface != NULL) {
4693 ifnet_lock_shared(delegate_interface);
4694 if (throughput.up == 0 && throughput.down == 0) {
4695 throughput.up = delegate_interface->if_estimated_up_bucket;
4696 throughput.down = delegate_interface->if_estimated_down_bucket;
4697 }
4698 if (delegate_interface->if_agentids != NULL) {
4699 for (u_int32_t i = 0; i < delegate_interface->if_agentcount; i++) {
4700 if (uuid_is_null(delegate_interface->if_agentids[i])) {
4701 continue;
4702 }
4703 bool skip_agent = false;
4704 for (int j = 0; j < NECP_MAX_NETAGENTS; j++) {
4705 if (uuid_is_null(result.netagents[j])) {
4706 break;
4707 }
4708 if ((result.netagent_use_flags[j] & NECP_AGENT_USE_FLAG_REMOVE) &&
4709 uuid_compare(delegate_interface->if_agentids[i], result.netagents[j]) == 0) {
4710 skip_agent = true;
4711 break;
4712 }
4713 }
4714 if (skip_agent) {
4715 continue;
4716 }
4717 uuid_copy(netagent.netagent_uuid, delegate_interface->if_agentids[i]);
4718 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
4719 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, FALSE,
4720 delegate_interface->if_index, ifnet_get_generation(delegate_interface))) {
4721 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
4722 client->result, sizeof(client->result));
4723 }
4724 }
4725 }
4726 ifnet_lock_done(delegate_interface);
4727 }
4728 ifnet_head_done();
4729
4730 if (throughput.up != 0 || throughput.down != 0) {
4731 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_ESTIMATED_THROUGHPUT,
4732 sizeof(throughput), &throughput, &updated, client->result, sizeof(client->result));
4733 }
4734
4735 // Add interface options
4736 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
4737 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
4738 struct necp_client_interface_option *option = &client->interface_options[option_i];
4739 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_OPTION, sizeof(*option), option, &updated,
4740 client->result, sizeof(client->result));
4741 } else {
4742 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
4743 cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_OPTION, sizeof(*option), option, &updated,
4744 client->result, sizeof(client->result));
4745 }
4746 }
4747
4748 size_t new_result_length = (cursor - client->result);
4749 if (new_result_length != client->result_length) {
4750 client->result_length = new_result_length;
4751 updated = TRUE;
4752 }
4753
4754 // Update flow viability/flags
4755 if (necp_client_update_flows(proc, client, defunct_list)) {
4756 updated = TRUE;
4757 }
4758
4759 if (updated) {
4760 client->result_read = FALSE;
4761 necp_client_update_observer_update(client);
4762 }
4763
4764 kfree_type(struct necp_client_parsed_parameters, parsed_parameters);
4765 return updated;
4766 }
4767
/*
 * Defunct every nexus flow on every client of client_fd.
 *
 * For each nexus-backed flow, allocates a necp_flow_defunct record
 * (ownership passes to the caller via defunct_list, which is expected to
 * send close events to the nexus and free the entries) and marks the flow
 * registration defunct.  When destroy_stats is true, also releases any
 * remaining SKYWALK stats objects back to their arena.
 *
 * client_fd must be locked.  Returns TRUE if any flow result changed
 * (i.e. the fd should be notified).
 */
static bool
necp_defunct_client_fd_locked_inner(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, bool destroy_stats)
{
	bool updated_result = FALSE;
	struct necp_client *client = NULL;

	NECP_FD_ASSERT_LOCKED(client_fd);

	RB_FOREACH(client, _necp_client_tree, &client_fd->clients) {
		struct necp_client_flow_registration *flow_registration = NULL;

		NECP_CLIENT_LOCK(client);

		// Prepare close events to be sent to the nexus to effectively remove the flows
		struct necp_client_flow *search_flow = NULL;
		RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
			LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
				if (search_flow->nexus &&
				    !uuid_is_null(search_flow->u.nexus_agent)) {
					// Sleeping alloc won't fail; copy only what's necessary
					struct necp_flow_defunct *flow_defunct = kalloc_type(struct necp_flow_defunct, Z_WAITOK | Z_ZERO);
					uuid_copy(flow_defunct->nexus_agent, search_flow->u.nexus_agent);
					uuid_copy(flow_defunct->flow_id, ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
					    client->client_id :
					    flow_registration->registration_id));
					flow_defunct->proc_pid = client->proc_pid;
					flow_defunct->agent_handle = client->agent_handle;
					flow_defunct->flags = flow_registration->flags;
#if SKYWALK
					// For QUIC flows, capture the session resumption token so
					// the close event can carry it.
					if (flow_registration->kstats_kaddr != NULL) {
						struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
						struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
						if (quicstats != NULL &&
						    quicstats->necp_quic_udp_stats.necp_udp_hdr.necp_stats_type == NECP_CLIENT_STATISTICS_TYPE_QUIC) {
							memcpy(flow_defunct->close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(flow_defunct->close_parameters.u.close_token));
							flow_defunct->has_close_parameters = true;
						}
					}
#endif /* SKYWALK */
					// Add to the list provided by caller
					LIST_INSERT_HEAD(defunct_list, flow_defunct, chain);

					flow_registration->defunct = true;
					flow_registration->flow_result_read = false;
					updated_result = true;
				}
			}
		}
		if (destroy_stats) {
#if SKYWALK
			// Free any remaining stats objects back to the arena where they came from;
			// do this independent of the above defunct check, as the client may have
			// been marked as defunct separately via necp_defunct_client_for_policy().
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
				necp_destroy_flow_stats(client_fd, flow_registration, NULL, FALSE);
			}
#endif /* SKYWALK */
		}
		NECP_CLIENT_UNLOCK(client);
	}

	return updated_result;
}
4831
/*
 * Fully defunct an fd: redirect its active SKYWALK stats arena to
 * zero-filled pages, defunct all flows (collecting close events into
 * defunct_list for the caller to process), tear down the arena's
 * regions, and notify the fd if anything changed.
 *
 * The ordering matters: the arena must be redirected before the flows'
 * stats are freed, and only defuncted afterward.  client_fd must be
 * locked.
 */
static inline void
necp_defunct_client_fd_locked(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, struct proc *proc)
{
#pragma unused(proc)
	bool updated_result = FALSE;

	NECP_FD_ASSERT_LOCKED(client_fd);
#if SKYWALK
	// redirect regions of currently-active stats arena to zero-filled pages
	struct necp_arena_info *nai = necp_fd_mredirect_stats_arena(client_fd, proc);
#endif /* SKYWALK */

	updated_result = necp_defunct_client_fd_locked_inner(client_fd, defunct_list, true);

#if SKYWALK
	// and tear down the currently-active arena's regions now that the redirection and freeing are done
	if (nai != NULL) {
		ASSERT((nai->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT)) == NAIF_REDIRECT);
		ASSERT(nai->nai_arena != NULL);
		ASSERT(nai->nai_mmap.ami_mapref != NULL);

		int err = skmem_arena_defunct(nai->nai_arena);
		VERIFY(err == 0);

		nai->nai_flags |= NAIF_DEFUNCT;
	}
#endif /* SKYWALK */

	if (updated_result) {
		necp_fd_notify(client_fd, true);
	}
}
4864
4865 static inline void
necp_update_client_fd_locked(struct necp_fd_data * client_fd,proc_t proc,struct _necp_flow_defunct_list * defunct_list)4866 necp_update_client_fd_locked(struct necp_fd_data *client_fd,
4867 proc_t proc,
4868 struct _necp_flow_defunct_list *defunct_list)
4869 {
4870 struct necp_client *client = NULL;
4871 bool updated_result = FALSE;
4872 NECP_FD_ASSERT_LOCKED(client_fd);
4873 RB_FOREACH(client, _necp_client_tree, &client_fd->clients) {
4874 NECP_CLIENT_LOCK(client);
4875 if (necp_update_client_result(proc, client_fd, client, defunct_list)) {
4876 updated_result = TRUE;
4877 }
4878 NECP_CLIENT_UNLOCK(client);
4879 }
4880 if (updated_result) {
4881 necp_fd_notify(client_fd, true);
4882 }
4883 }
4884
#if SKYWALK
/*
 * Thread-call handler: sweep every NECP fd and release stats arenas
 * that are no longer in use.
 */
static void
necp_close_empty_arenas_callout(__unused thread_call_param_t dummy,
    __unused thread_call_param_t arg)
{
	struct necp_fd_data *fd_entry = NULL;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(fd_entry, &necp_fd_list, chain) {
		NECP_FD_LOCK(fd_entry);
		necp_stats_arenas_destroy(fd_entry, FALSE);
		NECP_FD_UNLOCK(fd_entry);
	}

	NECP_FD_LIST_UNLOCK();
}
#endif /* SKYWALK */
4903
4904 static void
necp_update_all_clients_callout(__unused thread_call_param_t dummy,__unused thread_call_param_t arg)4905 necp_update_all_clients_callout(__unused thread_call_param_t dummy,
4906 __unused thread_call_param_t arg)
4907 {
4908 struct necp_fd_data *client_fd = NULL;
4909
4910 struct _necp_flow_defunct_list defunct_list;
4911 LIST_INIT(&defunct_list);
4912
4913 NECP_FD_LIST_LOCK_SHARED();
4914
4915 LIST_FOREACH(client_fd, &necp_fd_list, chain) {
4916 proc_t proc = proc_find(client_fd->proc_pid);
4917 if (proc == PROC_NULL) {
4918 continue;
4919 }
4920
4921 // Update all clients on one fd
4922 NECP_FD_LOCK(client_fd);
4923 necp_update_client_fd_locked(client_fd, proc, &defunct_list);
4924 NECP_FD_UNLOCK(client_fd);
4925
4926 proc_rele(proc);
4927 proc = PROC_NULL;
4928 }
4929
4930 NECP_FD_LIST_UNLOCK();
4931
4932 // Handle the case in which some clients became newly defunct
4933 necp_process_defunct_list(&defunct_list);
4934 }
4935
/*
 * Schedule a deferred update of all NECP clients using the standard
 * (non-immediate) timeout.
 */
void
necp_update_all_clients(void)
{
	necp_update_all_clients_immediately_if_needed(false);
}
4941
4942 void
necp_update_all_clients_immediately_if_needed(bool should_update_immediately)4943 necp_update_all_clients_immediately_if_needed(bool should_update_immediately)
4944 {
4945 if (necp_client_update_tcall == NULL) {
4946 // Don't try to update clients if the module is not initialized
4947 return;
4948 }
4949
4950 uint64_t deadline = 0;
4951 uint64_t leeway = 0;
4952
4953 uint32_t timeout_to_use = necp_timeout_microseconds;
4954 uint32_t leeway_to_use = necp_timeout_leeway_microseconds;
4955 if (should_update_immediately) {
4956 timeout_to_use = 1000 * 10; // 10ms
4957 leeway_to_use = 1000 * 10; // 10ms;
4958 }
4959
4960 clock_interval_to_deadline(timeout_to_use, NSEC_PER_USEC, &deadline);
4961 clock_interval_to_absolutetime_interval(leeway_to_use, NSEC_PER_USEC, &leeway);
4962
4963 thread_call_enter_delayed_with_leeway(necp_client_update_tcall, NULL,
4964 deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
4965 }
4966
4967 bool
necp_set_client_as_background(proc_t proc,struct fileproc * fp,bool background)4968 necp_set_client_as_background(proc_t proc,
4969 struct fileproc *fp,
4970 bool background)
4971 {
4972 if (proc == PROC_NULL) {
4973 NECPLOG0(LOG_ERR, "NULL proc");
4974 return FALSE;
4975 }
4976
4977 if (fp == NULL) {
4978 NECPLOG0(LOG_ERR, "NULL fp");
4979 return FALSE;
4980 }
4981
4982 struct necp_fd_data *client_fd = (struct necp_fd_data *)fp_get_data(fp);
4983 if (client_fd == NULL) {
4984 NECPLOG0(LOG_ERR, "Could not find client structure for backgrounded client");
4985 return FALSE;
4986 }
4987
4988 if (client_fd->necp_fd_type != necp_fd_type_client) {
4989 // Not a client fd, ignore
4990 NECPLOG0(LOG_ERR, "Not a client fd, ignore");
4991 return FALSE;
4992 }
4993
4994 client_fd->background = background;
4995
4996 return TRUE;
4997 }
4998
/*
 * Memorystatus callback for an NECP client fd.  Intentionally a no-op:
 * there is nothing to reap for the process or client yet, but this is
 * the hook where future memory-pressure handling would be triggered.
 */
void
necp_fd_memstatus(proc_t proc, uint32_t status,
    struct necp_fd_data *client_fd)
{
#pragma unused(proc, status, client_fd)
	ASSERT(proc != PROC_NULL);
	ASSERT(client_fd != NULL);

	// Nothing to reap for the process or client for now,
	// but this is where we would trigger that in future.
}
5010
/*
 * Defunct all clients on an NECP client fd on behalf of proc.
 * Newly-defunct flows are collected on a local list and processed after
 * the fd lock is dropped.
 */
void
necp_fd_defunct(proc_t proc, struct necp_fd_data *client_fd)
{
	struct _necp_flow_defunct_list defunct_list;

	ASSERT(proc != PROC_NULL);
	ASSERT(client_fd != NULL);

	if (client_fd->necp_fd_type != necp_fd_type_client) {
		// Not a client fd, ignore
		return;
	}

	// Our local temporary list
	LIST_INIT(&defunct_list);

	// Need to hold lock so ntstats defunct the same set of clients
	NECP_FD_LOCK(client_fd);
#if SKYWALK
	// Shut down statistics
	nstats_userland_stats_defunct_for_process(proc_getpid(proc));
#endif /* SKYWALK */
	necp_defunct_client_fd_locked(client_fd, &defunct_list, proc);
	NECP_FD_UNLOCK(client_fd);

	// Process the collected flows outside of the fd lock
	necp_process_defunct_list(&defunct_list);
}
5038
/*
 * Remove every NETAGENT TLV matching netagent_uuid from the client's
 * result buffer, compacting the buffer in place and zeroing the freed
 * tail.  Caller must hold the client lock.
 */
static void
necp_client_remove_agent_from_result(struct necp_client *client, uuid_t netagent_uuid)
{
	size_t offset = 0;

	u_int8_t *result_buffer = client->result;
	while ((offset + sizeof(struct necp_tlv_header)) <= client->result_length) {
		u_int8_t type = necp_buffer_get_tlv_type(result_buffer, offset);
		u_int32_t length = necp_buffer_get_tlv_length(result_buffer, offset);

		// Only act on a netagent TLV of the expected size that fits
		// entirely within the remaining result buffer
		size_t tlv_total_length = (sizeof(struct necp_tlv_header) + length);
		if (type == NECP_CLIENT_RESULT_NETAGENT &&
		    length == sizeof(struct necp_client_result_netagent) &&
		    (offset + tlv_total_length) <= client->result_length) {
			struct necp_client_result_netagent *value = ((struct necp_client_result_netagent *)(void *)
			    necp_buffer_get_tlv_value(result_buffer, offset, NULL));
			if (uuid_compare(value->netagent_uuid, netagent_uuid) == 0) {
				// Found a netagent to remove
				// Shift bytes down to remove the tlv, and adjust total length
				// Don't adjust the current offset
				memmove(result_buffer + offset,
				    result_buffer + offset + tlv_total_length,
				    client->result_length - (offset + tlv_total_length));
				client->result_length -= tlv_total_length;
				// Zero the now-unused tail so stale TLV bytes
				// cannot be re-parsed
				memset(result_buffer + client->result_length, 0, sizeof(client->result) - client->result_length);
				continue;
			}
		}

		offset += tlv_total_length;
	}
}
5071
/*
 * Force a client to re-evaluate after a netagent trigger failure.
 * Records the failing agent's generation, optionally strips the agent
 * from the client's result TLVs, marks the result unread, and notifies
 * the owning fd.  Stops scanning at the first fd that owns the client.
 */
void
necp_force_update_client(uuid_t client_id, uuid_t remove_netagent_uuid, u_int32_t agent_generation)
{
	struct necp_fd_data *client_fd = NULL;

	NECP_FD_LIST_LOCK_SHARED();

	LIST_FOREACH(client_fd, &necp_fd_list, chain) {
		bool updated_result = FALSE;
		NECP_FD_LOCK(client_fd);
		struct necp_client *client = necp_client_fd_find_client_and_lock(client_fd, client_id);
		if (client != NULL) {
			client->failed_trigger_agent.generation = agent_generation;
			uuid_copy(client->failed_trigger_agent.netagent_uuid, remove_netagent_uuid);
			if (!uuid_is_null(remove_netagent_uuid)) {
				necp_client_remove_agent_from_result(client, remove_netagent_uuid);
			}
			client->result_read = FALSE;
			// Found the client, break
			updated_result = TRUE;
			NECP_CLIENT_UNLOCK(client);
		}
		// Notify while the fd lock is still held
		if (updated_result) {
			necp_fd_notify(client_fd, true);
		}
		NECP_FD_UNLOCK(client_fd);
		if (updated_result) {
			// Found the client, break
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();
}
5106
#if SKYWALK
/*
 * Signal a TIME_WAIT statistics event for the flow registered under
 * client_id, so userland stats can be finalized before the flow closes.
 */
void
necp_client_early_close(uuid_t client_id)
{
	NECP_CLIENT_TREE_LOCK_SHARED();

	struct necp_client *client = necp_find_client_and_lock(client_id);
	if (client != NULL) {
		struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
		if (flow_registration != NULL) {
			// Found the right client and flow, mark the stats as over
			if (flow_registration->stats_handler_context != NULL) {
				ntstat_userland_stats_event(flow_registration->stats_handler_context,
				    NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT);
			}
		}
		NECP_CLIENT_UNLOCK(client);
	}

	NECP_CLIENT_TREE_UNLOCK();
}
#endif /* SKYWALK */
5129
5130 /// Interface matching
5131
// Fields that constrain which ifnet can satisfy a client; if none of
// these are set, no per-interface matching is required.
#define NECP_PARSED_PARAMETERS_INTERESTING_IFNET_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
	    NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF |                                          \
	    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE |                                        \
	    NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE |                                      \
	    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT |                                         \
	    NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT |                                       \
	    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT |                                        \
	    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT |                                          \
	    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE |                                    \
	    NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE |                                  \
	    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE |                                   \
	    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)

// Fields that can cause a client to be scoped to a specific interface
#define NECP_PARSED_PARAMETERS_SCOPED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR |   \
	    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE |                                \
	    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT |                                 \
	    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT |                                \
	    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE |                            \
	    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE)

// Subset of scoped fields that name a specific interface directly
#define NECP_PARSED_PARAMETERS_SCOPED_IFNET_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR |     \
	    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE)

// Fields that express preference/avoidance rather than hard requirements
#define NECP_PARSED_PARAMETERS_PREFERRED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT |   \
	    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT |                                          \
	    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE |                                   \
	    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)
5159
5160 static bool
necp_ifnet_matches_type(struct ifnet * ifp,u_int8_t interface_type,bool check_delegates)5161 necp_ifnet_matches_type(struct ifnet *ifp, u_int8_t interface_type, bool check_delegates)
5162 {
5163 struct ifnet *check_ifp = ifp;
5164 while (check_ifp) {
5165 if (if_functional_type(check_ifp, TRUE) == interface_type) {
5166 return TRUE;
5167 }
5168 if (!check_delegates) {
5169 break;
5170 }
5171 check_ifp = check_ifp->if_delegated.ifp;
5172 }
5173 return FALSE;
5174 }
5175
5176 static bool
necp_ifnet_matches_name(struct ifnet * ifp,const char * interface_name,bool check_delegates)5177 necp_ifnet_matches_name(struct ifnet *ifp, const char *interface_name, bool check_delegates)
5178 {
5179 struct ifnet *check_ifp = ifp;
5180 while (check_ifp) {
5181 if (strncmp(check_ifp->if_xname, interface_name, IFXNAMSIZ) == 0) {
5182 return TRUE;
5183 }
5184 if (!check_delegates) {
5185 break;
5186 }
5187 check_ifp = check_ifp->if_delegated.ifp;
5188 }
5189 return FALSE;
5190 }
5191
5192 static bool
necp_ifnet_matches_agent(struct ifnet * ifp,uuid_t * agent_uuid,bool check_delegates)5193 necp_ifnet_matches_agent(struct ifnet *ifp, uuid_t *agent_uuid, bool check_delegates)
5194 {
5195 struct ifnet *check_ifp = ifp;
5196
5197 while (check_ifp != NULL) {
5198 ifnet_lock_shared(check_ifp);
5199 if (check_ifp->if_agentids != NULL) {
5200 for (u_int32_t index = 0; index < check_ifp->if_agentcount; index++) {
5201 if (uuid_compare(check_ifp->if_agentids[index], *agent_uuid) == 0) {
5202 ifnet_lock_done(check_ifp);
5203 return TRUE;
5204 }
5205 }
5206 }
5207 ifnet_lock_done(check_ifp);
5208
5209 if (!check_delegates) {
5210 break;
5211 }
5212 check_ifp = check_ifp->if_delegated.ifp;
5213 }
5214 return FALSE;
5215 }
5216
/*
 * Returns TRUE if ifp (or, when check_delegates is set, any interface
 * in its delegate chain) has an attached netagent whose domain and type
 * satisfy necp_agent_types_match against agent_domain/agent_type.
 */
static bool
necp_ifnet_matches_agent_type(struct ifnet *ifp, const char *agent_domain, const char *agent_type, bool check_delegates)
{
	struct ifnet *check_ifp = ifp;

	while (check_ifp != NULL) {
		ifnet_lock_shared(check_ifp);
		if (check_ifp->if_agentids != NULL) {
			for (u_int32_t index = 0; index < check_ifp->if_agentcount; index++) {
				// Skip empty slots in the agent list
				if (uuid_is_null(check_ifp->if_agentids[index])) {
					continue;
				}

				char if_agent_domain[NETAGENT_DOMAINSIZE] = { 0 };
				char if_agent_type[NETAGENT_TYPESIZE] = { 0 };

				if (netagent_get_agent_domain_and_type(check_ifp->if_agentids[index], if_agent_domain, if_agent_type)) {
					if (necp_agent_types_match(agent_domain, agent_type, if_agent_domain, if_agent_type)) {
						// Drop the interface lock before returning
						ifnet_lock_done(check_ifp);
						return TRUE;
					}
				}
			}
		}
		ifnet_lock_done(check_ifp);

		if (!check_delegates) {
			break;
		}
		check_ifp = check_ifp->if_delegated.ifp;
	}
	return FALSE;
}
5250
/*
 * Returns TRUE if sa is a local address assigned to ifp.  The address
 * is first normalized to the form stored in the ifaddr list (port
 * cleared; IPv6 scope ID cleared where the scope is not carried in the
 * address bytes) before performing a scoped lookup.
 */
static bool
necp_ifnet_matches_local_address(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifaddr *ifa = NULL;
	bool matched_local_address = FALSE;

	// Transform sa into the ifaddr form
	// IPv6 Scope IDs are always embedded in the ifaddr list
	struct sockaddr_storage address;
	u_int ifscope = IFSCOPE_NONE;
	(void)sa_copy(sa, &address, &ifscope);
	SIN(&address)->sin_port = 0;
	if (address.ss_family == AF_INET6) {
		if (in6_embedded_scope ||
		    !IN6_IS_SCOPE_EMBED(&SIN6(&address)->sin6_addr)) {
			SIN6(&address)->sin6_scope_id = 0;
		}
	}

	// Scoped lookup: only addresses assigned to ifp can match
	ifa = ifa_ifwithaddr_scoped_locked((struct sockaddr *)&address, ifp->if_index);
	matched_local_address = (ifa != NULL);

	if (ifa) {
		ifaddr_release(ifa);
	}

	return matched_local_address;
}
5279
5280 static bool
necp_interface_type_is_primary_eligible(u_int8_t interface_type)5281 necp_interface_type_is_primary_eligible(u_int8_t interface_type)
5282 {
5283 switch (interface_type) {
5284 // These types can never be primary, so a client requesting these types is allowed
5285 // to match an interface that isn't currently eligible to be primary (has default
5286 // route, dns, etc)
5287 case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
5288 case IFRTYPE_FUNCTIONAL_INTCOPROC:
5289 return false;
5290 default:
5291 break;
5292 }
5293 return true;
5294 }
5295
5296 #define NECP_IFP_IS_ON_ORDERED_LIST(_ifp) ((_ifp)->if_ordered_link.tqe_next != NULL || (_ifp)->if_ordered_link.tqe_prev != NULL)
5297
// Secondary interface flag indicates that the interface is being
// used for multipath or a listener as an extra path
//
// Returns TRUE if ifp satisfies every required/prohibited field in
// parsed_parameters.  If preferred_count is non-NULL, it is set to the
// number of preferred/avoided criteria the interface satisfies.  When
// require_scoped_field is TRUE, the interface only matches if at least
// one scoped field (local address, required type, required agent, or a
// preferred agent/agent-type) matched.  override_flags, when non-zero,
// replaces the client's own flags for the expensive/constrained checks.
static bool
necp_ifnet_matches_parameters(struct ifnet *ifp,
    struct necp_client_parsed_parameters *parsed_parameters,
    u_int32_t override_flags,
    u_int32_t *preferred_count,
    bool secondary_interface,
    bool require_scoped_field)
{
	bool matched_some_scoped_field = FALSE;

	if (preferred_count) {
		*preferred_count = 0;
	}

	// A specifically-required interface index must match exactly
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) {
		if (parsed_parameters->required_interface_index != ifp->if_index) {
			return FALSE;
		}
	}
#if SKYWALK
	else {
		// Low-latency interfaces only match when explicitly required
		if (ifnet_is_low_latency(ifp)) {
			return FALSE;
		}
	}
#endif /* SKYWALK */

	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR) {
		if (!necp_ifnet_matches_local_address(ifp, &parsed_parameters->local_addr.sa)) {
			return FALSE;
		}
		if (require_scoped_field) {
			matched_some_scoped_field = TRUE;
		}
	}

	// Expensive/constrained prohibitions: use override_flags when the
	// caller supplied them, otherwise the client's own flags
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) {
		if (override_flags != 0) {
			if ((override_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE) &&
			    IFNET_IS_EXPENSIVE(ifp)) {
				return FALSE;
			}
			if ((override_flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED) &&
			    IFNET_IS_CONSTRAINED(ifp)) {
				return FALSE;
			}
		} else {
			if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE) &&
			    IFNET_IS_EXPENSIVE(ifp)) {
				return FALSE;
			}
			if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED) &&
			    IFNET_IS_CONSTRAINED(ifp)) {
				return FALSE;
			}
		}
	}

	if ((!secondary_interface || // Enforce interface type if this is the primary interface
	    !(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) || // or if there are no flags
	    !(parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_ONLY_PRIMARY_REQUIRES_TYPE)) && // or if the flags don't give an exception
	    (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) &&
	    !necp_ifnet_matches_type(ifp, parsed_parameters->required_interface_type, FALSE)) {
		return FALSE;
	}

	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) {
		if (require_scoped_field) {
			matched_some_scoped_field = TRUE;
		}
	}

	// Prohibited interface types (delegates included)
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE) {
		for (int i = 0; i < NECP_MAX_INTERFACE_PARAMETERS; i++) {
			if (parsed_parameters->prohibited_interface_types[i] == 0) {
				break;
			}

			if (necp_ifnet_matches_type(ifp, parsed_parameters->prohibited_interface_types[i], TRUE)) {
				return FALSE;
			}
		}
	}

	// Prohibited interface names (delegates included)
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF) {
		for (int i = 0; i < NECP_MAX_INTERFACE_PARAMETERS; i++) {
			if (strlen(parsed_parameters->prohibited_interfaces[i]) == 0) {
				break;
			}

			if (necp_ifnet_matches_name(ifp, parsed_parameters->prohibited_interfaces[i], TRUE)) {
				return FALSE;
			}
		}
	}

	// Required netagents must be on the interface itself (no delegates)
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parsed_parameters->required_netagents[i])) {
				break;
			}

			if (!necp_ifnet_matches_agent(ifp, &parsed_parameters->required_netagents[i], FALSE)) {
				return FALSE;
			}

			if (require_scoped_field) {
				matched_some_scoped_field = TRUE;
			}
		}
	}

	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (uuid_is_null(parsed_parameters->prohibited_netagents[i])) {
				break;
			}

			if (necp_ifnet_matches_agent(ifp, &parsed_parameters->prohibited_netagents[i], TRUE)) {
				return FALSE;
			}
		}
	}

	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (strlen(parsed_parameters->required_netagent_types[i].netagent_domain) == 0 &&
			    strlen(parsed_parameters->required_netagent_types[i].netagent_type) == 0) {
				break;
			}

			if (!necp_ifnet_matches_agent_type(ifp, parsed_parameters->required_netagent_types[i].netagent_domain, parsed_parameters->required_netagent_types[i].netagent_type, FALSE)) {
				return FALSE;
			}

			if (require_scoped_field) {
				matched_some_scoped_field = TRUE;
			}
		}
	}

	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE) {
		for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
			if (strlen(parsed_parameters->prohibited_netagent_types[i].netagent_domain) == 0 &&
			    strlen(parsed_parameters->prohibited_netagent_types[i].netagent_type) == 0) {
				break;
			}

			if (necp_ifnet_matches_agent_type(ifp, parsed_parameters->prohibited_netagent_types[i].netagent_domain, parsed_parameters->prohibited_netagent_types[i].netagent_type, TRUE)) {
				return FALSE;
			}
		}
	}

	// Checked preferred properties
	if (preferred_count) {
		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (uuid_is_null(parsed_parameters->preferred_netagents[i])) {
					break;
				}

				if (necp_ifnet_matches_agent(ifp, &parsed_parameters->preferred_netagents[i], TRUE)) {
					(*preferred_count)++;
					if (require_scoped_field) {
						matched_some_scoped_field = TRUE;
					}
				}
			}
		}

		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strlen(parsed_parameters->preferred_netagent_types[i].netagent_domain) == 0 &&
				    strlen(parsed_parameters->preferred_netagent_types[i].netagent_type) == 0) {
					break;
				}

				if (necp_ifnet_matches_agent_type(ifp, parsed_parameters->preferred_netagent_types[i].netagent_domain, parsed_parameters->preferred_netagent_types[i].netagent_type, TRUE)) {
					(*preferred_count)++;
					if (require_scoped_field) {
						matched_some_scoped_field = TRUE;
					}
				}
			}
		}

		// Avoided agents count toward preference when absent
		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (uuid_is_null(parsed_parameters->avoided_netagents[i])) {
					break;
				}

				if (!necp_ifnet_matches_agent(ifp, &parsed_parameters->avoided_netagents[i], TRUE)) {
					(*preferred_count)++;
				}
			}
		}

		if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE) {
			for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
				if (strlen(parsed_parameters->avoided_netagent_types[i].netagent_domain) == 0 &&
				    strlen(parsed_parameters->avoided_netagent_types[i].netagent_type) == 0) {
					break;
				}

				if (!necp_ifnet_matches_agent_type(ifp, parsed_parameters->avoided_netagent_types[i].netagent_domain,
				    parsed_parameters->avoided_netagent_types[i].netagent_type, TRUE)) {
					(*preferred_count)++;
				}
			}
		}
	}

	if (require_scoped_field) {
		return matched_some_scoped_field;
	}

	return TRUE;
}
5520
/*
 * Find the interface index that best matches parsed_parameters.
 * Scans the ordered interface list first, then (for scoped parameters)
 * the full interface list, tracking the candidate with the highest
 * preferred-criteria count.  Returns TRUE with *return_ifindex == 0
 * when the client does not need to be scoped to any interface; sets
 * *validate_agents when agent requirements should instead be checked
 * against policy.
 */
static bool
necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_parameters,
    u_int *return_ifindex, bool *validate_agents)
{
	struct ifnet *ifp = NULL;
	u_int32_t best_preferred_count = 0;
	bool has_preferred_fields = FALSE;
	*return_ifindex = 0;

	// An explicitly-required interface index wins immediately
	if (parsed_parameters->required_interface_index != 0) {
		*return_ifindex = parsed_parameters->required_interface_index;
		return TRUE;
	}

	// Check and save off flags
	u_int32_t flags = 0;
	bool has_prohibit_flags = FALSE;
	if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) {
		flags = parsed_parameters->flags;
		has_prohibit_flags = (parsed_parameters->flags &
		    (NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE |
		    NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED));
	}

	// No interface-constraining fields at all: nothing to match
	if (!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_INTERESTING_IFNET_FIELDS) &&
	    !has_prohibit_flags) {
		return TRUE;
	}

	has_preferred_fields = (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_PREFERRED_FIELDS);

	// We have interesting parameters to parse and find a matching interface
	ifnet_head_lock_shared();

	if (!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_FIELDS) &&
	    !has_preferred_fields) {
		// We do have fields to match, but they are only prohibitory
		// If the first interface in the list matches, or there are no ordered interfaces, we don't need to scope
		ifp = TAILQ_FIRST(&ifnet_ordered_head);
		if (ifp == NULL || necp_ifnet_matches_parameters(ifp, parsed_parameters, 0, NULL, false, false)) {
			// Don't set return_ifindex, so the client doesn't need to scope
			ifnet_head_done();
			return TRUE;
		}
	}

	// First check the ordered interface list
	TAILQ_FOREACH(ifp, &ifnet_ordered_head, if_ordered_link) {
		u_int32_t preferred_count = 0;
		if (necp_ifnet_matches_parameters(ifp, parsed_parameters, flags, &preferred_count, false, false)) {
			if (preferred_count > best_preferred_count ||
			    *return_ifindex == 0) {
				// Everything matched, and is most preferred. Return this interface.
				*return_ifindex = ifp->if_index;
				best_preferred_count = preferred_count;

				// Without preferred fields, the first match is final
				if (!has_preferred_fields) {
					break;
				}
			}
		}

		if (has_prohibit_flags &&
		    ifp == TAILQ_FIRST(&ifnet_ordered_head)) {
			// This was the first interface. From here on, if the
			// client prohibited either expensive or constrained,
			// don't allow either as a secondary interface option.
			flags |= (NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE |
			    NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_CONSTRAINED);
		}
	}

	bool is_listener = ((parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) &&
	    (parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER));

	// Then check the remaining interfaces
	if ((parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_FIELDS) &&
	    ((!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE)) ||
	    !necp_interface_type_is_primary_eligible(parsed_parameters->required_interface_type) ||
	    (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR) ||
	    is_listener) &&
	    (*return_ifindex == 0 || has_preferred_fields)) {
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			u_int32_t preferred_count = 0;
			if (NECP_IFP_IS_ON_ORDERED_LIST(ifp)) {
				// This interface was in the ordered list, skip
				continue;
			}
			if (necp_ifnet_matches_parameters(ifp, parsed_parameters, flags, &preferred_count, false, true)) {
				if (preferred_count > best_preferred_count ||
				    *return_ifindex == 0) {
					// Everything matched, and is most preferred. Return this interface.
					*return_ifindex = ifp->if_index;
					best_preferred_count = preferred_count;

					if (!has_preferred_fields) {
						break;
					}
				}
			}
		}
	}

	ifnet_head_done();

	if (has_preferred_fields && best_preferred_count == 0 &&
	    ((parsed_parameters->valid_fields & (NECP_PARSED_PARAMETERS_SCOPED_FIELDS | NECP_PARSED_PARAMETERS_PREFERRED_FIELDS)) ==
	    (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_PREFERRED_FIELDS))) {
		// If only has preferred ifnet fields, and nothing was found, clear the interface index and return TRUE
		*return_ifindex = 0;
		return TRUE;
	}

	if (*return_ifindex == 0 &&
	    !(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_IFNET_FIELDS)) {
		// Has required fields, but not including specific interface fields. Pass for now, and check
		// to see if agents are satisfied by policy.
		*validate_agents = TRUE;
		return TRUE;
	}

	return *return_ifindex != 0;
}
5644
5645 #if SKYWALK
5646
/*
 * Populate an nstat_domain_info from a client's parameter TLVs
 * (flags, domain, tracker domain, owner, context, attributed bundle id,
 * remote address).  Returns the size of nstat_domain_info, or 0 when
 * client is NULL.  A NULL domain_info acts as a size query.
 */
static size_t
necp_find_domain_info_common(struct necp_client *client,
    u_int8_t *parameters,
    size_t parameters_size,
    struct necp_client_flow_registration *flow_registration, /* For logging purposes only */
    nstat_domain_info *domain_info)
{
	if (client == NULL) {
		return 0;
	}
	// Size query: report how much buffer space is needed
	if (domain_info == NULL) {
		return sizeof(nstat_domain_info);
	}

	size_t offset = 0;
	u_int32_t flags = 0;
	u_int8_t *tracker_domain = NULL;
	u_int8_t *domain = NULL;

	NECP_CLIENT_FLOW_LOG(client, flow_registration, "Collecting stats");

	// Walk the parameter TLVs, capturing the fields of interest
	while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
		u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
		u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);

		if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
			// If the length is larger than what can fit in the remaining parameters size, bail
			NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
			break;
		}

		if (length > 0) {
			u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
			if (value != NULL) {
				switch (type) {
				case NECP_CLIENT_PARAMETER_FLAGS: {
					if (length >= sizeof(u_int32_t)) {
						memcpy(&flags, value, sizeof(u_int32_t));
					}

					domain_info->is_tracker =
					    !!(flags & NECP_CLIENT_PARAMETER_FLAG_KNOWN_TRACKER);
					domain_info->is_non_app_initiated =
					    !!(flags & NECP_CLIENT_PARAMETER_FLAG_NON_APP_INITIATED);
					domain_info->is_silent =
					    !!(flags & NECP_CLIENT_PARAMETER_FLAG_SILENT);
					break;
				}
				case NECP_CLIENT_PARAMETER_TRACKER_DOMAIN: {
					// Remember the pointer; copied after the loop
					tracker_domain = value;
					break;
				}
				case NECP_CLIENT_PARAMETER_DOMAIN: {
					domain = value;
					break;
				}
				case NECP_CLIENT_PARAMETER_DOMAIN_OWNER: {
					strlcpy(domain_info->domain_owner, (const char *)value, sizeof(domain_info->domain_owner));
					break;
				}
				case NECP_CLIENT_PARAMETER_DOMAIN_CONTEXT: {
					strlcpy(domain_info->domain_tracker_ctxt, (const char *)value, sizeof(domain_info->domain_tracker_ctxt));
					break;
				}
				case NECP_CLIENT_PARAMETER_ATTRIBUTED_BUNDLE_IDENTIFIER: {
					strlcpy(domain_info->domain_attributed_bundle_id, (const char *)value, sizeof(domain_info->domain_attributed_bundle_id));
					break;
				}
				case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
					if (length >= sizeof(struct necp_policy_condition_addr)) {
						struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
						if (necp_client_address_is_valid(&address_struct->address.sa)) {
							memcpy(&domain_info->remote, &address_struct->address, sizeof(address_struct->address));
						}
					}
					break;
				}
				default: {
					break;
				}
				}
			}
		}
		offset += sizeof(struct necp_tlv_header) + length;
	}

	// Trackers report the tracker domain when present; otherwise use
	// the regular domain parameter
	if (domain_info->is_tracker && tracker_domain) {
		strlcpy(domain_info->domain_name, (const char *)tracker_domain, sizeof(domain_info->domain_name));
	} else if (domain) {
		strlcpy(domain_info->domain_name, (const char *)domain, sizeof(domain_info->domain_name));
	}

	// Log if it is a known tracker
	if (domain_info->is_tracker && client) {
		NECP_CLIENT_TRACKER_LOG(client->proc_pid,
		    "Collected stats - domain <%s> owner <%s> ctxt <%s> bundle id <%s> "
		    "is_tracker %d is_non_app_initiated %d is_silent %d",
		    domain_info->domain_name[0] ? "present" : "not set",
		    domain_info->domain_owner[0] ? "present" : "not set",
		    domain_info->domain_tracker_ctxt[0] ? "present" : "not set",
		    domain_info->domain_attributed_bundle_id[0] ? "present" : "not set",
		    domain_info->is_tracker,
		    domain_info->is_non_app_initiated,
		    domain_info->is_silent);
	}

	NECP_CLIENT_FLOW_LOG(client, flow_registration,
	    "Collected stats - domain <%s> owner <%s> ctxt <%s> bundle id <%s> "
	    "is_tracker %d is_non_app_initiated %d is_silent %d",
	    domain_info->domain_name,
	    domain_info->domain_owner,
	    domain_info->domain_tracker_ctxt,
	    domain_info->domain_attributed_bundle_id,
	    domain_info->is_tracker,
	    domain_info->is_non_app_initiated,
	    domain_info->is_silent);

	return sizeof(nstat_domain_info);
}
5766
5767 static size_t
necp_find_conn_extension_info(nstat_provider_context ctx,int requested_extension,void * buf,size_t buf_size)5768 necp_find_conn_extension_info(nstat_provider_context ctx,
5769 int requested_extension, /* The extension to be returned */
5770 void *buf, /* If not NULL, the address for extensions to be returned in */
5771 size_t buf_size) /* The size of the buffer space, typically matching the return from a previous call with a NULL buf pointer */
5772 {
5773 // Note, the caller has guaranteed that any buffer has been zeroed, there is no need to clear it again
5774
5775 if (ctx == NULL) {
5776 return 0;
5777 }
5778 struct necp_client *client = (struct necp_client *)ctx;
5779 switch (requested_extension) {
5780 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
5781 // This is for completeness. The intent is that domain information can be extracted at user level from the TLV parameters
5782 if (buf == NULL) {
5783 return sizeof(nstat_domain_info);
5784 }
5785 if (buf_size < sizeof(nstat_domain_info)) {
5786 return 0;
5787 }
5788 return necp_find_domain_info_common(client, client->parameters, client->parameters_length, NULL, (nstat_domain_info *)buf);
5789
5790 case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV:
5791 if (buf == NULL) {
5792 return client->parameters_length;
5793 }
5794 if (buf_size < client->parameters_length) {
5795 return 0;
5796 }
5797 memcpy(buf, client->parameters, client->parameters_length);
5798 return client->parameters_length;
5799
5800 case NSTAT_EXTENDED_UPDATE_TYPE_ORIGINAL_NECP_TLV:
5801 if (buf == NULL) {
5802 return (client->original_parameters_source != NULL) ? client->original_parameters_source->parameters_length : 0;
5803 }
5804 if ((client->original_parameters_source == NULL) || (buf_size < client->original_parameters_source->parameters_length)) {
5805 return 0;
5806 }
5807 memcpy(buf, client->original_parameters_source->parameters, client->original_parameters_source->parameters_length);
5808 return client->original_parameters_source->parameters_length;
5809
5810 case NSTAT_EXTENDED_UPDATE_TYPE_ORIGINAL_DOMAIN:
5811 if (buf == NULL) {
5812 return (client->original_parameters_source != NULL) ? sizeof(nstat_domain_info) : 0;
5813 }
5814 if ((buf_size < sizeof(nstat_domain_info)) || (client->original_parameters_source == NULL)) {
5815 return 0;
5816 }
5817 return necp_find_domain_info_common(client, client->original_parameters_source->parameters, client->original_parameters_source->parameters_length,
5818 NULL, (nstat_domain_info *)buf);
5819
5820 default:
5821 return 0;
5822 }
5823 }
5824
5825 static size_t
necp_find_extension_info(userland_stats_provider_context * ctx,int requested_extension,void * buf,size_t buf_size)5826 necp_find_extension_info(userland_stats_provider_context *ctx,
5827 int requested_extension, /* The extension to be returned */
5828 void *buf, /* If not NULL, the address for extensions to be returned in */
5829 size_t buf_size) /* The size of the buffer space, typically matching the return from a previous call with a NULL buf pointer */
5830 {
5831 if (ctx == NULL) {
5832 return 0;
5833 }
5834 struct necp_client_flow_registration *flow_registration = (struct necp_client_flow_registration *)(uintptr_t)ctx;
5835 struct necp_client *client = flow_registration->client;
5836
5837 switch (requested_extension) {
5838 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
5839 if (buf == NULL) {
5840 return sizeof(nstat_domain_info);
5841 }
5842 if (buf_size < sizeof(nstat_domain_info)) {
5843 return 0;
5844 }
5845 return necp_find_domain_info_common(client, client->parameters, client->parameters_length, flow_registration, (nstat_domain_info *)buf);
5846
5847 case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV:
5848 if (buf == NULL) {
5849 return client->parameters_length;
5850 }
5851 if (buf_size < client->parameters_length) {
5852 return 0;
5853 }
5854 memcpy(buf, client->parameters, client->parameters_length);
5855 return client->parameters_length;
5856
5857 default:
5858 return 0;
5859 }
5860 }
5861
5862 static void
necp_find_netstat_data(struct necp_client * client,union necp_sockaddr_union * remote,pid_t * effective_pid,uuid_t euuid,u_int32_t * traffic_class,u_int8_t * fallback_mode)5863 necp_find_netstat_data(struct necp_client *client,
5864 union necp_sockaddr_union *remote,
5865 pid_t *effective_pid,
5866 uuid_t euuid,
5867 u_int32_t *traffic_class,
5868 u_int8_t *fallback_mode)
5869 {
5870 size_t offset = 0;
5871 u_int8_t *parameters;
5872 u_int32_t parameters_size;
5873
5874 parameters = client->parameters;
5875 parameters_size = (u_int32_t)client->parameters_length;
5876
5877 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
5878 u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
5879 u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);
5880
5881 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
5882 // If the length is larger than what can fit in the remaining parameters size, bail
5883 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
5884 break;
5885 }
5886
5887 if (length > 0) {
5888 u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
5889 if (value != NULL) {
5890 switch (type) {
5891 case NECP_CLIENT_PARAMETER_APPLICATION: {
5892 if (length >= sizeof(uuid_t)) {
5893 uuid_copy(euuid, value);
5894 }
5895 break;
5896 }
5897 case NECP_CLIENT_PARAMETER_PID: {
5898 if (length >= sizeof(pid_t)) {
5899 memcpy(effective_pid, value, sizeof(pid_t));
5900 }
5901 break;
5902 }
5903 case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
5904 if (length >= sizeof(u_int32_t)) {
5905 memcpy(traffic_class, value, sizeof(u_int32_t));
5906 }
5907 break;
5908 }
5909 case NECP_CLIENT_PARAMETER_FALLBACK_MODE: {
5910 if (length >= sizeof(u_int8_t)) {
5911 memcpy(fallback_mode, value, sizeof(u_int8_t));
5912 }
5913 break;
5914 }
5915 // It is an implementation quirk that the remote address can be found in the necp parameters
5916 // while the local address must be retrieved from the flowswitch
5917 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
5918 if (length >= sizeof(struct necp_policy_condition_addr)) {
5919 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
5920 if (necp_client_address_is_valid(&address_struct->address.sa)) {
5921 memcpy(remote, &address_struct->address, sizeof(address_struct->address));
5922 }
5923 }
5924 break;
5925 }
5926 default: {
5927 break;
5928 }
5929 }
5930 }
5931 }
5932 offset += sizeof(struct necp_tlv_header) + length;
5933 }
5934 }
5935
5936 // Called from NetworkStatistics when it wishes to collect latest information for a TCP flow.
5937 // It is a responsibility of NetworkStatistics to have previously zeroed any supplied memory.
5938 static bool
necp_request_tcp_netstats(userland_stats_provider_context * ctx,u_int16_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,void * metadatap)5939 necp_request_tcp_netstats(userland_stats_provider_context *ctx,
5940 u_int16_t *ifflagsp,
5941 nstat_progress_digest *digestp,
5942 nstat_counts *countsp,
5943 void *metadatap)
5944 {
5945 if (ctx == NULL) {
5946 return false;
5947 }
5948
5949 struct necp_client_flow_registration *flow_registration = (struct necp_client_flow_registration *)(uintptr_t)ctx;
5950 struct necp_client *client = flow_registration->client;
5951 struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
5952 struct necp_tcp_stats *tcpstats = (struct necp_tcp_stats *)ustats_kaddr;
5953 ASSERT(tcpstats != NULL);
5954
5955 u_int16_t nstat_diagnostic_flags = 0;
5956
5957 // Retrieve details from the last time the assigned flows were updated
5958 u_int32_t route_ifindex = IFSCOPE_NONE;
5959 u_int16_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
5960 u_int64_t combined_interface_details = 0;
5961
5962 atomic_get_64(combined_interface_details, &flow_registration->last_interface_details);
5963 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
5964
5965 if (route_ifindex == IFSCOPE_NONE) {
5966 // Mark no interface
5967 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
5968 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
5969 NECPLOG(LOG_INFO, "req tcp stats, failed to get route details for pid %d curproc %d %s\n",
5970 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
5971 }
5972
5973 if (ifflagsp) {
5974 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
5975 if (tcpstats->necp_tcp_extra.flags1 & SOF1_CELLFALLBACK) {
5976 *ifflagsp |= NSTAT_IFNET_VIA_CELLFALLBACK;
5977 }
5978 if ((digestp == NULL) && (countsp == NULL) && (metadatap == NULL)) {
5979 return true;
5980 }
5981 }
5982
5983 if (digestp) {
5984 // The digest is intended to give information that may help give insight into the state of the link
5985 // while avoiding the need to do the relatively expensive flowswitch lookup
5986 digestp->rxbytes = tcpstats->necp_tcp_counts.necp_stat_rxbytes;
5987 digestp->txbytes = tcpstats->necp_tcp_counts.necp_stat_txbytes;
5988 digestp->rxduplicatebytes = tcpstats->necp_tcp_counts.necp_stat_rxduplicatebytes;
5989 digestp->rxoutoforderbytes = tcpstats->necp_tcp_counts.necp_stat_rxoutoforderbytes;
5990 digestp->txretransmit = tcpstats->necp_tcp_counts.necp_stat_txretransmit;
5991 digestp->ifindex = route_ifindex;
5992 digestp->state = tcpstats->necp_tcp_extra.state;
5993 digestp->txunacked = tcpstats->necp_tcp_extra.txunacked;
5994 digestp->txwindow = tcpstats->necp_tcp_extra.txwindow;
5995 digestp->connstatus.probe_activated = tcpstats->necp_tcp_extra.probestatus.probe_activated;
5996 digestp->connstatus.write_probe_failed = tcpstats->necp_tcp_extra.probestatus.write_probe_failed;
5997 digestp->connstatus.read_probe_failed = tcpstats->necp_tcp_extra.probestatus.read_probe_failed;
5998 digestp->connstatus.conn_probe_failed = tcpstats->necp_tcp_extra.probestatus.conn_probe_failed;
5999
6000 if ((countsp == NULL) && (metadatap == NULL)) {
6001 return true;
6002 }
6003 }
6004
6005 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
6006 if (sf == NULL) {
6007 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
6008 char namebuf[MAXCOMLEN + 1];
6009 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
6010 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
6011 NECPLOG(LOG_ERR, "req tcp stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
6012 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
6013 sf = &ntstat_sk_stats_zero;
6014 }
6015
6016 if (countsp) {
6017 countsp->nstat_rxbytes = tcpstats->necp_tcp_counts.necp_stat_rxbytes;
6018 countsp->nstat_txbytes = tcpstats->necp_tcp_counts.necp_stat_txbytes;
6019
6020 countsp->nstat_rxduplicatebytes = tcpstats->necp_tcp_counts.necp_stat_rxduplicatebytes;
6021 countsp->nstat_rxoutoforderbytes = tcpstats->necp_tcp_counts.necp_stat_rxoutoforderbytes;
6022 countsp->nstat_txretransmit = tcpstats->necp_tcp_counts.necp_stat_txretransmit;
6023
6024 countsp->nstat_min_rtt = tcpstats->necp_tcp_counts.necp_stat_min_rtt;
6025 countsp->nstat_avg_rtt = tcpstats->necp_tcp_counts.necp_stat_avg_rtt;
6026 countsp->nstat_var_rtt = tcpstats->necp_tcp_counts.necp_stat_var_rtt;
6027
6028 countsp->nstat_connectattempts = tcpstats->necp_tcp_extra.state >= TCPS_SYN_SENT ? 1 : 0;
6029 countsp->nstat_connectsuccesses = tcpstats->necp_tcp_extra.state >= TCPS_ESTABLISHED ? 1 : 0;
6030
6031 // Supplement what the user level has told us with what we know from the flowswitch
6032 countsp->nstat_rxpackets = sf->sf_ipackets;
6033 countsp->nstat_txpackets = sf->sf_opackets;
6034 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
6035 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
6036 countsp->nstat_cell_txbytes = sf->sf_obytes;
6037 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
6038 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
6039 countsp->nstat_wifi_txbytes = sf->sf_obytes;
6040 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
6041 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
6042 countsp->nstat_wired_txbytes = sf->sf_obytes;
6043 }
6044 }
6045
6046 if (metadatap) {
6047 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)metadatap;
6048 memset(desc, 0, sizeof(*desc));
6049
6050 // Metadata from the flow registration
6051 uuid_copy(desc->fuuid, flow_registration->registration_id);
6052
6053 // Metadata that the necp client should have in TLV format.
6054 pid_t effective_pid = client->proc_pid;
6055 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, desc->euuid, &desc->traffic_class, &desc->fallback_mode);
6056 desc->epid = (u_int32_t)effective_pid;
6057
6058 // Metadata from the flow registration
6059 // This needs to revisited if multiple flows are created from one flow registration
6060 struct necp_client_flow *flow = NULL;
6061 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
6062 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
6063 break;
6064 }
6065
6066 // Metadata from the route
6067 desc->ifindex = route_ifindex;
6068 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
6069 desc->ifnet_properties |= (sf->sf_flags & SFLOWF_ONLINK) ? NSTAT_IFNET_IS_LOCAL : NSTAT_IFNET_IS_NON_LOCAL;
6070 if (tcpstats->necp_tcp_extra.flags1 & SOF1_CELLFALLBACK) {
6071 desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
6072 }
6073
6074 // Basic metadata from userland
6075 desc->rcvbufsize = tcpstats->necp_tcp_basic.rcvbufsize;
6076 desc->rcvbufused = tcpstats->necp_tcp_basic.rcvbufused;
6077
6078 // Additional TCP specific data
6079 desc->sndbufsize = tcpstats->necp_tcp_extra.sndbufsize;
6080 desc->sndbufused = tcpstats->necp_tcp_extra.sndbufused;
6081 desc->txunacked = tcpstats->necp_tcp_extra.txunacked;
6082 desc->txwindow = tcpstats->necp_tcp_extra.txwindow;
6083 desc->txcwindow = tcpstats->necp_tcp_extra.txcwindow;
6084 desc->traffic_mgt_flags = tcpstats->necp_tcp_extra.traffic_mgt_flags;
6085 desc->state = tcpstats->necp_tcp_extra.state;
6086
6087 u_int32_t cc_alg_index = tcpstats->necp_tcp_extra.cc_alg_index;
6088 if (cc_alg_index < TCP_CC_ALGO_COUNT) {
6089 strlcpy(desc->cc_algo, tcp_cc_algo_list[cc_alg_index]->name, sizeof(desc->cc_algo));
6090 } else {
6091 strlcpy(desc->cc_algo, "unknown", sizeof(desc->cc_algo));
6092 }
6093
6094 desc->connstatus.probe_activated = tcpstats->necp_tcp_extra.probestatus.probe_activated;
6095 desc->connstatus.write_probe_failed = tcpstats->necp_tcp_extra.probestatus.write_probe_failed;
6096 desc->connstatus.read_probe_failed = tcpstats->necp_tcp_extra.probestatus.read_probe_failed;
6097 desc->connstatus.conn_probe_failed = tcpstats->necp_tcp_extra.probestatus.conn_probe_failed;
6098
6099 memcpy(&desc->activity_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
6100 }
6101
6102 return true;
6103 }
6104
6105 // Called from NetworkStatistics when it wishes to collect latest information for a UDP flow.
6106 static bool
necp_request_udp_netstats(userland_stats_provider_context * ctx,u_int16_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,void * metadatap)6107 necp_request_udp_netstats(userland_stats_provider_context *ctx,
6108 u_int16_t *ifflagsp,
6109 nstat_progress_digest *digestp,
6110 nstat_counts *countsp,
6111 void *metadatap)
6112 {
6113 #pragma unused(digestp)
6114
6115 if (ctx == NULL) {
6116 return false;
6117 }
6118
6119 struct necp_client_flow_registration *flow_registration = (struct necp_client_flow_registration *)(uintptr_t)ctx;
6120 struct necp_client *client = flow_registration->client;
6121 struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
6122 struct necp_udp_stats *udpstats = (struct necp_udp_stats *)ustats_kaddr;
6123 ASSERT(udpstats != NULL);
6124
6125 u_int16_t nstat_diagnostic_flags = 0;
6126
6127 // Retrieve details from the last time the assigned flows were updated
6128 u_int32_t route_ifindex = IFSCOPE_NONE;
6129 u_int16_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6130 u_int64_t combined_interface_details = 0;
6131
6132 atomic_get_64(combined_interface_details, &flow_registration->last_interface_details);
6133 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
6134
6135 if (route_ifindex == IFSCOPE_NONE) {
6136 // Mark no interface
6137 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
6138 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6139 NECPLOG(LOG_INFO, "req udp stats, failed to get route details for pid %d curproc %d %s\n",
6140 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
6141 }
6142
6143 if (ifflagsp) {
6144 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
6145 if ((countsp == NULL) && (metadatap == NULL)) {
6146 return true;
6147 }
6148 }
6149 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
6150 if (sf == NULL) {
6151 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
6152 char namebuf[MAXCOMLEN + 1];
6153 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
6154 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
6155 NECPLOG(LOG_ERR, "req udp stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
6156 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
6157 sf = &ntstat_sk_stats_zero;
6158 }
6159
6160 if (countsp) {
6161 countsp->nstat_rxbytes = udpstats->necp_udp_counts.necp_stat_rxbytes;
6162 countsp->nstat_txbytes = udpstats->necp_udp_counts.necp_stat_txbytes;
6163
6164 countsp->nstat_rxduplicatebytes = udpstats->necp_udp_counts.necp_stat_rxduplicatebytes;
6165 countsp->nstat_rxoutoforderbytes = udpstats->necp_udp_counts.necp_stat_rxoutoforderbytes;
6166 countsp->nstat_txretransmit = udpstats->necp_udp_counts.necp_stat_txretransmit;
6167
6168 countsp->nstat_min_rtt = udpstats->necp_udp_counts.necp_stat_min_rtt;
6169 countsp->nstat_avg_rtt = udpstats->necp_udp_counts.necp_stat_avg_rtt;
6170 countsp->nstat_var_rtt = udpstats->necp_udp_counts.necp_stat_var_rtt;
6171
6172 // Supplement what the user level has told us with what we know from the flowswitch
6173 countsp->nstat_rxpackets = sf->sf_ipackets;
6174 countsp->nstat_txpackets = sf->sf_opackets;
6175 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
6176 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
6177 countsp->nstat_cell_txbytes = sf->sf_obytes;
6178 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
6179 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
6180 countsp->nstat_wifi_txbytes = sf->sf_obytes;
6181 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
6182 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
6183 countsp->nstat_wired_txbytes = sf->sf_obytes;
6184 }
6185 }
6186
6187 if (metadatap) {
6188 nstat_udp_descriptor *desc = (nstat_udp_descriptor *)metadatap;
6189 memset(desc, 0, sizeof(*desc));
6190
6191 // Metadata from the flow registration
6192 uuid_copy(desc->fuuid, flow_registration->registration_id);
6193
6194 // Metadata that the necp client should have in TLV format.
6195 pid_t effective_pid = client->proc_pid;
6196 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, desc->euuid, &desc->traffic_class, &desc->fallback_mode);
6197 desc->epid = (u_int32_t)effective_pid;
6198
6199 // Metadata from the flow registration
6200 // This needs to revisited if multiple flows are created from one flow registration
6201 struct necp_client_flow *flow = NULL;
6202 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
6203 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
6204 break;
6205 }
6206
6207 // Metadata from the route
6208 desc->ifindex = route_ifindex;
6209 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
6210
6211 // Basic metadata is all that is required for UDP
6212 desc->rcvbufsize = udpstats->necp_udp_basic.rcvbufsize;
6213 desc->rcvbufused = udpstats->necp_udp_basic.rcvbufused;
6214
6215 memcpy(&desc->activity_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
6216 }
6217
6218 return true;
6219 }
6220
6221 // Called from NetworkStatistics when it wishes to collect latest information for a QUIC flow.
6222 //
6223 // TODO: For now it is an exact implementation as that of TCP.
6224 // Still to keep the logic separate for future divergence, keeping the routines separate.
6225 // It also seems there are lots of common code between existing implementations and
6226 // it would be good to refactor this logic at some point.
6227 static bool
necp_request_quic_netstats(userland_stats_provider_context * ctx,u_int16_t * ifflagsp,nstat_progress_digest * digestp,nstat_counts * countsp,void * metadatap)6228 necp_request_quic_netstats(userland_stats_provider_context *ctx,
6229 u_int16_t *ifflagsp,
6230 nstat_progress_digest *digestp,
6231 nstat_counts *countsp,
6232 void *metadatap)
6233 {
6234 if (ctx == NULL) {
6235 return false;
6236 }
6237
6238 struct necp_client_flow_registration *flow_registration = (struct necp_client_flow_registration *)(uintptr_t)ctx;
6239 struct necp_client *client = flow_registration->client;
6240 struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
6241 struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
6242 ASSERT(quicstats != NULL);
6243
6244 u_int16_t nstat_diagnostic_flags = 0;
6245
6246 // Retrieve details from the last time the assigned flows were updated
6247 u_int32_t route_ifindex = IFSCOPE_NONE;
6248 u_int16_t route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6249 u_int64_t combined_interface_details = 0;
6250
6251 atomic_get_64(combined_interface_details, &flow_registration->last_interface_details);
6252 split_interface_details(combined_interface_details, &route_ifindex, &route_ifflags);
6253
6254 if (route_ifindex == IFSCOPE_NONE) {
6255 // Mark no interface
6256 nstat_diagnostic_flags |= NSTAT_IFNET_ROUTE_VALUE_UNOBTAINABLE;
6257 route_ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
6258 NECPLOG(LOG_INFO, "req quic stats, failed to get route details for pid %d curproc %d %s\n",
6259 client->proc_pid, proc_pid(current_proc()), proc_best_name(current_proc()));
6260 }
6261
6262 if (ifflagsp) {
6263 *ifflagsp = route_ifflags | nstat_diagnostic_flags;
6264 if ((digestp == NULL) && (countsp == NULL) && (metadatap == NULL)) {
6265 return true;
6266 }
6267 }
6268
6269 if (digestp) {
6270 // The digest is intended to give information that may help give insight into the state of the link
6271 // while avoiding the need to do the relatively expensive flowswitch lookup
6272 digestp->rxbytes = quicstats->necp_quic_counts.necp_stat_rxbytes;
6273 digestp->txbytes = quicstats->necp_quic_counts.necp_stat_txbytes;
6274 digestp->rxduplicatebytes = quicstats->necp_quic_counts.necp_stat_rxduplicatebytes;
6275 digestp->rxoutoforderbytes = quicstats->necp_quic_counts.necp_stat_rxoutoforderbytes;
6276 digestp->txretransmit = quicstats->necp_quic_counts.necp_stat_txretransmit;
6277 digestp->ifindex = route_ifindex;
6278 digestp->state = quicstats->necp_quic_extra.state;
6279 digestp->txunacked = quicstats->necp_quic_extra.txunacked;
6280 digestp->txwindow = quicstats->necp_quic_extra.txwindow;
6281
6282 if ((countsp == NULL) && (metadatap == NULL)) {
6283 return true;
6284 }
6285 }
6286
6287 const struct sk_stats_flow *sf = &flow_registration->nexus_stats->fs_stats;
6288 if (sf == NULL) {
6289 nstat_diagnostic_flags |= NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE;
6290 char namebuf[MAXCOMLEN + 1];
6291 (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
6292 proc_name(client->proc_pid, namebuf, sizeof(namebuf));
6293 NECPLOG(LOG_ERR, "req quic stats, necp_client flow_registration flow_stats missing for pid %d %s curproc %d %s\n",
6294 client->proc_pid, namebuf, proc_pid(current_proc()), proc_best_name(current_proc()));
6295 sf = &ntstat_sk_stats_zero;
6296 }
6297
6298 if (countsp) {
6299 countsp->nstat_rxbytes = quicstats->necp_quic_counts.necp_stat_rxbytes;
6300 countsp->nstat_txbytes = quicstats->necp_quic_counts.necp_stat_txbytes;
6301
6302 countsp->nstat_rxduplicatebytes = quicstats->necp_quic_counts.necp_stat_rxduplicatebytes;
6303 countsp->nstat_rxoutoforderbytes = quicstats->necp_quic_counts.necp_stat_rxoutoforderbytes;
6304 countsp->nstat_txretransmit = quicstats->necp_quic_counts.necp_stat_txretransmit;
6305
6306 countsp->nstat_min_rtt = quicstats->necp_quic_counts.necp_stat_min_rtt;
6307 countsp->nstat_avg_rtt = quicstats->necp_quic_counts.necp_stat_avg_rtt;
6308 countsp->nstat_var_rtt = quicstats->necp_quic_counts.necp_stat_var_rtt;
6309
6310 // TODO: It would be good to expose QUIC stats for CH/SH retransmission and connection state
6311 // Supplement what the user level has told us with what we know from the flowswitch
6312 countsp->nstat_rxpackets = sf->sf_ipackets;
6313 countsp->nstat_txpackets = sf->sf_opackets;
6314 if (route_ifflags & NSTAT_IFNET_IS_CELLULAR) {
6315 countsp->nstat_cell_rxbytes = sf->sf_ibytes;
6316 countsp->nstat_cell_txbytes = sf->sf_obytes;
6317 } else if (route_ifflags & NSTAT_IFNET_IS_WIFI) {
6318 countsp->nstat_wifi_rxbytes = sf->sf_ibytes;
6319 countsp->nstat_wifi_txbytes = sf->sf_obytes;
6320 } else if (route_ifflags & NSTAT_IFNET_IS_WIRED) {
6321 countsp->nstat_wired_rxbytes = sf->sf_ibytes;
6322 countsp->nstat_wired_txbytes = sf->sf_obytes;
6323 }
6324 }
6325
6326 if (metadatap) {
6327 nstat_quic_descriptor *desc = (nstat_quic_descriptor *)metadatap;
6328 memset(desc, 0, sizeof(*desc));
6329
6330 // Metadata from the flow registration
6331 uuid_copy(desc->fuuid, flow_registration->registration_id);
6332
6333 // Metadata, that the necp client should have, in TLV format.
6334 pid_t effective_pid = client->proc_pid;
6335 necp_find_netstat_data(client, (union necp_sockaddr_union *)&desc->remote, &effective_pid, desc->euuid, &desc->traffic_class, &desc->fallback_mode);
6336 desc->epid = (u_int32_t)effective_pid;
6337
6338 // Metadata from the flow registration
6339 // This needs to revisited if multiple flows are created from one flow registration
6340 struct necp_client_flow *flow = NULL;
6341 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
6342 memcpy(&desc->local, &flow->local_addr, sizeof(desc->local));
6343 break;
6344 }
6345
6346 // Metadata from the route
6347 desc->ifindex = route_ifindex;
6348 desc->ifnet_properties = route_ifflags | nstat_diagnostic_flags;
6349
6350 // Basic metadata from userland
6351 desc->rcvbufsize = quicstats->necp_quic_basic.rcvbufsize;
6352 desc->rcvbufused = quicstats->necp_quic_basic.rcvbufused;
6353
6354 // Additional QUIC specific data
6355 desc->sndbufsize = quicstats->necp_quic_extra.sndbufsize;
6356 desc->sndbufused = quicstats->necp_quic_extra.sndbufused;
6357 desc->txunacked = quicstats->necp_quic_extra.txunacked;
6358 desc->txwindow = quicstats->necp_quic_extra.txwindow;
6359 desc->txcwindow = quicstats->necp_quic_extra.txcwindow;
6360 desc->traffic_mgt_flags = quicstats->necp_quic_extra.traffic_mgt_flags;
6361 desc->state = quicstats->necp_quic_extra.state;
6362
6363 // TODO: CC algo defines should be named agnostic of the protocol
6364 u_int32_t cc_alg_index = quicstats->necp_quic_extra.cc_alg_index;
6365 if (cc_alg_index < TCP_CC_ALGO_COUNT) {
6366 strlcpy(desc->cc_algo, tcp_cc_algo_list[cc_alg_index]->name, sizeof(desc->cc_algo));
6367 } else {
6368 strlcpy(desc->cc_algo, "unknown", sizeof(desc->cc_algo));
6369 }
6370
6371 memcpy(&desc->activity_bitmap, &sf->sf_activity, sizeof(sf->sf_activity));
6372 }
6373 return true;
6374 }
6375
6376 // Support functions for NetworkStatistics support for necp_client connections
6377
6378 static void
necp_client_inherit_from_parent(struct necp_client * client,struct necp_client * parent)6379 necp_client_inherit_from_parent(
6380 struct necp_client *client,
6381 struct necp_client *parent)
6382 {
6383 assert(client->original_parameters_source == NULL);
6384
6385 if (parent->original_parameters_source != NULL) {
6386 client->original_parameters_source = parent->original_parameters_source;
6387 } else {
6388 client->original_parameters_source = parent;
6389 }
6390 necp_client_retain(client->original_parameters_source);
6391 }
6392
6393 static void
necp_find_conn_netstat_data(struct necp_client * client,u_int32_t * ntstat_flags,pid_t * effective_pid,uuid_t puuid,uuid_t euuid)6394 necp_find_conn_netstat_data(struct necp_client *client,
6395 u_int32_t *ntstat_flags,
6396 pid_t *effective_pid,
6397 uuid_t puuid,
6398 uuid_t euuid)
6399 {
6400 bool has_remote_address = false;
6401 bool has_ip_protocol = false;
6402 bool has_transport_protocol = false;
6403 size_t offset = 0;
6404 u_int8_t *parameters;
6405 u_int32_t parameters_size;
6406
6407
6408 parameters = client->parameters;
6409 parameters_size = (u_int32_t)client->parameters_length;
6410
6411 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
6412 u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
6413 u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);
6414
6415 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
6416 // If the length is larger than what can fit in the remaining parameters size, bail
6417 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
6418 break;
6419 }
6420
6421 if (length > 0) {
6422 u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
6423 if (value != NULL) {
6424 switch (type) {
6425 case NECP_CLIENT_PARAMETER_APPLICATION: {
6426 if ((euuid) && (length >= sizeof(uuid_t))) {
6427 uuid_copy(euuid, value);
6428 }
6429 break;
6430 }
6431 case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
6432 if (length >= 1) {
6433 has_ip_protocol = true;
6434 }
6435 break;
6436 }
6437 case NECP_CLIENT_PARAMETER_PID: {
6438 if ((effective_pid) && length >= sizeof(pid_t)) {
6439 memcpy(effective_pid, value, sizeof(pid_t));
6440 }
6441 break;
6442 }
6443 case NECP_CLIENT_PARAMETER_PARENT_ID: {
6444 if ((puuid) && (length == sizeof(uuid_t))) {
6445 uuid_copy(puuid, value);
6446 }
6447 break;
6448 }
6449 // It is an implementation quirk that the remote address can be found in the necp parameters
6450 case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: {
6451 if (length >= sizeof(struct necp_policy_condition_addr)) {
6452 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
6453 if (necp_client_address_is_valid(&address_struct->address.sa)) {
6454 has_remote_address = true;
6455 }
6456 }
6457 break;
6458 }
6459 case NECP_CLIENT_PARAMETER_TRANSPORT_PROTOCOL: {
6460 if (length >= 1) {
6461 has_transport_protocol = true;
6462 }
6463 break;
6464 }
6465 default: {
6466 break;
6467 }
6468 }
6469 }
6470 }
6471 offset += sizeof(struct necp_tlv_header) + length;
6472 }
6473 if (ntstat_flags) {
6474 *ntstat_flags = (has_remote_address && has_ip_protocol && has_transport_protocol)? NSTAT_NECP_CONN_HAS_NET_ACCESS: 0;
6475 }
6476 }
6477
6478 static bool
necp_request_conn_netstats(nstat_provider_context ctx,u_int32_t * ifflagsp,nstat_counts * countsp,void * metadatap)6479 necp_request_conn_netstats(nstat_provider_context ctx,
6480 u_int32_t *ifflagsp,
6481 nstat_counts *countsp,
6482 void *metadatap)
6483 {
6484 if (ctx == NULL) {
6485 return false;
6486 }
6487 struct necp_client *client = (struct necp_client *)(uintptr_t)ctx;
6488 nstat_connection_descriptor *desc = (nstat_connection_descriptor *)metadatap;
6489
6490 if (ifflagsp) {
6491 necp_find_conn_netstat_data(client, ifflagsp, NULL, NULL, NULL);
6492 }
6493 if (countsp) {
6494 memset(countsp, 0, sizeof(*countsp));
6495 }
6496 if (desc) {
6497 memset(desc, 0, sizeof(*desc));
6498 // Metadata, that the necp client should have, in TLV format.
6499 pid_t effective_pid = client->proc_pid;
6500 necp_find_conn_netstat_data(client, &desc->ifnet_properties, &effective_pid, desc->puuid, desc->euuid);
6501 desc->epid = (u_int32_t)effective_pid;
6502
6503 // User level should obtain almost all connection information from an extension
6504 // leaving little to do here
6505 uuid_copy(desc->fuuid, client->latest_flow_registration_id);
6506 uuid_copy(desc->cuuid, client->client_id);
6507 }
6508 return true;
6509 }
6510
6511 #endif /* SKYWALK */
6512
// Returns 0 when the process/credential may act as a Skywalk observer or
// Nexus controller, non-zero otherwise. When Skywalk is compiled out there
// is nothing to protect, so the check always succeeds.
static int
necp_skywalk_priv_check_cred(proc_t p, kauth_cred_t cred)
{
#pragma unused(p, cred)
#if SKYWALK
	/* This includes Nexus controller and Skywalk observer privs */
	return skywalk_nxctl_check_privileges(p, cred);
#else /* !SKYWALK */
	return 0;
#endif /* !SKYWALK */
}
6524
6525 /// System calls
6526
// necp_open system call handler.
// Allocates a NECP client file descriptor (struct necp_fd_data), initializes
// its trees/lists/locks, and wires it into the process' file table. Opening
// with an observer flag requires Skywalk privileges or the
// PRIV_NET_PRIVILEGED_NETWORK_STATISTICS entitlement. A push observer is
// seeded with every existing client; a regular fd joins the global
// necp_fd_list. On failure, the fileproc and fd_data are unwound at "done".
int
necp_open(struct proc *p, struct necp_open_args *uap, int *retval)
{
#pragma unused(retval)
	int error = 0;
	struct necp_fd_data *fd_data = NULL;
	struct fileproc *fp = NULL;
	int fd = -1;

	if (uap->flags & NECP_OPEN_FLAG_OBSERVER ||
	    uap->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
		// Observers can see other processes' clients; gate on privilege
		if (necp_skywalk_priv_check_cred(p, kauth_cred_get()) != 0 &&
		    priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
			NECPLOG0(LOG_ERR, "Client does not hold necessary entitlement to observe other NECP clients");
			error = EACCES;
			goto done;
		}
	}

#if CONFIG_MACF
	error = mac_necp_check_open(p, uap->flags);
	if (error) {
		goto done;
	}
#endif /* CONFIG_MACF */

	error = falloc(p, &fp, &fd, vfs_context_current());
	if (error != 0) {
		goto done;
	}

	if ((fd_data = zalloc(necp_client_fd_zone)) == NULL) {
		error = ENOMEM;
		goto done;
	}

	memset(fd_data, 0, sizeof(*fd_data));

	fd_data->necp_fd_type = necp_fd_type_client;
	fd_data->flags = uap->flags;
	RB_INIT(&fd_data->clients);
	RB_INIT(&fd_data->flows);
	TAILQ_INIT(&fd_data->update_list);
	lck_mtx_init(&fd_data->fd_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
	klist_init(&fd_data->si.si_note);
	fd_data->proc_pid = proc_pid(p);
#if SKYWALK
	LIST_INIT(&fd_data->stats_arena_list);
#endif /* SKYWALK */

	// The fd is read-only from user space and never survives exec/fork
	fp->fp_flags |= FP_CLOEXEC | FP_CLOFORK;
	fp->fp_glob->fg_flag = FREAD;
	fp->fp_glob->fg_ops = &necp_fd_ops;
	fp_set_data(fp, fd_data);

	proc_fdlock(p);

	procfdtbl_releasefd(p, fd, NULL);
	fp_drop(p, fd, fp, 1);

	*retval = fd;

	if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
		NECP_OBSERVER_LIST_LOCK_EXCLUSIVE();
		LIST_INSERT_HEAD(&necp_fd_observer_list, fd_data, chain);
		OSIncrementAtomic(&necp_observer_fd_count);
		NECP_OBSERVER_LIST_UNLOCK();

		// Walk all existing clients and add them
		NECP_CLIENT_TREE_LOCK_SHARED();
		struct necp_client *existing_client = NULL;
		RB_FOREACH(existing_client, _necp_client_global_tree, &necp_client_global_tree) {
			NECP_CLIENT_LOCK(existing_client);
			necp_client_update_observer_add_internal(fd_data, existing_client);
			necp_client_update_observer_update_internal(fd_data, existing_client);
			NECP_CLIENT_UNLOCK(existing_client);
		}
		NECP_CLIENT_TREE_UNLOCK();
	} else {
		NECP_FD_LIST_LOCK_EXCLUSIVE();
		LIST_INSERT_HEAD(&necp_fd_list, fd_data, chain);
		OSIncrementAtomic(&necp_client_fd_count);
		NECP_FD_LIST_UNLOCK();
	}

	proc_fdunlock(p);

done:
	if (error != 0) {
		// Unwind: fp_free releases both the fileproc and the fd slot
		if (fp != NULL) {
			fp_free(p, fd, fp);
			fp = NULL;
		}
		if (fd_data != NULL) {
			zfree(necp_client_fd_zone, fd_data);
			fd_data = NULL;
		}
	}

	return error;
}
6628
6629 // All functions called directly from necp_client_action() to handle one of the
6630 // types should be marked with NECP_CLIENT_ACTION_FUNCTION. This ensures that
6631 // necp_client_action() does not inline all the actions into a single function.
6632 #define NECP_CLIENT_ACTION_FUNCTION __attribute__((noinline))
6633
// NECP_CLIENT_ACTION_ADD handler: creates a new NECP client from the
// user-supplied parameter TLVs in uap->buffer, copies the generated client
// UUID back to uap->client_id, and inserts the client into both the fd's
// tree and the global tree. Under SKYWALK this also enforces delegation /
// nexus / custom-protocol entitlements, reserves a namespace port for
// TCP/UDP listeners, inherits from a parent client if one was named, and
// opens ntstat provider state. On any failure the client is freed at "done".
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_add(struct proc *p, struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	const size_t buffer_size = uap->buffer_size;

	if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
		NECPLOG0(LOG_ERR, "NECP client observers with push enabled may not add their own clients");
		return EINVAL;
	}

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
	    buffer_size == 0 || buffer_size > NECP_MAX_CLIENT_PARAMETERS_SIZE || uap->buffer == 0) {
		return EINVAL;
	}

	// Z_NOFAIL: allocations cannot fail, so no NULL checks are needed here
	client = kalloc_type(struct necp_client, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	client->parameters = kalloc_data(buffer_size, Z_WAITOK | Z_NOFAIL);
	lck_mtx_init(&client->lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
	lck_mtx_init(&client->route_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);

	error = copyin(uap->buffer, client->parameters, buffer_size);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_add parameters copyin error (%d)", error);
		goto done;
	}

	os_ref_init(&client->reference_count, &necp_client_refgrp); // Hold our reference until close

	client->parameters_length = buffer_size;
	client->proc_pid = fd_data->proc_pid; // Save off proc pid in case the client will persist past fd
	client->agent_handle = (void *)fd_data;
	client->platform_binary = ((csproc_get_platform_binary(p) == 0) ? 0 : 1);

	necp_generate_client_id(client->client_id, false);
	LIST_INIT(&client->assertion_list);
	RB_INIT(&client->flow_registrations);

	NECP_CLIENT_LOG(client, "Adding client");

	error = copyout(client->client_id, uap->client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_add client_id copyout error (%d)", error);
		goto done;
	}

#if SKYWALK
	struct necp_client_parsed_parameters parsed_parameters = {};
	int parse_error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, &parsed_parameters);

	// Delegating traffic for another process (by upid or attributed bundle
	// id) requires the socket-delegate entitlement
	if (parse_error == 0 &&
	    ((parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID) ||
	    (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER))) {
		bool has_delegation_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0);
		if (!has_delegation_entitlement) {
			if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID) {
				NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by upid",
				    proc_name_address(p), proc_pid(p));
			}
			if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER) {
				NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to set attributed bundle identifier",
				    proc_name_address(p), proc_pid(p));
			}
			error = EPERM;
			goto done;
		}

		if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID) {
			// Save off delegated unique PID
			client->delegated_upid = parsed_parameters.delegated_upid;
		}
	}

	if (parse_error == 0 && parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) {
		bool has_nexus_entitlement = (necp_skywalk_priv_check_cred(p, kauth_cred_get()) == 0);
		if (!has_nexus_entitlement) {
			NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to open a custom nexus client",
			    proc_name_address(p), proc_pid(p));
			error = EPERM;
			goto done;
		}
	}

	if (parse_error == 0 && (parsed_parameters.flags &
	    (NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER | NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP))) {
		bool has_custom_protocol_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_CUSTOM_PROTOCOL, 0) == 0);
		if (!has_custom_protocol_entitlement) {
			NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement for custom protocol APIs",
			    proc_name_address(p), proc_pid(p));
			error = EPERM;
			goto done;
		}
	}

	// TCP/UDP listeners reserve their port in the network namespace so the
	// assignment is visible system-wide
	if (parse_error == 0 && parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER &&
	    (parsed_parameters.ip_protocol == IPPROTO_TCP || parsed_parameters.ip_protocol == IPPROTO_UDP)) {
		uint32_t *netns_addr = NULL;
		uint8_t netns_addr_len = 0;
		struct ns_flow_info flow_info = {};
		uuid_copy(flow_info.nfi_flow_uuid, client->client_id);
		flow_info.nfi_protocol = parsed_parameters.ip_protocol;
		flow_info.nfi_owner_pid = client->proc_pid;
		if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID) {
			flow_info.nfi_effective_pid = parsed_parameters.effective_pid;
		} else {
			flow_info.nfi_effective_pid = flow_info.nfi_owner_pid;
		}
		proc_name(flow_info.nfi_owner_pid, flow_info.nfi_owner_name, MAXCOMLEN);
		proc_name(flow_info.nfi_effective_pid, flow_info.nfi_effective_name, MAXCOMLEN);

		if (parsed_parameters.local_addr.sa.sa_family == AF_UNSPEC) {
			// Treat no local address as a wildcard IPv6
			// parsed_parameters is already initialized to all zeros
			parsed_parameters.local_addr.sin6.sin6_family = AF_INET6;
			parsed_parameters.local_addr.sin6.sin6_len = sizeof(struct sockaddr_in6);
		}

		switch (parsed_parameters.local_addr.sa.sa_family) {
		case AF_INET: {
			memcpy(&flow_info.nfi_laddr, &parsed_parameters.local_addr.sa, parsed_parameters.local_addr.sa.sa_len);
			netns_addr = (uint32_t *)&parsed_parameters.local_addr.sin.sin_addr;
			netns_addr_len = 4;
			break;
		}
		case AF_INET6: {
			memcpy(&flow_info.nfi_laddr, &parsed_parameters.local_addr.sa, parsed_parameters.local_addr.sa.sa_len);
			netns_addr = (uint32_t *)&parsed_parameters.local_addr.sin6.sin6_addr;
			netns_addr_len = 16;
			break;
		}

		default: {
			NECPLOG(LOG_ERR, "necp_client_add listener invalid address family (%d)", parsed_parameters.local_addr.sa.sa_family);
			error = EINVAL;
			goto done;
		}
		}
		if (parsed_parameters.local_addr.sin.sin_port == 0) {
			error = netns_reserve_ephemeral(&client->port_reservation, netns_addr, netns_addr_len, parsed_parameters.ip_protocol,
			    &parsed_parameters.local_addr.sin.sin_port, NETNS_LISTENER, &flow_info);
			if (error) {
				NECPLOG(LOG_ERR, "necp_client_add netns_reserve_ephemeral error (%d)", error);
				goto done;
			}

			// Update the parameter TLVs with the assigned port
			necp_client_update_local_port_parameters(client->parameters, (u_int32_t)client->parameters_length, parsed_parameters.local_addr.sin.sin_port);
		} else {
			error = netns_reserve(&client->port_reservation, netns_addr, netns_addr_len, parsed_parameters.ip_protocol,
			    parsed_parameters.local_addr.sin.sin_port, NETNS_LISTENER, &flow_info);
			if (error) {
				NECPLOG(LOG_ERR, "necp_client_add netns_reserve error (%d)", error);
				goto done;
			}
		}
	}
	if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID) {
		// The parent "should" be found on fd_data without having to search across the whole necp_fd_list
		// It would be nice to do this a little further down where there's another instance of NECP_FD_LOCK
		// but the logic here depends on the parsed parameters
		struct necp_client *parent = NULL;
		NECP_FD_LOCK(fd_data);
		parent = necp_client_fd_find_client_unlocked(fd_data, parsed_parameters.parent_uuid);
		if (parent != NULL) {
			necp_client_inherit_from_parent(client, parent);
		}
		NECP_FD_UNLOCK(fd_data);
		if (parent == NULL) {
			NECPLOG0(LOG_ERR, "necp_client_add, no necp_client_inherit_from_parent as can't find parent on fd_data");
		}
	}

#endif /* SKYWALK */

	necp_client_update_observer_add(client);

	// Insert into the fd-local tree, then the global tree, while holding the
	// fd lock so observers see a consistent view
	NECP_FD_LOCK(fd_data);
	RB_INSERT(_necp_client_tree, &fd_data->clients, client);
	OSIncrementAtomic(&necp_client_count);
	NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
	RB_INSERT(_necp_client_global_tree, &necp_client_global_tree, client);
	NECP_CLIENT_TREE_UNLOCK();

	// Prime the client result
	NECP_CLIENT_LOCK(client);
	(void)necp_update_client_result(current_proc(), fd_data, client, NULL);
	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);
#if SKYWALK
	// Now everything is set, it's safe to plumb this in to NetworkStatistics
	uint32_t ntstat_properties = 0;
	necp_find_conn_netstat_data(client, &ntstat_properties, NULL, NULL, NULL);

	client->nstat_context = nstat_provider_stats_open((nstat_provider_context)client,
	    NSTAT_PROVIDER_CONN_USERLAND, (u_int64_t)ntstat_properties, necp_request_conn_netstats, necp_find_conn_extension_info);
#endif /* SKYWALK */
done:
	if (error != 0 && client != NULL) {
		necp_client_free(client);
		client = NULL;
	}
	*retval = error;

	return error;
}
6840
// NECP_CLIENT_ACTION_CLAIM handler: transfers a client that was delegated to
// this process (client->delegated_upid matches the caller's unique pid) from
// whichever fd currently owns it to fd_data. Returns ENOENT if no matching
// delegated client is found.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_claim(struct proc *p, struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	uuid_t client_id = {};
	struct necp_client *client = NULL;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_claim copyin client_id error (%d)", error);
		goto done;
	}

	u_int64_t upid = proc_uniqueid(p);

	// Search every client fd for the named client; lock order is
	// fd list -> fd -> client
	NECP_FD_LIST_LOCK_SHARED();

	struct necp_fd_data *find_fd = NULL;
	LIST_FOREACH(find_fd, &necp_fd_list, chain) {
		NECP_FD_LOCK(find_fd);
		struct necp_client *find_client = necp_client_fd_find_client_and_lock(find_fd, client_id);
		if (find_client != NULL) {
			if (find_client->delegated_upid == upid) {
				// Matched the client to claim; remove from the old fd
				client = find_client;
				RB_REMOVE(_necp_client_tree, &find_fd->clients, client);
				// Extra reference keeps the client alive across the transfer
				necp_client_retain_locked(client);
			}
			NECP_CLIENT_UNLOCK(find_client);
		}
		NECP_FD_UNLOCK(find_fd);

		if (client != NULL) {
			break;
		}
	}

	NECP_FD_LIST_UNLOCK();

	if (client == NULL) {
		error = ENOENT;
		goto done;
	}

	client->proc_pid = fd_data->proc_pid; // Transfer client to claiming pid
	client->agent_handle = (void *)fd_data;
	client->platform_binary = ((csproc_get_platform_binary(p) == 0) ? 0 : 1);

	NECP_CLIENT_LOG(client, "Claiming client");

	// Add matched client to our fd and re-run result
	NECP_FD_LOCK(fd_data);
	RB_INSERT(_necp_client_tree, &fd_data->clients, client);
	NECP_CLIENT_LOCK(client);
	(void)necp_update_client_result(current_proc(), fd_data, client, NULL);
	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);

	// Drop the transfer reference taken above
	necp_client_release(client);

done:
	*retval = error;

	return error;
}
6911
// NECP_CLIENT_ACTION_REMOVE handler: removes a client (and all of its flow
// registrations) from this fd and from the global trees, then destroys it.
// uap->buffer may optionally carry final per-flow ifnet stats. Returns
// ENOENT if the client id is not found on this fd.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_remove(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	uuid_t client_id = {};
	struct ifnet_stats_per_flow flow_ifnet_stats = {};
	const size_t buffer_size = uap->buffer_size;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_remove copyin client_id error (%d)", error);
		goto done;
	}

	if (uap->buffer != 0 && buffer_size == sizeof(flow_ifnet_stats)) {
		error = copyin(uap->buffer, &flow_ifnet_stats, buffer_size);
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_remove flow_ifnet_stats copyin error (%d)", error);
			// Not fatal; make sure to zero-out stats in case of partial copy
			memset(&flow_ifnet_stats, 0, sizeof(flow_ifnet_stats));
			error = 0;
		}
	} else if (uap->buffer != 0) {
		NECPLOG(LOG_ERR, "necp_client_remove unexpected parameters length (%zu)", buffer_size);
	}

	NECP_FD_LOCK(fd_data);

	pid_t pid = fd_data->proc_pid;
	struct necp_client *client = necp_client_fd_find_client_unlocked(fd_data, client_id);

	NECP_CLIENT_LOG(client, "Removing client");

	if (client != NULL) {
		// Remove any flow registrations that match
		struct necp_client_flow_registration *flow_registration = NULL;
		struct necp_client_flow_registration *temp_flow_registration = NULL;
		RB_FOREACH_SAFE(flow_registration, _necp_fd_flow_tree, &fd_data->flows, temp_flow_registration) {
			if (flow_registration->client == client) {
#if SKYWALK
				necp_destroy_flow_stats(fd_data, flow_registration, NULL, TRUE);
#endif /* SKYWALK */
				NECP_FLOW_TREE_LOCK_EXCLUSIVE();
				RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
				NECP_FLOW_TREE_UNLOCK();
				RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
			}
		}
#if SKYWALK
		if (client->nstat_context != NULL) {
			// Main path, we expect stats to be in existence at this point
			nstat_provider_stats_close(client->nstat_context);
			client->nstat_context = NULL;
		} else {
			NECPLOG0(LOG_ERR, "necp_client_remove ntstat shutdown finds nstat_context NULL");
		}
#endif /* SKYWALK */
		// Remove client from lists
		NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
		RB_REMOVE(_necp_client_global_tree, &necp_client_global_tree, client);
		NECP_CLIENT_TREE_UNLOCK();
		RB_REMOVE(_necp_client_tree, &fd_data->clients, client);
	}

#if SKYWALK
	// If the currently-active arena is idle (has no more flows referring to it), or if there are defunct
	// arenas lingering in the list, schedule a threadcall to do the clean up. The idle check is done
	// by checking if the reference count is 3: one held by this client (will be released below when we
	// destroy it) when it's non-NULL; the rest held by stats_arena_{active,list}.
	if ((fd_data->stats_arena_active != NULL && fd_data->stats_arena_active->nai_use_count == 3) ||
	    (fd_data->stats_arena_active == NULL && !LIST_EMPTY(&fd_data->stats_arena_list))) {
		uint64_t deadline = 0;
		uint64_t leeway = 0;
		clock_interval_to_deadline(necp_close_arenas_timeout_microseconds, NSEC_PER_USEC, &deadline);
		clock_interval_to_absolutetime_interval(necp_close_arenas_timeout_leeway_microseconds, NSEC_PER_USEC, &leeway);

		thread_call_enter_delayed_with_leeway(necp_close_empty_arenas_tcall, NULL,
		    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
	}
#endif /* SKYWALK */

	NECP_FD_UNLOCK(fd_data);

	// Destruction happens outside the fd lock; pid was captured above
	if (client != NULL) {
		ASSERT(error == 0);
		necp_destroy_client(client, pid, true);
	} else {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_remove invalid client_id (%d)", error);
	}
done:
	*retval = error;

	return error;
}
7012
7013 static struct necp_client_flow_registration *
necp_client_fd_find_flow(struct necp_fd_data * client_fd,uuid_t flow_id)7014 necp_client_fd_find_flow(struct necp_fd_data *client_fd, uuid_t flow_id)
7015 {
7016 NECP_FD_ASSERT_LOCKED(client_fd);
7017 struct necp_client_flow_registration *flow = NULL;
7018
7019 if (necp_client_id_is_flow(flow_id)) {
7020 struct necp_client_flow_registration find;
7021 uuid_copy(find.registration_id, flow_id);
7022 flow = RB_FIND(_necp_fd_flow_tree, &client_fd->flows, &find);
7023 }
7024
7025 return flow;
7026 }
7027
7028 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_remove_flow(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)7029 necp_client_remove_flow(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
7030 {
7031 int error = 0;
7032 uuid_t flow_id = {};
7033 struct ifnet_stats_per_flow flow_ifnet_stats = {};
7034 const size_t buffer_size = uap->buffer_size;
7035
7036 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
7037 error = EINVAL;
7038 NECPLOG(LOG_ERR, "necp_client_remove_flow invalid client_id (length %zu)", (size_t)uap->client_id_len);
7039 goto done;
7040 }
7041
7042 error = copyin(uap->client_id, flow_id, sizeof(uuid_t));
7043 if (error) {
7044 NECPLOG(LOG_ERR, "necp_client_remove_flow copyin client_id error (%d)", error);
7045 goto done;
7046 }
7047
7048 if (uap->buffer != 0 && buffer_size == sizeof(flow_ifnet_stats)) {
7049 error = copyin(uap->buffer, &flow_ifnet_stats, buffer_size);
7050 if (error) {
7051 NECPLOG(LOG_ERR, "necp_client_remove flow_ifnet_stats copyin error (%d)", error);
7052 // Not fatal
7053 }
7054 } else if (uap->buffer != 0) {
7055 NECPLOG(LOG_ERR, "necp_client_remove unexpected parameters length (%zu)", buffer_size);
7056 }
7057
7058 NECP_FD_LOCK(fd_data);
7059 struct necp_client *client = NULL;
7060 struct necp_client_flow_registration *flow_registration = necp_client_fd_find_flow(fd_data, flow_id);
7061 if (flow_registration != NULL) {
7062 #if SKYWALK
7063 // Cleanup stats per flow
7064 necp_destroy_flow_stats(fd_data, flow_registration, &flow_ifnet_stats, TRUE);
7065 #endif /* SKYWALK */
7066 NECP_FLOW_TREE_LOCK_EXCLUSIVE();
7067 RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
7068 NECP_FLOW_TREE_UNLOCK();
7069 RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
7070
7071 client = flow_registration->client;
7072 if (client != NULL) {
7073 necp_client_retain(client);
7074 }
7075 }
7076 NECP_FD_UNLOCK(fd_data);
7077
7078 NECP_CLIENT_FLOW_LOG(client, flow_registration, "removing flow");
7079
7080 if (flow_registration != NULL && client != NULL) {
7081 NECP_CLIENT_LOCK(client);
7082 if (flow_registration->client == client) {
7083 necp_destroy_client_flow_registration(client, flow_registration, fd_data->proc_pid, false);
7084 }
7085 necp_client_release_locked(client);
7086 NECP_CLIENT_UNLOCK(client);
7087 }
7088
7089 done:
7090 *retval = error;
7091 if (error != 0) {
7092 NECPLOG(LOG_ERR, "Remove flow error (%d)", error);
7093 }
7094
7095 return error;
7096 }
7097
7098 // Don't inline the function since it includes necp_client_parsed_parameters on the stack
7099 static __attribute__((noinline)) int
necp_client_check_tcp_heuristics(struct necp_client * client,struct necp_client_flow * flow,u_int32_t * flags,u_int8_t * tfo_cookie,u_int8_t * tfo_cookie_len)7100 necp_client_check_tcp_heuristics(struct necp_client *client, struct necp_client_flow *flow, u_int32_t *flags, u_int8_t *tfo_cookie, u_int8_t *tfo_cookie_len)
7101 {
7102 struct necp_client_parsed_parameters parsed_parameters;
7103 int error = 0;
7104
7105 error = necp_client_parse_parameters(client, client->parameters,
7106 (u_int32_t)client->parameters_length,
7107 &parsed_parameters);
7108 if (error) {
7109 NECPLOG(LOG_ERR, "necp_client_parse_parameters error (%d)", error);
7110 return error;
7111 }
7112
7113 if ((flow->remote_addr.sa.sa_family != AF_INET &&
7114 flow->remote_addr.sa.sa_family != AF_INET6) ||
7115 (flow->local_addr.sa.sa_family != AF_INET &&
7116 flow->local_addr.sa.sa_family != AF_INET6)) {
7117 return EINVAL;
7118 }
7119
7120 NECP_CLIENT_ROUTE_LOCK(client);
7121
7122 if (client->current_route == NULL) {
7123 error = ENOENT;
7124 goto do_unlock;
7125 }
7126
7127 bool check_ecn = false;
7128 do {
7129 if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE) ==
7130 NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE) {
7131 check_ecn = true;
7132 break;
7133 }
7134
7135 if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE) ==
7136 NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE) {
7137 break;
7138 }
7139
7140 if (client->current_route != NULL) {
7141 if (client->current_route->rt_ifp->if_eflags & IFEF_ECN_ENABLE) {
7142 check_ecn = true;
7143 break;
7144 }
7145 if (client->current_route->rt_ifp->if_eflags & IFEF_ECN_DISABLE) {
7146 break;
7147 }
7148 }
7149
7150 bool inbound = ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) == 0);
7151 if ((inbound && tcp_ecn_inbound == 1) ||
7152 (!inbound && tcp_ecn_outbound == 1)) {
7153 check_ecn = true;
7154 }
7155 } while (false);
7156
7157 if (check_ecn) {
7158 if (tcp_heuristic_do_ecn_with_address(client->current_route->rt_ifp,
7159 (union sockaddr_in_4_6 *)&flow->local_addr)) {
7160 *flags |= NECP_CLIENT_RESULT_FLAG_ECN_ENABLED;
7161 }
7162 }
7163
7164 if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE) ==
7165 NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE) {
7166 if (!tcp_heuristic_do_tfo_with_address(client->current_route->rt_ifp,
7167 (union sockaddr_in_4_6 *)&flow->local_addr,
7168 (union sockaddr_in_4_6 *)&flow->remote_addr,
7169 tfo_cookie, tfo_cookie_len)) {
7170 *flags |= NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED;
7171 *tfo_cookie_len = 0;
7172 }
7173 } else {
7174 *flags |= NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED;
7175 *tfo_cookie_len = 0;
7176 }
7177 do_unlock:
7178 NECP_CLIENT_ROUTE_UNLOCK(client);
7179
7180 return error;
7181 }
7182
7183 static size_t
necp_client_calculate_flow_tlv_size(struct necp_client_flow_registration * flow_registration)7184 necp_client_calculate_flow_tlv_size(struct necp_client_flow_registration *flow_registration)
7185 {
7186 size_t assigned_results_size = 0;
7187 struct necp_client_flow *flow = NULL;
7188 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
7189 if (flow->assigned) {
7190 size_t header_length = 0;
7191 if (flow->nexus) {
7192 header_length = sizeof(struct necp_client_nexus_flow_header);
7193 } else {
7194 header_length = sizeof(struct necp_client_flow_header);
7195 }
7196 assigned_results_size += (header_length + flow->assigned_results_length);
7197
7198 if (flow->has_protoctl_event) {
7199 assigned_results_size += sizeof(struct necp_client_flow_protoctl_event_header);
7200 }
7201 }
7202 }
7203 return assigned_results_size;
7204 }
7205
// Serializes every assigned flow on flow_registration as TLVs and copies
// them out to user space at uap->buffer, starting past the client result at
// offset client->result_length + *assigned_results_cursor. Advances the
// cursor as data is written. For nexus flows this may also run TCP
// heuristics and emit a TFO cookie TLV; pending protocol-control events are
// emitted once and then cleared. For non-observers, marks the flow results
// as read. Returns 0 on success or a copyout error.
static int
necp_client_fillout_flow_tlvs(struct necp_client *client,
    bool client_is_observed,
    struct necp_client_flow_registration *flow_registration,
    struct necp_client_action_args *uap,
    size_t *assigned_results_cursor)
{
	int error = 0;
	struct necp_client_flow *flow = NULL;
	LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
		if (flow->assigned) {
			// Write TLV headers
			struct necp_client_nexus_flow_header header = {};
			u_int32_t length = 0;
			u_int32_t flags = 0;
			u_int8_t tfo_cookie_len = 0;
			u_int8_t type = 0;

			type = NECP_CLIENT_RESULT_FLOW_ID;
			length = sizeof(header.flow_header.flow_id);
			memcpy(&header.flow_header.flow_id_tlv_header.type, &type, sizeof(type));
			memcpy(&header.flow_header.flow_id_tlv_header.length, &length, sizeof(length));
			uuid_copy(header.flow_header.flow_id, flow_registration->registration_id);

			if (flow->nexus) {
				// Heuristics are checked once per flow; on success the
				// TFO cookie TLV is filled in-line in the header
				if (flow->check_tcp_heuristics) {
					u_int8_t tfo_cookie[NECP_TFO_COOKIE_LEN_MAX];
					tfo_cookie_len = NECP_TFO_COOKIE_LEN_MAX;

					if (necp_client_check_tcp_heuristics(client, flow, &flags,
					    tfo_cookie, &tfo_cookie_len) != 0) {
						tfo_cookie_len = 0;
					} else {
						flow->check_tcp_heuristics = FALSE;

						if (tfo_cookie_len != 0) {
							type = NECP_CLIENT_RESULT_TFO_COOKIE;
							length = tfo_cookie_len;
							memcpy(&header.tfo_cookie_tlv_header.type, &type, sizeof(type));
							memcpy(&header.tfo_cookie_tlv_header.length, &length, sizeof(length));
							memcpy(&header.tfo_cookie_value, tfo_cookie, tfo_cookie_len);
						}
					}
				}
			}

			// Header size depends on flow kind and whether a TFO cookie is
			// present; unused cookie space is trimmed from the copyout
			size_t header_length = 0;
			if (flow->nexus) {
				if (tfo_cookie_len != 0) {
					header_length = sizeof(struct necp_client_nexus_flow_header) - (NECP_TFO_COOKIE_LEN_MAX - tfo_cookie_len);
				} else {
					header_length = sizeof(struct necp_client_nexus_flow_header) - sizeof(struct necp_tlv_header) - NECP_TFO_COOKIE_LEN_MAX;
				}
			} else {
				header_length = sizeof(struct necp_client_flow_header);
			}

			type = NECP_CLIENT_RESULT_FLAGS;
			length = sizeof(header.flow_header.flags_value);
			memcpy(&header.flow_header.flags_tlv_header.type, &type, sizeof(type));
			memcpy(&header.flow_header.flags_tlv_header.length, &length, sizeof(length));
			if (flow->assigned) {
				flags |= NECP_CLIENT_RESULT_FLAG_FLOW_ASSIGNED;
			}
			if (flow->viable) {
				flags |= NECP_CLIENT_RESULT_FLAG_FLOW_VIABLE;
			}
			if (flow_registration->defunct) {
				flags |= NECP_CLIENT_RESULT_FLAG_DEFUNCT;
			}
			flags |= flow->necp_flow_flags;
			memcpy(&header.flow_header.flags_value, &flags, sizeof(flags));

			type = NECP_CLIENT_RESULT_INTERFACE;
			length = sizeof(header.flow_header.interface_value);
			memcpy(&header.flow_header.interface_tlv_header.type, &type, sizeof(type));
			memcpy(&header.flow_header.interface_tlv_header.length, &length, sizeof(length));

			struct necp_client_result_interface interface_struct;
			interface_struct.generation = 0;
			interface_struct.index = flow->interface_index;

			memcpy(&header.flow_header.interface_value, &interface_struct, sizeof(interface_struct));
			if (flow->nexus) {
				type = NECP_CLIENT_RESULT_NETAGENT;
				length = sizeof(header.agent_value);
				memcpy(&header.agent_tlv_header.type, &type, sizeof(type));
				memcpy(&header.agent_tlv_header.length, &length, sizeof(length));

				struct necp_client_result_netagent agent_struct;
				uuid_copy(agent_struct.netagent_uuid, flow->u.nexus_agent);
				agent_struct.generation = netagent_get_generation(agent_struct.netagent_uuid);

				memcpy(&header.agent_value, &agent_struct, sizeof(agent_struct));
			}

			// Don't include outer TLV header in length field
			type = NECP_CLIENT_RESULT_FLOW;
			length = (header_length - sizeof(struct necp_tlv_header) + flow->assigned_results_length);
			if (flow->has_protoctl_event) {
				length += sizeof(struct necp_client_flow_protoctl_event_header);
			}
			memcpy(&header.flow_header.outer_header.type, &type, sizeof(type));
			memcpy(&header.flow_header.outer_header.length, &length, sizeof(length));

			error = copyout(&header, uap->buffer + client->result_length + *assigned_results_cursor, header_length);
			if (error) {
				NECPLOG(LOG_ERR, "necp_client_copy assigned results tlv_header copyout error (%d)", error);
				return error;
			}
			*assigned_results_cursor += header_length;

			if (flow->assigned_results && flow->assigned_results_length) {
				// Write inner TLVs
				error = copyout(flow->assigned_results, uap->buffer + client->result_length + *assigned_results_cursor,
				    flow->assigned_results_length);
				if (error) {
					NECPLOG(LOG_ERR, "necp_client_copy assigned results copyout error (%d)", error);
					return error;
				}
			}
			*assigned_results_cursor += flow->assigned_results_length;

			/* Read the protocol event and reset it */
			if (flow->has_protoctl_event) {
				struct necp_client_flow_protoctl_event_header protoctl_event_header = {};

				type = NECP_CLIENT_RESULT_PROTO_CTL_EVENT;
				length = sizeof(protoctl_event_header.protoctl_event);

				memcpy(&protoctl_event_header.protoctl_tlv_header.type, &type, sizeof(type));
				memcpy(&protoctl_event_header.protoctl_tlv_header.length, &length, sizeof(length));
				memcpy(&protoctl_event_header.protoctl_event, &flow->protoctl_event,
				    sizeof(flow->protoctl_event));

				error = copyout(&protoctl_event_header, uap->buffer + client->result_length + *assigned_results_cursor,
				    sizeof(protoctl_event_header));

				if (error) {
					NECPLOG(LOG_ERR, "necp_client_copy protocol control event results"
					    " tlv_header copyout error (%d)", error);
					return error;
				}
				*assigned_results_cursor += sizeof(protoctl_event_header);
				// Events are delivered once, then cleared
				flow->has_protoctl_event = FALSE;
				flow->protoctl_event.protoctl_event_code = 0;
				flow->protoctl_event.protoctl_event_val = 0;
				flow->protoctl_event.protoctl_event_tcp_seq_num = 0;
			}
		}
	}
	if (!client_is_observed) {
		flow_registration->flow_result_read = TRUE;
	}
	return 0;
}
7362
// Copy a client's parameters, result TLVs, group members, and per-flow TLVs
// out to the user buffer described by uap, depending on uap->action.
// The client lock must be held by the caller. When client_is_observed is
// true the copy does not consume the client's unread state.
// Returns 0 or an errno; *retval is set to the number of bytes copied.
static int
necp_client_copy_internal(struct necp_client *client, uuid_t client_id, bool client_is_observed, struct necp_client_action_args *uap, int *retval)
{
	NECP_CLIENT_ASSERT_LOCKED(client);
	int error = 0;
	// Copy results out
	if (uap->action == NECP_CLIENT_ACTION_COPY_PARAMETERS) {
		// The caller's buffer must hold the entire raw parameters TLV buffer
		if (uap->buffer_size < client->parameters_length) {
			return EINVAL;
		}
		error = copyout(client->parameters, uap->buffer, client->parameters_length);
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_copy parameters copyout error (%d)", error);
			return error;
		}
		*retval = client->parameters_length;
	} else if (uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT &&
	    client->result_read && client->group_members_read && !necp_client_has_unread_flows(client)) {
		// Copy updates only, but nothing to read
		// Just return 0 for bytes read
		*retval = 0;
	} else if (uap->action == NECP_CLIENT_ACTION_COPY_RESULT ||
	    uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT) {
		// Total size of the flow TLVs that will follow the client result
		size_t assigned_results_size = client->assigned_group_members_length;

		bool some_flow_is_defunct = false;
		struct necp_client_flow_registration *single_flow_registration = NULL;
		if (necp_client_id_is_flow(client_id)) {
			// The caller passed a flow registration ID: copy only that flow
			single_flow_registration = necp_client_find_flow(client, client_id);
			if (single_flow_registration != NULL) {
				assigned_results_size += necp_client_calculate_flow_tlv_size(single_flow_registration);
			}
		} else {
			// This request is for the client, so copy everything
			struct necp_client_flow_registration *flow_registration = NULL;
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
				if (flow_registration->defunct) {
					some_flow_is_defunct = true;
				}
				assigned_results_size += necp_client_calculate_flow_tlv_size(flow_registration);
			}
		}
		if (uap->buffer_size < (client->result_length + assigned_results_size)) {
			return EINVAL;
		}

		// Legacy single-flow clients expect the defunct flag in the client-level
		// result flags TLV; temporarily patch it in for the copyout and revert after.
		u_int32_t original_flags = 0;
		bool flags_updated = false;
		if (some_flow_is_defunct && client->legacy_client_is_flow) {
			// If our client expects the defunct flag in the client, add it now
			u_int32_t client_flags = 0;
			u_int32_t value_size = 0;
			u_int8_t *flags_pointer = necp_buffer_get_tlv_value(client->result, 0, &value_size);
			if (flags_pointer != NULL && value_size == sizeof(client_flags)) {
				memcpy(&client_flags, flags_pointer, value_size);
				original_flags = client_flags;
				client_flags |= NECP_CLIENT_RESULT_FLAG_DEFUNCT;
				(void)necp_buffer_write_tlv_if_different(client->result, NECP_CLIENT_RESULT_FLAGS,
				    sizeof(client_flags), &client_flags, &flags_updated,
				    client->result, sizeof(client->result));
			}
		}

		error = copyout(client->result, uap->buffer, client->result_length);

		if (flags_updated) {
			// Revert stored flags
			(void)necp_buffer_write_tlv_if_different(client->result, NECP_CLIENT_RESULT_FLAGS,
			    sizeof(original_flags), &original_flags, &flags_updated,
			    client->result, sizeof(client->result));
		}

		if (error != 0) {
			NECPLOG(LOG_ERR, "necp_client_copy result copyout error (%d)", error);
			return error;
		}

		// Group member TLVs directly follow the client result in the user buffer
		if (client->assigned_group_members != NULL && client->assigned_group_members_length > 0) {
			error = copyout(client->assigned_group_members, uap->buffer + client->result_length, client->assigned_group_members_length);
			if (error != 0) {
				NECPLOG(LOG_ERR, "necp_client_copy group members copyout error (%d)", error);
				return error;
			}
		}

		size_t assigned_results_cursor = client->assigned_group_members_length; // Start with an offset based on the group members
		if (necp_client_id_is_flow(client_id)) {
			if (single_flow_registration != NULL) {
				error = necp_client_fillout_flow_tlvs(client, client_is_observed, single_flow_registration, uap, &assigned_results_cursor);
				if (error != 0) {
					return error;
				}
			}
		} else {
			// This request is for the client, so copy everything
			struct necp_client_flow_registration *flow_registration = NULL;
			RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
				error = necp_client_fillout_flow_tlvs(client, client_is_observed, flow_registration, uap, &assigned_results_cursor);
				if (error != 0) {
					return error;
				}
			}
		}

		*retval = client->result_length + assigned_results_cursor;

		// Observers only peek; do not consume the owner's unread state
		if (!client_is_observed) {
			client->result_read = TRUE;
			client->group_members_read = TRUE;
		}
	}

	return 0;
}
7477
// NECP_CLIENT_ACTION_COPY_{PARAMETERS,RESULT,UPDATED_RESULT} handler.
// Resolves the target client — by explicit ID, by wildcard (first client on
// this fd with unread state), or, for observer fds, via the global client
// tree — then delegates the actual copyout to necp_client_copy_internal().
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	uuid_clear(client_id);

	*retval = 0;

	if (uap->buffer_size == 0 || uap->buffer == 0) {
		return EINVAL;
	}

	if (uap->action != NECP_CLIENT_ACTION_COPY_PARAMETERS &&
	    uap->action != NECP_CLIENT_ACTION_COPY_RESULT &&
	    uap->action != NECP_CLIENT_ACTION_COPY_UPDATED_RESULT) {
		return EINVAL;
	}

	// A missing or null client ID selects wildcard mode below
	if (uap->client_id) {
		if (uap->client_id_len != sizeof(uuid_t)) {
			NECPLOG(LOG_ERR, "Incorrect length (got %zu, expected %zu)", (size_t)uap->client_id_len, sizeof(uuid_t));
			return ERANGE;
		}

		error = copyin(uap->client_id, client_id, sizeof(uuid_t));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_copy client_id copyin error (%d)", error);
			return error;
		}
	}

	const bool is_wildcard = (bool)uuid_is_null(client_id);

	NECP_FD_LOCK(fd_data);

	if (is_wildcard) {
		// Wildcard: pick the first client on this fd with unread state
		if (uap->action == NECP_CLIENT_ACTION_COPY_RESULT || uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT) {
			struct necp_client *find_client = NULL;
			RB_FOREACH(find_client, _necp_client_tree, &fd_data->clients) {
				NECP_CLIENT_LOCK(find_client);
				if (!find_client->result_read || !find_client->group_members_read || necp_client_has_unread_flows(find_client)) {
					client = find_client;
					// Leave the client locked, and break
					break;
				}
				NECP_CLIENT_UNLOCK(find_client);
			}
		}
	} else {
		client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	}

	if (client != NULL) {
		// If client is set, it is locked
		error = necp_client_copy_internal(client, client_id, FALSE, uap, retval);
		NECP_CLIENT_UNLOCK(client);
	}

	// Unlock our own fd before moving on or returning
	NECP_FD_UNLOCK(fd_data);

	if (client == NULL) {
		if (fd_data->flags & NECP_OPEN_FLAG_OBSERVER) {
			// Observers are allowed to lookup clients on other fds

			// Lock tree
			NECP_CLIENT_TREE_LOCK_SHARED();

			bool found_client = FALSE;

			client = necp_find_client_and_lock(client_id);
			if (client != NULL) {
				// Matched, copy out data; observed reads leave unread state intact
				found_client = TRUE;
				error = necp_client_copy_internal(client, client_id, TRUE, uap, retval);
				NECP_CLIENT_UNLOCK(client);
			}

			// Unlock tree
			NECP_CLIENT_TREE_UNLOCK();

			// No client found, fail
			if (!found_client) {
				return ENOENT;
			}
		} else {
			// No client found, and not allowed to search other fds, fail
			return ENOENT;
		}
	}

	return error;
}
7573
7574 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_client_update(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)7575 necp_client_copy_client_update(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
7576 {
7577 int error = 0;
7578
7579 *retval = 0;
7580
7581 if (!(fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER)) {
7582 NECPLOG0(LOG_ERR, "NECP fd is not observer, cannot copy client update");
7583 return EINVAL;
7584 }
7585
7586 if (uap->client_id_len != sizeof(uuid_t) || uap->client_id == 0) {
7587 NECPLOG0(LOG_ERR, "Client id invalid, cannot copy client update");
7588 return EINVAL;
7589 }
7590
7591 if (uap->buffer_size == 0 || uap->buffer == 0) {
7592 NECPLOG0(LOG_ERR, "Buffer invalid, cannot copy client update");
7593 return EINVAL;
7594 }
7595
7596 NECP_FD_LOCK(fd_data);
7597 struct necp_client_update *client_update = TAILQ_FIRST(&fd_data->update_list);
7598 if (client_update != NULL) {
7599 TAILQ_REMOVE(&fd_data->update_list, client_update, chain);
7600 VERIFY(fd_data->update_count > 0);
7601 fd_data->update_count--;
7602 }
7603 NECP_FD_UNLOCK(fd_data);
7604
7605 if (client_update != NULL) {
7606 error = copyout(client_update->client_id, uap->client_id, sizeof(uuid_t));
7607 if (error) {
7608 NECPLOG(LOG_ERR, "Copy client update copyout client id error (%d)", error);
7609 } else {
7610 if (uap->buffer_size < client_update->update_length) {
7611 NECPLOG(LOG_ERR, "Buffer size cannot hold update (%zu < %zu)", (size_t)uap->buffer_size, client_update->update_length);
7612 error = EINVAL;
7613 } else {
7614 error = copyout(client_update->update, uap->buffer, client_update->update_length);
7615 if (error) {
7616 NECPLOG(LOG_ERR, "Copy client update copyout error (%d)", error);
7617 } else {
7618 *retval = client_update->update_length;
7619 }
7620 }
7621 }
7622
7623 necp_client_update_free(client_update);
7624 client_update = NULL;
7625 } else {
7626 error = ENOENT;
7627 }
7628
7629 return error;
7630 }
7631
// Translate a client's parsed parameter TLVs into the nexus parameter
// structure handed to network agents. Called with the client lock held.
// Fills in best-effort values even when parsing fails, and returns the
// parse error (if any) to the caller.
static int
necp_client_copy_parameters_locked(struct necp_client *client,
    struct necp_client_nexus_parameters *parameters)
{
	VERIFY(parameters != NULL);

	struct necp_client_parsed_parameters parsed_parameters = {};
	int error = necp_client_parse_parameters(client, client->parameters, (u_int32_t)client->parameters_length, &parsed_parameters);

	parameters->pid = client->proc_pid;
	// Effective PID falls back to the owning process PID when not explicitly set
	if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID) {
		parameters->epid = parsed_parameters.effective_pid;
	} else {
		parameters->epid = parameters->pid;
	}
#if SKYWALK
	parameters->port_reservation = client->port_reservation;
#endif /* SKYWALK */
	memcpy(&parameters->local_addr, &parsed_parameters.local_addr, sizeof(parameters->local_addr));
	memcpy(&parameters->remote_addr, &parsed_parameters.remote_addr, sizeof(parameters->remote_addr));
	parameters->ip_protocol = parsed_parameters.ip_protocol;
	// Transport protocol defaults to the IP protocol when not explicitly set
	if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL) {
		parameters->transport_protocol = parsed_parameters.transport_protocol;
	} else {
		parameters->transport_protocol = parsed_parameters.ip_protocol;
	}
	parameters->ethertype = parsed_parameters.ethertype;
	parameters->traffic_class = parsed_parameters.traffic_class;
	// A non-null override effective UUID on the client wins over the parsed one
	if (uuid_is_null(client->override_euuid)) {
		uuid_copy(parameters->euuid, parsed_parameters.effective_uuid);
	} else {
		uuid_copy(parameters->euuid, client->override_euuid);
	}
	parameters->is_listener = (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) ? 1 : 0;
	parameters->is_interpose = (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) ? 1 : 0;
	parameters->is_custom_ether = (parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) ? 1 : 0;
	parameters->policy_id = client->policy_id;

	// parse client result flag (first TLV in the stored result buffer)
	u_int32_t client_result_flags = 0;
	u_int32_t value_size = 0;
	u_int8_t *flags_pointer = NULL;
	flags_pointer = necp_buffer_get_tlv_value(client->result, 0, &value_size);
	if (flags_pointer && value_size == sizeof(client_result_flags)) {
		memcpy(&client_result_flags, flags_pointer, value_size);
	}
	parameters->allow_qos_marking = (client_result_flags & NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING) ? 1 : 0;

	// Translate the local address preference into address-selection overrides
	if (parsed_parameters.valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE) {
		if (parsed_parameters.local_address_preference == NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE_DEFAULT) {
			parameters->override_address_selection = false;
		} else if (parsed_parameters.local_address_preference == NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE_TEMPORARY) {
			parameters->override_address_selection = true;
			parameters->use_stable_address = false;
		} else if (parsed_parameters.local_address_preference == NECP_CLIENT_PARAMETER_LOCAL_ADDRESS_PREFERENCE_STABLE) {
			parameters->override_address_selection = true;
			parameters->use_stable_address = true;
		}
	} else {
		parameters->override_address_selection = false;
	}

	return error;
}
7696
7697 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_list(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)7698 necp_client_list(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
7699 {
7700 int error = 0;
7701 struct necp_client *find_client = NULL;
7702 uuid_t *list = NULL;
7703 u_int32_t requested_client_count = 0;
7704 u_int32_t client_count = 0;
7705 size_t copy_buffer_size = 0;
7706
7707 if (uap->buffer_size < sizeof(requested_client_count) || uap->buffer == 0) {
7708 error = EINVAL;
7709 goto done;
7710 }
7711
7712 if (!(fd_data->flags & NECP_OPEN_FLAG_OBSERVER)) {
7713 NECPLOG0(LOG_ERR, "Client does not hold necessary entitlement to list other NECP clients");
7714 error = EACCES;
7715 goto done;
7716 }
7717
7718 error = copyin(uap->buffer, &requested_client_count, sizeof(requested_client_count));
7719 if (error) {
7720 goto done;
7721 }
7722
7723 if (os_mul_overflow(sizeof(uuid_t), requested_client_count, ©_buffer_size)) {
7724 error = ERANGE;
7725 goto done;
7726 }
7727
7728 if (uap->buffer_size - sizeof(requested_client_count) != copy_buffer_size) {
7729 error = EINVAL;
7730 goto done;
7731 }
7732
7733 if (copy_buffer_size > NECP_MAX_CLIENT_LIST_SIZE) {
7734 error = EINVAL;
7735 goto done;
7736 }
7737
7738 if (requested_client_count > 0) {
7739 if ((list = (uuid_t*)kalloc_data(copy_buffer_size, Z_WAITOK | Z_ZERO)) == NULL) {
7740 error = ENOMEM;
7741 goto done;
7742 }
7743 }
7744
7745 // Lock tree
7746 NECP_CLIENT_TREE_LOCK_SHARED();
7747
7748 find_client = NULL;
7749 RB_FOREACH(find_client, _necp_client_global_tree, &necp_client_global_tree) {
7750 NECP_CLIENT_LOCK(find_client);
7751 if (!uuid_is_null(find_client->client_id)) {
7752 if (client_count < requested_client_count) {
7753 uuid_copy(list[client_count], find_client->client_id);
7754 }
7755 client_count++;
7756 }
7757 NECP_CLIENT_UNLOCK(find_client);
7758 }
7759
7760 // Unlock tree
7761 NECP_CLIENT_TREE_UNLOCK();
7762
7763 error = copyout(&client_count, uap->buffer, sizeof(client_count));
7764 if (error) {
7765 NECPLOG(LOG_ERR, "necp_client_list buffer copyout error (%d)", error);
7766 goto done;
7767 }
7768
7769 if (requested_client_count > 0 &&
7770 client_count > 0 &&
7771 list != NULL) {
7772 error = copyout(list, uap->buffer + sizeof(client_count), copy_buffer_size);
7773 if (error) {
7774 NECPLOG(LOG_ERR, "necp_client_list client count copyout error (%d)", error);
7775 goto done;
7776 }
7777 }
7778 done:
7779 if (list != NULL) {
7780 kfree_data(list, copy_buffer_size);
7781 }
7782 *retval = error;
7783
7784 return error;
7785 }
7786
// NECP_CLIENT_ACTION_ADD_FLOW handler: create a new flow registration on an
// existing client, optionally binding it to a nexus agent (SKYWALK), asserting
// a browse/resolve agent, and/or initializing statistics. On success the
// request structure — including the newly assigned registration ID — is
// copied back out to the caller's buffer.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_add_flow(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	struct necp_client_nexus_parameters parameters = {};
	struct proc *proc = PROC_NULL;
	struct necp_client_add_flow *add_request = NULL;
	struct necp_client_add_flow *allocated_add_request = NULL;
	struct necp_client_add_flow_default default_add_request = {};
	const size_t buffer_size = uap->buffer_size;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		NECPLOG(LOG_ERR, "necp_client_add_flow invalid client_id (length %zu)", (size_t)uap->client_id_len);
		goto done;
	}

	// Bound the request: at least the base struct, at most 4x the default size
	if (uap->buffer == 0 || buffer_size < sizeof(struct necp_client_add_flow) ||
	    buffer_size > sizeof(struct necp_client_add_flow_default) * 4) {
		error = EINVAL;
		NECPLOG(LOG_ERR, "necp_client_add_flow invalid buffer (length %zu)", buffer_size);
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_add_flow copyin client_id error (%d)", error);
		goto done;
	}

	// Copy in the request: use the stack struct when it fits, else allocate
	if (buffer_size <= sizeof(struct necp_client_add_flow_default)) {
		// Fits in default size
		error = copyin(uap->buffer, &default_add_request, buffer_size);
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_add_flow copyin default_add_request error (%d)", error);
			goto done;
		}

		add_request = (struct necp_client_add_flow *)&default_add_request;
	} else {
		allocated_add_request = (struct necp_client_add_flow *)kalloc_data(buffer_size, Z_WAITOK | Z_ZERO);
		if (allocated_add_request == NULL) {
			error = ENOMEM;
			goto done;
		}

		error = copyin(uap->buffer, allocated_add_request, buffer_size);
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_add_flow copyin default_add_request error (%d)", error);
			goto done;
		}

		add_request = allocated_add_request;
	}

	NECP_FD_LOCK(fd_data);
	pid_t pid = fd_data->proc_pid;
	proc = proc_find(pid);
	if (proc == PROC_NULL) {
		NECP_FD_UNLOCK(fd_data);
		// NOTE(review): error is still 0 when logged here; ESRCH is set just after
		NECPLOG(LOG_ERR, "necp_client_add_flow process not found for pid %d error (%d)", pid, error);
		error = ESRCH;
		goto done;
	}

	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client == NULL) {
		error = ENOENT;
		NECP_FD_UNLOCK(fd_data);
		goto done;
	}

	// Using ADD_FLOW indicates that the client supports multiple flows per client
	client->legacy_client_is_flow = false;

	// Hold a reference on the client across the agent messaging below
	necp_client_retain_locked(client);
	necp_client_copy_parameters_locked(client, &parameters);

	struct necp_client_flow_registration *new_registration = necp_client_create_flow_registration(fd_data, client);
	if (new_registration == NULL) {
		// NOTE(review): this path does not call necp_client_release() for the
		// reference taken above, unlike the common exit below — confirm whether
		// this is an intentional ownership transfer or a reference leak.
		error = ENOMEM;
		NECP_CLIENT_UNLOCK(client);
		NECP_FD_UNLOCK(fd_data);
		NECPLOG0(LOG_ERR, "Failed to allocate flow registration");
		goto done;
	}

	new_registration->flags = add_request->flags;

	// Copy new ID out to caller
	uuid_copy(add_request->registration_id, new_registration->registration_id);

	NECP_CLIENT_FLOW_LOG(client, new_registration, "adding flow");

	// Copy override address
	if (add_request->flags & NECP_CLIENT_FLOW_FLAGS_OVERRIDE_ADDRESS) {
		// The override sockaddr trails the stats request array in the buffer
		size_t offset_of_address = (sizeof(struct necp_client_add_flow) +
		    add_request->stats_request_count * sizeof(struct necp_client_flow_stats));
		if (buffer_size >= offset_of_address + sizeof(struct sockaddr_in)) {
			struct sockaddr *override_address = (struct sockaddr *)(((uint8_t *)add_request) + offset_of_address);
			// Validate sa_len against both the buffer and the destination field
			if (buffer_size >= offset_of_address + override_address->sa_len &&
			    override_address->sa_len <= sizeof(parameters.remote_addr)) {
				memcpy(&parameters.remote_addr, override_address, override_address->sa_len);
			}
		}
	}

#if SKYWALK
	if (add_request->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS) {
		void *assigned_results = NULL;
		size_t assigned_results_length = 0;
		uint32_t interface_index = 0;

		// Validate that the nexus UUID is assigned
		bool found_nexus = false;
		for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
			if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
				struct necp_client_interface_option *option = &client->interface_options[option_i];
				if (uuid_compare(option->nexus_agent, add_request->agent_uuid) == 0) {
					interface_index = option->interface_index;
					found_nexus = true;
					break;
				}
			} else {
				// Options beyond the static array spill into the extra options list
				struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
				if (uuid_compare(option->nexus_agent, add_request->agent_uuid) == 0) {
					interface_index = option->interface_index;
					found_nexus = true;
					break;
				}
			}
		}

		if (!found_nexus) {
			NECPLOG0(LOG_ERR, "Requested nexus not found");
		} else {
			necp_client_add_nexus_flow_if_needed(new_registration, add_request->agent_uuid, interface_index);

			error = netagent_client_message_with_params(add_request->agent_uuid,
			    ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
			    client->client_id :
			    new_registration->registration_id),
			    pid, client->agent_handle,
			    NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS,
			    (struct necp_client_agent_parameters *)&parameters,
			    &assigned_results, &assigned_results_length);
			if (error != 0) {
				VERIFY(assigned_results == NULL);
				VERIFY(assigned_results_length == 0);
				NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error);
			} else if (assigned_results != NULL) {
				// If the result is not consumed, we still own it and must free it
				if (!necp_assign_client_result_locked(proc, fd_data, client, new_registration, add_request->agent_uuid,
				    assigned_results, assigned_results_length, false)) {
					kfree_data(assigned_results, assigned_results_length);
				}
			}
		}
	}

	// Don't request stats if nexus creation fails
	if (error == 0 && add_request->stats_request_count > 0 && necp_arena_initialize(fd_data, true) == 0) {
		// NOTE(review): only the first stats request is serviced even when
		// stats_request_count is larger — confirm this is intentional.
		struct necp_client_flow_stats *stats_request = (struct necp_client_flow_stats *)&add_request->stats_requests[0];
		struct necp_stats_bufreq bufreq = {};

		NECP_CLIENT_FLOW_LOG(client, new_registration, "Initializing stats");

		bufreq.necp_stats_bufreq_id = NECP_CLIENT_STATISTICS_BUFREQ_ID;
		bufreq.necp_stats_bufreq_type = stats_request->stats_type;
		bufreq.necp_stats_bufreq_ver = stats_request->stats_version;
		bufreq.necp_stats_bufreq_size = stats_request->stats_size;
		bufreq.necp_stats_bufreq_uaddr = stats_request->stats_addr;
		(void)necp_stats_initialize(fd_data, client, new_registration, &bufreq);
		// Reflect the (possibly adjusted) stats parameters back to the caller
		stats_request->stats_type = bufreq.necp_stats_bufreq_type;
		stats_request->stats_version = bufreq.necp_stats_bufreq_ver;
		stats_request->stats_size = bufreq.necp_stats_bufreq_size;
		stats_request->stats_addr = bufreq.necp_stats_bufreq_uaddr;
	}
#endif /* SKYWALK */

	if (error == 0 &&
	    (add_request->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE ||
	    add_request->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) {
		// Browse/resolve flows assert the agent; find the interface hosting it
		uint32_t interface_index = IFSCOPE_NONE;
		ifnet_head_lock_shared();
		struct ifnet *interface = NULL;
		TAILQ_FOREACH(interface, &ifnet_head, if_link) {
			ifnet_lock_shared(interface);
			if (interface->if_agentids != NULL) {
				for (u_int32_t i = 0; i < interface->if_agentcount; i++) {
					if (uuid_compare(interface->if_agentids[i], add_request->agent_uuid) == 0) {
						interface_index = interface->if_index;
						break;
					}
				}
			}
			ifnet_lock_done(interface);
			if (interface_index != IFSCOPE_NONE) {
				break;
			}
		}
		ifnet_head_done();

		necp_client_add_nexus_flow_if_needed(new_registration, add_request->agent_uuid, interface_index);

		error = netagent_client_message_with_params(add_request->agent_uuid,
		    ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
		    client->client_id :
		    new_registration->registration_id),
		    pid, client->agent_handle,
		    NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT,
		    (struct necp_client_agent_parameters *)&parameters,
		    NULL, NULL);
		if (error != 0) {
			NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error);
		}
	}

	if (error != 0) {
		// Encountered an error in adding the flow, destroy the flow registration
#if SKYWALK
		necp_destroy_flow_stats(fd_data, new_registration, NULL, false);
#endif /* SKYWALK */
		NECP_FLOW_TREE_LOCK_EXCLUSIVE();
		RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, new_registration);
		NECP_FLOW_TREE_UNLOCK();
		RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, new_registration);
		necp_destroy_client_flow_registration(client, new_registration, fd_data->proc_pid, true);
		new_registration = NULL;
	}

	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);

	// Drop the reference taken by necp_client_retain_locked() above
	necp_client_release(client);

	if (error != 0) {
		goto done;
	}

	// Copy the request back out to the caller with assigned fields
	error = copyout(add_request, uap->buffer, buffer_size);
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_client_add_flow copyout add_request error (%d)", error);
	}

done:
	*retval = error;
	if (error != 0) {
		NECPLOG(LOG_ERR, "Add flow error (%d)", error);
	}

	if (allocated_add_request != NULL) {
		kfree_data(allocated_add_request, buffer_size);
	}

	if (proc != PROC_NULL) {
		proc_rele(proc);
	}
	return error;
}
8049
8050 #if SKYWALK
8051
// NECP_CLIENT_ACTION_REQUEST_NEXUS handler (legacy single-flow clients):
// binds the client itself — identified by its client ID — to the first
// assigned nexus agent found in its interface options, and optionally
// initializes statistics when the caller provides a bufreq-sized buffer.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_request_nexus(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	struct necp_client_nexus_parameters parameters = {};
	struct proc *proc = PROC_NULL;
	const size_t buffer_size = uap->buffer_size;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_request_nexus copyin client_id error (%d)", error);
		goto done;
	}

	NECP_FD_LOCK(fd_data);
	pid_t pid = fd_data->proc_pid;
	proc = proc_find(pid);
	if (proc == PROC_NULL) {
		NECP_FD_UNLOCK(fd_data);
		// NOTE(review): error is still 0 when logged here; ESRCH is set just after
		NECPLOG(LOG_ERR, "necp_client_request_nexus process not found for pid %d error (%d)", pid, error);
		error = ESRCH;
		goto done;
	}

	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client == NULL) {
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
		goto done;
	}

	// Using REQUEST_NEXUS indicates that the client only supports one flow per client
	client->legacy_client_is_flow = true;

	// Hold a reference on the client across the agent messaging below
	necp_client_retain_locked(client);
	necp_client_copy_parameters_locked(client, &parameters);

	do {
		void *assigned_results = NULL;
		size_t assigned_results_length = 0;
		uuid_t nexus_uuid;
		uint32_t interface_index = 0;

		// Validate that the nexus UUID is assigned
		bool found_nexus = false;
		for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
			if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
				struct necp_client_interface_option *option = &client->interface_options[option_i];
				if (!uuid_is_null(option->nexus_agent)) {
					uuid_copy(nexus_uuid, option->nexus_agent);
					interface_index = option->interface_index;
					found_nexus = true;
					break;
				}
			} else {
				// Options beyond the static array spill into the extra options list
				struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
				if (!uuid_is_null(option->nexus_agent)) {
					uuid_copy(nexus_uuid, option->nexus_agent);
					interface_index = option->interface_index;
					found_nexus = true;
					break;
				}
			}
		}

		if (!found_nexus) {
			// Unlock and drop the reference before bailing out
			NECP_CLIENT_UNLOCK(client);
			NECP_FD_UNLOCK(fd_data);
			necp_client_release(client);
			// Break the loop
			error = ENETDOWN;
			goto done;
		}

		struct necp_client_flow_registration *new_registration = necp_client_create_flow_registration(fd_data, client);
		if (new_registration == NULL) {
			error = ENOMEM;
			NECP_CLIENT_UNLOCK(client);
			NECP_FD_UNLOCK(fd_data);
			necp_client_release(client);
			NECPLOG0(LOG_ERR, "Failed to allocate flow registration");
			goto done;
		}

		new_registration->flags = (NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS | NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID);

		necp_client_add_nexus_flow_if_needed(new_registration, nexus_uuid, interface_index);

		// Note: Any clients using "request_nexus" are not flow-registration aware.
		// Register the Client ID rather than the Registration ID with the nexus, since
		// the client will send traffic based on the client ID.
		error = netagent_client_message_with_params(nexus_uuid,
		    ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
		    client->client_id :
		    new_registration->registration_id),
		    pid, client->agent_handle,
		    NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS,
		    (struct necp_client_agent_parameters *)&parameters,
		    &assigned_results, &assigned_results_length);
		if (error) {
			NECP_CLIENT_UNLOCK(client);
			NECP_FD_UNLOCK(fd_data);
			necp_client_release(client);
			VERIFY(assigned_results == NULL);
			VERIFY(assigned_results_length == 0);
			NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error);
			goto done;
		}

		if (assigned_results != NULL) {
			// If the result is not consumed, we still own it and must free it
			if (!necp_assign_client_result_locked(proc, fd_data, client, new_registration, nexus_uuid,
			    assigned_results, assigned_results_length, false)) {
				kfree_data(assigned_results, assigned_results_length);
			}
		}

		// Optionally initialize stats when the caller provided a bufreq buffer
		if (uap->buffer != 0 && buffer_size == sizeof(struct necp_stats_bufreq) &&
		    necp_arena_initialize(fd_data, true) == 0) {
			struct necp_stats_bufreq bufreq = {};
			int copy_error = copyin(uap->buffer, &bufreq, buffer_size);
			if (copy_error) {
				NECPLOG(LOG_ERR, "necp_client_request_nexus copyin bufreq error (%d)", copy_error);
			} else {
				(void)necp_stats_initialize(fd_data, client, new_registration, &bufreq);
				copy_error = copyout(&bufreq, uap->buffer, buffer_size);
				if (copy_error != 0) {
					NECPLOG(LOG_ERR, "necp_client_request_nexus copyout bufreq error (%d)", copy_error);
				}
			}
		}
	} while (false);

	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);

	// Drop the reference taken by necp_client_retain_locked() above
	necp_client_release(client);

done:
	*retval = error;
	if (error != 0) {
		NECPLOG(LOG_ERR, "Request nexus error (%d)", error);
	}

	if (proc != PROC_NULL) {
		proc_rele(proc);
	}
	return error;
}
8207 #endif /* !SKYWALK */
8208
8209 static void
necp_client_add_assertion(struct necp_client * client,uuid_t netagent_uuid)8210 necp_client_add_assertion(struct necp_client *client, uuid_t netagent_uuid)
8211 {
8212 struct necp_client_assertion *new_assertion = NULL;
8213
8214 new_assertion = kalloc_type(struct necp_client_assertion,
8215 Z_WAITOK | Z_NOFAIL);
8216
8217 uuid_copy(new_assertion->asserted_netagent, netagent_uuid);
8218
8219 LIST_INSERT_HEAD(&client->assertion_list, new_assertion, assertion_chain);
8220 }
8221
8222 static bool
necp_client_remove_assertion(struct necp_client * client,uuid_t netagent_uuid)8223 necp_client_remove_assertion(struct necp_client *client, uuid_t netagent_uuid)
8224 {
8225 struct necp_client_assertion *found_assertion = NULL;
8226 struct necp_client_assertion *search_assertion = NULL;
8227 LIST_FOREACH(search_assertion, &client->assertion_list, assertion_chain) {
8228 if (uuid_compare(search_assertion->asserted_netagent, netagent_uuid) == 0) {
8229 found_assertion = search_assertion;
8230 break;
8231 }
8232 }
8233
8234 if (found_assertion == NULL) {
8235 NECPLOG0(LOG_ERR, "Netagent uuid not previously asserted");
8236 return false;
8237 }
8238
8239 LIST_REMOVE(found_assertion, assertion_chain);
8240 kfree_type(struct necp_client_assertion, found_assertion);
8241 return true;
8242 }
8243
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_agent_action(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	// Apply one or more agent actions to a client. uap->buffer carries a
	// sequence of TLVs; each value begins with an agent UUID, optionally
	// followed by action-specific payload (group members, or an error
	// struct). Returns 0 if at least one action succeeded, ENOENT if no
	// agent was acted on, or the first failing action's error.
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;
	bool acted_on_agent = FALSE;
	u_int8_t *parameters = NULL;
	const size_t buffer_size = uap->buffer_size;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
	    buffer_size == 0 || uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_agent_action invalid parameters");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_agent_action copyin client_id error (%d)", error);
		goto done;
	}

	// Cap the size before allocating a kernel copy of the TLV buffer
	if (buffer_size > NECP_MAX_AGENT_ACTION_SIZE) {
		NECPLOG(LOG_ERR, "necp_client_agent_action invalid buffer size (>%u)", NECP_MAX_AGENT_ACTION_SIZE);
		error = EINVAL;
		goto done;
	}

	if ((parameters = (u_int8_t *)kalloc_data(buffer_size, Z_WAITOK | Z_ZERO)) == NULL) {
		NECPLOG0(LOG_ERR, "necp_client_agent_action malloc failed");
		error = ENOMEM;
		goto done;
	}

	error = copyin(uap->buffer, parameters, buffer_size);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_agent_action parameters copyin error (%d)", error);
		goto done;
	}

	NECP_FD_LOCK(fd_data);
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client != NULL) {
		size_t offset = 0;
		// Walk the TLV stream; stop at the first malformed TLV or failed action
		while ((offset + sizeof(struct necp_tlv_header)) <= buffer_size) {
			u_int8_t type = necp_buffer_get_tlv_type(parameters, offset);
			u_int32_t length = necp_buffer_get_tlv_length(parameters, offset);

			if (length > (buffer_size - (offset + sizeof(struct necp_tlv_header)))) {
				// If the length is larger than what can fit in the remaining parameters size, bail
				NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
				break;
			}

			// A value shorter than a UUID cannot name an agent; such TLVs are skipped
			if (length >= sizeof(uuid_t)) {
				u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL);
				if (value == NULL) {
					NECPLOG0(LOG_ERR, "Invalid TLV value");
					break;
				}
				if (type == NECP_CLIENT_PARAMETER_TRIGGER_AGENT ||
				    type == NECP_CLIENT_PARAMETER_ASSERT_AGENT ||
				    type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) {
					uuid_t agent_uuid;
					uuid_copy(agent_uuid, value);
					u_int8_t netagent_message_type = 0;
					if (type == NECP_CLIENT_PARAMETER_TRIGGER_AGENT) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_TRIGGER;
					} else if (type == NECP_CLIENT_PARAMETER_ASSERT_AGENT) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT;
					} else if (type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
					}

					// Before unasserting, verify that the assertion was already taken
					if (type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) {
						if (!necp_client_remove_assertion(client, agent_uuid)) {
							error = ENOENT;
							break;
						}
					}

					// Snapshot the client's parameters to hand to the agent
					struct necp_client_nexus_parameters parsed_parameters = {};
					necp_client_copy_parameters_locked(client, &parsed_parameters);

					error = netagent_client_message_with_params(agent_uuid,
					    client_id,
					    fd_data->proc_pid,
					    client->agent_handle,
					    netagent_message_type,
					    (struct necp_client_agent_parameters *)&parsed_parameters,
					    NULL, NULL);
					if (error == 0) {
						acted_on_agent = TRUE;
					} else {
						break;
					}

					// Only save the assertion if the action succeeded
					if (type == NECP_CLIENT_PARAMETER_ASSERT_AGENT) {
						necp_client_add_assertion(client, agent_uuid);
					}
				} else if (type == NECP_CLIENT_PARAMETER_AGENT_ADD_GROUP_MEMBERS ||
				    type == NECP_CLIENT_PARAMETER_AGENT_REMOVE_GROUP_MEMBERS) {
					uuid_t agent_uuid;
					uuid_copy(agent_uuid, value);
					u_int8_t netagent_message_type = 0;
					if (type == NECP_CLIENT_PARAMETER_AGENT_ADD_GROUP_MEMBERS) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_ADD_GROUP_MEMBERS;
					} else if (type == NECP_CLIENT_PARAMETER_AGENT_REMOVE_GROUP_MEMBERS) {
						netagent_message_type = NETAGENT_MESSAGE_TYPE_REMOVE_GROUP_MEMBERS;
					}

					// Group member payload follows the agent UUID within the value
					struct necp_client_group_members group_members = {};
					group_members.group_members_length = (length - sizeof(uuid_t));
					group_members.group_members = (value + sizeof(uuid_t));
					error = netagent_client_message_with_params(agent_uuid,
					    client_id,
					    fd_data->proc_pid,
					    client->agent_handle,
					    netagent_message_type,
					    (struct necp_client_agent_parameters *)&group_members,
					    NULL, NULL);
					if (error == 0) {
						acted_on_agent = TRUE;
					} else {
						break;
					}
				} else if (type == NECP_CLIENT_PARAMETER_REPORT_AGENT_ERROR) {
					uuid_t agent_uuid;
					uuid_copy(agent_uuid, value);
					struct necp_client_agent_parameters agent_params = {};
					// Copy the trailing error struct only if the value is large enough;
					// otherwise a zeroed error struct is sent
					if ((length - sizeof(uuid_t)) >= sizeof(agent_params.u.error)) {
						memcpy(&agent_params.u.error,
						    (value + sizeof(uuid_t)),
						    sizeof(agent_params.u.error));
					}
					error = netagent_client_message_with_params(agent_uuid,
					    client_id,
					    fd_data->proc_pid,
					    client->agent_handle,
					    NETAGENT_MESSAGE_TYPE_CLIENT_ERROR,
					    &agent_params,
					    NULL, NULL);
					if (error == 0) {
						acted_on_agent = TRUE;
					} else {
						break;
					}
				}
			}

			offset += sizeof(struct necp_tlv_header) + length;
		}

		NECP_CLIENT_UNLOCK(client);
	}
	NECP_FD_UNLOCK(fd_data);

	// No TLV named an agent we could act on
	if (!acted_on_agent &&
	    error == 0) {
		error = ENOENT;
	}
done:
	*retval = error;
	if (parameters != NULL) {
		kfree_data(parameters, buffer_size);
		parameters = NULL;
	}

	return error;
}
8417
8418 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_agent(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)8419 necp_client_copy_agent(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
8420 {
8421 int error = 0;
8422 uuid_t agent_uuid;
8423 const size_t buffer_size = uap->buffer_size;
8424
8425 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
8426 buffer_size == 0 || uap->buffer == 0) {
8427 NECPLOG0(LOG_ERR, "necp_client_copy_agent bad input");
8428 error = EINVAL;
8429 goto done;
8430 }
8431
8432 error = copyin(uap->client_id, agent_uuid, sizeof(uuid_t));
8433 if (error) {
8434 NECPLOG(LOG_ERR, "necp_client_copy_agent copyin agent_uuid error (%d)", error);
8435 goto done;
8436 }
8437
8438 error = netagent_copyout(agent_uuid, uap->buffer, buffer_size);
8439 if (error) {
8440 // netagent_copyout already logs appropriate errors
8441 goto done;
8442 }
8443 done:
8444 *retval = error;
8445
8446 return error;
8447 }
8448
8449 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_agent_use(struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)8450 necp_client_agent_use(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
8451 {
8452 int error = 0;
8453 struct necp_client *client = NULL;
8454 uuid_t client_id;
8455 struct necp_agent_use_parameters parameters = {};
8456 const size_t buffer_size = uap->buffer_size;
8457
8458 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
8459 buffer_size != sizeof(parameters) || uap->buffer == 0) {
8460 error = EINVAL;
8461 goto done;
8462 }
8463
8464 error = copyin(uap->client_id, client_id, sizeof(uuid_t));
8465 if (error) {
8466 NECPLOG(LOG_ERR, "Copyin client_id error (%d)", error);
8467 goto done;
8468 }
8469
8470 error = copyin(uap->buffer, ¶meters, buffer_size);
8471 if (error) {
8472 NECPLOG(LOG_ERR, "Parameters copyin error (%d)", error);
8473 goto done;
8474 }
8475
8476 NECP_FD_LOCK(fd_data);
8477 client = necp_client_fd_find_client_and_lock(fd_data, client_id);
8478 if (client != NULL) {
8479 error = netagent_use(parameters.agent_uuid, ¶meters.out_use_count);
8480 NECP_CLIENT_UNLOCK(client);
8481 } else {
8482 error = ENOENT;
8483 }
8484
8485 NECP_FD_UNLOCK(fd_data);
8486
8487 if (error == 0) {
8488 error = copyout(¶meters, uap->buffer, buffer_size);
8489 if (error) {
8490 NECPLOG(LOG_ERR, "Parameters copyout error (%d)", error);
8491 goto done;
8492 }
8493 }
8494
8495 done:
8496 *retval = error;
8497
8498 return error;
8499 }
8500
8501 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_acquire_agent_token(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)8502 necp_client_acquire_agent_token(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
8503 {
8504 int error = 0;
8505 uuid_t agent_uuid = {};
8506 const size_t buffer_size = uap->buffer_size;
8507
8508 *retval = 0;
8509
8510 if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
8511 buffer_size == 0 || uap->buffer == 0) {
8512 NECPLOG0(LOG_ERR, "necp_client_copy_agent bad input");
8513 error = EINVAL;
8514 goto done;
8515 }
8516
8517 error = copyin(uap->client_id, agent_uuid, sizeof(uuid_t));
8518 if (error) {
8519 NECPLOG(LOG_ERR, "necp_client_copy_agent copyin agent_uuid error (%d)", error);
8520 goto done;
8521 }
8522
8523 error = netagent_acquire_token(agent_uuid, uap->buffer, buffer_size, retval);
8524 done:
8525 return error;
8526 }
8527
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	// Copy a struct necp_interface_details describing the interface whose
	// index is passed in the client_id field: name, index, generation,
	// delegate, functional type, flags, MTU, IPv4/IPv6 network signatures,
	// default-route reachability, netmask/broadcast, and radio type.
	int error = 0;
	u_int32_t interface_index = 0;
	struct necp_interface_details interface_details = {};

	if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
	    uap->buffer_size < sizeof(interface_details) ||
	    uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_copy_interface bad input");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, &interface_index, sizeof(u_int32_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_copy_interface copyin interface_index error (%d)", error);
		goto done;
	}

	if (interface_index == 0) {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_copy_interface bad interface_index (%d)", interface_index);
		goto done;
	}

	// rnh_lock is taken ahead of the ifnet head lock because the scoped
	// route lookups below use the *_locked route variants
	lck_mtx_lock(rnh_lock);
	ifnet_head_lock_shared();
	ifnet_t interface = NULL;
	if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
		interface = ifindex2ifnet[interface_index];
	}

	if (interface != NULL) {
		if (interface->if_xname != NULL) {
			strlcpy((char *)&interface_details.name, interface->if_xname, sizeof(interface_details.name));
		}
		interface_details.index = interface->if_index;
		interface_details.generation = ifnet_get_generation(interface);
		if (interface->if_delegated.ifp != NULL) {
			interface_details.delegate_index = interface->if_delegated.ifp->if_index;
		}
		interface_details.functional_type = if_functional_type(interface, TRUE);
		// Translate interface state into NECP interface flags
		if (IFNET_IS_EXPENSIVE(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_EXPENSIVE;
		}
		if (IFNET_IS_CONSTRAINED(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_CONSTRAINED;
		}
		if ((interface->if_eflags & IFEF_TXSTART) == IFEF_TXSTART) {
			interface_details.flags |= NECP_INTERFACE_FLAG_TXSTART;
		}
		if ((interface->if_eflags & IFEF_NOACKPRI) == IFEF_NOACKPRI) {
			interface_details.flags |= NECP_INTERFACE_FLAG_NOACKPRI;
		}
		if ((interface->if_eflags & IFEF_3CA) == IFEF_3CA) {
			interface_details.flags |= NECP_INTERFACE_FLAG_3CARRIERAGG;
		}
		if (IFNET_IS_LOW_POWER(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_IS_LOW_POWER;
		}
		if (interface->if_xflags & IFXF_MPK_LOG) {
			interface_details.flags |= NECP_INTERFACE_FLAG_MPK_LOG;
		}
		if (interface->if_flags & IFF_MULTICAST) {
			interface_details.flags |= NECP_INTERFACE_FLAG_SUPPORTS_MULTICAST;
		}
		if (IS_INTF_CLAT46(interface)) {
			interface_details.flags |= NECP_INTERFACE_FLAG_HAS_NAT64;
		}
		interface_details.mtu = interface->if_mtu;

		// IPv4 network signature; zero length if unavailable
		u_int8_t ipv4_signature_len = sizeof(interface_details.ipv4_signature.signature);
		u_int16_t ipv4_signature_flags;
		if (ifnet_get_netsignature(interface, AF_INET, &ipv4_signature_len, &ipv4_signature_flags,
		    (u_int8_t *)&interface_details.ipv4_signature) != 0) {
			ipv4_signature_len = 0;
		}
		interface_details.ipv4_signature.signature_len = ipv4_signature_len;

		// Check for default scoped routes for IPv4 and IPv6
		union necp_sockaddr_union default_address;
		struct rtentry *v4Route = NULL;
		memset(&default_address, 0, sizeof(default_address));
		default_address.sa.sa_family = AF_INET;
		default_address.sa.sa_len = sizeof(struct sockaddr_in);
		v4Route = rtalloc1_scoped_locked((struct sockaddr *)&default_address, 0, 0,
		    interface->if_index);
		if (v4Route != NULL) {
			// A CLAT46 route does not count as native IPv4 reachability
			if (v4Route->rt_ifp != NULL && !IS_INTF_CLAT46(v4Route->rt_ifp)) {
				interface_details.flags |= NECP_INTERFACE_FLAG_IPV4_ROUTABLE;
			}
			rtfree_locked(v4Route);
			v4Route = NULL;
		}

		struct rtentry *v6Route = NULL;
		memset(&default_address, 0, sizeof(default_address));
		default_address.sa.sa_family = AF_INET6;
		default_address.sa.sa_len = sizeof(struct sockaddr_in6);
		v6Route = rtalloc1_scoped_locked((struct sockaddr *)&default_address, 0, 0,
		    interface->if_index);
		if (v6Route != NULL) {
			if (v6Route->rt_ifp != NULL) {
				interface_details.flags |= NECP_INTERFACE_FLAG_IPV6_ROUTABLE;
			}
			rtfree_locked(v6Route);
			v6Route = NULL;
		}

		// IPv6 network signature; zero length if unavailable
		u_int8_t ipv6_signature_len = sizeof(interface_details.ipv6_signature.signature);
		u_int16_t ipv6_signature_flags;
		if (ifnet_get_netsignature(interface, AF_INET6, &ipv6_signature_len, &ipv6_signature_flags,
		    (u_int8_t *)&interface_details.ipv6_signature) != 0) {
			ipv6_signature_len = 0;
		}
		interface_details.ipv6_signature.signature_len = ipv6_signature_len;

		// Scan the address list for an IPv4 netmask/broadcast address;
		// if multiple IPv4 addresses exist, the last one scanned wins
		ifnet_lock_shared(interface);
		struct ifaddr *ifa = NULL;
		TAILQ_FOREACH(ifa, &interface->if_addrhead, ifa_link) {
			IFA_LOCK(ifa);
			if (ifa->ifa_addr->sa_family == AF_INET) {
				interface_details.flags |= NECP_INTERFACE_FLAG_HAS_NETMASK;
				interface_details.ipv4_netmask = ((struct in_ifaddr *)ifa)->ia_sockmask.sin_addr.s_addr;
				if (interface->if_flags & IFF_BROADCAST) {
					interface_details.flags |= NECP_INTERFACE_FLAG_HAS_BROADCAST;
					interface_details.ipv4_broadcast = ((struct in_ifaddr *)ifa)->ia_broadaddr.sin_addr.s_addr;
				}
			}
			IFA_UNLOCK(ifa);
		}

		// Fall back to the delegate's radio type when this interface has none
		interface_details.radio_type = interface->if_radio_type;
		if (interface_details.radio_type == 0 && interface->if_delegated.ifp) {
			interface_details.radio_type = interface->if_delegated.ifp->if_radio_type;
		}
		ifnet_lock_done(interface);
	}

	ifnet_head_done();
	lck_mtx_unlock(rnh_lock);

	// If the client is using an older version of the struct, copy that length
	error = copyout(&interface_details, uap->buffer, sizeof(interface_details));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_copy_interface copyout error (%d)", error);
		goto done;
	}
done:
	*retval = error;

	return error;
}
8683
8684 #if SKYWALK
8685
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_get_interface_address(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	// Select a local address on a given interface suitable for reaching
	// a destination address. The interface index is passed in the
	// client_id field, the destination sockaddr in the buffer; the chosen
	// local address is copied back out over the same buffer.
	int error = 0;
	u_int32_t interface_index = IFSCOPE_NONE;
	struct sockaddr_storage address = {};
	const size_t buffer_size = uap->buffer_size;

	// The buffer must hold a sockaddr between sockaddr_in and
	// sockaddr_storage in size
	if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) ||
	    buffer_size < sizeof(struct sockaddr_in) ||
	    buffer_size > sizeof(struct sockaddr_storage) ||
	    uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_get_interface_address bad input");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, &interface_index, sizeof(u_int32_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_get_interface_address copyin interface_index error (%d)", error);
		goto done;
	}

	if (interface_index == IFSCOPE_NONE) {
		error = ENOENT;
		NECPLOG(LOG_ERR, "necp_client_get_interface_address bad interface_index (%d)", interface_index);
		goto done;
	}

	error = copyin(uap->buffer, &address, buffer_size);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_get_interface_address copyin address error (%d)", error);
		goto done;
	}

	if (address.ss_family != AF_INET && address.ss_family != AF_INET6) {
		error = EINVAL;
		NECPLOG(LOG_ERR, "necp_client_get_interface_address invalid address family (%u)", address.ss_family);
		goto done;
	}

	// The sockaddr's self-declared length must match what was copied in
	if (address.ss_len != buffer_size) {
		error = EINVAL;
		NECPLOG(LOG_ERR, "necp_client_get_interface_address invalid address length (%u)", address.ss_len);
		goto done;
	}

	// Resolve the interface index to an ifnet under the head lock
	ifnet_head_lock_shared();
	ifnet_t ifp = NULL;
	if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
		ifp = ifindex2ifnet[interface_index];
	}
	ifnet_head_done();
	if (ifp == NULL) {
		error = ENOENT;
		NECPLOG0(LOG_ERR, "necp_client_get_interface_address no matching interface found");
		goto done;
	}

	// Look up a route to the destination scoped to the interface
	struct rtentry *rt = rtalloc1_scoped((struct sockaddr *)&address, 0, 0, interface_index);
	if (rt == NULL) {
		error = EINVAL;
		NECPLOG0(LOG_ERR, "necp_client_get_interface_address route lookup failed");
		goto done;
	}

	uint32_t gencount = 0;
	struct sockaddr_storage local_address = {};
	error = flow_route_select_laddr((union sockaddr_in_4_6 *)&local_address,
	    (union sockaddr_in_4_6 *)&address, ifp, rt, &gencount, 1);
	rtfree(rt);
	rt = NULL;

	if (error) {
		NECPLOG(LOG_ERR, "necp_client_get_interface_address local address selection failed (%d)", error);
		goto done;
	}

	if (local_address.ss_len > buffer_size) {
		error = EMSGSIZE;
		NECPLOG(LOG_ERR, "necp_client_get_interface_address local address too long for buffer (%u)",
		    local_address.ss_len);
		goto done;
	}

	error = copyout(&local_address, uap->buffer, local_address.ss_len);
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_get_interface_address copyout error (%d)", error);
		goto done;
	}
done:
	*retval = error;

	return error;
}
8781
8782 extern char *proc_name_address(void *p);
8783
8784 int
necp_stats_ctor(struct skmem_obj_info * oi,struct skmem_obj_info * oim,void * arg,uint32_t skmflag)8785 necp_stats_ctor(struct skmem_obj_info *oi, struct skmem_obj_info *oim,
8786 void *arg, uint32_t skmflag)
8787 {
8788 #pragma unused(arg, skmflag)
8789 struct necp_all_kstats *kstats = SKMEM_OBJ_ADDR(oi);
8790
8791 ASSERT(oim != NULL && SKMEM_OBJ_ADDR(oim) != NULL);
8792 ASSERT(SKMEM_OBJ_SIZE(oi) == SKMEM_OBJ_SIZE(oim));
8793
8794 kstats->necp_stats_ustats = SKMEM_OBJ_ADDR(oim);
8795
8796 return 0;
8797 }
8798
8799 int
necp_stats_dtor(void * addr,void * arg)8800 necp_stats_dtor(void *addr, void *arg)
8801 {
8802 #pragma unused(addr, arg)
8803 struct necp_all_kstats *kstats = addr;
8804
8805 kstats->necp_stats_ustats = NULL;
8806
8807 return 0;
8808 }
8809
static void
necp_fd_insert_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai)
{
	// Attach a stats arena to the fd's arena list; the list holds its own
	// reference on the arena info. Caller must hold the fd lock, and the
	// arena must not already be chained anywhere.
	NECP_FD_ASSERT_LOCKED(fd_data);
	VERIFY(!(nai->nai_flags & NAIF_ATTACHED));
	VERIFY(nai->nai_chain.le_next == NULL && nai->nai_chain.le_prev == NULL);

	LIST_INSERT_HEAD(&fd_data->stats_arena_list, nai, nai_chain);
	nai->nai_flags |= NAIF_ATTACHED;
	necp_arena_info_retain(nai); // for the list
}
8821
static void
necp_fd_remove_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai)
{
#pragma unused(fd_data)
	// Unchain a stats arena from the fd's arena list and drop the list's
	// reference; this may free nai if it was the last reference. Caller
	// must hold the fd lock.
	NECP_FD_ASSERT_LOCKED(fd_data);
	VERIFY(nai->nai_flags & NAIF_ATTACHED);
	VERIFY(nai->nai_use_count >= 1);

	LIST_REMOVE(nai, nai_chain);
	nai->nai_flags &= ~NAIF_ATTACHED;
	// Clear the chain pointers so necp_arena_info_free's sanity checks pass
	nai->nai_chain.le_next = NULL;
	nai->nai_chain.le_prev = NULL;
	necp_arena_info_release(nai); // for the list
}
8836
static struct necp_arena_info *
necp_fd_mredirect_stats_arena(struct necp_fd_data *fd_data, struct proc *proc)
{
	// Redirect the fd's currently-active stats arena mapping (e.g. on
	// process suspension) and detach it from the active slot. Returns the
	// redirected arena info, or NULL if there was no active arena.
	// Caller must hold the fd lock.
	struct necp_arena_info *nai, *nai_ret = NULL;

	NECP_FD_ASSERT_LOCKED(fd_data);

	// Redirect currently-active stats arena and remove it from the active state;
	// upon process resumption, new flow request would trigger the creation of
	// another active arena.
	if ((nai = fd_data->stats_arena_active) != NULL) {
		boolean_t need_defunct = FALSE;

		// An active arena must be mapped and not already redirected/defunct
		ASSERT(!(nai->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT)));
		VERIFY(nai->nai_use_count >= 2);
		ASSERT(nai->nai_arena != NULL);
		ASSERT(nai->nai_mmap.ami_mapref != NULL);

		int err = skmem_arena_mredirect(nai->nai_arena, &nai->nai_mmap, proc, &need_defunct);
		VERIFY(err == 0);
		// must be TRUE since we don't mmap the arena more than once
		VERIFY(need_defunct == TRUE);

		nai->nai_flags |= NAIF_REDIRECT;
		nai_ret = nai; // return to caller

		necp_arena_info_release(nai); // for fd_data
		fd_data->stats_arena_active = nai = NULL;
	}

#if (DEVELOPMENT || DEBUG)
	// make sure this list now contains nothing but redirected/defunct arenas
	LIST_FOREACH(nai, &fd_data->stats_arena_list, nai_chain) {
		ASSERT(nai->nai_use_count >= 1);
		ASSERT(nai->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT));
	}
#endif /* (DEVELOPMENT || DEBUG) */

	return nai_ret;
}
8877
8878 static void
necp_arena_info_retain(struct necp_arena_info * nai)8879 necp_arena_info_retain(struct necp_arena_info *nai)
8880 {
8881 nai->nai_use_count++;
8882 VERIFY(nai->nai_use_count != 0);
8883 }
8884
8885 static void
necp_arena_info_release(struct necp_arena_info * nai)8886 necp_arena_info_release(struct necp_arena_info *nai)
8887 {
8888 VERIFY(nai->nai_use_count > 0);
8889 if (--nai->nai_use_count == 0) {
8890 necp_arena_info_free(nai);
8891 }
8892 }
8893
8894 static struct necp_arena_info *
necp_arena_info_alloc(void)8895 necp_arena_info_alloc(void)
8896 {
8897 return zalloc_flags(necp_arena_info_zone, Z_WAITOK | Z_ZERO);
8898 }
8899
static void
necp_arena_info_free(struct necp_arena_info *nai)
{
	// Final teardown of an arena info record: unmap and release the skmem
	// arena (if any), then return the record to its zone. The record must
	// be unchained and have no outstanding references.
	VERIFY(nai->nai_chain.le_next == NULL && nai->nai_chain.le_prev == NULL);
	VERIFY(nai->nai_use_count == 0);

	// NOTE: destroying the arena requires that all outstanding objects
	// that were allocated have been freed, else it will assert.
	if (nai->nai_arena != NULL) {
		skmem_arena_munmap(nai->nai_arena, &nai->nai_mmap);
		skmem_arena_release(nai->nai_arena);
		OSDecrementAtomic(&necp_arena_count);
		nai->nai_arena = NULL;
		nai->nai_roff = 0;
	}

	// The mmap state must be fully torn down before the record is freed
	ASSERT(nai->nai_arena == NULL);
	ASSERT(nai->nai_mmap.ami_mapref == NULL);
	ASSERT(nai->nai_mmap.ami_arena == NULL);
	ASSERT(nai->nai_mmap.ami_maptask == TASK_NULL);

	zfree(necp_arena_info_zone, nai);
}
8923
static int
necp_arena_create(struct necp_fd_data *fd_data, size_t obj_size, size_t obj_cnt, struct proc *p)
{
	// Create a new stats arena for this fd: configure mirrored user/kernel
	// stats regions of obj_cnt objects of obj_size bytes, create the skmem
	// arena, map it into the process, and install it as the fd's active
	// arena. Returns 0 on success or an errno-style error. Caller must
	// hold the fd lock, and no active arena may already exist.
	struct skmem_region_params srp_ustats = {};
	struct skmem_region_params srp_kstats = {};
	struct necp_arena_info *nai;
	char name[32];
	int error = 0;

	NECP_FD_ASSERT_LOCKED(fd_data);
	ASSERT(fd_data->stats_arena_active == NULL);
	ASSERT(p != PROC_NULL);
	ASSERT(proc_pid(p) == fd_data->proc_pid);

	// inherit the default parameters for the stats region
	srp_ustats = *skmem_get_default(SKMEM_REGION_USTATS);
	srp_kstats = *skmem_get_default(SKMEM_REGION_KSTATS);

	// enable multi-segment mode
	srp_ustats.srp_cflags &= ~SKMEM_REGION_CR_MONOLITHIC;
	srp_kstats.srp_cflags &= ~SKMEM_REGION_CR_MONOLITHIC;

	// configure and adjust the region parameters
	srp_ustats.srp_r_obj_cnt = srp_kstats.srp_r_obj_cnt = obj_cnt;
	srp_ustats.srp_r_obj_size = srp_kstats.srp_r_obj_size = obj_size;
	skmem_region_params_config(&srp_ustats);
	skmem_region_params_config(&srp_kstats);

	nai = necp_arena_info_alloc();

	nai->nai_proc_pid = fd_data->proc_pid;
	// Arena name encodes generation, process name and pid for debugging
	(void) snprintf(name, sizeof(name), "stats-%u.%s.%d", fd_data->stats_arena_gencnt, proc_name_address(p), fd_data->proc_pid);
	nai->nai_arena = skmem_arena_create_for_necp(name, &srp_ustats, &srp_kstats, &error);
	ASSERT(nai->nai_arena != NULL || error != 0);
	if (error != 0) {
		NECPLOG(LOG_ERR, "failed to create stats arena for pid %d\n", fd_data->proc_pid);
	} else {
		OSIncrementAtomic(&necp_arena_count);

		// Get region offsets from base of mmap span; the arena
		// doesn't need to be mmap'd at this point, since we simply
		// compute the relative offset.
		nai->nai_roff = skmem_arena_get_region_offset(nai->nai_arena, SKMEM_REGION_USTATS);

		// map to the task/process; upon success, the base address of the region
		// will be returned in nai_mmap.ami_mapaddr; this can be communicated to
		// the process.
		error = skmem_arena_mmap(nai->nai_arena, p, &nai->nai_mmap);
		if (error != 0) {
			NECPLOG(LOG_ERR, "failed to map stats arena for pid %d\n", fd_data->proc_pid);
		}
	}

	if (error == 0) {
		// Install as the active arena (extra ref for fd_data) and chain
		// it on the fd's arena list
		fd_data->stats_arena_active = nai;
		necp_arena_info_retain(nai); // for fd_data
		necp_fd_insert_stats_arena(fd_data, nai);
		++fd_data->stats_arena_gencnt;
	} else {
		necp_arena_info_free(nai);
	}

	return error;
}
8988
static int
necp_arena_stats_obj_alloc(struct necp_fd_data *fd_data,
    mach_vm_offset_t *off,
    struct necp_arena_info **stats_arena,
    void **kstats_kaddr,
    boolean_t cansleep)
{
	// Allocate a stats object from the fd's active arena. On success,
	// returns via out-params: the arena the object came from, the kernel
	// address of the kstats object, and the offset of the mirrored ustats
	// object relative to the process's mmap base. Returns ENOMEM if the
	// cache allocation fails. Caller must hold the fd lock and an active
	// arena must exist.
	struct skmem_cache *kstats_cp = NULL;
	void *ustats_obj = NULL;
	void *kstats_obj = NULL;
	struct necp_all_kstats *kstats = NULL;
	struct skmem_obj_info kstats_oi = {};

	ASSERT(off != NULL);
	ASSERT(stats_arena != NULL && *stats_arena == NULL);
	ASSERT(kstats_kaddr != NULL && *kstats_kaddr == NULL);

	NECP_FD_ASSERT_LOCKED(fd_data);
	ASSERT(fd_data->stats_arena_active != NULL);
	ASSERT(fd_data->stats_arena_active->nai_arena != NULL);

	kstats_cp = skmem_arena_necp(fd_data->stats_arena_active->nai_arena)->arc_kstats_cache;
	if ((kstats_obj = skmem_cache_alloc(kstats_cp, (cansleep ? SKMEM_SLEEP : SKMEM_NOSLEEP))) == NULL) {
		return ENOMEM;
	}

	// The kstats object carries a pointer to its mirrored ustats object
	// (wired up by necp_stats_ctor)
	kstats = (struct necp_all_kstats*)kstats_obj;
	ustats_obj = kstats->necp_stats_ustats;

	skmem_cache_get_obj_info(kstats_cp, kstats_obj, &kstats_oi, NULL);
	ASSERT(SKMEM_OBJ_SIZE(&kstats_oi) >= sizeof(struct necp_all_stats));
	// reset all stats counters
	bzero(ustats_obj, SKMEM_OBJ_SIZE(&kstats_oi));
	bzero(&kstats->necp_stats_comm, sizeof(struct necp_all_stats));
	*stats_arena = fd_data->stats_arena_active;
	*kstats_kaddr = kstats_obj;
	// kstats and ustats are mirrored and have the same offset
	*off = fd_data->stats_arena_active->nai_roff + SKMEM_OBJ_ROFF(&kstats_oi);

	return 0;
}
9030
static void
necp_arena_stats_obj_free(struct necp_fd_data *fd_data, struct necp_arena_info *stats_arena, void **kstats_kaddr, mach_vm_address_t *ustats_uaddr)
{
#pragma unused(fd_data)
	// Return a kernel stats object to its arena's cache and clear the
	// caller's kernel/user references to it. Caller must hold the fd lock.
	NECP_FD_ASSERT_LOCKED(fd_data);

	ASSERT(stats_arena != NULL);
	ASSERT(stats_arena->nai_arena != NULL);
	ASSERT(kstats_kaddr != NULL && *kstats_kaddr != NULL);
	ASSERT(ustats_uaddr != NULL);

	skmem_cache_free(skmem_arena_necp(stats_arena->nai_arena)->arc_kstats_cache, *kstats_kaddr);
	*kstats_kaddr = NULL;
	*ustats_uaddr = 0;
}
9046
9047 // This routine returns the KVA of the sysctls object, as well as the
9048 // offset of that object relative to the mmap base address for the
9049 // task/process.
9050 static void *
necp_arena_sysctls_obj(struct necp_fd_data * fd_data,mach_vm_offset_t * off,size_t * size)9051 necp_arena_sysctls_obj(struct necp_fd_data *fd_data, mach_vm_offset_t *off, size_t *size)
9052 {
9053 void *objaddr;
9054
9055 NECP_FD_ASSERT_LOCKED(fd_data);
9056 ASSERT(fd_data->sysctl_arena != NULL);
9057
9058 // kernel virtual address of the sysctls object
9059 objaddr = skmem_arena_system_sysctls_obj_addr(fd_data->sysctl_arena);
9060 ASSERT(objaddr != NULL);
9061
9062 // Return the relative offset of the sysctls object; there is
9063 // only 1 object in the entire sysctls region, and therefore the
9064 // object's offset is simply the region's offset in the arena.
9065 // (sysctl_mmap.ami_mapaddr + offset) is the address of this object
9066 // in the task/process.
9067 if (off != NULL) {
9068 *off = fd_data->system_sysctls_roff;
9069 }
9070
9071 if (size != NULL) {
9072 *size = skmem_arena_system_sysctls_obj_size(fd_data->sysctl_arena);
9073 ASSERT(*size != 0);
9074 }
9075
9076 return objaddr;
9077 }
9078
// Tear down (closing == TRUE) or reap (closing == FALSE) this fd's stats
// arenas. When reaping, references are dropped only for arenas that are
// otherwise idle, as judged by their use counts. Caller holds the fd lock.
static void
necp_stats_arenas_destroy(struct necp_fd_data *fd_data, boolean_t closing)
{
	struct necp_arena_info *nai, *nai_tmp;

	NECP_FD_ASSERT_LOCKED(fd_data);

	// If reaping (not closing), release reference only for idle active arena; the reference
	// count must be 2 by now, when it's not being referred to by any clients/flows.
	if ((nai = fd_data->stats_arena_active) != NULL && (closing || nai->nai_use_count == 2)) {
		VERIFY(nai->nai_use_count >= 2);
		necp_arena_info_release(nai);   // for fd_data
		fd_data->stats_arena_active = NULL;
	}

	// clean up any defunct arenas left in the list
	LIST_FOREACH_SAFE(nai, &fd_data->stats_arena_list, nai_chain, nai_tmp) {
		// If reaping, release reference if the list holds the last one
		if (closing || nai->nai_use_count == 1) {
			VERIFY(nai->nai_use_count >= 1);
			// callee unchains nai (and may free it)
			necp_fd_remove_stats_arena(fd_data, nai);
		}
	}
}
9104
// Unmap and release this fd's sysctl arena, if one exists, and reset the
// cached region offset. Caller must hold the NECP fd lock.
static void
necp_sysctl_arena_destroy(struct necp_fd_data *fd_data)
{
	NECP_FD_ASSERT_LOCKED(fd_data);

	// NOTE: destroying the arena requires that all outstanding objects
	// that were allocated have been freed, else it will assert.
	if (fd_data->sysctl_arena != NULL) {
		// Unmap from the task before dropping the arena reference.
		skmem_arena_munmap(fd_data->sysctl_arena, &fd_data->sysctl_mmap);
		skmem_arena_release(fd_data->sysctl_arena);
		OSDecrementAtomic(&necp_sysctl_arena_count);
		fd_data->sysctl_arena = NULL;
		fd_data->system_sysctls_roff = 0;
	}
}
9120
9121 static int
necp_arena_initialize(struct necp_fd_data * fd_data,bool locked)9122 necp_arena_initialize(struct necp_fd_data *fd_data, bool locked)
9123 {
9124 int error = 0;
9125 size_t stats_obj_size = MAX(sizeof(struct necp_all_stats), sizeof(struct necp_all_kstats));
9126
9127 if (!locked) {
9128 NECP_FD_LOCK(fd_data);
9129 }
9130 if (fd_data->stats_arena_active == NULL) {
9131 error = necp_arena_create(fd_data, stats_obj_size,
9132 NECP_MAX_PER_PROCESS_CLIENT_STATISTICS_STRUCTS,
9133 current_proc());
9134 }
9135 if (!locked) {
9136 NECP_FD_UNLOCK(fd_data);
9137 }
9138
9139 return error;
9140 }
9141
// Lazily create this fd's sysctl arena, record the sysctls region offset,
// and map the arena into the owning task. On mapping failure the arena is
// destroyed again. If `locked' is false the fd lock is taken here.
static int
necp_sysctl_arena_initialize(struct necp_fd_data *fd_data, bool locked)
{
	int error = 0;

	if (!locked) {
		NECP_FD_LOCK(fd_data);
	}

	NECP_FD_ASSERT_LOCKED(fd_data);

	if (fd_data->sysctl_arena == NULL) {
		char name[32];
		struct proc *p = current_proc();

		ASSERT(p != PROC_NULL);
		// The fd must belong to the calling process.
		ASSERT(proc_pid(p) == fd_data->proc_pid);

		// Arena is named "sysctl.<procname>.<pid>" for diagnostics.
		(void) snprintf(name, sizeof(name), "sysctl.%s.%d", proc_name_address(p), fd_data->proc_pid);
		fd_data->sysctl_arena = skmem_arena_create_for_system(name, &error);
		ASSERT(fd_data->sysctl_arena != NULL || error != 0);
		if (error != 0) {
			NECPLOG(LOG_ERR, "failed to create arena for pid %d\n", fd_data->proc_pid);
		} else {
			OSIncrementAtomic(&necp_sysctl_arena_count);

			// Get region offsets from base of mmap span; the arena
			// doesn't need to be mmap'd at this point, since we simply
			// compute the relative offset.
			fd_data->system_sysctls_roff = skmem_arena_get_region_offset(fd_data->sysctl_arena, SKMEM_REGION_SYSCTLS);

			// map to the task/process; upon success, the base address of the region
			// will be returned in nai_mmap.ami_mapaddr; this can be communicated to
			// the process.
			error = skmem_arena_mmap(fd_data->sysctl_arena, p, &fd_data->sysctl_mmap);
			if (error != 0) {
				NECPLOG(LOG_ERR, "failed to map sysctl arena for pid %d\n", fd_data->proc_pid);
				// Undo the partially initialized arena.
				necp_sysctl_arena_destroy(fd_data);
			}
		}
	}

	if (!locked) {
		NECP_FD_UNLOCK(fd_data);
	}

	return error;
}
9190
// Handle a statistics buffer request for a flow registration: validate the
// request (id, type/version pair, and size), allocate the mirrored
// kernel/user stats object on first request, publish the flow on the
// stats-collection list, and return the user-space address of the stats
// buffer in bufreq. Repeated requests simply return the existing buffer.
// Caller holds both the client lock and the fd lock.
static int
necp_client_stats_bufreq(struct necp_fd_data *fd_data,
    struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    struct necp_stats_bufreq *bufreq,
    struct necp_stats_hdr *out_header)
{
	int error = 0;
	NECP_CLIENT_ASSERT_LOCKED(client);
	NECP_FD_ASSERT_LOCKED(fd_data);

	// Accept only a well-formed request: correct request id, a known
	// (type, version) combination, and the exact expected buffer size.
	if ((bufreq->necp_stats_bufreq_id == NECP_CLIENT_STATISTICS_BUFREQ_ID) &&
	    ((bufreq->necp_stats_bufreq_type == NECP_CLIENT_STATISTICS_TYPE_TCP &&
	    bufreq->necp_stats_bufreq_ver == NECP_CLIENT_STATISTICS_TYPE_TCP_CURRENT_VER) ||
	    (bufreq->necp_stats_bufreq_type == NECP_CLIENT_STATISTICS_TYPE_UDP &&
	    bufreq->necp_stats_bufreq_ver == NECP_CLIENT_STATISTICS_TYPE_UDP_CURRENT_VER) ||
	    (bufreq->necp_stats_bufreq_type == NECP_CLIENT_STATISTICS_TYPE_QUIC &&
	    bufreq->necp_stats_bufreq_ver == NECP_CLIENT_STATISTICS_TYPE_QUIC_CURRENT_VER)) &&
	    (bufreq->necp_stats_bufreq_size == sizeof(struct necp_all_stats))) {
		// There should be one and only one stats allocation per client.
		// If asked more than once, we just repeat ourselves.
		if (flow_registration->ustats_uaddr == 0) {
			mach_vm_offset_t off;
			ASSERT(flow_registration->stats_arena == NULL);
			ASSERT(flow_registration->kstats_kaddr == NULL);
			ASSERT(flow_registration->ustats_uaddr == 0);
			error = necp_arena_stats_obj_alloc(fd_data, &off, &flow_registration->stats_arena, &flow_registration->kstats_kaddr, FALSE);
			if (error == 0) {
				// upon success, hold a reference for the client; this is released when the client is removed/closed
				ASSERT(flow_registration->stats_arena != NULL);
				necp_arena_info_retain(flow_registration->stats_arena);

				// compute user address based on mapping info and object offset
				flow_registration->ustats_uaddr = flow_registration->stats_arena->nai_mmap.ami_mapaddr + off;

				// add to collect_stats list
				NECP_STATS_LIST_LOCK_EXCLUSIVE();
				necp_client_retain_locked(client); // Add a reference to the client
				LIST_INSERT_HEAD(&necp_collect_stats_flow_list, flow_registration, collect_stats_chain);
				NECP_STATS_LIST_UNLOCK();
				// Kick off stats collection (non-blocking).
				necp_schedule_collect_stats_clients(FALSE);
			} else {
				// Allocation failed; no partial state may remain.
				ASSERT(flow_registration->stats_arena == NULL);
				ASSERT(flow_registration->kstats_kaddr == NULL);
			}
		}
		if (flow_registration->ustats_uaddr != 0) {
			ASSERT(error == 0);
			ASSERT(flow_registration->stats_arena != NULL);
			ASSERT(flow_registration->kstats_kaddr != NULL);

			// Stamp the requested type/version into the header of the
			// user-visible stats object (the header layout is shared
			// across the union arms, so tcp_stats is used to reach it).
			struct necp_all_kstats *kstats = (struct necp_all_kstats *)flow_registration->kstats_kaddr;
			kstats->necp_stats_ustats->all_stats_u.tcp_stats.necp_tcp_hdr.necp_stats_type = bufreq->necp_stats_bufreq_type;
			kstats->necp_stats_ustats->all_stats_u.tcp_stats.necp_tcp_hdr.necp_stats_ver = bufreq->necp_stats_bufreq_ver;

			if (out_header) {
				out_header->necp_stats_type = bufreq->necp_stats_bufreq_type;
				out_header->necp_stats_ver = bufreq->necp_stats_bufreq_ver;
			}

			// Report the user-space address of the stats buffer back.
			bufreq->necp_stats_bufreq_uaddr = flow_registration->ustats_uaddr;
		}
	} else {
		error = EINVAL;
	}

	return error;
}
9259
9260 static int
necp_client_stats_initial(struct necp_client_flow_registration * flow_registration,uint32_t stats_type,uint32_t stats_ver)9261 necp_client_stats_initial(struct necp_client_flow_registration *flow_registration, uint32_t stats_type, uint32_t stats_ver)
9262 {
9263 // An attempted create
9264 assert(flow_registration->stats_handler_context == NULL);
9265 assert(flow_registration->stats_arena);
9266 assert(flow_registration->ustats_uaddr);
9267 assert(flow_registration->kstats_kaddr);
9268
9269 int error = 0;
9270
9271 switch (stats_type) {
9272 case NECP_CLIENT_STATISTICS_TYPE_TCP: {
9273 if (stats_ver == NECP_CLIENT_STATISTICS_TYPE_TCP_VER_1) {
9274 flow_registration->stats_handler_context = ntstat_userland_stats_open((userland_stats_provider_context *)flow_registration,
9275 NSTAT_PROVIDER_TCP_USERLAND, 0, necp_request_tcp_netstats, necp_find_extension_info);
9276 if (flow_registration->stats_handler_context == NULL) {
9277 error = EIO;
9278 }
9279 } else {
9280 error = ENOTSUP;
9281 }
9282 break;
9283 }
9284 case NECP_CLIENT_STATISTICS_TYPE_UDP: {
9285 if (stats_ver == NECP_CLIENT_STATISTICS_TYPE_UDP_VER_1) {
9286 flow_registration->stats_handler_context = ntstat_userland_stats_open((userland_stats_provider_context *)flow_registration,
9287 NSTAT_PROVIDER_UDP_USERLAND, 0, necp_request_udp_netstats, necp_find_extension_info);
9288 if (flow_registration->stats_handler_context == NULL) {
9289 error = EIO;
9290 }
9291 } else {
9292 error = ENOTSUP;
9293 }
9294 break;
9295 }
9296 case NECP_CLIENT_STATISTICS_TYPE_QUIC: {
9297 if (stats_ver == NECP_CLIENT_STATISTICS_TYPE_QUIC_VER_1 && flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS) {
9298 flow_registration->stats_handler_context = ntstat_userland_stats_open((userland_stats_provider_context *)flow_registration,
9299 NSTAT_PROVIDER_QUIC_USERLAND, 0, necp_request_quic_netstats, necp_find_extension_info);
9300 if (flow_registration->stats_handler_context == NULL) {
9301 error = EIO;
9302 }
9303 } else {
9304 error = ENOTSUP;
9305 }
9306 break;
9307 }
9308 default: {
9309 error = ENOTSUP;
9310 break;
9311 }
9312 }
9313 return error;
9314 }
9315
// Set up statistics for a flow: allocate/return the stats buffer via
// necp_client_stats_bufreq(), then register the flow with ntstat if it is
// not registered yet, emitting an INIT event on success. Caller holds both
// the client lock and the fd lock, with a healthy active stats arena.
static int
necp_stats_initialize(struct necp_fd_data *fd_data,
    struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    struct necp_stats_bufreq *bufreq)
{
	int error = 0;
	struct necp_stats_hdr stats_hdr = {};

	NECP_CLIENT_ASSERT_LOCKED(client);
	NECP_FD_ASSERT_LOCKED(fd_data);
	VERIFY(fd_data->stats_arena_active != NULL);
	VERIFY(fd_data->stats_arena_active->nai_arena != NULL);
	// The active arena must not be redirected or defunct.
	VERIFY(!(fd_data->stats_arena_active->nai_flags & (NAIF_REDIRECT | NAIF_DEFUNCT)));

	if (bufreq == NULL) {
		return EINVAL;
	}

	// Setup stats region
	error = necp_client_stats_bufreq(fd_data, client, flow_registration, bufreq, &stats_hdr);
	if (error) {
		return error;
	}
	// Notify ntstat about new flow
	if (flow_registration->stats_handler_context == NULL) {
		error = necp_client_stats_initial(flow_registration, stats_hdr.necp_stats_type, stats_hdr.necp_stats_ver);
		if (flow_registration->stats_handler_context != NULL) {
			ntstat_userland_stats_event(flow_registration->stats_handler_context, NECP_CLIENT_STATISTICS_EVENT_INIT);
		}
		NECP_CLIENT_FLOW_LOG(client, flow_registration, "Initialized stats <error %d>", error);
	}

	return error;
}
9351
9352 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_map_sysctls(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)9353 necp_client_map_sysctls(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
9354 {
9355 int result = 0;
9356 if (!retval) {
9357 retval = &result;
9358 }
9359
9360 do {
9361 mach_vm_address_t uaddr = 0;
9362 if (uap->buffer_size != sizeof(uaddr)) {
9363 *retval = EINVAL;
9364 break;
9365 }
9366
9367 *retval = necp_sysctl_arena_initialize(fd_data, false);
9368 if (*retval != 0) {
9369 break;
9370 }
9371
9372 mach_vm_offset_t off = 0;
9373 void *location = NULL;
9374 NECP_FD_LOCK(fd_data);
9375 location = necp_arena_sysctls_obj(fd_data, &off, NULL);
9376 NECP_FD_UNLOCK(fd_data);
9377
9378 if (location == NULL) {
9379 *retval = ENOENT;
9380 break;
9381 }
9382
9383 uaddr = fd_data->sysctl_mmap.ami_mapaddr + off;
9384 *retval = copyout(&uaddr, uap->buffer, sizeof(uaddr));
9385 } while (false);
9386
9387 return *retval;
9388 }
9389
9390 #endif /* !SKYWALK */
9391
// NECP_CLIENT_ACTION_COPY_ROUTE_STATISTICS: snapshot the per-route nstat
// counters for the client's current route into a necp_stat_counts and copy
// it out to uap->buffer. 64-bit counters are read with atomic_get_64; all
// locks are dropped before the copyout.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_copy_route_statistics(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
	    uap->buffer_size < sizeof(struct necp_stat_counts) || uap->buffer == 0) {
		NECPLOG0(LOG_ERR, "necp_client_copy_route_statistics bad input");
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_copy_route_statistics copyin client_id error (%d)", error);
		goto done;
	}

	// Lock
	NECP_FD_LOCK(fd_data);
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client != NULL) {
		NECP_CLIENT_ROUTE_LOCK(client);
		// Zeroed snapshot; stays all-zero if the route has no stats.
		struct necp_stat_counts route_stats = {};
		if (client->current_route != NULL && client->current_route->rt_stats != NULL) {
			struct nstat_counts *rt_stats = client->current_route->rt_stats;
			atomic_get_64(route_stats.necp_stat_rxpackets, &rt_stats->nstat_rxpackets);
			atomic_get_64(route_stats.necp_stat_rxbytes, &rt_stats->nstat_rxbytes);
			atomic_get_64(route_stats.necp_stat_txpackets, &rt_stats->nstat_txpackets);
			atomic_get_64(route_stats.necp_stat_txbytes, &rt_stats->nstat_txbytes);
			route_stats.necp_stat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
			route_stats.necp_stat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
			route_stats.necp_stat_txretransmit = rt_stats->nstat_txretransmit;
			route_stats.necp_stat_connectattempts = rt_stats->nstat_connectattempts;
			route_stats.necp_stat_connectsuccesses = rt_stats->nstat_connectsuccesses;
			route_stats.necp_stat_min_rtt = rt_stats->nstat_min_rtt;
			route_stats.necp_stat_avg_rtt = rt_stats->nstat_avg_rtt;
			route_stats.necp_stat_var_rtt = rt_stats->nstat_var_rtt;
			route_stats.necp_stat_route_flags = client->current_route->rt_flags;
		}

		// Unlock before copying out
		NECP_CLIENT_ROUTE_UNLOCK(client);
		NECP_CLIENT_UNLOCK(client);
		NECP_FD_UNLOCK(fd_data);

		error = copyout(&route_stats, uap->buffer, sizeof(route_stats));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_copy_route_statistics copyout error (%d)", error);
		}
	} else {
		// Unlock
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
	}


done:
	*retval = error;
	return error;
}
9455
// NECP_CLIENT_ACTION_UPDATE_CACHE: feed userland-reported TCP heuristics
// (ECN or TFO) for the client's flow back into the kernel's TCP heuristics
// cache for the current route's interface. The client's flow must have
// IPv4/IPv6 local and remote addresses. For non-platform binaries the
// reported "success" fields are zeroed before the update.
static NECP_CLIENT_ACTION_FUNCTION int
necp_client_update_cache(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
{
	int error = 0;
	struct necp_client *client = NULL;
	uuid_t client_id;

	if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) {
		error = EINVAL;
		goto done;
	}

	error = copyin(uap->client_id, client_id, sizeof(uuid_t));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_update_cache copyin client_id error (%d)", error);
		goto done;
	}

	NECP_FD_LOCK(fd_data);
	client = necp_client_fd_find_client_and_lock(fd_data, client_id);
	if (client == NULL) {
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
		goto done;
	}

	struct necp_client_flow_registration *flow_registration = necp_client_find_flow(client, client_id);
	if (flow_registration == NULL) {
		NECP_CLIENT_UNLOCK(client);
		NECP_FD_UNLOCK(fd_data);
		error = ENOENT;
		goto done;
	}

	NECP_CLIENT_ROUTE_LOCK(client);
	// This needs to be changed when TFO/ECN is supported by multiple flows
	struct necp_client_flow *flow = LIST_FIRST(&flow_registration->flow_list);
	if (flow == NULL ||
	    (flow->remote_addr.sa.sa_family != AF_INET &&
	    flow->remote_addr.sa.sa_family != AF_INET6) ||
	    (flow->local_addr.sa.sa_family != AF_INET &&
	    flow->local_addr.sa.sa_family != AF_INET6)) {
		error = EINVAL;
		NECPLOG(LOG_ERR, "necp_client_update_cache no flow error (%d)", error);
		goto done_unlock;
	}

	necp_cache_buffer cache_buffer;
	memset(&cache_buffer, 0, sizeof(cache_buffer));

	if (uap->buffer_size != sizeof(necp_cache_buffer) ||
	    uap->buffer == USER_ADDR_NULL) {
		error = EINVAL;
		goto done_unlock;
	}

	// Outer buffer describes the cache payload's type/version/size/address.
	error = copyin(uap->buffer, &cache_buffer, sizeof(cache_buffer));
	if (error) {
		NECPLOG(LOG_ERR, "necp_client_update_cache copyin cache buffer error (%d)", error);
		goto done_unlock;
	}

	if (cache_buffer.necp_cache_buf_type == NECP_CLIENT_CACHE_TYPE_ECN &&
	    cache_buffer.necp_cache_buf_ver == NECP_CLIENT_CACHE_TYPE_ECN_VER_1) {
		if (cache_buffer.necp_cache_buf_size != sizeof(necp_tcp_ecn_cache) ||
		    cache_buffer.necp_cache_buf_addr == USER_ADDR_NULL) {
			error = EINVAL;
			goto done_unlock;
		}

		necp_tcp_ecn_cache ecn_cache_buffer;
		memset(&ecn_cache_buffer, 0, sizeof(ecn_cache_buffer));

		error = copyin(cache_buffer.necp_cache_buf_addr, &ecn_cache_buffer, sizeof(necp_tcp_ecn_cache));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_update_cache copyin ecn cache buffer error (%d)", error);
			goto done_unlock;
		}

		if (client->current_route != NULL && client->current_route->rt_ifp != NULL) {
			if (!client->platform_binary) {
				// Don't trust success reports from non-platform binaries.
				ecn_cache_buffer.necp_tcp_ecn_heuristics_success = 0;
			}
			tcp_heuristics_ecn_update(&ecn_cache_buffer, client->current_route->rt_ifp,
			    (union sockaddr_in_4_6 *)&flow->local_addr);
		}
	} else if (cache_buffer.necp_cache_buf_type == NECP_CLIENT_CACHE_TYPE_TFO &&
	    cache_buffer.necp_cache_buf_ver == NECP_CLIENT_CACHE_TYPE_TFO_VER_1) {
		if (cache_buffer.necp_cache_buf_size != sizeof(necp_tcp_tfo_cache) ||
		    cache_buffer.necp_cache_buf_addr == USER_ADDR_NULL) {
			error = EINVAL;
			goto done_unlock;
		}

		necp_tcp_tfo_cache tfo_cache_buffer;
		memset(&tfo_cache_buffer, 0, sizeof(tfo_cache_buffer));

		error = copyin(cache_buffer.necp_cache_buf_addr, &tfo_cache_buffer, sizeof(necp_tcp_tfo_cache));
		if (error) {
			NECPLOG(LOG_ERR, "necp_client_update_cache copyin tfo cache buffer error (%d)", error);
			goto done_unlock;
		}

		if (client->current_route != NULL && client->current_route->rt_ifp != NULL) {
			if (!client->platform_binary) {
				// Don't trust success reports from non-platform binaries.
				tfo_cache_buffer.necp_tcp_tfo_heuristics_success = 0;
			}
			tcp_heuristics_tfo_update(&tfo_cache_buffer, client->current_route->rt_ifp,
			    (union sockaddr_in_4_6 *)&flow->local_addr,
			    (union sockaddr_in_4_6 *)&flow->remote_addr);
		}
	} else {
		error = EINVAL;
	}
done_unlock:
	NECP_CLIENT_ROUTE_UNLOCK(client);
	NECP_CLIENT_UNLOCK(client);
	NECP_FD_UNLOCK(fd_data);
done:
	*retval = error;
	return error;
}
9578
9579 #define NECP_CLIENT_ACTION_SIGN_DEFAULT_HOSTNAME_LENGTH 64
9580 #define NECP_CLIENT_ACTION_SIGN_MAX_HOSTNAME_LENGTH 1024
9581
9582 #define NECP_CLIENT_ACTION_SIGN_TAG_LENGTH 32
9583
9584 static NECP_CLIENT_ACTION_FUNCTION int
necp_client_sign(__unused struct necp_fd_data * fd_data,struct necp_client_action_args * uap,int * retval)9585 necp_client_sign(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval)
9586 {
9587 int error = 0;
9588 u_int32_t hostname_length = 0;
9589 u_int8_t tag[NECP_CLIENT_ACTION_SIGN_TAG_LENGTH] = {};
9590 struct necp_client_signable signable = {};
9591 union necp_sockaddr_union address_answer = {};
9592 u_int8_t *client_hostname = NULL;
9593 u_int8_t *allocated_hostname = NULL;
9594 u_int8_t default_hostname[NECP_CLIENT_ACTION_SIGN_DEFAULT_HOSTNAME_LENGTH] = "";
9595 uint32_t tag_size = sizeof(tag);
9596
9597 *retval = 0;
9598
9599 const bool has_resolver_entitlement = (priv_check_cred(kauth_cred_get(), PRIV_NET_VALIDATED_RESOLVER, 0) == 0);
9600 if (!has_resolver_entitlement) {
9601 NECPLOG0(LOG_ERR, "Process does not hold the necessary entitlement to sign resolver answers");
9602 error = EPERM;
9603 goto done;
9604 }
9605
9606 if (uap->client_id == 0 || uap->client_id_len < sizeof(struct necp_client_signable)) {
9607 error = EINVAL;
9608 goto done;
9609 }
9610
9611 if (uap->buffer == 0 || uap->buffer_size != NECP_CLIENT_ACTION_SIGN_TAG_LENGTH) {
9612 error = EINVAL;
9613 goto done;
9614 }
9615
9616 error = copyin(uap->client_id, &signable, sizeof(signable));
9617 if (error) {
9618 NECPLOG(LOG_ERR, "necp_client_sign copyin signable error (%d)", error);
9619 goto done;
9620 }
9621
9622 if (signable.sign_type != NECP_CLIENT_SIGN_TYPE_RESOLVER_ANSWER) {
9623 NECPLOG(LOG_ERR, "necp_client_sign unknown signable type (%u)", signable.sign_type);
9624 error = EINVAL;
9625 goto done;
9626 }
9627
9628 if (uap->client_id_len < sizeof(struct necp_client_resolver_answer)) {
9629 error = EINVAL;
9630 goto done;
9631 }
9632
9633 error = copyin(uap->client_id + sizeof(signable), &address_answer, sizeof(address_answer));
9634 if (error) {
9635 NECPLOG(LOG_ERR, "necp_client_sign copyin address_answer error (%d)", error);
9636 goto done;
9637 }
9638
9639 error = copyin(uap->client_id + sizeof(signable) + sizeof(address_answer), &hostname_length, sizeof(hostname_length));
9640 if (error) {
9641 NECPLOG(LOG_ERR, "necp_client_sign copyin hostname_length error (%d)", error);
9642 goto done;
9643 }
9644
9645 if (hostname_length > NECP_CLIENT_ACTION_SIGN_MAX_HOSTNAME_LENGTH) {
9646 error = EINVAL;
9647 goto done;
9648 }
9649
9650 if (hostname_length > NECP_CLIENT_ACTION_SIGN_DEFAULT_HOSTNAME_LENGTH) {
9651 if ((allocated_hostname = (u_int8_t *)kalloc_data(hostname_length, Z_WAITOK | Z_ZERO)) == NULL) {
9652 NECPLOG(LOG_ERR, "necp_client_sign malloc hostname %u failed", hostname_length);
9653 error = ENOMEM;
9654 goto done;
9655 }
9656
9657 client_hostname = allocated_hostname;
9658 } else {
9659 client_hostname = default_hostname;
9660 }
9661
9662 error = copyin(uap->client_id + sizeof(signable) + sizeof(address_answer) + sizeof(hostname_length), client_hostname, hostname_length);
9663 if (error) {
9664 NECPLOG(LOG_ERR, "necp_client_sign copyin hostname error (%d)", error);
9665 goto done;
9666 }
9667
9668 address_answer.sin.sin_port = 0;
9669 error = necp_sign_resolver_answer(signable.client_id, client_hostname, hostname_length,
9670 (u_int8_t *)&address_answer, sizeof(address_answer),
9671 tag, &tag_size);
9672 if (tag_size != sizeof(tag)) {
9673 NECPLOG(LOG_ERR, "necp_client_sign unexpected tag size %u", tag_size);
9674 error = EINVAL;
9675 goto done;
9676 }
9677 error = copyout(tag, uap->buffer, tag_size);
9678 if (error) {
9679 NECPLOG(LOG_ERR, "necp_client_sign copyout error (%d)", error);
9680 goto done;
9681 }
9682
9683 done:
9684 if (allocated_hostname != NULL) {
9685 kfree_data(allocated_hostname, hostname_length);
9686 allocated_hostname = NULL;
9687 }
9688 *retval = error;
9689 return error;
9690 }
9691
// System-call entry point for NECP client actions: resolve the necp fd,
// run the MAC policy check, and dispatch on uap->action to the matching
// handler. Each handler fills *retval; its return value is passed through.
int
necp_client_action(struct proc *p, struct necp_client_action_args *uap, int *retval)
{
	struct fileproc *fp;
	int error = 0;
	int return_value = 0;
	struct necp_fd_data *fd_data = NULL;

	// Holds a reference on fp until the fp_drop() at the end.
	error = necp_find_fd_data(p, uap->necp_fd, &fp, &fd_data);
	if (error != 0) {
		NECPLOG(LOG_ERR, "necp_client_action find fd error (%d)", error);
		return error;
	}

	u_int32_t action = uap->action;

#if CONFIG_MACF
	error = mac_necp_check_client_action(p, fp->fp_glob, action);
	if (error) {
		return_value = error;
		goto done;
	}
#endif /* MACF */

	switch (action) {
	case NECP_CLIENT_ACTION_ADD: {
		return_value = necp_client_add(p, fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_CLAIM: {
		return_value = necp_client_claim(p, fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_REMOVE: {
		return_value = necp_client_remove(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_PARAMETERS:
	case NECP_CLIENT_ACTION_COPY_RESULT:
	case NECP_CLIENT_ACTION_COPY_UPDATED_RESULT: {
		return_value = necp_client_copy(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_LIST: {
		return_value = necp_client_list(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_ADD_FLOW: {
		return_value = necp_client_add_flow(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_REMOVE_FLOW: {
		return_value = necp_client_remove_flow(fd_data, uap, retval);
		break;
	}
#if SKYWALK
	case NECP_CLIENT_ACTION_REQUEST_NEXUS_INSTANCE: {
		return_value = necp_client_request_nexus(fd_data, uap, retval);
		break;
	}
#endif /* SKYWALK */
	case NECP_CLIENT_ACTION_AGENT: {
		return_value = necp_client_agent_action(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_AGENT: {
		return_value = necp_client_copy_agent(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_AGENT_USE: {
		return_value = necp_client_agent_use(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_ACQUIRE_AGENT_TOKEN: {
		return_value = necp_client_acquire_agent_token(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_INTERFACE: {
		return_value = necp_client_copy_interface(fd_data, uap, retval);
		break;
	}
#if SKYWALK
	case NECP_CLIENT_ACTION_GET_INTERFACE_ADDRESS: {
		return_value = necp_client_get_interface_address(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_SET_STATISTICS: {
		// Legacy action; no longer supported.
		return_value = ENOTSUP;
		break;
	}
	case NECP_CLIENT_ACTION_MAP_SYSCTLS: {
		return_value = necp_client_map_sysctls(fd_data, uap, retval);
		break;
	}
#endif /* SKYWALK */
	case NECP_CLIENT_ACTION_COPY_ROUTE_STATISTICS: {
		return_value = necp_client_copy_route_statistics(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_UPDATE_CACHE: {
		return_value = necp_client_update_cache(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_COPY_CLIENT_UPDATE: {
		return_value = necp_client_copy_client_update(fd_data, uap, retval);
		break;
	}
	case NECP_CLIENT_ACTION_SIGN: {
		return_value = necp_client_sign(fd_data, uap, retval);
		break;
	}
	default: {
		NECPLOG(LOG_ERR, "necp_client_action unknown action (%u)", action);
		return_value = EINVAL;
		break;
	}
	}

done:
	fp_drop(p, uap->necp_fd, fp, 0);
	return return_value;
}
9814
9815 #define NECP_MAX_MATCH_POLICY_PARAMETER_SIZE 1024
9816
9817 int
necp_match_policy(struct proc * p,struct necp_match_policy_args * uap,int32_t * retval)9818 necp_match_policy(struct proc *p, struct necp_match_policy_args *uap, int32_t *retval)
9819 {
9820 #pragma unused(retval)
9821 u_int8_t *parameters = NULL;
9822 struct necp_aggregate_result returned_result;
9823 int error = 0;
9824
9825 if (uap == NULL) {
9826 error = EINVAL;
9827 goto done;
9828 }
9829
9830 if (uap->parameters == 0 || uap->parameters_size == 0 || uap->parameters_size > NECP_MAX_MATCH_POLICY_PARAMETER_SIZE || uap->returned_result == 0) {
9831 error = EINVAL;
9832 goto done;
9833 }
9834
9835 parameters = (u_int8_t *)kalloc_data(uap->parameters_size, Z_WAITOK | Z_ZERO);
9836 if (parameters == NULL) {
9837 error = ENOMEM;
9838 goto done;
9839 }
9840 // Copy parameters in
9841 error = copyin(uap->parameters, parameters, uap->parameters_size);
9842 if (error) {
9843 goto done;
9844 }
9845
9846 error = necp_application_find_policy_match_internal(p, parameters, uap->parameters_size,
9847 &returned_result, NULL, NULL, 0, NULL, NULL, NULL, NULL, NULL, false, false, NULL);
9848 if (error) {
9849 goto done;
9850 }
9851
9852 // Copy return value back
9853 error = copyout(&returned_result, uap->returned_result, sizeof(struct necp_aggregate_result));
9854 if (error) {
9855 goto done;
9856 }
9857 done:
9858 if (parameters != NULL) {
9859 kfree_data(parameters, uap->parameters_size);
9860 }
9861 return error;
9862 }
9863
9864 /// Socket operations
9865
9866 static errno_t
necp_set_socket_attribute(u_int8_t * buffer,size_t buffer_length,u_int8_t type,char ** buffer_p,bool * single_tlv)9867 necp_set_socket_attribute(u_int8_t *buffer, size_t buffer_length, u_int8_t type, char **buffer_p, bool *single_tlv)
9868 {
9869 int error = 0;
9870 int cursor = 0;
9871 size_t string_size = 0;
9872 char *local_string = NULL;
9873 u_int8_t *value = NULL;
9874
9875 cursor = necp_buffer_find_tlv(buffer, buffer_length, 0, type, NULL, 0);
9876 if (cursor < 0) {
9877 // This will clear out the parameter
9878 goto done;
9879 }
9880
9881 string_size = necp_buffer_get_tlv_length(buffer, cursor);
9882 if (single_tlv != NULL && (buffer_length == sizeof(struct necp_tlv_header) + string_size)) {
9883 *single_tlv = true;
9884 }
9885 if (string_size == 0 || string_size > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
9886 // This will clear out the parameter
9887 goto done;
9888 }
9889
9890 local_string = (char *)kalloc_data(string_size + 1, Z_WAITOK | Z_ZERO);
9891 if (local_string == NULL) {
9892 NECPLOG(LOG_ERR, "Failed to allocate a socket attribute buffer (size %zu)", string_size);
9893 goto fail;
9894 }
9895
9896 value = necp_buffer_get_tlv_value(buffer, cursor, NULL);
9897 if (value == NULL) {
9898 NECPLOG0(LOG_ERR, "Failed to get socket attribute");
9899 goto fail;
9900 }
9901
9902 memcpy(local_string, value, string_size);
9903 local_string[string_size] = 0;
9904
9905 done:
9906 if (*buffer_p != NULL) {
9907 kfree_data_addr(*buffer_p);
9908 *buffer_p = NULL;
9909 }
9910
9911 *buffer_p = local_string;
9912 return 0;
9913 fail:
9914 if (local_string != NULL) {
9915 kfree_data(local_string, string_size + 1);
9916 }
9917 return error;
9918 }
9919
// Setsockopt handler: copy in a TLV buffer and update the socket's NECP
// attribute strings (domain context, domain, domain owner, tracker domain,
// account). If the buffer contains only the DOMAIN_CONTEXT TLV, the other
// attributes are deliberately left untouched. Returns 0 or an errno.
errno_t
necp_set_socket_attributes(struct inp_necp_attributes *attributes, struct sockopt *sopt)
{
	int error = 0;
	u_int8_t *buffer = NULL;
	bool single_tlv = false;
	size_t valsize = sopt->sopt_valsize;
	// Reject empty or oversized option buffers (bound: 4 maximal TLVs).
	if (valsize == 0 ||
	    valsize > ((sizeof(struct necp_tlv_header) + NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) * 4)) {
		goto done;
	}

	buffer = (u_int8_t *)kalloc_data(valsize, Z_WAITOK | Z_ZERO);
	if (buffer == NULL) {
		goto done;
	}

	error = sooptcopyin(sopt, buffer, valsize, 0);
	if (error) {
		goto done;
	}

	// If NECP_TLV_ATTRIBUTE_DOMAIN_CONTEXT is being set/cleared separately from the other attributes,
	// do not clear other attributes.
	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN_CONTEXT, &attributes->inp_domain_context, &single_tlv);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set domain context TLV for socket attributes");
		goto done;
	}
	if (single_tlv == true) {
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN, &attributes->inp_domain, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set domain TLV for socket attributes");
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_DOMAIN_OWNER, &attributes->inp_domain_owner, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set domain owner TLV for socket attributes");
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_TRACKER_DOMAIN, &attributes->inp_tracker_domain, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set tracker domain TLV for socket attributes");
		goto done;
	}

	error = necp_set_socket_attribute(buffer, valsize, NECP_TLV_ATTRIBUTE_ACCOUNT, &attributes->inp_account, NULL);
	if (error) {
		NECPLOG0(LOG_ERR, "Could not set account TLV for socket attributes");
		goto done;
	}

done:
	NECP_SOCKET_ATTRIBUTE_LOG("NECP ATTRIBUTES SOCKET - domain <%s> owner <%s> context <%s> tracker domain <%s> account <%s>",
	    attributes->inp_domain,
	    attributes->inp_domain_owner,
	    attributes->inp_domain_context,
	    attributes->inp_tracker_domain,
	    attributes->inp_account);

	if (necp_debug) {
		NECPLOG(LOG_DEBUG, "Set on socket: Domain %s, Domain owner %s, Domain context %s, Tracker domain %s, Account %s",
		    attributes->inp_domain,
		    attributes->inp_domain_owner,
		    attributes->inp_domain_context,
		    attributes->inp_tracker_domain,
		    attributes->inp_account);
	}

	if (buffer != NULL) {
		kfree_data(buffer, valsize);
	}

	return error;
}
10000
10001 errno_t
necp_get_socket_attributes(struct inp_necp_attributes * attributes,struct sockopt * sopt)10002 necp_get_socket_attributes(struct inp_necp_attributes *attributes, struct sockopt *sopt)
10003 {
10004 int error = 0;
10005 u_int8_t *buffer = NULL;
10006 u_int8_t *cursor = NULL;
10007 size_t valsize = 0;
10008
10009 if (attributes->inp_domain != NULL) {
10010 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_domain);
10011 }
10012 if (attributes->inp_domain_owner != NULL) {
10013 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_domain_owner);
10014 }
10015 if (attributes->inp_domain_context != NULL) {
10016 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_domain_context);
10017 }
10018 if (attributes->inp_tracker_domain != NULL) {
10019 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_tracker_domain);
10020 }
10021 if (attributes->inp_account != NULL) {
10022 valsize += sizeof(struct necp_tlv_header) + strlen(attributes->inp_account);
10023 }
10024 if (valsize == 0) {
10025 goto done;
10026 }
10027
10028 buffer = (u_int8_t *)kalloc_data(valsize, Z_WAITOK | Z_ZERO);
10029 if (buffer == NULL) {
10030 goto done;
10031 }
10032
10033 cursor = buffer;
10034 if (attributes->inp_domain != NULL) {
10035 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN, strlen(attributes->inp_domain), attributes->inp_domain,
10036 buffer, valsize);
10037 }
10038
10039 if (attributes->inp_domain_owner != NULL) {
10040 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN_OWNER, strlen(attributes->inp_domain_owner), attributes->inp_domain_owner,
10041 buffer, valsize);
10042 }
10043
10044 if (attributes->inp_domain_context != NULL) {
10045 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN_CONTEXT, strlen(attributes->inp_domain_context), attributes->inp_domain_context,
10046 buffer, valsize);
10047 }
10048
10049 if (attributes->inp_tracker_domain != NULL) {
10050 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_TRACKER_DOMAIN, strlen(attributes->inp_tracker_domain), attributes->inp_tracker_domain,
10051 buffer, valsize);
10052 }
10053
10054 if (attributes->inp_account != NULL) {
10055 cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_ACCOUNT, strlen(attributes->inp_account), attributes->inp_account,
10056 buffer, valsize);
10057 }
10058
10059 error = sooptcopyout(sopt, buffer, valsize);
10060 if (error) {
10061 goto done;
10062 }
10063 done:
10064 if (buffer != NULL) {
10065 kfree_data(buffer, valsize);
10066 }
10067
10068 return error;
10069 }
10070
10071 /*
10072 * necp_set_socket_domain_attributes
10073 * Called from soconnectlock/soconnectxlock to directly set the tracker domain and owner for
10074 * a newly marked tracker socket.
10075 */
10076 errno_t
necp_set_socket_domain_attributes(struct socket * so,const char * domain,const char * domain_owner)10077 necp_set_socket_domain_attributes(struct socket *so, const char *domain, const char *domain_owner)
10078 {
10079 int error = 0;
10080 struct inpcb *inp = NULL;
10081 u_int8_t *buffer = NULL;
10082 size_t valsize = 0;
10083
10084 if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) {
10085 error = EINVAL;
10086 goto fail;
10087 }
10088
10089 // Set domain (required)
10090
10091 valsize = strlen(domain);
10092 if (valsize == 0 || valsize > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
10093 error = EINVAL;
10094 goto fail;
10095 }
10096
10097 buffer = (u_int8_t *)kalloc_data(valsize + 1, Z_WAITOK | Z_ZERO);
10098 if (buffer == NULL) {
10099 error = ENOMEM;
10100 goto fail;
10101 }
10102 bcopy(domain, buffer, valsize);
10103 buffer[valsize] = 0;
10104
10105 inp = sotoinpcb(so);
10106 // Do not overwrite a previously set domain if tracker domain is different.
10107 if (inp->inp_necp_attributes.inp_domain != NULL) {
10108 if (strlen(inp->inp_necp_attributes.inp_domain) != strlen(domain) ||
10109 strncmp(inp->inp_necp_attributes.inp_domain, domain, strlen(domain)) != 0) {
10110 if (inp->inp_necp_attributes.inp_tracker_domain != NULL) {
10111 kfree_data_addr(inp->inp_necp_attributes.inp_tracker_domain);
10112 inp->inp_necp_attributes.inp_tracker_domain = NULL;
10113 }
10114 inp->inp_necp_attributes.inp_tracker_domain = (char *)buffer;
10115 }
10116 } else {
10117 inp->inp_necp_attributes.inp_domain = (char *)buffer;
10118 }
10119 buffer = NULL;
10120
10121 // set domain_owner (required only for tracker)
10122 if (!(so->so_flags1 & SOF1_KNOWN_TRACKER)) {
10123 goto done;
10124 }
10125
10126 valsize = strlen(domain_owner);
10127 if (valsize == 0 || valsize > NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) {
10128 error = EINVAL;
10129 goto fail;
10130 }
10131
10132 buffer = (u_int8_t *)kalloc_data(valsize + 1, Z_WAITOK | Z_ZERO);
10133 if (buffer == NULL) {
10134 error = ENOMEM;
10135 goto fail;
10136 }
10137 bcopy(domain_owner, buffer, valsize);
10138 buffer[valsize] = 0;
10139
10140 inp = sotoinpcb(so);
10141 if (inp->inp_necp_attributes.inp_domain_owner != NULL) {
10142 kfree_data_addr(inp->inp_necp_attributes.inp_domain_owner);
10143 inp->inp_necp_attributes.inp_domain_owner = NULL;
10144 }
10145 inp->inp_necp_attributes.inp_domain_owner = (char *)buffer;
10146 buffer = NULL;
10147
10148 done:
10149 // Log if it is a known tracker
10150 if (so->so_flags1 & SOF1_KNOWN_TRACKER) {
10151 NECP_CLIENT_TRACKER_LOG(NECP_SOCKET_PID(so),
10152 "NECP ATTRIBUTES SOCKET - domain <%s> owner <%s> context <%s> tracker domain <%s> account <%s> "
10153 "<so flags - is_tracker %X non-app-initiated %X app-approved-domain %X",
10154 inp->inp_necp_attributes.inp_domain ? "present" : "not set",
10155 inp->inp_necp_attributes.inp_domain_owner ? "present" : "not set",
10156 inp->inp_necp_attributes.inp_domain_context ? "present" : "not set",
10157 inp->inp_necp_attributes.inp_tracker_domain ? "present" : "not set",
10158 inp->inp_necp_attributes.inp_account ? "present" : "not set",
10159 so->so_flags1 & SOF1_KNOWN_TRACKER,
10160 so->so_flags1 & SOF1_TRACKER_NON_APP_INITIATED,
10161 so->so_flags1 & SOF1_APPROVED_APP_DOMAIN);
10162 }
10163
10164 NECP_SOCKET_PARAMS_LOG(so, "NECP ATTRIBUTES SOCKET - domain <%s> owner <%s> context <%s> tracker domain <%s> account <%s> "
10165 "<so flags - is_tracker %X non-app-initiated %X app-approved-domain %X",
10166 inp->inp_necp_attributes.inp_domain,
10167 inp->inp_necp_attributes.inp_domain_owner,
10168 inp->inp_necp_attributes.inp_domain_context,
10169 inp->inp_necp_attributes.inp_tracker_domain,
10170 inp->inp_necp_attributes.inp_account,
10171 so->so_flags1 & SOF1_KNOWN_TRACKER,
10172 so->so_flags1 & SOF1_TRACKER_NON_APP_INITIATED,
10173 so->so_flags1 & SOF1_APPROVED_APP_DOMAIN);
10174
10175 if (necp_debug) {
10176 NECPLOG(LOG_DEBUG, "Set on socket: Domain <%s> Domain owner <%s> Domain context <%s> Tracker domain <%s> Account <%s> ",
10177 inp->inp_necp_attributes.inp_domain,
10178 inp->inp_necp_attributes.inp_domain_owner,
10179 inp->inp_necp_attributes.inp_domain_context,
10180 inp->inp_necp_attributes.inp_tracker_domain,
10181 inp->inp_necp_attributes.inp_account);
10182 }
10183 fail:
10184 if (buffer != NULL) {
10185 kfree_data(buffer, valsize + 1);
10186 }
10187 return error;
10188 }
10189
/*
 * necp_create_nexus_assign_message
 * Build a TLV message describing a nexus flow assignment. Only the pieces
 * supplied by the caller (non-null / valid) are included. Returns a
 * kalloc_data buffer the caller owns, with its length in *message_length,
 * or NULL if nothing was requested or allocation failed. Note that
 * *message_length is written only on success.
 */
void *
necp_create_nexus_assign_message(uuid_t nexus_instance, u_int32_t nexus_port, void *key, uint32_t key_length,
    struct necp_client_endpoint *local_endpoint, struct necp_client_endpoint *remote_endpoint, struct ether_addr *local_ether_addr,
    u_int32_t flow_adv_index, void *flow_stats, size_t *message_length)
{
	u_int8_t *buffer = NULL;
	u_int8_t *cursor = NULL;
	size_t valsize = 0;
	bool has_nexus_assignment = FALSE;

	// First pass: accumulate the exact size of every TLV that will be
	// emitted. This must mirror the write pass below one-for-one.
	if (!uuid_is_null(nexus_instance)) {
		has_nexus_assignment = TRUE;
		valsize += sizeof(struct necp_tlv_header) + sizeof(uuid_t);
		valsize += sizeof(struct necp_tlv_header) + sizeof(u_int32_t);
	}
	if (flow_adv_index != NECP_FLOWADV_IDX_INVALID) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(u_int32_t);
	}
	if (key != NULL && key_length > 0) {
		valsize += sizeof(struct necp_tlv_header) + key_length;
	}
	if (local_endpoint != NULL) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct necp_client_endpoint);
	}
	if (remote_endpoint != NULL) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct necp_client_endpoint);
	}
	if (local_ether_addr != NULL) {
		valsize += sizeof(struct necp_tlv_header) + sizeof(struct ether_addr);
	}
	if (flow_stats != NULL) {
		// The flow_stats TLV carries the pointer value itself, not the
		// pointed-to data (in-kernel message).
		valsize += sizeof(struct necp_tlv_header) + sizeof(void *);
	}
	if (valsize == 0) {
		return NULL;
	}

	buffer = kalloc_data(valsize, Z_WAITOK | Z_ZERO);
	if (buffer == NULL) {
		return NULL;
	}

	// Second pass: write the TLVs in the same order used for sizing.
	cursor = buffer;
	if (has_nexus_assignment) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_INSTANCE, sizeof(uuid_t), nexus_instance, buffer, valsize);
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_PORT, sizeof(u_int32_t), &nexus_port, buffer, valsize);
	}
	if (flow_adv_index != NECP_FLOWADV_IDX_INVALID) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_PORT_FLOW_INDEX, sizeof(u_int32_t), &flow_adv_index, buffer, valsize);
	}
	if (key != NULL && key_length > 0) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_PARAMETER_NEXUS_KEY, key_length, key, buffer, valsize);
	}
	if (local_endpoint != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_LOCAL_ENDPOINT, sizeof(struct necp_client_endpoint), local_endpoint, buffer, valsize);
	}
	if (remote_endpoint != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_REMOTE_ENDPOINT, sizeof(struct necp_client_endpoint), remote_endpoint, buffer, valsize);
	}
	if (local_ether_addr != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_LOCAL_ETHER_ADDR, sizeof(struct ether_addr), local_ether_addr, buffer, valsize);
	}
	if (flow_stats != NULL) {
		cursor = necp_buffer_write_tlv(cursor, NECP_CLIENT_RESULT_NEXUS_FLOW_STATS, sizeof(void *), &flow_stats, buffer, valsize);
	}

	*message_length = valsize;

	return buffer;
}
10260
10261 void
necp_inpcb_remove_cb(struct inpcb * inp)10262 necp_inpcb_remove_cb(struct inpcb *inp)
10263 {
10264 if (!uuid_is_null(inp->necp_client_uuid)) {
10265 necp_client_unregister_socket_flow(inp->necp_client_uuid, inp);
10266 uuid_clear(inp->necp_client_uuid);
10267 }
10268 }
10269
10270 void
necp_inpcb_dispose(struct inpcb * inp)10271 necp_inpcb_dispose(struct inpcb *inp)
10272 {
10273 necp_inpcb_remove_cb(inp); // Clear out socket registrations if not yet done
10274 if (inp->inp_necp_attributes.inp_domain != NULL) {
10275 kfree_data_addr(inp->inp_necp_attributes.inp_domain);
10276 inp->inp_necp_attributes.inp_domain = NULL;
10277 }
10278 if (inp->inp_necp_attributes.inp_account != NULL) {
10279 kfree_data_addr(inp->inp_necp_attributes.inp_account);
10280 inp->inp_necp_attributes.inp_account = NULL;
10281 }
10282 if (inp->inp_necp_attributes.inp_domain_owner != NULL) {
10283 kfree_data_addr(inp->inp_necp_attributes.inp_domain_owner);
10284 inp->inp_necp_attributes.inp_domain_owner = NULL;
10285 }
10286 if (inp->inp_necp_attributes.inp_domain_context != NULL) {
10287 kfree_data_addr(inp->inp_necp_attributes.inp_domain_context);
10288 inp->inp_necp_attributes.inp_domain_context = NULL;
10289 }
10290 if (inp->inp_necp_attributes.inp_tracker_domain != NULL) {
10291 kfree_data_addr(inp->inp_necp_attributes.inp_tracker_domain);
10292 inp->inp_necp_attributes.inp_tracker_domain = NULL;
10293 }
10294 }
10295
10296 void
necp_mppcb_dispose(struct mppcb * mpp)10297 necp_mppcb_dispose(struct mppcb *mpp)
10298 {
10299 if (!uuid_is_null(mpp->necp_client_uuid)) {
10300 necp_client_unregister_multipath_cb(mpp->necp_client_uuid, mpp);
10301 uuid_clear(mpp->necp_client_uuid);
10302 }
10303
10304 if (mpp->inp_necp_attributes.inp_domain != NULL) {
10305 kfree_data_addr(mpp->inp_necp_attributes.inp_domain);
10306 mpp->inp_necp_attributes.inp_domain = NULL;
10307 }
10308 if (mpp->inp_necp_attributes.inp_account != NULL) {
10309 kfree_data_addr(mpp->inp_necp_attributes.inp_account);
10310 mpp->inp_necp_attributes.inp_account = NULL;
10311 }
10312 if (mpp->inp_necp_attributes.inp_domain_owner != NULL) {
10313 kfree_data_addr(mpp->inp_necp_attributes.inp_domain_owner);
10314 mpp->inp_necp_attributes.inp_domain_owner = NULL;
10315 }
10316 if (mpp->inp_necp_attributes.inp_tracker_domain != NULL) {
10317 kfree_data_addr(mpp->inp_necp_attributes.inp_tracker_domain);
10318 mpp->inp_necp_attributes.inp_tracker_domain = NULL;
10319 }
10320 }
10321
10322 /// Module init
10323
/*
 * necp_client_init
 * One-time module initialization: create the flow and flow-registration
 * mcaches, allocate the deferred-work thread calls, and initialize the
 * global client lists and trees. Panics on mcache creation failure since
 * NECP cannot operate without these caches.
 */
void
necp_client_init(void)
{
	necp_flow_size = sizeof(struct necp_client_flow);
	necp_flow_cache = mcache_create(NECP_FLOW_ZONE_NAME, necp_flow_size, sizeof(uint64_t), 0, MCR_SLEEP);
	if (necp_flow_cache == NULL) {
		panic("mcache_create(necp_flow_cache) failed");
		/* NOTREACHED */
	}

	necp_flow_registration_size = sizeof(struct necp_client_flow_registration);
	necp_flow_registration_cache = mcache_create(NECP_FLOW_REGISTRATION_ZONE_NAME, necp_flow_registration_size, sizeof(uint64_t), 0, MCR_SLEEP);
	if (necp_flow_registration_cache == NULL) {
		panic("mcache_create(necp_client_flow_registration) failed");
		/* NOTREACHED */
	}

	// One-shot thread call used to defer "update all clients" work.
	necp_client_update_tcall = thread_call_allocate_with_options(necp_update_all_clients_callout, NULL,
	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
	VERIFY(necp_client_update_tcall != NULL);
#if SKYWALK

	// Skywalk-only deferred work: periodic stats collection and arena reaping.
	necp_client_collect_stats_tcall = thread_call_allocate_with_options(necp_collect_stats_client_callout, NULL,
	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
	VERIFY(necp_client_collect_stats_tcall != NULL);

	necp_close_empty_arenas_tcall = thread_call_allocate_with_options(necp_close_empty_arenas_callout, NULL,
	    THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
	VERIFY(necp_close_empty_arenas_tcall != NULL);
#endif /* SKYWALK */

	LIST_INIT(&necp_fd_list);
	LIST_INIT(&necp_fd_observer_list);
	LIST_INIT(&necp_collect_stats_flow_list);

	RB_INIT(&necp_client_global_tree);
	RB_INIT(&necp_client_flow_global_tree);
}
10362
/*
 * necp_client_reap_caches
 * Reclaim memory from the NECP flow caches. When purge is TRUE the caches
 * are drained aggressively (e.g. under memory pressure); otherwise only
 * idle buffers are reaped.
 */
void
necp_client_reap_caches(boolean_t purge)
{
	mcache_reap_now(necp_flow_cache, purge);
	mcache_reap_now(necp_flow_registration_cache, purge);
}
10369
10370 #if SKYWALK
10371 pid_t
necp_client_get_proc_pid_from_arena_info(struct skmem_arena_mmap_info * arena_info)10372 necp_client_get_proc_pid_from_arena_info(struct skmem_arena_mmap_info *arena_info)
10373 {
10374 ASSERT((arena_info->ami_arena->ar_type == SKMEM_ARENA_TYPE_NECP) || (arena_info->ami_arena->ar_type == SKMEM_ARENA_TYPE_SYSTEM));
10375
10376 if (arena_info->ami_arena->ar_type == SKMEM_ARENA_TYPE_NECP) {
10377 struct necp_arena_info *nai = container_of(arena_info, struct necp_arena_info, nai_mmap);
10378 return nai->nai_proc_pid;
10379 } else {
10380 struct necp_fd_data *fd_data = container_of(arena_info, struct necp_fd_data, sysctl_mmap);
10381 return fd_data->proc_pid;
10382 }
10383 }
#endif /* SKYWALK */
10385