/*
 * Copyright (c) 2019-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <sys/random.h>
#include <sys/sdt.h>
#include <net/sockaddr_utils.h>

#define NETIF_AGENT_FLOW_MAX 16

/* automatically register a netagent at constructor time */
static int nif_netagent = 1;

#if (DEVELOPMENT || DEBUG)
SYSCTL_INT(_kern_skywalk_netif, OID_AUTO, netagent,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nif_netagent, 0, "");
#endif /* !DEVELOPMENT && !DEBUG */

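/*
 * Copy the interface's link-layer address into *addr. Fails with
 * ENOTSUP when the attached interface does not carry a 6-byte
 * Ethernet-type address.
 */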
SK_NO_INLINE_ATTRIBUTE
static int
get_mac_addr(struct nx_netif *nif, struct ether_addr *addr)
{
	struct ifnet *ifp = nif->nif_ifp;
	struct ifaddr *lladdr;

	ASSERT(ifp != NULL);
	lladdr = ifp->if_lladdr;

	if (SDL(lladdr->ifa_addr)->sdl_alen == ETHER_ADDR_LEN &&
	    SDL(lladdr->ifa_addr)->sdl_type == IFT_ETHER) {
		ifnet_lladdr_copy_bytes(ifp, addr, ETHER_ADDR_LEN);
		return 0;
	}
	return ENOTSUP;
}

static uint64_t ipv6_ula_interface_id = 1;

/*
 * Generate an IPv6 ULA as described in RFC 4193.
 */
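/*
 * Resulting address layout (RFC 4193, Section 3.1):
 *
 *   | 7 bits |1|  40 bits   |  16 bits  |          64 bits           |
 *   +--------+-+------------+-----------+----------------------------+
 *   | Prefix |L| Global ID  | Subnet ID |        Interface ID        |
 *   +--------+-+------------+-----------+----------------------------+
 *
 * buf[0] carries the prefix and the L bit (0xfd), buf[1..5] the random
 * Global ID, buf[6..7] the Subnet ID and buf[8..15] the Interface ID.
 */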
SK_NO_INLINE_ATTRIBUTE
static void
get_ipv6_ula(struct in6_addr *addr)
{
	uint8_t buf[16];
	uint64_t interface_id;

	bzero(buf, sizeof(buf));

	/* Start with the fc00::/7 prefix with the local (L) bit set */
	buf[0] = 0xfd;

	/*
	 * RFC 4193 describes a sample method to generate a 40-bit
	 * pseudo-random Global ID based on the current time and an
	 * EUI-64. Simplify it by just generating random bytes, since
	 * it is the uniqueness that matters, not how it is achieved.
	 */
	read_frandom(&buf[1], 5);

	/* Hardcode the subnet number to 0 */
	buf[6] = 0;
	buf[7] = 0;

	/* Use a monotonically increasing interface ID */
	interface_id = htonll(ipv6_ula_interface_id);
	bcopy(&interface_id, &buf[8], sizeof(uint64_t));
	do {
		ipv6_ula_interface_id++;
	} while (ipv6_ula_interface_id == 0);

	/* Return the generated address */
	static_assert(sizeof(buf) == sizeof(struct in6_addr));
	bcopy(buf, addr, sizeof(struct in6_addr));

#if SK_LOG
	char addrbuf[MAX_IPv6_STR_LEN];
	SK_DF(SK_VERB_NETIF, "generated IPv6 address: %s",
	    sk_ntop(AF_INET6, addr, addrbuf, sizeof(addrbuf)));
#endif /* SK_LOG */
}

SK_NO_INLINE_ATTRIBUTE
static void
get_ipv6_sockaddr(struct sockaddr_in6 *sin6)
{
	sin6->sin6_len = sizeof(struct sockaddr_in6);
	sin6->sin6_family = AF_INET6;
	get_ipv6_ula(&sin6->sin6_addr);
}

SK_NO_INLINE_ATTRIBUTE
static int
validate_ipv6_sockaddr(struct sockaddr_in6 *sin6)
{
	if (sin6->sin6_family != AF_INET6) {
		SK_ERR("invalid source family");
		return EINVAL;
	}
	if (sin6->sin6_len != sizeof(struct sockaddr_in6)) {
		SK_ERR("invalid source length");
		return EINVAL;
	}
	/*
	 * XXX
	 * We should use the stricter check IN6_IS_ADDR_UNIQUE_LOCAL().
	 * Leaving this as is for now because this gives us more
	 * flexibility on what addresses can be used for testing.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
		SK_ERR("address unspecified");
		return EINVAL;
	}
	return 0;
}

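/*
 * A candidate flow matches only if it was added by the same process,
 * is itself an IPv6 ULA flow, and carries the same local and remote
 * addresses as the incoming request.
 */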
SK_NO_INLINE_ATTRIBUTE
static boolean_t
flow_ipv6_ula_match(struct netif_agent_flow *naf, struct nx_flow_req *nfr)
{
	struct in6_addr *s1, *s2, *d1, *d2;

	if (naf->naf_pid != nfr->nfr_pid) {
		DTRACE_SKYWALK2(pid__mismatch, pid_t, naf->naf_pid,
		    pid_t, nfr->nfr_pid);
		return FALSE;
	}
	if ((naf->naf_flags & NXFLOWREQF_IPV6_ULA) == 0) {
		DTRACE_SKYWALK1(type__mismatch, uint16_t, naf->naf_flags);
		return FALSE;
	}
	s1 = &naf->naf_saddr.sin6.sin6_addr;
	s2 = &nfr->nfr_saddr.sin6.sin6_addr;
	if (!IN6_ARE_ADDR_EQUAL(s1, s2)) {
		DTRACE_SKYWALK2(saddr__mismatch, struct in6_addr *, s1,
		    struct in6_addr *, s2);
		return FALSE;
	}
	d1 = &naf->naf_daddr.sin6.sin6_addr;
	d2 = &nfr->nfr_daddr.sin6.sin6_addr;
	if (!IN6_ARE_ADDR_EQUAL(d1, d2)) {
		DTRACE_SKYWALK2(daddr__mismatch, struct in6_addr *, d1,
		    struct in6_addr *, d2);
		return FALSE;
	}
	return TRUE;
}

static uint16_t forbidden_ethertypes[] = {
	ETHERTYPE_IP,
	ETHERTYPE_ARP,
	ETHERTYPE_REVARP,
	ETHERTYPE_VLAN,
	ETHERTYPE_IPV6,
	ETHERTYPE_PAE,
	ETHERTYPE_RSN_PREAUTH,
};
#define FORBIDDEN_ETHERTYPES \
	(sizeof(forbidden_ethertypes) / sizeof(forbidden_ethertypes[0]))

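/*
 * Reject EtherTypes that collide with protocols handled by the stack
 * itself, as well as any value <= ETHERMTU, which IEEE 802.3 would
 * interpret as a frame length rather than a type.
 */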
SK_NO_INLINE_ATTRIBUTE
static int
validate_ethertype(uint16_t ethertype)
{
	uint32_t i;

	for (i = 0; i < FORBIDDEN_ETHERTYPES; i++) {
		if (forbidden_ethertypes[i] == ethertype) {
			SK_ERR("ethertype 0x%x not allowed", ethertype);
			return EINVAL;
		}
	}
	if (ethertype <= ETHERMTU) {
		SK_ERR("ethertype <= ETHERMTU");
		return EINVAL;
	}
	return 0;
}

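/*
 * Build the optional netif_port_info payload passed along with the port
 * bind for custom-ether and IPv6 ULA flows. Leaves *npip untouched and
 * returns 0 when the request carries neither flow type.
 */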
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_fill_port_info(struct nx_netif *nif, struct nx_flow_req *nfr,
    struct netif_port_info **npip)
{
	struct netif_flow_desc *fd;
	struct netif_port_info *npi;
	struct netif_stats *nifs = &nif->nif_stats;
	uint32_t stat;
	int err;

	if ((nfr->nfr_flags & (NXFLOWREQF_CUSTOM_ETHER |
	    NXFLOWREQF_IPV6_ULA)) == 0) {
		return 0;
	}
	npi = sk_alloc_data(sizeof(*npi), Z_WAITOK | Z_NOFAIL,
	    skmem_tag_nx_port_info);
	npi->npi_hdr.ih_type = NX_PORT_INFO_TYPE_NETIF;
	npi->npi_hdr.ih_size = sizeof(*npi);

	fd = &npi->npi_fd;
	if ((nfr->nfr_flags & NXFLOWREQF_CUSTOM_ETHER) != 0) {
		if ((err = validate_ethertype(nfr->nfr_ethertype)) != 0) {
			stat = NETIF_STATS_AGENT_BAD_ETHERTYPE;
			goto fail;
		}
		fd->fd_ethertype = nfr->nfr_ethertype;
	}
	if ((nfr->nfr_flags & NXFLOWREQF_IPV6_ULA) != 0) {
		struct sockaddr_in6 *sin6;

		sin6 = &nfr->nfr_saddr.sin6;
		if ((err = validate_ipv6_sockaddr(sin6)) != 0) {
			stat = NETIF_STATS_AGENT_BAD_IPV6_ADDR;
			goto fail;
		}
		fd->fd_laddr = sin6->sin6_addr;

		sin6 = &nfr->nfr_daddr.sin6;
		if ((err = validate_ipv6_sockaddr(sin6)) != 0) {
			stat = NETIF_STATS_AGENT_BAD_IPV6_ADDR;
			goto fail;
		}
		fd->fd_raddr = sin6->sin6_addr;
	}
	*npip = npi;
	return 0;
fail:
	STATS_INC(nifs, stat);
	if (npi != NULL) {
		sk_free_data(npi, sizeof(*npi));
	}
	return err;
}

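/*
 * Bind the flow to a nexus port on behalf of the requesting process,
 * keyed by a freshly generated random UUID. On success the bind key
 * and port are returned through the flow request so the client can
 * later claim the port on channel open.
 */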
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_flow_bind(struct nx_netif *nif, struct nx_flow_req *nfr)
{
	uuid_t uuid_key;
	nexus_port_t nx_port;
	struct nxbind nxb;
	struct proc *p;
	struct kern_nexus *nx = nif->nif_nx;
	struct netif_port_info *__single npi = NULL;
	pid_t pid = nfr->nfr_pid;
	int err;
#if SK_LOG
	uuid_string_t uuidstr;
#endif /* SK_LOG */

	if ((nfr->nfr_flags & NXFLOWREQF_LISTENER) != 0) {
		return ENOTSUP;
	}
	p = proc_find(pid);
	if (p == PROC_NULL) {
		SK_ERR("process for pid %d doesn't exist", pid);
		return EINVAL;
	}
	nfr->nfr_proc = p;
	uuid_generate_random(uuid_key);
	bzero(&nxb, sizeof(nxb));
	nxb.nxb_flags |= NXBF_MATCH_UNIQUEID;
	nxb.nxb_uniqueid = proc_uniqueid(p);
	nxb.nxb_pid = pid;
	nxb.nxb_flags |= NXBF_MATCH_KEY;
	nxb.nxb_key = sk_alloc_data(sizeof(uuid_key), Z_WAITOK | Z_NOFAIL,
	    skmem_tag_nx_key);
	nxb.nxb_key_len = sizeof(uuid_key);
	bcopy(uuid_key, nxb.nxb_key, nxb.nxb_key_len);

	err = nx_netif_netagent_fill_port_info(nif, nfr, &npi);
	if (err != 0) {
		sk_free_data_sized_by(nxb.nxb_key, nxb.nxb_key_len);
		nfr->nfr_proc = NULL;
		proc_rele(p);
		return err;
	}
	/*
	 * The callee holds on to nxb_key on success; no need to free it here.
	 */
	nx_port = NEXUS_PORT_ANY;
	err = NX_DOM(nx)->nxdom_bind_port(nx, &nx_port, &nxb, npi);
	if (err != 0) {
		sk_free_data_sized_by(nxb.nxb_key, nxb.nxb_key_len);
		if (npi != NULL) {
			sk_free_data(npi, sizeof(*npi));
		}
		nfr->nfr_proc = NULL;
		proc_rele(p);
		SK_ERR("%s(%d) failed to bind flow_uuid %s to a "
		    "nx_port (err %d)", sk_proc_name(p),
		    pid, sk_uuid_unparse(nfr->nfr_flow_uuid,
		    uuidstr), err);
		return err;
	}
	bcopy(uuid_key, nfr->nfr_bind_key, sizeof(uuid_key));
	nfr->nfr_nx_port = nx_port;
	nfr->nfr_proc = NULL;
	proc_rele(p);
	return 0;
}

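/*
 * Release the nexus port held by a connected flow. Listener flows never
 * bound a port, so unbinding them is not supported.
 */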
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_flow_unbind(struct nx_netif *nif, struct nx_flow_req *nfr)
{
	int err;
	struct kern_nexus *nx = nif->nif_nx;

	if ((nfr->nfr_flags & NXFLOWREQF_LISTENER) != 0) {
		return ENOTSUP;
	}
	err = NX_DOM(nx)->nxdom_unbind_port(nif->nif_nx, nfr->nfr_nx_port);
	if (err != 0) {
		SK_ERR("nxdom_unbind_port failed: %d", err);
		return err;
	}
	return 0;
}

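/*
 * Validate that the requested flow type is supported by this netif and
 * that mutually exclusive flow types are not combined in one request.
 */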
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_check_flags(struct nx_netif *nif, struct nx_flow_req *nfr,
    boolean_t add)
{
	uint32_t flags = nfr->nfr_flags;

	if ((nif->nif_agent_flags & NETIF_AGENT_FLAG_ADDED) == 0) {
		SK_ERR("no agent added");
		return ENOTSUP;
	}
	if ((flags & NXFLOWREQF_FILTER) != 0) {
		if ((flags & ~NXFLOWREQF_FILTER) != 0) {
			SK_ERR("filter: incompatible with other features");
			return EINVAL;
		}
		if ((nif->nif_filter_flags &
		    NETIF_FILTER_FLAG_INITIALIZED) == 0) {
			SK_ERR("filter: uninitialized");
			return ENOTSUP;
		}
	}
	if ((flags & NXFLOWREQF_CUSTOM_ETHER) != 0) {
		if ((flags & ~NXFLOWREQF_CUSTOM_ETHER) != 0) {
			SK_ERR("custom ether: incompatible "
			    "with other features");
			return EINVAL;
		}
		if ((nif->nif_flow_flags &
		    NETIF_FLOW_FLAG_INITIALIZED) == 0) {
			SK_ERR("custom ether: uninitialized");
			return ENOTSUP;
		}
	}
	if ((flags & NXFLOWREQF_IPV6_ULA) != 0) {
		if ((flags & ~(NXFLOWREQF_IPV6_ULA | NXFLOWREQF_LISTENER)) != 0) {
			SK_ERR("IPv6 ULA: incompatible with other features");
			return EINVAL;
		}
		if (!NETIF_IS_LOW_LATENCY(nif)) {
			SK_ERR("IPv6 ULA: not supported on this nexus");
			return ENOTSUP;
		}
	}
	if (add && (flags & (NXFLOWREQF_FILTER | NXFLOWREQF_CUSTOM_ETHER |
	    NXFLOWREQF_IPV6_ULA)) == 0) {
		SK_ERR("flow type must be specified");
		return EINVAL;
	}
	return 0;
}

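/*
 * Listener flows do not bind a nexus port; they only pick up the local
 * MAC address and a freshly generated ULA source address for later use.
 */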
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_listener_flow_add(struct nx_netif *nif,
    struct nx_flow_req *nfr)
{
	int err;

	if ((nfr->nfr_flags & NXFLOWREQF_IPV6_ULA) == 0) {
		SK_ERR("listener flow not supported");
		return ENOTSUP;
	}
	err = get_mac_addr(nif, &nfr->nfr_etheraddr);
	if (err != 0) {
		SK_ERR("get mac addr failed: %d", err);
		return err;
	}
	get_ipv6_sockaddr(&nfr->nfr_saddr.sin6);
	return 0;
}

/*
 * This handles the case where the same flow (same IPv6
 * local_addr:remote_addr tuple) is added twice. Instead of failing the
 * second flow add, we return the existing flow's nexus port. This
 * allows libnetcore to reuse the existing channel instead of opening
 * a new one. Note that sidecar is not affected by this because it always
 * adds flows with unique addresses.
 */
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_flow_find(struct nx_netif *nif,
    struct nx_flow_req *nfr)
{
	struct netif_agent_flow *naf;

	/* Only support llw flows */
	if ((nfr->nfr_flags & NXFLOWREQF_IPV6_ULA) == 0) {
		return ENOTSUP;
	}
	lck_mtx_lock(&nif->nif_agent_lock);
	SLIST_FOREACH(naf, &nif->nif_agent_flow_list, naf_link) {
		if (flow_ipv6_ula_match(naf, nfr)) {
			break;
		}
	}
	if (naf == NULL) {
		DTRACE_SKYWALK2(dupflow__not__found, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr);
		lck_mtx_unlock(&nif->nif_agent_lock);
		return ENOENT;
	}
	nfr->nfr_nx_port = naf->naf_nx_port;
	uuid_copy(nfr->nfr_bind_key, naf->naf_bind_key);
	lck_mtx_unlock(&nif->nif_agent_lock);
	return 0;
}

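/*
 * Snapshot the bound flow's identifiers into the agent flow record.
 * Addresses are kept only for llw (IPv6 ULA) flows, since those are
 * the only flows matched for duplicates.
 */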
SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_netagent_fill_flow_info(struct netif_agent_flow *naf,
    struct nx_flow_req *nfr)
{
	uuid_copy(naf->naf_flow_uuid, nfr->nfr_flow_uuid);
	uuid_copy(naf->naf_bind_key, nfr->nfr_bind_key);
	naf->naf_nx_port = nfr->nfr_nx_port;
	naf->naf_flags = nfr->nfr_flags;
	naf->naf_pid = nfr->nfr_pid;

	/* We only keep flow info for llw flows */
	if ((naf->naf_flags & NXFLOWREQF_IPV6_ULA) != 0) {
		naf->naf_saddr = nfr->nfr_saddr;
		naf->naf_daddr = nfr->nfr_daddr;
	}
}

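/*
 * Add a netagent flow: validate the request, return the existing port
 * for duplicate llw flows, handle listeners separately, then bind a
 * nexus port and record the flow on the agent flow list.
 */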
int
nx_netif_netagent_flow_add(struct nx_netif *nif, struct nx_flow_req *nfr)
{
	int err;
	struct netif_agent_flow *naf;
	struct netif_stats *nifs = &nif->nif_stats;

	err = nx_netif_netagent_check_flags(nif, nfr, TRUE);
	if (err != 0) {
		SK_ERR("flow request inconsistent with current config");
		DTRACE_SKYWALK3(invalid__flags, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr, int, err);
		return err;
	}
	err = nx_netif_netagent_flow_find(nif, nfr);
	if (err == 0) {
		SK_ERR("found existing flow: nx_port = %d", nfr->nfr_nx_port);
		DTRACE_SKYWALK2(found__flow, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr);
		STATS_INC(nifs, NETIF_STATS_AGENT_DUP_FLOW);
		return 0;
	}
	if ((nfr->nfr_flags & NXFLOWREQF_LISTENER) != 0) {
		return nx_netif_netagent_listener_flow_add(nif, nfr);
	}
	naf = sk_alloc_type(struct netif_agent_flow, Z_WAITOK | Z_NOFAIL,
	    skmem_tag_netif_agent_flow);

	if ((nfr->nfr_flags &
	    (NXFLOWREQF_CUSTOM_ETHER | NXFLOWREQF_IPV6_ULA)) != 0) {
		err = get_mac_addr(nif, &nfr->nfr_etheraddr);
		if (err != 0) {
			SK_ERR("get mac addr failed: %d", err);
			sk_free_type(struct netif_agent_flow, naf);
			return err;
		}
	}
	lck_mtx_lock(&nif->nif_agent_lock);
	err = nx_netif_netagent_flow_bind(nif, nfr);
	if (err != 0) {
		SK_ERR("netagent flow bind failed: %d", err);
		DTRACE_SKYWALK3(bind__failed, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr, int, err);
		sk_free_type(struct netif_agent_flow, naf);
		lck_mtx_unlock(&nif->nif_agent_lock);
		return err;
	}
	nx_netif_netagent_fill_flow_info(naf, nfr);
	SLIST_INSERT_HEAD(&nif->nif_agent_flow_list, naf, naf_link);
	nif->nif_agent_flow_cnt++;

#if SK_LOG
	uuid_string_t uuidstr;

	SK_DF(SK_VERB_NETIF, "flow uuid: %s",
	    sk_uuid_unparse(naf->naf_flow_uuid, uuidstr));
	SK_DF(SK_VERB_NETIF, "nx port: %d", naf->naf_nx_port);
	SK_DF(SK_VERB_NETIF, "nx key: %s",
	    sk_uuid_unparse(nfr->nfr_bind_key, uuidstr));

	if ((nfr->nfr_flags & NXFLOWREQF_FILTER) != 0) {
		SK_DF(SK_VERB_NETIF, "flow type: filter");
	}
	if ((nfr->nfr_flags & NXFLOWREQF_CUSTOM_ETHER) != 0) {
		SK_DF(SK_VERB_NETIF, "flow type: custom ether");
		SK_DF(SK_VERB_NETIF, "ethertype: 0x%x", nfr->nfr_ethertype);
	}
	if ((nfr->nfr_flags & NXFLOWREQF_IPV6_ULA) != 0) {
		char local[MAX_IPv6_STR_LEN];
		char remote[MAX_IPv6_STR_LEN];

		SK_DF(SK_VERB_NETIF, "flow type: IPv6 ULA");
		SK_DF(SK_VERB_NETIF, "IPv6 local: %s",
		    sk_ntop(AF_INET6, &nfr->nfr_saddr.sin6.sin6_addr,
		    local, sizeof(local)));
		SK_DF(SK_VERB_NETIF, "IPv6 remote: %s",
		    sk_ntop(AF_INET6, &nfr->nfr_daddr.sin6.sin6_addr,
		    remote, sizeof(remote)));
	}
#endif /* SK_LOG */
	lck_mtx_unlock(&nif->nif_agent_lock);
	return 0;
}

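/*
 * Delete a netagent flow by UUID. Listener deletes are no-ops since
 * listeners never bound a nexus port.
 */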
int
nx_netif_netagent_flow_del(struct nx_netif *nif, struct nx_flow_req *nfr)
{
	int err;
	struct netif_agent_flow *naf = NULL;

	err = nx_netif_netagent_check_flags(nif, nfr, FALSE);
	if (err != 0) {
		SK_ERR("flow request inconsistent with current config");
		DTRACE_SKYWALK3(invalid__flags, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr, int, err);
		return err;
	}

	/* no-op for listener */
	if ((nfr->nfr_flags & NXFLOWREQF_LISTENER) != 0) {
		DTRACE_SKYWALK2(listener, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr);
		return 0;
	}
	lck_mtx_lock(&nif->nif_agent_lock);
	SLIST_FOREACH(naf, &nif->nif_agent_flow_list, naf_link) {
		if (uuid_compare(naf->naf_flow_uuid, nfr->nfr_flow_uuid) == 0) {
			break;
		}
	}
	if (naf == NULL) {
		SK_ERR("netagent flow not found");
		DTRACE_SKYWALK2(flow__not__found, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr);
		lck_mtx_unlock(&nif->nif_agent_lock);
		return ENOENT;
	}
	/* use the port from the agent flow, not the request */
	nfr->nfr_nx_port = naf->naf_nx_port;

	err = nx_netif_netagent_flow_unbind(nif, nfr);
	if (err != 0) {
		SK_ERR("netagent flow unbind failed: %d", err);
		DTRACE_SKYWALK3(unbind__failed, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr, int, err);
		/*
		 * The channel has already closed the port automatically,
		 * so this failure is not fatal; we can still clean up
		 * our agent flow.
		 */
	}
	SLIST_REMOVE(&nif->nif_agent_flow_list, naf, netif_agent_flow,
	    naf_link);
	sk_free_type(struct netif_agent_flow, naf);
	nif->nif_agent_flow_cnt--;
	lck_mtx_unlock(&nif->nif_agent_lock);
	return 0;
}

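/*
 * Drop every agent flow record; called from nx_netif_agent_fini() when
 * the interface detaches.
 */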
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_agent_flow_purge(struct nx_netif *nif)
{
	struct netif_agent_flow *naf, *naf_tmp;
	uint32_t cnt = 0;

	lck_mtx_lock(&nif->nif_agent_lock);
	SLIST_FOREACH_SAFE(naf, &nif->nif_agent_flow_list, naf_link, naf_tmp) {
		SLIST_REMOVE(&nif->nif_agent_flow_list, naf, netif_agent_flow,
		    naf_link);
		/*
		 * Since this gets called during detach, all ports will be
		 * unbound and freed by the nexus cleanup path. Nothing to
		 * do here.
		 */
		sk_free_type(struct netif_agent_flow, naf);
		cnt++;
	}
	SK_DF(SK_VERB_NETIF, "agent flows purged: %d", cnt);
	DTRACE_SKYWALK2(agent__flows__purge, struct nx_netif *, nif,
	    uint32_t, cnt);
	ASSERT(nif->nif_agent_flow_cnt == cnt);
	nif->nif_agent_flow_cnt = 0;
	lck_mtx_unlock(&nif->nif_agent_lock);
	return 0;
}

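/*
 * NECP handler for interpose (filter) flows: add a filter flow, then
 * return a nexus assign message carrying the port and bind key.
 */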
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_handle_interpose_flow_add(struct nx_netif *nif,
    uuid_t flow_uuid, pid_t pid, struct necp_client_nexus_parameters *cparams,
    void * __sized_by(*results_length) *results, size_t *results_length)
{
#pragma unused(cparams)
	int err;
	struct nx_flow_req nfr;
	void *message;
	size_t len;

	bzero(&nfr, sizeof(nfr));
	uuid_copy(nfr.nfr_flow_uuid, flow_uuid);
	nfr.nfr_pid = pid;
	nfr.nfr_nx_port = NEXUS_PORT_ANY;
	nfr.nfr_flags |= NXFLOWREQF_FILTER;

	err = nx_netif_netagent_flow_add(nif, &nfr);
	if (err != 0) {
		return err;
	}
	message =
	    necp_create_nexus_assign_message(nif->nif_nx->nx_uuid,
	    nfr.nfr_nx_port, nfr.nfr_bind_key, sizeof(nfr.nfr_bind_key),
	    NULL, NULL, NULL, 0, NULL, 0, &len);
	if (message == NULL) {
		(void) nx_netif_netagent_flow_del(nif, &nfr);
		return ENOMEM;
	}
	*results = message;
	*results_length = len;
	return 0;
}

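/*
 * NECP handler for custom EtherType flows: like the interpose case, but
 * the flow carries the client-requested EtherType and the assign message
 * additionally returns the local MAC address.
 */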
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_handle_custom_ether_flow_add(struct nx_netif *nif,
    uuid_t flow_uuid, pid_t pid, struct necp_client_nexus_parameters *cparams,
    void * __sized_by(*results_length) *results, size_t *results_length)
{
	int err;
	struct nx_flow_req nfr;
	void *message;
	size_t len;

	bzero(&nfr, sizeof(nfr));
	uuid_copy(nfr.nfr_flow_uuid, flow_uuid);
	nfr.nfr_pid = pid;
	nfr.nfr_nx_port = NEXUS_PORT_ANY;
	nfr.nfr_ethertype = cparams->ethertype;
	nfr.nfr_flags |= NXFLOWREQF_CUSTOM_ETHER;

	err = nx_netif_netagent_flow_add(nif, &nfr);
	if (err != 0) {
		return err;
	}
	message =
	    necp_create_nexus_assign_message(nif->nif_nx->nx_uuid,
	    nfr.nfr_nx_port, nfr.nfr_bind_key, sizeof(nfr.nfr_bind_key),
	    NULL, NULL, &nfr.nfr_etheraddr, 0, NULL, 0, &len);
	if (message == NULL) {
		(void) nx_netif_netagent_flow_del(nif, &nfr);
		return ENOMEM;
	}
	*results = message;
	*results_length = len;
	return 0;
}

#define IS_V6_ADDR(addr) \
	((addr)->sin6.sin6_family == AF_INET6)

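/*
 * NECP handler for low-latency IPv6 ULA flows. Listener addresses are
 * generated when not supplied; connected flows must specify both the
 * local and remote endpoints.
 */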
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_handle_ipv6_ula_flow_add(struct nx_netif *nif,
    uuid_t flow_uuid, pid_t pid, struct necp_client_nexus_parameters *cparams,
    void *__sized_by(*results_length) *results, size_t *results_length)
{
	int err;
	struct nx_flow_req nfr;
	struct necp_client_endpoint local_endpoint;
	struct necp_client_endpoint remote_endpoint;
	void *message;
	size_t len;

	bzero(&nfr, sizeof(nfr));
	uuid_copy(nfr.nfr_flow_uuid, flow_uuid);
	nfr.nfr_pid = pid;
	nfr.nfr_nx_port = NEXUS_PORT_ANY;
	nfr.nfr_flags |= NXFLOWREQF_IPV6_ULA;
	if (cparams->is_listener) {
		/*
		 * Preserve input args if possible
		 */
		if (IS_V6_ADDR(&cparams->local_addr)) {
			bcopy(&cparams->local_addr,
			    &nfr.nfr_saddr, sizeof(nfr.nfr_saddr));
		}
		if (IS_V6_ADDR(&cparams->remote_addr)) {
			bcopy(&cparams->remote_addr,
			    &nfr.nfr_daddr, sizeof(nfr.nfr_daddr));
		}
		nfr.nfr_flags |= NXFLOWREQF_LISTENER;
	} else {
		/*
		 * Both local and remote addresses must be specified.
		 */
		if (!IS_V6_ADDR(&cparams->local_addr)) {
			SK_ERR("local addr missing");
			return EINVAL;
		}
		bcopy(&cparams->local_addr,
		    &nfr.nfr_saddr, sizeof(nfr.nfr_saddr));

		if (!IS_V6_ADDR(&cparams->remote_addr)) {
			SK_ERR("remote addr missing");
			return EINVAL;
		}
		bcopy(&cparams->remote_addr,
		    &nfr.nfr_daddr, sizeof(nfr.nfr_daddr));
	}
	err = nx_netif_netagent_flow_add(nif, &nfr);
	if (err != 0) {
		return err;
	}
	bzero(&local_endpoint, sizeof(local_endpoint));
	SOCKADDR_COPY(&nfr.nfr_saddr.sin6, &local_endpoint.u.sin6,
	    sizeof(local_endpoint.u.sin6));

	if (cparams->is_listener) {
		uuid_t zero_nx_uuid;

		bzero(zero_nx_uuid, sizeof(uuid_t));
		message = necp_create_nexus_assign_message(
		    zero_nx_uuid, NEXUS_PORT_ANY, NULL,
		    0, &local_endpoint, NULL,
		    &nfr.nfr_etheraddr, 0, NULL, 0, &len);
	} else {
		bzero(&remote_endpoint, sizeof(remote_endpoint));
		SOCKADDR_COPY(&nfr.nfr_daddr.sin6, &remote_endpoint.u.sin6,
		    sizeof(remote_endpoint.u.sin6));

		message = necp_create_nexus_assign_message(
		    nif->nif_nx->nx_uuid, nfr.nfr_nx_port, nfr.nfr_bind_key,
		    sizeof(nfr.nfr_bind_key), &local_endpoint,
		    &remote_endpoint, &nfr.nfr_etheraddr, 0, NULL, 0, &len);
	}
	if (message == NULL) {
		/* This is a no-op for the listener flow */
		(void) nx_netif_netagent_flow_del(nif, &nfr);
		return ENOMEM;
	}
	*results = message;
	*results_length = len;
	return 0;
}

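/*
 * Dispatch a NECP flow-insert request to the appropriate handler based
 * on the client parameters and the nexus type. Requests matching no
 * handler succeed without producing a result.
 */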
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_handle_flow_add(struct nx_netif *nif,
    uuid_t flow_uuid, pid_t pid, struct necp_client_nexus_parameters *cparams,
    void * __sized_by(*results_length) *results, size_t *results_length)
{
	int err = 0;

	ASSERT(cparams != NULL);
	ASSERT(results != NULL && *results == NULL);
	ASSERT(results_length != NULL && *results_length == 0);

	if (cparams->is_interpose) {
		err = nx_netif_netagent_handle_interpose_flow_add(nif,
		    flow_uuid, pid, cparams, results, results_length);
	} else if (cparams->is_custom_ether) {
		err = nx_netif_netagent_handle_custom_ether_flow_add(nif,
		    flow_uuid, pid, cparams, results, results_length);
	} else if (NETIF_IS_LOW_LATENCY(nif)) {
		err = nx_netif_netagent_handle_ipv6_ula_flow_add(nif,
		    flow_uuid, pid, cparams, results, results_length);
	}
	if (err != 0) {
		ASSERT(*results == NULL);
		ASSERT(*results_length == 0);
		return err;
	}
	return 0;
}

SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_handle_flow_del(struct nx_netif *nif,
    uuid_t flow_uuid, pid_t pid, boolean_t abort)
{
#pragma unused(abort)
	struct nx_flow_req nfr;

	bzero(&nfr, sizeof(nfr));
	uuid_copy(nfr.nfr_flow_uuid, flow_uuid);
	nfr.nfr_pid = pid;
	return nx_netif_netagent_flow_del(nif, &nfr);
}

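/*
 * netagent event callback: route NECP nexus flow insert/remove/abort
 * events to the handlers above; all other events are ignored.
 */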
static int
nx_netif_netagent_event(u_int8_t event, uuid_t flow_uuid, pid_t pid,
    void *context, void *ctx, struct necp_client_agent_parameters *cparams,
    void * __sized_by(*results_length) *results, size_t *results_length)
{
#pragma unused(context)
	struct nx_netif *nif;
	int err = 0;

	nif = (struct nx_netif *)ctx;
	ASSERT(nif != NULL);

	switch (event) {
	case NETAGENT_EVENT_NEXUS_FLOW_INSERT:
		/* these are required for this event */
		ASSERT(cparams != NULL);
		ASSERT(results != NULL);
		ASSERT(results_length != NULL);
		*results = NULL;
		*results_length = 0;
		err = nx_netif_netagent_handle_flow_add(nif, flow_uuid, pid,
		    &cparams->u.nexus_request, results, results_length);
		break;

	case NETAGENT_EVENT_NEXUS_FLOW_REMOVE:
	case NETAGENT_EVENT_NEXUS_FLOW_ABORT:
		err = nx_netif_netagent_handle_flow_del(nif, flow_uuid, pid,
		    (event == NETAGENT_EVENT_NEXUS_FLOW_REMOVE));
		break;

	default:
		/* events not handled */
		return 0;
	}

	return err;
}

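/*
 * Create a netagent session and register a "Skywalk/NetIf" agent with
 * the requested feature flags. Registration can be disabled via the
 * kern.skywalk.netif.netagent sysctl on DEVELOPMENT/DEBUG kernels.
 */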
static int
nx_netif_agent_register(struct nx_netif *nif, uint32_t features)
{
	struct netagent_nexus_agent agent;
	int err = 0;

	static_assert(FLOWADV_IDX_NONE == UINT32_MAX);
	static_assert(NECP_FLOWADV_IDX_INVALID == FLOWADV_IDX_NONE);

	if (!nif_netagent) {
		return ENOTSUP;
	}
	nif->nif_agent_session = netagent_create(&nx_netif_netagent_event, nif);
	if (nif->nif_agent_session == NULL) {
		return ENOMEM;
	}

	bzero(&agent, sizeof(agent));
	uuid_generate_random(agent.agent.netagent_uuid);
	uuid_copy(nif->nif_agent_uuid, agent.agent.netagent_uuid);
	(void) snprintf(agent.agent.netagent_domain,
	    sizeof(agent.agent.netagent_domain), "%s", "Skywalk");
	(void) snprintf(agent.agent.netagent_type,
	    sizeof(agent.agent.netagent_type), "%s", "NetIf");
	(void) snprintf(agent.agent.netagent_desc,
	    sizeof(agent.agent.netagent_desc), "%s", "Userspace Networking");

	agent.agent.netagent_flags =
	    (NETAGENT_FLAG_ACTIVE | NETAGENT_FLAG_NEXUS_LISTENER | features);

	agent.agent.netagent_data_size = sizeof(struct netagent_nexus);
	agent.nexus_data.frame_type = NETAGENT_NEXUS_FRAME_TYPE_LINK;
	agent.nexus_data.endpoint_assignment_type =
	    NETAGENT_NEXUS_ENDPOINT_TYPE_ADDRESS;
	agent.nexus_data.endpoint_request_types[0] =
	    NETAGENT_NEXUS_ENDPOINT_TYPE_ADDRESS;
	agent.nexus_data.nexus_flags |=
	    (NETAGENT_NEXUS_FLAG_ASSERT_UNSUPPORTED |
	    NETAGENT_NEXUS_FLAG_SUPPORTS_USER_PACKET_POOL);
	if (NETIF_IS_LOW_LATENCY(nif)) {
		agent.nexus_data.nexus_flags |=
		    NETAGENT_NEXUS_FLAG_SHOULD_USE_EVENT_RING;
	}

	if ((err = netagent_register(nif->nif_agent_session,
	    (struct netagent *)&agent)) != 0) {
		netagent_destroy(nif->nif_agent_session);
		nif->nif_agent_session = NULL;
		uuid_clear(nif->nif_agent_uuid);
		return err;
	}
	nif->nif_agent_flags |= NETIF_AGENT_FLAG_REGISTERED;
	return 0;
}

static void
nx_netif_agent_unregister(struct nx_netif *nif)
{
	if ((nif->nif_agent_flags & NETIF_AGENT_FLAG_REGISTERED) == 0) {
		return;
	}
	nif->nif_agent_flags &= ~NETIF_AGENT_FLAG_REGISTERED;

	ASSERT(nif->nif_agent_session != NULL);
	netagent_destroy(nif->nif_agent_session);
	nif->nif_agent_session = NULL;
	uuid_clear(nif->nif_agent_uuid);
}

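/*
 * Derive the netagent feature flags from the netif's initialized
 * subsystems: filters enable interpose, while the flow subsystem
 * enables either the low-latency nexus provider or custom ether.
 */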
static uint32_t
nx_netif_agent_get_features(struct nx_netif *nif)
{
	uint32_t features = 0;

	if ((nif->nif_filter_flags & NETIF_FILTER_FLAG_INITIALIZED) != 0) {
		features |= (NETAGENT_FLAG_INTERPOSE_NEXUS |
		    NETAGENT_FLAG_NETWORK_PROVIDER);
	}
	if ((nif->nif_flow_flags & NETIF_FLOW_FLAG_INITIALIZED) != 0) {
		if (NETIF_IS_LOW_LATENCY(nif)) {
			features |= NETAGENT_FLAG_NEXUS_PROVIDER;
		} else {
			features |= NETAGENT_FLAG_CUSTOM_ETHER_NEXUS;
		}
		features |= NETAGENT_FLAG_NETWORK_PROVIDER;
	}
	return features;
}

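/*
 * Register the netagent and attach it to the interface. A netif that
 * supports no agent features skips agent setup entirely.
 */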
void
nx_netif_agent_init(struct nx_netif *nif)
{
	int err;
	ifnet_t ifp = nif->nif_ifp;
	uint32_t features = 0;

	ASSERT(ifp != NULL);
	features = nx_netif_agent_get_features(nif);
	if (features == 0) {
		SK_DF(SK_VERB_NETIF, "%s: no feature supported", if_name(ifp));
		return;
	}
	ASSERT(nif->nif_agent_flags == 0);
	lck_mtx_init(&nif->nif_agent_lock, &nexus_lock_group, &nexus_lock_attr);

	SLIST_INIT(&nif->nif_agent_flow_list);
	nif->nif_agent_flow_cnt = 0;

	err = nx_netif_agent_register(nif, features);
	if (err != 0) {
		SK_DF(SK_VERB_ERROR, "%s: agent register failed: err %d",
		    if_name(ifp), err);
		return;
	}
	ASSERT(!uuid_is_null(nif->nif_agent_uuid));
	err = if_add_netagent_locked(ifp, nif->nif_agent_uuid);
	if (err != 0) {
		nx_netif_agent_unregister(nif);
		SK_DF(SK_VERB_ERROR, "%s: agent add failed: err %d",
		    if_name(ifp), err);
		return;
	}
	nif->nif_agent_flags |= NETIF_AGENT_FLAG_ADDED;

	SK_DF(SK_VERB_NETIF, "%s: agent init complete", if_name(ifp));
}

void
nx_netif_agent_fini(struct nx_netif *nif)
{
	ifnet_t ifp = nif->nif_ifp;

	ASSERT(ifp != NULL);
	if ((nif->nif_agent_flags & NETIF_AGENT_FLAG_ADDED) == 0) {
		SK_DF(SK_VERB_NETIF, "%s: no agent added", if_name(ifp));
		return;
	}
	nif->nif_agent_flags &= ~NETIF_AGENT_FLAG_ADDED;
	ASSERT(!uuid_is_null(nif->nif_agent_uuid));
	if_delete_netagent(ifp, nif->nif_agent_uuid);

	nx_netif_agent_unregister(nif);

	/*
	 * XXX
	 * This is asymmetrical with nx_netif_agent_init(). But we have to
	 * clean up here because the interface is detaching.
	 */
	nx_netif_agent_flow_purge(nif);
	ASSERT(nif->nif_agent_flow_cnt == 0);
	ASSERT(SLIST_EMPTY(&nif->nif_agent_flow_list));
	lck_mtx_destroy(&nif->nif_agent_lock, &nexus_lock_group);
	SK_DF(SK_VERB_NETIF, "%s: agent fini complete", if_name(ifp));
}