/*
 * Copyright (c) 2019-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <libkern/crypto/sha1.h>
#include <sys/sdt.h>

#define NETIF_AGENT_FLOW_MAX            16

/* automatically register a netagent at constructor time */
static int nif_netagent = 1;

#if (DEVELOPMENT || DEBUG)
SYSCTL_INT(_kern_skywalk_netif, OID_AUTO, netagent,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nif_netagent, 0, "");
#endif /* DEVELOPMENT || DEBUG */

SK_NO_INLINE_ATTRIBUTE
static int
get_mac_addr(struct nx_netif *nif, struct ether_addr *addr)
{
	struct ifnet *ifp = nif->nif_ifp;
	struct ifaddr *lladdr;

	ASSERT(ifp != NULL);
	lladdr = ifp->if_lladdr;

	if (SDL(lladdr->ifa_addr)->sdl_alen == ETHER_ADDR_LEN &&
	    SDL(lladdr->ifa_addr)->sdl_type == IFT_ETHER) {
		ifnet_lladdr_copy_bytes(ifp, addr, ETHER_ADDR_LEN);
		return 0;
	}
	return ENOTSUP;
}

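/*
 * Next interface ID to embed in a generated ULA; monotonically
 * increasing and never zero (see get_ipv6_ula() below).
 */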
static uint64_t ipv6_ula_interface_id = 1;

/*
 * Generate an IPv6 ULA based on RFC 4193.
 */
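/*
 * Resulting address layout (subnet ID hardcoded to 0, see below):
 *   byte 0       0xfd (fc00::/7 prefix with the local bit set)
 *   bytes 1..5   40-bit global ID (low 5 bytes of a SHA-1 digest)
 *   bytes 6..7   subnet ID, always 0
 *   bytes 8..15  monotonically increasing interface ID
 */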
SK_NO_INLINE_ATTRIBUTE
static int
get_ipv6_ula(struct nx_netif *nif, struct in6_addr *addr)
{
	int err;
	ether_addr_t ether_addr;
	uint8_t buf[16], *octet;
	uint8_t *eui64 = &buf[8];
	uint8_t digest[SHA1_RESULTLEN];
	uint64_t timestamp, interface_id;
	SHA1_CTX ctx;

	err = get_mac_addr(nif, &ether_addr);
	if (err != 0) {
		SK_ERR("cannot get mac addr: %d", err);
		return err;
	}
	/* Concatenate a timestamp with an EUI-64 */
	timestamp = mach_absolute_time();
	bcopy(&timestamp, buf, sizeof(timestamp));

	_CASSERT(ETHER_ADDR_LEN == 6);
	octet = ether_addr.octet;
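	/* Modified EUI-64: MAC split in half with 0xff, 0xfe inserted */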
	eui64[0] = octet[0];
	eui64[1] = octet[1];
	eui64[2] = octet[2];
	eui64[3] = 0xff;
	eui64[4] = 0xfe;
	eui64[5] = octet[3];
	eui64[6] = octet[4];
	eui64[7] = octet[5];

	/* Generate SHA1 digest */
	SHA1Init(&ctx);
	SHA1Update(&ctx, buf, 16);
	SHA1Final(digest, &ctx);

	/* Reuse buf for generating the address */
	bzero(buf, sizeof(buf));

	/* Start with the 0xfc prefix with the local bit set */
	buf[0] = 0xfd;

	/* Copy the least significant 40 bits, digest[15..19] */
	_CASSERT(SHA1_RESULTLEN == 20);
	bcopy(&digest[15], &buf[1], 5);

	/* Hardcode subnet number to 0 */
	buf[6] = 0;
	buf[7] = 0;

	/* Use a monotonically increasing interface ID */
	interface_id = htonll(ipv6_ula_interface_id);
	bcopy(&interface_id, &buf[8], sizeof(uint64_t));
	do {
		ipv6_ula_interface_id++;
	} while (ipv6_ula_interface_id == 0);

	/* Return the generated address */
	_CASSERT(sizeof(buf) == sizeof(struct in6_addr));
	bcopy(buf, addr, sizeof(struct in6_addr));

#if SK_LOG
	char addrbuf[MAX_IPv6_STR_LEN];
	SK_DF(SK_VERB_NETIF, "generated IPv6 address: %s",
	    inet_ntop(AF_INET6, addr, addrbuf, sizeof(addrbuf)));
#endif /* SK_LOG */
	return 0;
}

SK_NO_INLINE_ATTRIBUTE
static int
get_ipv6_sockaddr(struct nx_netif *nif, struct sockaddr_in6 *sin6)
{
	int err;

	sin6->sin6_len = sizeof(struct sockaddr_in6);
	sin6->sin6_family = AF_INET6;
	err = get_ipv6_ula(nif, &sin6->sin6_addr);
	if (err != 0) {
		SK_ERR("get_ipv6_ula failed: %d", err);
		return err;
	}
	return 0;
}

SK_NO_INLINE_ATTRIBUTE
static int
validate_ipv6_sockaddr(struct sockaddr_in6 *sin6)
{
	if (sin6->sin6_family != AF_INET6) {
		SK_ERR("invalid source family");
		return EINVAL;
	}
	if (sin6->sin6_len != sizeof(struct sockaddr_in6)) {
		SK_ERR("invalid source length");
		return EINVAL;
	}
	/*
	 * XXX
	 * We should use the stricter check IN6_IS_ADDR_UNIQUE_LOCAL().
	 * Leaving this as is for now because this gives us more
	 * flexibility on what addresses can be used for testing.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
		SK_ERR("address unspecified");
		return EINVAL;
	}
	return 0;
}

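/*
 * Returns TRUE if an existing agent flow matches the request: same pid,
 * IPv6 ULA flow type, and identical source and destination addresses.
 */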
SK_NO_INLINE_ATTRIBUTE
static boolean_t
flow_ipv6_ula_match(struct netif_agent_flow *naf, struct nx_flow_req *nfr)
{
	struct in6_addr *s1, *s2, *d1, *d2;

	if (naf->naf_pid != nfr->nfr_pid) {
		DTRACE_SKYWALK2(pid__mismatch, pid_t, naf->naf_pid,
		    pid_t, nfr->nfr_pid);
		return FALSE;
	}
	if ((naf->naf_flags & NXFLOWREQF_IPV6_ULA) == 0) {
		DTRACE_SKYWALK1(type__mismatch, uint16_t, naf->naf_flags);
		return FALSE;
	}
	s1 = &naf->naf_saddr.sin6.sin6_addr;
	s2 = &nfr->nfr_saddr.sin6.sin6_addr;
	if (!IN6_ARE_ADDR_EQUAL(s1, s2)) {
		DTRACE_SKYWALK2(saddr__mismatch, struct in6_addr *, s1,
		    struct in6_addr *, s2);
		return FALSE;
	}
	d1 = &naf->naf_daddr.sin6.sin6_addr;
	d2 = &nfr->nfr_daddr.sin6.sin6_addr;
	if (!IN6_ARE_ADDR_EQUAL(d1, d2)) {
		DTRACE_SKYWALK2(daddr__mismatch, struct in6_addr *, d1,
		    struct in6_addr *, d2);
		return FALSE;
	}
	return TRUE;
}

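/*
 * EtherTypes that a custom ether flow may not claim; these belong to
 * protocols handled by the regular networking stack.
 */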
static uint16_t forbidden_ethertypes[] = {
	ETHERTYPE_IP,
	ETHERTYPE_ARP,
	ETHERTYPE_REVARP,
	ETHERTYPE_VLAN,
	ETHERTYPE_IPV6,
	ETHERTYPE_PAE,
	ETHERTYPE_RSN_PREAUTH,
};
#define FORBIDDEN_ETHERTYPES \
    (sizeof(forbidden_ethertypes) / sizeof(forbidden_ethertypes[0]))

SK_NO_INLINE_ATTRIBUTE
static int
validate_ethertype(uint16_t ethertype)
{
	uint32_t i;

	for (i = 0; i < FORBIDDEN_ETHERTYPES; i++) {
		if (forbidden_ethertypes[i] == ethertype) {
			SK_ERR("ethertype 0x%x not allowed", ethertype);
			return EINVAL;
		}
	}
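	/* Values <= ETHERMTU (1500) are 802.3 length fields, not EtherTypes */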
	if (ethertype <= ETHERMTU) {
		SK_ERR("ethertype <= ETHERMTU");
		return EINVAL;
	}
	return 0;
}

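/*
 * Builds the netif_port_info handed to the port bind. It carries a
 * netif_flow_desc holding the validated custom ethertype and/or the
 * IPv6 ULA local and remote addresses.
 */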
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_fill_port_info(struct nx_netif *nif, struct nx_flow_req *nfr,
    struct netif_port_info **npip)
{
	struct netif_flow_desc *fd;
	struct netif_port_info *npi;
	struct netif_stats *nifs = &nif->nif_stats;
	uint32_t stat;
	int err;

	if ((nfr->nfr_flags & (NXFLOWREQF_CUSTOM_ETHER |
	    NXFLOWREQF_IPV6_ULA)) == 0) {
		return 0;
	}
	npi = sk_alloc_data(sizeof(*npi), Z_WAITOK | Z_NOFAIL,
	    skmem_tag_nx_port_info);
	npi->npi_hdr.ih_type = NX_PORT_INFO_TYPE_NETIF;
	npi->npi_hdr.ih_size = sizeof(*npi);

	fd = &npi->npi_fd;
	if ((nfr->nfr_flags & NXFLOWREQF_CUSTOM_ETHER) != 0) {
		if ((err = validate_ethertype(nfr->nfr_ethertype)) != 0) {
			stat = NETIF_STATS_AGENT_BAD_ETHERTYPE;
			goto fail;
		}
		fd->fd_ethertype = nfr->nfr_ethertype;
	}
	if ((nfr->nfr_flags & NXFLOWREQF_IPV6_ULA) != 0) {
		struct sockaddr_in6 *sin6;

		sin6 = &nfr->nfr_saddr.sin6;
		if ((err = validate_ipv6_sockaddr(sin6)) != 0) {
			stat = NETIF_STATS_AGENT_BAD_IPV6_ADDR;
			goto fail;
		}
		fd->fd_laddr = sin6->sin6_addr;

		sin6 = &nfr->nfr_daddr.sin6;
		if ((err = validate_ipv6_sockaddr(sin6)) != 0) {
			stat = NETIF_STATS_AGENT_BAD_IPV6_ADDR;
			goto fail;
		}
		fd->fd_raddr = sin6->sin6_addr;
	}
	*npip = npi;
	return 0;
fail:
	STATS_INC(nifs, stat);
	if (npi != NULL) {
		sk_free_data(npi, sizeof(*npi));
	}
	return err;
}

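/*
 * Binds the flow to a nexus port. The bind is keyed to the requesting
 * process (unique ID and pid) and to a freshly generated random UUID,
 * returned in nfr_bind_key for the client to present when opening a
 * channel to the port.
 */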
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_flow_bind(struct nx_netif *nif, struct nx_flow_req *nfr)
{
	uuid_t uuid_key;
	nexus_port_t nx_port;
	struct nxbind nxb;
	struct proc *p;
	struct kern_nexus *nx = nif->nif_nx;
	struct netif_port_info *npi = NULL;
	pid_t pid = nfr->nfr_pid;
	int err;
#if SK_LOG
	uuid_string_t uuidstr;
#endif /* SK_LOG */

	if ((nfr->nfr_flags & NXFLOWREQF_LISTENER) != 0) {
		return ENOTSUP;
	}
	p = proc_find(pid);
	if (p == PROC_NULL) {
		SK_ERR("process for pid %d doesn't exist", pid);
		return EINVAL;
	}
	nfr->nfr_proc = p;
	uuid_generate_random(uuid_key);
	bzero(&nxb, sizeof(nxb));
	nxb.nxb_flags |= NXBF_MATCH_UNIQUEID;
	nxb.nxb_uniqueid = proc_uniqueid(p);
	nxb.nxb_pid = pid;
	nxb.nxb_flags |= NXBF_MATCH_KEY;
	nxb.nxb_key_len = sizeof(uuid_key);
	nxb.nxb_key = sk_alloc_data(nxb.nxb_key_len, Z_WAITOK | Z_NOFAIL,
	    skmem_tag_nx_key);
	bcopy(uuid_key, nxb.nxb_key, nxb.nxb_key_len);

	err = nx_netif_netagent_fill_port_info(nif, nfr, &npi);
	if (err != 0) {
		sk_free_data(nxb.nxb_key, nxb.nxb_key_len);
		nfr->nfr_proc = NULL;
		proc_rele(p);
		return err;
	}
	/*
	 * The callee holds on to nxb_key on success; no need to free it.
	 */
	nx_port = NEXUS_PORT_ANY;
	err = NX_DOM(nx)->nxdom_bind_port(nx, &nx_port, &nxb, npi);
	if (err != 0) {
		sk_free_data(nxb.nxb_key, nxb.nxb_key_len);
		if (npi != NULL) {
			sk_free_data(npi, sizeof(*npi));
		}
		nfr->nfr_proc = NULL;
		proc_rele(p);
		SK_ERR("%s(%d) failed to bind flow_uuid %s to a "
		    "nx_port (err %d)", sk_proc_name_address(p),
		    pid, sk_uuid_unparse(nfr->nfr_flow_uuid,
		    uuidstr), err);
		return err;
	}
	bcopy(uuid_key, nfr->nfr_bind_key, sizeof(uuid_key));
	nfr->nfr_nx_port = nx_port;
	nfr->nfr_proc = NULL;
	proc_rele(p);
	return 0;
}


SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_flow_unbind(struct nx_netif *nif, struct nx_flow_req *nfr)
{
	int err;
	struct kern_nexus *nx = nif->nif_nx;

	if ((nfr->nfr_flags & NXFLOWREQF_LISTENER) != 0) {
		return ENOTSUP;
	}
	err = NX_DOM(nx)->nxdom_unbind_port(nx, nfr->nfr_nx_port);
	if (err != 0) {
		SK_ERR("nxdom_unbind_port failed: %d", err);
		return err;
	}
	return 0;
}

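/*
 * Validates that the requested flow type is available on this nexus and
 * that mutually exclusive types (filter, custom ether, IPv6 ULA) are
 * not combined; only IPv6 ULA may be paired with the listener flag.
 */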
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_check_flags(struct nx_netif *nif, struct nx_flow_req *nfr,
    boolean_t add)
{
	uint16_t flags = nfr->nfr_flags;

	if ((nif->nif_agent_flags & NETIF_AGENT_FLAG_ADDED) == 0) {
		SK_ERR("no agent added");
		return ENOTSUP;
	}
	if ((flags & NXFLOWREQF_FILTER) != 0) {
		if ((flags & ~NXFLOWREQF_FILTER) != 0) {
			SK_ERR("filter: incompatible with other features");
			return EINVAL;
		}
		if ((nif->nif_filter_flags &
		    NETIF_FILTER_FLAG_INITIALIZED) == 0) {
			SK_ERR("filter: uninitialized");
			return ENOTSUP;
		}
	}
	if ((flags & NXFLOWREQF_CUSTOM_ETHER) != 0) {
		if ((flags & ~NXFLOWREQF_CUSTOM_ETHER) != 0) {
			SK_ERR("custom ether: incompatible "
			    "with other features");
			return EINVAL;
		}
		if ((nif->nif_flow_flags &
		    NETIF_FLOW_FLAG_INITIALIZED) == 0) {
			SK_ERR("custom ether: uninitialized");
			return ENOTSUP;
		}
	}
	if ((flags & NXFLOWREQF_IPV6_ULA) != 0) {
		if ((flags & ~(NXFLOWREQF_IPV6_ULA | NXFLOWREQF_LISTENER)) != 0) {
			SK_ERR("IPv6 ULA: incompatible with other features");
			return EINVAL;
		}
		if (!NETIF_IS_LOW_LATENCY(nif)) {
			SK_ERR("IPv6 ULA: not supported on this nexus");
			return ENOTSUP;
		}
	}
	if (add && (flags & (NXFLOWREQF_FILTER | NXFLOWREQF_CUSTOM_ETHER |
	    NXFLOWREQF_IPV6_ULA)) == 0) {
		SK_ERR("flow type must be specified");
		return EINVAL;
	}
	return 0;
}

SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_listener_flow_add(struct nx_netif *nif,
    struct nx_flow_req *nfr)
{
	int err;

	if ((nfr->nfr_flags & NXFLOWREQF_IPV6_ULA) == 0) {
		SK_ERR("listener flow not supported");
		return ENOTSUP;
	}
	err = get_mac_addr(nif, &nfr->nfr_etheraddr);
	if (err != 0) {
		SK_ERR("get mac addr failed: %d", err);
		return err;
	}
	err = get_ipv6_sockaddr(nif, &nfr->nfr_saddr.sin6);
	if (err != 0) {
		SK_ERR("get ipv6 laddr failed: %d", err);
		return err;
	}
	return 0;
}

/*
 * This handles the case where the same flow (same IPv6
 * local_addr:remote_addr tuple) is added twice. Instead of failing the
 * second flow add, we return the existing flow's nexus port. This
 * allows libnetcore to reuse the existing channel instead of opening
 * a new one. Note that sidecar is not affected by this because it always
 * adds flows with unique addresses.
 */
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_flow_find(struct nx_netif *nif,
    struct nx_flow_req *nfr)
{
	struct netif_agent_flow *naf;

	/* Only support llw flows */
	if ((nfr->nfr_flags & NXFLOWREQF_IPV6_ULA) == 0) {
		return ENOTSUP;
	}
	lck_mtx_lock(&nif->nif_agent_lock);
	SLIST_FOREACH(naf, &nif->nif_agent_flow_list, naf_link) {
		if (flow_ipv6_ula_match(naf, nfr)) {
			break;
		}
	}
	if (naf == NULL) {
		DTRACE_SKYWALK2(dupflow__not__found, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr);
		lck_mtx_unlock(&nif->nif_agent_lock);
		return ENOENT;
	}
	nfr->nfr_nx_port = naf->naf_nx_port;
	uuid_copy(nfr->nfr_bind_key, naf->naf_bind_key);
	lck_mtx_unlock(&nif->nif_agent_lock);
	return 0;
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_netagent_fill_flow_info(struct netif_agent_flow *naf,
    struct nx_flow_req *nfr)
{
	uuid_copy(naf->naf_flow_uuid, nfr->nfr_flow_uuid);
	uuid_copy(naf->naf_bind_key, nfr->nfr_bind_key);
	naf->naf_nx_port = nfr->nfr_nx_port;
	naf->naf_flags = nfr->nfr_flags;
	naf->naf_pid = nfr->nfr_pid;

	/* We only keep flow info for llw flows */
	if ((naf->naf_flags & NXFLOWREQF_IPV6_ULA) != 0) {
		naf->naf_saddr = nfr->nfr_saddr;
		naf->naf_daddr = nfr->nfr_daddr;
	}
}

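/*
 * Adds a netagent flow. Duplicate IPv6 ULA flows resolve to the existing
 * flow's nexus port (see nx_netif_netagent_flow_find() above); listener
 * flows only get addresses assigned and skip the port bind.
 */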
int
nx_netif_netagent_flow_add(struct nx_netif *nif, struct nx_flow_req *nfr)
{
	int err;
	struct netif_agent_flow *naf;
	struct netif_stats *nifs = &nif->nif_stats;

	err = nx_netif_netagent_check_flags(nif, nfr, TRUE);
	if (err != 0) {
		SK_ERR("flow request inconsistent with current config");
		DTRACE_SKYWALK3(invalid__flags, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr, int, err);
		return err;
	}
	err = nx_netif_netagent_flow_find(nif, nfr);
	if (err == 0) {
		SK_ERR("found existing flow: nx_port = %d", nfr->nfr_nx_port);
		DTRACE_SKYWALK2(found__flow, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr);
		STATS_INC(nifs, NETIF_STATS_AGENT_DUP_FLOW);
		return 0;
	}
	if ((nfr->nfr_flags & NXFLOWREQF_LISTENER) != 0) {
		return nx_netif_netagent_listener_flow_add(nif, nfr);
	}
	naf = sk_alloc_type(struct netif_agent_flow, Z_WAITOK | Z_NOFAIL,
	    skmem_tag_netif_agent_flow);

	if ((nfr->nfr_flags &
	    (NXFLOWREQF_CUSTOM_ETHER | NXFLOWREQF_IPV6_ULA)) != 0) {
		err = get_mac_addr(nif, &nfr->nfr_etheraddr);
		if (err != 0) {
			SK_ERR("get mac addr failed: %d", err);
			sk_free_type(struct netif_agent_flow, naf);
			return err;
		}
	}
	lck_mtx_lock(&nif->nif_agent_lock);
	err = nx_netif_netagent_flow_bind(nif, nfr);
	if (err != 0) {
		SK_ERR("netagent flow bind failed: %d", err);
		DTRACE_SKYWALK3(bind__failed, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr, int, err);
		sk_free_type(struct netif_agent_flow, naf);
		lck_mtx_unlock(&nif->nif_agent_lock);
		return err;
	}
	nx_netif_netagent_fill_flow_info(naf, nfr);
	SLIST_INSERT_HEAD(&nif->nif_agent_flow_list, naf, naf_link);
	nif->nif_agent_flow_cnt++;

#if SK_LOG
	uuid_string_t uuidstr;

	SK_DF(SK_VERB_NETIF, "flow uuid: %s",
	    sk_uuid_unparse(naf->naf_flow_uuid, uuidstr));
	SK_DF(SK_VERB_NETIF, "nx port: %d", naf->naf_nx_port);
	SK_DF(SK_VERB_NETIF, "nx key: %s",
	    sk_uuid_unparse(nfr->nfr_bind_key, uuidstr));

	if ((nfr->nfr_flags & NXFLOWREQF_FILTER) != 0) {
		SK_DF(SK_VERB_NETIF, "flow type: filter");
	}
	if ((nfr->nfr_flags & NXFLOWREQF_CUSTOM_ETHER) != 0) {
		SK_DF(SK_VERB_NETIF, "flow type: custom ether");
		SK_DF(SK_VERB_NETIF, "ethertype: 0x%x", nfr->nfr_ethertype);
	}
	if ((nfr->nfr_flags & NXFLOWREQF_IPV6_ULA) != 0) {
		char local[MAX_IPv6_STR_LEN];
		char remote[MAX_IPv6_STR_LEN];

		SK_DF(SK_VERB_NETIF, "flow type: IPv6 ULA");
		SK_DF(SK_VERB_NETIF, "IPv6 local: %s",
		    inet_ntop(AF_INET6, &nfr->nfr_saddr.sin6.sin6_addr,
		    local, sizeof(local)));
		SK_DF(SK_VERB_NETIF, "IPv6 remote: %s",
		    inet_ntop(AF_INET6, &nfr->nfr_daddr.sin6.sin6_addr,
		    remote, sizeof(remote)));
	}
#endif /* SK_LOG */
	lck_mtx_unlock(&nif->nif_agent_lock);
	return 0;
}

int
nx_netif_netagent_flow_del(struct nx_netif *nif, struct nx_flow_req *nfr)
{
	int err;
	struct netif_agent_flow *naf = NULL;

	err = nx_netif_netagent_check_flags(nif, nfr, FALSE);
	if (err != 0) {
		SK_ERR("flow request inconsistent with current config");
		DTRACE_SKYWALK3(invalid__flags, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr, int, err);
		return err;
	}

	/* no-op for listener */
	if ((nfr->nfr_flags & NXFLOWREQF_LISTENER) != 0) {
		DTRACE_SKYWALK2(listener, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr);
		return 0;
	}
	lck_mtx_lock(&nif->nif_agent_lock);
	SLIST_FOREACH(naf, &nif->nif_agent_flow_list, naf_link) {
		if (uuid_compare(naf->naf_flow_uuid, nfr->nfr_flow_uuid) == 0) {
			break;
		}
	}
	if (naf == NULL) {
		SK_ERR("netagent flow not found");
		DTRACE_SKYWALK2(flow__not__found, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr);
		lck_mtx_unlock(&nif->nif_agent_lock);
		return ENOENT;
	}
	/* use the port from the agent flow, not the request */
	nfr->nfr_nx_port = naf->naf_nx_port;

	err = nx_netif_netagent_flow_unbind(nif, nfr);
	if (err != 0) {
		SK_ERR("netagent flow unbind failed: %d", err);
		DTRACE_SKYWALK3(unbind__failed, struct nx_netif *, nif,
		    struct nx_flow_req *, nfr, int, err);
		/*
		 * The channel has already closed the port; we can just
		 * clean up our agent flow state.
		 */
	}
	SLIST_REMOVE(&nif->nif_agent_flow_list, naf, netif_agent_flow,
	    naf_link);
	sk_free_type(struct netif_agent_flow, naf);
	nif->nif_agent_flow_cnt--;
	lck_mtx_unlock(&nif->nif_agent_lock);
	return 0;
}

SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_agent_flow_purge(struct nx_netif *nif)
{
	struct netif_agent_flow *naf, *naf_tmp;
	uint32_t cnt = 0;

	lck_mtx_lock(&nif->nif_agent_lock);
	SLIST_FOREACH_SAFE(naf, &nif->nif_agent_flow_list, naf_link, naf_tmp) {
		SLIST_REMOVE(&nif->nif_agent_flow_list, naf, netif_agent_flow,
		    naf_link);
		/*
		 * Since this gets called during detach, all ports will be
		 * unbound and freed by the nexus cleanup path. Nothing to
		 * do here.
		 */
		sk_free_type(struct netif_agent_flow, naf);
		cnt++;
	}
	SK_DF(SK_VERB_NETIF, "agent flows purged: %d", cnt);
	DTRACE_SKYWALK2(agent__flows__purge, struct nx_netif *, nif,
	    uint32_t, cnt);
	ASSERT(nif->nif_agent_flow_cnt == cnt);
	nif->nif_agent_flow_cnt = 0;
	lck_mtx_unlock(&nif->nif_agent_lock);
	return 0;
}

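/*
 * The handlers below translate netagent nexus flow events into flow
 * requests and package the result (nexus instance UUID, nexus port and
 * bind key, plus any assigned endpoints) into an NECP assign message
 * returned to the client.
 */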
SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_handle_interpose_flow_add(struct nx_netif *nif,
    uuid_t flow_uuid, pid_t pid, struct necp_client_nexus_parameters *cparams,
    void **results, size_t *results_length)
{
#pragma unused(cparams)
	int err;
	struct nx_flow_req nfr;
	void *message;
	size_t len;

	bzero(&nfr, sizeof(nfr));
	uuid_copy(nfr.nfr_flow_uuid, flow_uuid);
	nfr.nfr_pid = pid;
	nfr.nfr_nx_port = NEXUS_PORT_ANY;
	nfr.nfr_flags |= NXFLOWREQF_FILTER;

	err = nx_netif_netagent_flow_add(nif, &nfr);
	if (err != 0) {
		return err;
	}
	message =
	    necp_create_nexus_assign_message(nif->nif_nx->nx_uuid,
	    nfr.nfr_nx_port, nfr.nfr_bind_key, sizeof(nfr.nfr_bind_key),
	    NULL, NULL, NULL, 0, NULL, &len);
	if (message == NULL) {
		(void) nx_netif_netagent_flow_del(nif, &nfr);
		return ENOMEM;
	}
	*results = message;
	*results_length = len;
	return 0;
}

SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_handle_custom_ether_flow_add(struct nx_netif *nif,
    uuid_t flow_uuid, pid_t pid, struct necp_client_nexus_parameters *cparams,
    void **results, size_t *results_length)
{
	int err;
	struct nx_flow_req nfr;
	void *message;
	size_t len;

	bzero(&nfr, sizeof(nfr));
	uuid_copy(nfr.nfr_flow_uuid, flow_uuid);
	nfr.nfr_pid = pid;
	nfr.nfr_nx_port = NEXUS_PORT_ANY;
	nfr.nfr_ethertype = cparams->ethertype;
	nfr.nfr_flags |= NXFLOWREQF_CUSTOM_ETHER;

	err = nx_netif_netagent_flow_add(nif, &nfr);
	if (err != 0) {
		return err;
	}
	message =
	    necp_create_nexus_assign_message(nif->nif_nx->nx_uuid,
	    nfr.nfr_nx_port, nfr.nfr_bind_key, sizeof(nfr.nfr_bind_key),
	    NULL, NULL, &nfr.nfr_etheraddr, 0, NULL, &len);
	if (message == NULL) {
		(void) nx_netif_netagent_flow_del(nif, &nfr);
		return ENOMEM;
	}
	*results = message;
	*results_length = len;
	return 0;
}

#define IS_V6_ADDR(addr) \
    ((addr)->sin6.sin6_family == AF_INET6)

SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_handle_ipv6_ula_flow_add(struct nx_netif *nif,
    uuid_t flow_uuid, pid_t pid, struct necp_client_nexus_parameters *cparams,
    void **results, size_t *results_length)
{
	int err;
	struct nx_flow_req nfr;
	struct necp_client_endpoint local_endpoint;
	struct necp_client_endpoint remote_endpoint;
	void *message;
	size_t len;

	bzero(&nfr, sizeof(nfr));
	uuid_copy(nfr.nfr_flow_uuid, flow_uuid);
	nfr.nfr_pid = pid;
	nfr.nfr_nx_port = NEXUS_PORT_ANY;
	nfr.nfr_flags |= NXFLOWREQF_IPV6_ULA;
	if (cparams->is_listener) {
		/*
		 * Preserve input args if possible
		 */
		if (IS_V6_ADDR(&cparams->local_addr)) {
			bcopy(&cparams->local_addr,
			    &nfr.nfr_saddr, sizeof(nfr.nfr_saddr));
		}
		if (IS_V6_ADDR(&cparams->remote_addr)) {
			bcopy(&cparams->remote_addr,
			    &nfr.nfr_daddr, sizeof(nfr.nfr_daddr));
		}
		nfr.nfr_flags |= NXFLOWREQF_LISTENER;
	} else {
		/*
		 * Both local and remote addresses must be specified.
		 */
		if (!IS_V6_ADDR(&cparams->local_addr)) {
			SK_ERR("local addr missing");
			return EINVAL;
		}
		bcopy(&cparams->local_addr,
		    &nfr.nfr_saddr, sizeof(nfr.nfr_saddr));

		if (!IS_V6_ADDR(&cparams->remote_addr)) {
			SK_ERR("remote addr missing");
			return EINVAL;
		}
		bcopy(&cparams->remote_addr,
		    &nfr.nfr_daddr, sizeof(nfr.nfr_daddr));
	}
	err = nx_netif_netagent_flow_add(nif, &nfr);
	if (err != 0) {
		return err;
	}
	bzero(&local_endpoint, sizeof(local_endpoint));
	bcopy(&nfr.nfr_saddr.sin6, &local_endpoint.u.sin6,
	    sizeof(local_endpoint.u.sin6));

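	/*
	 * Listeners are not bound to a nexus port; return only the
	 * assigned local endpoint with a zeroed nexus instance UUID.
	 */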
	if (cparams->is_listener) {
		uuid_t zero_nx_uuid;

		bzero(zero_nx_uuid, sizeof(uuid_t));
		message = necp_create_nexus_assign_message(
			zero_nx_uuid, NEXUS_PORT_ANY, NULL,
			0, &local_endpoint, NULL,
			&nfr.nfr_etheraddr, 0, NULL, &len);
	} else {
		bzero(&remote_endpoint, sizeof(remote_endpoint));
		bcopy(&nfr.nfr_daddr.sin6, &remote_endpoint.u.sin6,
		    sizeof(remote_endpoint.u.sin6));

		message = necp_create_nexus_assign_message(
			nif->nif_nx->nx_uuid, nfr.nfr_nx_port, nfr.nfr_bind_key,
			sizeof(nfr.nfr_bind_key), &local_endpoint,
			&remote_endpoint, &nfr.nfr_etheraddr, 0, NULL, &len);
	}
	if (message == NULL) {
		/* This is a no-op for the listener flow */
		(void) nx_netif_netagent_flow_del(nif, &nfr);
		return ENOMEM;
	}
	*results = message;
	*results_length = len;
	return 0;
}

SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_handle_flow_add(struct nx_netif *nif,
    uuid_t flow_uuid, pid_t pid, struct necp_client_nexus_parameters *cparams,
    void **results, size_t *results_length)
{
	int err = 0;

	ASSERT(cparams != NULL);
	ASSERT(results != NULL && *results == NULL);
	ASSERT(results_length != NULL && *results_length == 0);

	if (cparams->is_interpose) {
		err = nx_netif_netagent_handle_interpose_flow_add(nif,
		    flow_uuid, pid, cparams, results, results_length);
	} else if (cparams->is_custom_ether) {
		err = nx_netif_netagent_handle_custom_ether_flow_add(nif,
		    flow_uuid, pid, cparams, results, results_length);
	} else if (NETIF_IS_LOW_LATENCY(nif)) {
		err = nx_netif_netagent_handle_ipv6_ula_flow_add(nif,
		    flow_uuid, pid, cparams, results, results_length);
	}
	if (err != 0) {
		ASSERT(*results == NULL);
		ASSERT(*results_length == 0);
		return err;
	}
	return 0;
}

SK_NO_INLINE_ATTRIBUTE
static int
nx_netif_netagent_handle_flow_del(struct nx_netif *nif,
    uuid_t flow_uuid, pid_t pid, boolean_t abort)
{
#pragma unused(abort)
	struct nx_flow_req nfr;

	bzero(&nfr, sizeof(nfr));
	uuid_copy(nfr.nfr_flow_uuid, flow_uuid);
	nfr.nfr_pid = pid;
	return nx_netif_netagent_flow_del(nif, &nfr);
}

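/*
 * Netagent event callback registered via netagent_create(); ctx is the
 * struct nx_netif passed at registration time.
 */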
static int
nx_netif_netagent_event(u_int8_t event, uuid_t flow_uuid, pid_t pid,
    void *context, void *ctx, struct necp_client_agent_parameters *cparams,
    void **results, size_t *results_length)
{
#pragma unused(context)
	struct nx_netif *nif;
	int err = 0;

	nif = (struct nx_netif *)ctx;
	ASSERT(nif != NULL);

	switch (event) {
	case NETAGENT_EVENT_NEXUS_FLOW_INSERT:
		/* these are required for this event */
		ASSERT(cparams != NULL);
		ASSERT(results != NULL);
		ASSERT(results_length != NULL);
		*results = NULL;
		*results_length = 0;
		err = nx_netif_netagent_handle_flow_add(nif, flow_uuid, pid,
		    &cparams->u.nexus_request, results, results_length);
		break;

	case NETAGENT_EVENT_NEXUS_FLOW_REMOVE:
	case NETAGENT_EVENT_NEXUS_FLOW_ABORT:
		err = nx_netif_netagent_handle_flow_del(nif, flow_uuid, pid,
		    (event == NETAGENT_EVENT_NEXUS_FLOW_REMOVE));
		break;

	default:
		/* events not handled */
		return 0;
	}

	return err;
}

static int
nx_netif_agent_register(struct nx_netif *nif, uint32_t features)
{
	struct netagent_nexus_agent agent;
	int err = 0;

	_CASSERT(FLOWADV_IDX_NONE == UINT32_MAX);
	_CASSERT(NECP_FLOWADV_IDX_INVALID == FLOWADV_IDX_NONE);

	if (!nif_netagent) {
		return ENOTSUP;
	}
	nif->nif_agent_session = netagent_create(&nx_netif_netagent_event, nif);
	if (nif->nif_agent_session == NULL) {
		return ENOMEM;
	}

	bzero(&agent, sizeof(agent));
	uuid_generate_random(agent.agent.netagent_uuid);
	uuid_copy(nif->nif_agent_uuid, agent.agent.netagent_uuid);
	(void) snprintf(agent.agent.netagent_domain,
	    sizeof(agent.agent.netagent_domain), "%s", "Skywalk");
	(void) snprintf(agent.agent.netagent_type,
	    sizeof(agent.agent.netagent_type), "%s", "NetIf");
	(void) snprintf(agent.agent.netagent_desc,
	    sizeof(agent.agent.netagent_desc), "%s", "Userspace Networking");

	agent.agent.netagent_flags =
	    (NETAGENT_FLAG_ACTIVE | NETAGENT_FLAG_NEXUS_LISTENER | features);

	agent.agent.netagent_data_size = sizeof(struct netagent_nexus);
	agent.nexus_data.frame_type = NETAGENT_NEXUS_FRAME_TYPE_LINK;
	agent.nexus_data.endpoint_assignment_type =
	    NETAGENT_NEXUS_ENDPOINT_TYPE_ADDRESS;
	agent.nexus_data.endpoint_request_types[0] =
	    NETAGENT_NEXUS_ENDPOINT_TYPE_ADDRESS;
	agent.nexus_data.nexus_flags |=
	    (NETAGENT_NEXUS_FLAG_ASSERT_UNSUPPORTED |
	    NETAGENT_NEXUS_FLAG_SUPPORTS_USER_PACKET_POOL);
	if (NETIF_IS_LOW_LATENCY(nif)) {
		agent.nexus_data.nexus_flags |=
		    NETAGENT_NEXUS_FLAG_SHOULD_USE_EVENT_RING;
	}

	if ((err = netagent_register(nif->nif_agent_session,
	    (struct netagent *)&agent)) != 0) {
		netagent_destroy(nif->nif_agent_session);
		nif->nif_agent_session = NULL;
		uuid_clear(nif->nif_agent_uuid);
		return err;
	}
	nif->nif_agent_flags |= NETIF_AGENT_FLAG_REGISTERED;
	return 0;
}

static void
nx_netif_agent_unregister(struct nx_netif *nif)
{
	if ((nif->nif_agent_flags & NETIF_AGENT_FLAG_REGISTERED) == 0) {
		return;
	}
	nif->nif_agent_flags &= ~NETIF_AGENT_FLAG_REGISTERED;

	ASSERT(nif->nif_agent_session != NULL);
	netagent_destroy(nif->nif_agent_session);
	nif->nif_agent_session = NULL;
	uuid_clear(nif->nif_agent_uuid);
}

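/*
 * Computes the netagent feature flags to advertise: interpose support
 * when the netif filter subsystem is initialized, and either the IPv6
 * ULA nexus provider (low-latency nexus) or the custom ether nexus
 * when netif flows are initialized.
 */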
static uint32_t
nx_netif_agent_get_features(struct nx_netif *nif)
{
	uint32_t features = 0;

	if ((nif->nif_filter_flags & NETIF_FILTER_FLAG_INITIALIZED) != 0) {
		features |= (NETAGENT_FLAG_INTERPOSE_NEXUS |
		    NETAGENT_FLAG_NETWORK_PROVIDER);
	}
	if ((nif->nif_flow_flags & NETIF_FLOW_FLAG_INITIALIZED) != 0) {
		if (NETIF_IS_LOW_LATENCY(nif)) {
			features |= NETAGENT_FLAG_NEXUS_PROVIDER;
		} else {
			features |= NETAGENT_FLAG_CUSTOM_ETHER_NEXUS;
		}
		features |= NETAGENT_FLAG_NETWORK_PROVIDER;
	}
	return features;
}

void
nx_netif_agent_init(struct nx_netif *nif)
{
	int err;
	ifnet_t ifp = nif->nif_ifp;
	uint32_t features = 0;

	ASSERT(ifp != NULL);
	features = nx_netif_agent_get_features(nif);
	if (features == 0) {
		SK_DF(SK_VERB_NETIF, "%s: no features supported", if_name(ifp));
		return;
	}
	ASSERT(nif->nif_agent_flags == 0);
	lck_mtx_init(&nif->nif_agent_lock, &nexus_lock_group, &nexus_lock_attr);

	SLIST_INIT(&nif->nif_agent_flow_list);
	nif->nif_agent_flow_cnt = 0;

	err = nx_netif_agent_register(nif, features);
	if (err != 0) {
		SK_DF(SK_VERB_ERROR, "%s: agent register failed: err %d",
		    if_name(ifp), err);
		return;
	}
	ASSERT(!uuid_is_null(nif->nif_agent_uuid));
	err = if_add_netagent_locked(ifp, nif->nif_agent_uuid);
	if (err != 0) {
		nx_netif_agent_unregister(nif);
		SK_DF(SK_VERB_ERROR, "%s: agent add failed: err %d",
		    if_name(ifp), err);
		return;
	}
	nif->nif_agent_flags |= NETIF_AGENT_FLAG_ADDED;

	SK_DF(SK_VERB_NETIF, "%s: agent init complete", if_name(ifp));
}

void
nx_netif_agent_fini(struct nx_netif *nif)
{
	ifnet_t ifp = nif->nif_ifp;

	ASSERT(ifp != NULL);
	if ((nif->nif_agent_flags & NETIF_AGENT_FLAG_ADDED) == 0) {
		SK_DF(SK_VERB_NETIF, "%s: no agent added", if_name(ifp));
		return;
	}
	nif->nif_agent_flags &= ~NETIF_AGENT_FLAG_ADDED;
	ASSERT(!uuid_is_null(nif->nif_agent_uuid));
	if_delete_netagent(ifp, nif->nif_agent_uuid);

	nx_netif_agent_unregister(nif);

	/*
	 * XXX
	 * This is asymmetrical with nx_netif_agent_init(). But we have to
	 * clean up here because the interface is detaching.
	 */
	nx_netif_agent_flow_purge(nif);
	ASSERT(nif->nif_agent_flow_cnt == 0);
	ASSERT(SLIST_EMPTY(&nif->nif_agent_flow_list));
	lck_mtx_destroy(&nif->nif_agent_lock, &nexus_lock_group);
	SK_DF(SK_VERB_NETIF, "%s: agent fini complete", if_name(ifp));
}