xref: /xnu-12377.1.9/tests/skywalk/skt_netifdirect.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2019-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <stddef.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <unistd.h>
33 #include <assert.h>
34 #include <stdbool.h>
35 #include <sys/event.h>
36 #include <sys/ioctl.h>
37 #include <sys/sockio.h>
38 #include <sys/sysctl.h>
39 #include <net/if.h>
40 #include <netinet/in.h>
41 #include <netinet/ip.h>
42 #include <netinet/ip6.h>
43 #include <netinet/udp.h>
44 #include <netinet/tcp.h>
45 #include <TargetConditionals.h>
46 #include <arpa/inet.h>
47 #include <mach/mach_time.h>
48 #include <darwintest.h>
49 #include "skywalk_test_driver.h"
50 #include "skywalk_test_utils.h"
51 #include "skywalk_test_common.h"
52 
53 //#define SKT_NETIF_DIRECT_DEBUG 1
54 
55 #define STR(x) _STR(x)
56 #define _STR(x) #x
57 
58 #if TARGET_OS_WATCH
59 #define NETIF_TXRX_PACKET_COUNT  (5 * 1000)
60 #define NETIF_TXRX_BATCH_COUNT   4
61 #define NETIF_TXRX_TIMEOUT_SECS  0
62 #define NETIF_TXRX_TIMEOUT_NSECS (100 * 1000 * 1000)
63 #define NETIF_IFADV_INTERVAL     30
64 #define NETIF_TX_PKT_DROP_RATE   100
65 #else /* TARGET_OS_WATCH */
66 #define NETIF_TXRX_PACKET_COUNT  (20 * 1000)
67 #define NETIF_TXRX_BATCH_COUNT   8
68 #define NETIF_TXRX_TIMEOUT_SECS  0
69 #define NETIF_TXRX_TIMEOUT_NSECS (100 * 1000 * 1000)
70 #define NETIF_IFADV_INTERVAL     30
71 #define NETIF_TX_PKT_DROP_RATE   100
72 #endif /* !TARGET_OS_WATCH */
73 
74 #define FETH0_UDP_PORT    0x1234
75 #define FETH1_UDP_PORT    0x5678
76 
77 /* test identifiers for netif direct channel tests */
78 #define SKT_NETIF_DIRECT_TEST_TXRX               1
79 #define SKT_NETIF_DIRECT_TEST_IF_ADV_ENABLED     2
80 #define SKT_NETIF_DIRECT_TEST_IF_ADV_DISABLED    3
81 #define SKT_NETIF_DIRECT_TEST_CHANNEL_EVENTS     4
82 #define SKT_NETIF_DIRECT_TEST_EXPIRY_EVENTS      5
83 
84 
85 /* netif event flags */
86 #define SKT_NETIF_DIRECT_EVFLAG_IFADV      0x1
87 #define SKT_NETIF_DIRECT_EVFLAG_CHANNEL    0x2
88 #define SKT_NETIF_DIRECT_EVFLAG_EXPIRY     0x4
89 
90 /* dummy packet identifier constants */
91 #define NETIF_PKTID_PAYLOAD_TYPE    0xFA
92 #define NETIF_PKTID_STREAM_ID       0xFB
93 
94 /* Expiry notification parameters */
95 #define SKT_NETIF_DIRECT_TEST_EXPIRY_DEADLINE_NS   15
96 
97 
98 #define SKT_ETH_IPV6_UDP_HDR_LEN    \
99     (sizeof(struct ether_header) + sizeof(struct ip6_hdr) + \
100     sizeof(struct udphdr))
101 
102 typedef struct {
103 	uint32_t    packet_number;
104 	char        data[1514 - SKT_ETH_IPV6_UDP_HDR_LEN - sizeof(uint32_t)];
105 } netif_payload, *netif_payload_t;
106 
107 static struct sktc_nexus_handles handles;
108 
static struct mach_timebase_info timebase_info = {0, 0};
/*
 * mach_absolute_time() ticks convert to nanoseconds by multiplying by
 * numer and dividing by denom (Apple QA1398 / mach_timebase_info); the
 * inverse conversion applies the reciprocal ratio.  The previous
 * definitions had the two ratios swapped, which was masked on hardware
 * where numer == denom == 1.
 */
#define SKT_NETIF_TIMESTAMP_MACH_TO_NS(ts_mach) \
	((int64_t)(((ts_mach) * timebase_info.numer) / timebase_info.denom))
#define SKT_NETIF_TIMESTAMP_NS_TO_MACH(ts_ns)   \
	((int64_t)(((ts_ns) * timebase_info.denom) / timebase_info.numer))

/* per-packet expiration deadline (mach ticks) used by the expiry-event test */
static uint64_t expiration_deadline_mach = 0;

/*
 * Populate timebase_info and derive the packet expiration deadline
 * (SKT_NETIF_DIRECT_TEST_EXPIRY_DEADLINE_NS) in mach ticks.  Must run
 * before any use of the conversion macros above.
 */
static void
init_expiration_deadline_mach(void)
{
	uint64_t deadline_ns = SKT_NETIF_DIRECT_TEST_EXPIRY_DEADLINE_NS;
	assert(mach_timebase_info(&timebase_info) == KERN_SUCCESS);
	expiration_deadline_mach = SKT_NETIF_TIMESTAMP_NS_TO_MACH(deadline_ns);
}
124 
/*
 * Register an IPv6 flow on the netif nexus.  NULL addresses leave the
 * corresponding sockaddr zeroed (used for the wildcard listener flow).
 * On success, the assigned nexus port is returned through `nx_port`
 * when non-NULL.
 */
static void
skt_add_netif_ipv6_flow(
	struct sktc_nexus_handles *handles,
	const struct in6_addr *our_ip,
	const struct in6_addr *peer_ip,
	uint16_t flags,
	nexus_port_t *nx_port)
{
	struct nx_flow_req req;
	int err;

	memset(&req, 0, sizeof(req));
	uuid_generate(req.nfr_flow_uuid);
	req.nfr_nx_port = NEXUS_PORT_ANY;
	req.nfr_flags |= flags;

	if (our_ip != NULL) {
		struct sockaddr_in6 *src6 = &req.nfr_saddr.sin6;

		src6->sin6_len = sizeof(struct sockaddr_in6);
		src6->sin6_family = AF_INET6;
		memcpy(&src6->sin6_addr, our_ip, sizeof(*our_ip));
	}

	if (peer_ip != NULL) {
		struct sockaddr_in6 *dst6 = &req.nfr_daddr.sin6;

		dst6->sin6_len = sizeof(struct sockaddr_in6);
		dst6->sin6_family = AF_INET6;
		memcpy(&dst6->sin6_addr, peer_ip, sizeof(*peer_ip));
	}

	err = __os_nexus_flow_add(handles->controller,
	    handles->netif_nx_uuid, &req);
	SKTC_ASSERT_ERR(err == 0);

	if (nx_port != NULL) {
		*nx_port = req.nfr_nx_port;
	}
}
164 
/*
 * Initialize `handles` for `ifname`: create a nexus controller, look up
 * the interface's netif nexus UUID, then add two IPv6 ULA flows — a
 * wildcard listener flow (coverage only) and the connected our_ip/peer_ip
 * flow whose nexus port is returned through `nx_port`.
 */
static void
skt_setup_netif_with_ipv6_flow(struct sktc_nexus_handles *handles,
    const char *ifname, struct in6_addr *our_ip, struct in6_addr *peer_ip,
    nexus_port_t *nx_port)
{
	bzero(handles, sizeof(*handles));
	strlcpy(handles->netif_ifname, ifname, sizeof(handles->netif_ifname));
	handles->controller = os_nexus_controller_create();
	assert(handles->controller != NULL);
	handles->netif_ip6_addr = *our_ip;
	assert(sktc_get_netif_nexus(handles->netif_ifname,
	    handles->netif_nx_uuid));
	/*
	 * Add listener flow just to execute listener code path, the flow
	 * itself is not used for anything atm.
	 */
	skt_add_netif_ipv6_flow(handles, NULL, NULL,
	    NXFLOWREQF_IPV6_ULA | NXFLOWREQF_LISTENER, NULL);
	skt_add_netif_ipv6_flow(handles, our_ip, peer_ip,
	    NXFLOWREQF_IPV6_ULA, nx_port);
}
186 
187 static size_t
skt_netif_ipv6_udp_frame_populate(packet_t ph,struct ether_addr * src_mac,struct in6_addr * src_ip,uint16_t src_port,struct ether_addr * dst_mac,struct in6_addr * dst_ip,uint16_t dst_port,const void * data,size_t data_len)188 skt_netif_ipv6_udp_frame_populate(packet_t ph, struct ether_addr *src_mac,
189     struct in6_addr *src_ip, uint16_t src_port, struct ether_addr *dst_mac,
190     struct in6_addr *dst_ip, uint16_t dst_port, const void *data,
191     size_t data_len)
192 {
193 	int                     error;
194 	size_t                  frame_length;
195 	struct ether_header     eth_hdr;
196 	struct ip6_hdr          ip6_hdr;
197 	struct udphdr           udp_hdr, *udp_hdr_p;
198 	char                    *baddr;
199 	buflet_t                buf;
200 	uint16_t                bdlim;
201 
202 	buf = os_packet_get_next_buflet(ph, NULL);
203 	assert(buf != NULL);
204 	error = os_buflet_set_data_offset(buf, 0);
205 	SKTC_ASSERT_ERR(error == 0);
206 	bdlim = os_buflet_get_data_limit(buf);
207 	assert(bdlim != 0);
208 	baddr = os_buflet_get_object_address(buf);
209 	assert(baddr != NULL);
210 
211 	frame_length = SKT_ETH_IPV6_UDP_HDR_LEN + data_len;
212 	assert(os_packet_get_buflet_count(ph) == 1);
213 	assert(bdlim >= frame_length);
214 
215 	/* frame ethernet header */
216 	bcopy(src_mac->octet, eth_hdr.ether_shost, ETHER_ADDR_LEN);
217 	bcopy(dst_mac->octet, eth_hdr.ether_dhost, ETHER_ADDR_LEN);
218 	eth_hdr.ether_type = htons(ETHERTYPE_IPV6);
219 	bcopy(&eth_hdr, baddr, sizeof(eth_hdr));
220 	baddr += sizeof(eth_hdr);
221 	error = os_packet_set_link_header_length(ph, sizeof(eth_hdr));
222 	SKTC_ASSERT_ERR(error == 0);
223 
224 	/* frame IPv6 header */
225 	ip6_hdr.ip6_vfc = IPV6_VERSION;
226 	ip6_hdr.ip6_flow |= (IPV6_FLOWINFO_MASK & 0);
227 	ip6_hdr.ip6_plen = htons(data_len + sizeof(udp_hdr));
228 	ip6_hdr.ip6_nxt = IPPROTO_UDP;
229 	ip6_hdr.ip6_hlim = IPV6_DEFHLIM;
230 	ip6_hdr.ip6_src = *src_ip;
231 	ip6_hdr.ip6_dst = *dst_ip;
232 	bcopy(&ip6_hdr, baddr, sizeof(ip6_hdr));
233 	baddr += sizeof(ip6_hdr);
234 
235 	/* frame UDP header */
236 	udp_hdr_p = (struct udphdr *)baddr;
237 	udp_hdr.uh_ulen = htons(data_len + sizeof(udp_hdr));
238 	udp_hdr.uh_sport = htons(src_port);
239 	udp_hdr.uh_dport = htons(dst_port);
240 	/* psuedo header checksum */
241 	udp_hdr.uh_sum = in6_pseudo(src_ip, dst_ip,
242 	    htonl(ntohs(udp_hdr.uh_ulen) + ip6_hdr.ip6_nxt));
243 	bcopy(&udp_hdr, baddr, sizeof(udp_hdr));
244 	baddr += sizeof(udp_hdr);
245 
246 	/* copy the data */
247 	bcopy(data, baddr, data_len);
248 	error = os_buflet_set_data_length(buf, frame_length);
249 	SKTC_ASSERT_ERR(error == 0);
250 	udp_hdr_p->uh_sum = in_cksum(udp_hdr_p, ntohs(udp_hdr.uh_ulen), 0);
251 	return frame_length;
252 }
253 
254 static size_t
skt_netif_ipv6_udp_frame_process(packet_t ph,void * data,size_t data_max)255 skt_netif_ipv6_udp_frame_process(packet_t ph, void *data, size_t data_max)
256 {
257 	buflet_t buflet;
258 	size_t pkt_len, buf_len, ip_plen, udp_data_len;
259 	char *buf;
260 	struct ether_header *eth_hdr;
261 	struct ip6_hdr *ip6_hdr;
262 	struct udphdr *udp_hdr;
263 	uint16_t csum;
264 
265 	assert(os_packet_get_buflet_count(ph) == 1);
266 	buflet = os_packet_get_next_buflet(ph, NULL);
267 	assert(buflet != NULL);
268 	buf_len = os_buflet_get_data_length(buflet);
269 	buf = os_buflet_get_object_address(buflet) +
270 	    os_buflet_get_data_offset(buflet);
271 	assert(os_packet_get_link_header_length(ph) == sizeof(*eth_hdr));
272 	eth_hdr = (struct ether_header *)buf;
273 	assert(ntohs(eth_hdr->ether_type) == ETHERTYPE_IPV6);
274 	ip6_hdr = (struct ip6_hdr *)(buf + sizeof(*eth_hdr));
275 	pkt_len = os_packet_get_data_length(ph);
276 	ip_plen = ntohs(ip6_hdr->ip6_plen);
277 	assert(pkt_len == (sizeof(*eth_hdr) + sizeof(*ip6_hdr) + ip_plen));
278 	assert(ip6_hdr->ip6_nxt == IPPROTO_UDP);
279 	udp_hdr = (struct udphdr *)(buf + sizeof(*eth_hdr) + sizeof(*ip6_hdr));
280 	udp_data_len = ntohs(udp_hdr->uh_ulen) - sizeof(*udp_hdr);
281 	assert(udp_data_len == (pkt_len - SKT_ETH_IPV6_UDP_HDR_LEN));
282 	assert(data_max == 0 || udp_data_len <= data_max);
283 
284 	/* verify UDP checksum */
285 	csum = in6_pseudo((void *)&ip6_hdr->ip6_src, (void *)&ip6_hdr->ip6_dst,
286 	    htonl(ntohs(udp_hdr->uh_ulen) + ip6_hdr->ip6_nxt));
287 	assert(in_cksum(udp_hdr, ntohs(udp_hdr->uh_ulen), csum) == 0);
288 
289 	if (data != NULL) {
290 		bcopy((buf + SKT_ETH_IPV6_UDP_HDR_LEN), data, udp_data_len);
291 	}
292 	return udp_data_len;
293 }
294 
/*
 * Fill as many TX slots as available (up to `limit` packets total, as
 * tracked by payload->packet_number) with UDP frames carrying `payload`,
 * stamp each packet with an increasing packet id, then advance the ring
 * and sync.  `packet_prehook`, when non-NULL, runs on each packet before
 * finalization (used to set per-packet expire times).
 */
static void
skt_netif_channel_send(channel_port_t port, uint16_t src_port,
    struct ether_addr *dst_mac, struct in6_addr *dst_ip, uint16_t dst_port,
    netif_payload_t payload, int payload_length, uint32_t limit,
    void (^packet_prehook)(packet_t p))
{
	int error;
	channel_slot_t last_slot = NULL;
	/* template id: payload type and stream id fixed; seq/ts set per packet */
	packet_id_t pktid = {OS_PACKET_PKTID_VERSION_CURRENT,
		             NETIF_PKTID_PAYLOAD_TYPE, 0, 0, NETIF_PKTID_STREAM_ID, 0};

	assert(payload->packet_number < limit);
	while (1) {
		int                     frame_length;
		slot_prop_t             prop;
		channel_slot_t          slot;
		packet_t                pkt = 0;
		void                    *buf;
		size_t                  buf_len;
		buflet_t                buflet;

		/* grab a slot and populate it */
		slot = os_channel_get_next_slot(port->tx_ring, last_slot,
		    &prop);
		if (slot == NULL) {
			if (payload->packet_number < limit) {
				/* couldn't complete batch */
#if SKT_NETIF_DIRECT_DEBUG
				T_LOG(
					"TX didn't complete batch (%u < %u)\n",
					payload->packet_number, limit);
#endif
			}
			break;
		}
		if (port->user_packet_pool) {
			/* user pool: slot comes empty; allocate our own packet */
			assert(prop.sp_buf_ptr == 0);
			assert(prop.sp_len == 0);
			error = os_channel_packet_alloc(port->chan, &pkt);
			SKTC_ASSERT_ERR(error == 0);
		} else {
			/* kernel pool: slot already carries a packet/buffer */
			assert(prop.sp_buf_ptr != 0);
			assert(prop.sp_len != 0);
			pkt = os_channel_slot_get_packet(port->tx_ring, slot);
		}
		assert(pkt != 0);
		buflet = os_packet_get_next_buflet(pkt, NULL);
		assert(buflet != NULL);
		buf = os_buflet_get_object_address(buflet) +
		    os_buflet_get_data_offset(buflet);
		assert(buf != NULL);
		buf_len = os_buflet_get_data_limit(buflet);
		assert(buf_len != 0);
		if (!port->user_packet_pool) {
			/* slot properties must agree with the buflet view */
			assert(buf == (void *)prop.sp_buf_ptr);
			assert(buf_len == prop.sp_len);
		}
		frame_length = skt_netif_ipv6_udp_frame_populate(pkt,
		    &port->mac_addr, &port->ip6_addr, src_port,
		    dst_mac, dst_ip, dst_port, (void *)payload, payload_length);
		/* sequence number doubles as timestamp for event correlation */
		pktid.pktid_sequence_number = payload->packet_number;
		pktid.pktid_timestamp = pktid.pktid_sequence_number;
		assert(os_packet_set_packetid(pkt, &pktid) == 0);

		if (packet_prehook != NULL) {
			packet_prehook(pkt);
		}

		error = os_packet_finalize(pkt);
		SKTC_ASSERT_ERR(error == 0);
#if SKT_NETIF_DIRECT_DEBUG
		T_LOG("\nPort %d transmitting %d bytes:\n",
		    port->port, frame_length);
#endif
		assert(frame_length != 0);
		if (port->user_packet_pool) {
			error = os_channel_slot_attach_packet(port->tx_ring,
			    slot, pkt);
			SKTC_ASSERT_ERR(error == 0);
		} else {
			prop.sp_len = frame_length;
			os_channel_set_slot_properties(port->tx_ring, slot,
			    &prop);
		}
		last_slot = slot;
		payload->packet_number++;
		if (payload->packet_number >= limit) {
			break;
		}
	}
	if (last_slot != NULL) {
		/* publish populated slots and kick a TX sync */
		error = os_channel_advance_slot(port->tx_ring, last_slot);
		SKTC_ASSERT_ERR(error == 0);
		error = os_channel_sync(port->chan, CHANNEL_SYNC_TX);
		SKTC_ASSERT_ERR(error == 0);
	}
}
392 
/*
 * Drain the RX ring: validate each frame, track how many packets were
 * received (`receive_count`), the next expected sequence number
 * (`receive_index`), and how many sequence numbers were skipped
 * (`pkts_dropped`).  Stops when the ring is empty or `limit` packets
 * have been accounted for.  With `errors_ok`, out-of-order/dropped
 * sequence numbers are tolerated; otherwise only forward gaps are.
 */
static void
skt_netif_channel_receive(int child, channel_port_t port, uint32_t limit,
    uint32_t *receive_count, uint32_t *receive_index, boolean_t errors_ok,
    uint32_t *pkts_dropped)
{
	int error;
	channel_slot_t last_slot = NULL;
	/* maximum frame we ever transmit; used to bound received lengths */
	int frame_length = SKT_ETH_IPV6_UDP_HDR_LEN + sizeof(netif_payload);

	assert(*receive_index < limit);
	*pkts_dropped = 0;

	while (1) {
		netif_payload           payload;
		slot_prop_t             prop;
		channel_slot_t          slot;
		packet_t                pkt;
		char                    *buf;
		uint16_t                bdoff, pkt_len;
		buflet_t                buflet;

		slot = os_channel_get_next_slot(port->rx_ring, last_slot,
		    &prop);
		if (slot == NULL) {
			break;
		}
		assert(prop.sp_buf_ptr != 0);

		pkt = os_channel_slot_get_packet(port->rx_ring, slot);
		assert(pkt != 0);
		if (port->user_packet_pool) {
			/* take ownership of the packet; freed below */
			error = os_channel_slot_detach_packet(port->rx_ring,
			    slot, pkt);
			SKTC_ASSERT_ERR(error == 0);
		}
		buflet = os_packet_get_next_buflet(pkt, NULL);
		assert(buflet != NULL);
		bdoff = os_buflet_get_data_offset(buflet);
		buf = os_buflet_get_object_address(buflet) + bdoff;
		pkt_len = os_packet_get_data_length(pkt);
		assert(buf == (void *)prop.sp_buf_ptr);
		assert(pkt_len == prop.sp_len);
		assert(pkt_len <= frame_length);
		(void) skt_netif_ipv6_udp_frame_process(pkt, &payload,
		    sizeof(payload));
#if SKT_NETIF_DIRECT_DEBUG
		T_LOG("\nPort %d received %d bytes:\n",
		    port->port, pkt_len);
#endif
		last_slot = slot;
		if (*receive_index != payload.packet_number) {
			/* sequence gap: without errors_ok only forward skips allowed */
			if (!errors_ok) {
				assert(payload.packet_number > *receive_index);
			}
			uint32_t        dropped;

			dropped = payload.packet_number - *receive_index;
			*pkts_dropped += dropped;
#if SKT_NETIF_DIRECT_DEBUG
			T_LOG(
				"child %d dropped %u (received #%u, expected #%u)\n",
				child, dropped, payload.packet_number,
				*receive_index);
#endif
			*receive_index = payload.packet_number;
		}
		if (port->user_packet_pool) {
			error = os_channel_packet_free(port->chan, pkt);
			SKTC_ASSERT_ERR(error == 0);
		}
		(*receive_count)++;
		(*receive_index)++;
		if (*receive_index == limit) {
			break;
		}
	}
	if (last_slot != NULL) {
		/* return consumed slots to the ring and sync RX */
		error = os_channel_advance_slot(port->rx_ring, last_slot);
		SKTC_ASSERT_ERR(error == 0);
		error = os_channel_sync(port->chan, CHANNEL_SYNC_RX);
		SKTC_ASSERT_ERR(error == 0);
	}
}
476 
/*
 * Core test loop: drive `how_many` packets of bidirectional UDP traffic
 * over the netif channel in batches of `batch_size`, using a kqueue to
 * multiplex TX readiness, RX readiness and (depending on `event_flags`)
 * EVFILT_NW_CHANNEL notifications for interface advisories and channel
 * (tx-status / tx-expired) events.  Asserts the expected event counts
 * on completion.
 */
static void
skt_netif_send_and_receive(channel_port_t port, uint16_t src_port,
    struct ether_addr *dst_mac, struct in6_addr *dst_ip, uint16_t dst_port,
    uint32_t how_many, uint32_t batch_size, int child, uint32_t event_flags,
    boolean_t ifadv_enabled)
{
	int             n_events, i, error;
#define N_EVENTS_MAX    4
	struct kevent   evlist[N_EVENTS_MAX];
	struct kevent   kev[N_EVENTS_MAX];
	int             kq;
	netif_payload   payload;
	double          percent;
	boolean_t       errors_ok = FALSE;
	uint32_t        receive_packet_count;
	uint32_t        receive_packet_index;
	boolean_t       rx_complete;
	boolean_t       tx_complete;
	struct timespec timeout;
	uint32_t        pkts_dropped;
	char            ip6_str[INET6_ADDRSTRLEN];
	uint32_t        n_ifadv_events = 0, n_total_chan_events = 0;
	/* __block: these counters are mutated inside the event-handler blocks */
	uint32_t        __block n_tx_expired_chan_events = 0, __block n_tx_status_chan_events = 0;
	uint64_t        __block total_tx_exp_notif_delay = 0, __block total_tx_exp_prop_delay = 0;

	assert(inet_ntop(AF_INET6, dst_ip, ip6_str, INET6_ADDRSTRLEN) != NULL);
	T_LOG("Sending to %s:%d\n", ip6_str, dst_port);
	/* fill the payload body with a fixed 0..255 byte pattern */
	for (i = 0; i < sizeof(payload.data); i++) {
		payload.data[i] = (uint8_t)(i & 0xff);
	}
	payload.packet_number = 0;
	kq = kqueue();
	assert(kq != -1);
	rx_complete = tx_complete = FALSE;
	receive_packet_count = 0;
	receive_packet_index = 0;
	EV_SET(kev + 0, port->fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, 0, 0, NULL);
	EV_SET(kev + 1, port->fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
	n_events = 2;
	if ((event_flags & SKT_NETIF_DIRECT_EVFLAG_IFADV) != 0) {
		assert(n_events < N_EVENTS_MAX);
		EV_SET(kev + n_events, port->fd, EVFILT_NW_CHANNEL,
		    EV_ADD | EV_ENABLE, NOTE_IF_ADV_UPD, 0, NULL);
		n_events++;
	}
	if ((event_flags & SKT_NETIF_DIRECT_EVFLAG_CHANNEL) != 0) {
		assert(n_events < N_EVENTS_MAX);
		EV_SET(kev + n_events, port->fd, EVFILT_NW_CHANNEL,
		    EV_ADD | EV_ENABLE, NOTE_CHANNEL_EVENT, 0, NULL);
		n_events++;
		/* channel-event tests induce drops, so sequence gaps are OK */
		errors_ok = TRUE;
	}
	error = kevent(kq, kev, n_events, NULL, 0, NULL);
	SKTC_ASSERT_ERR(error == 0);
	timeout.tv_sec = NETIF_TXRX_TIMEOUT_SECS;
	timeout.tv_nsec = NETIF_TXRX_TIMEOUT_NSECS;
	/*
	 * NOTE(review): the loop exits as soon as EITHER direction completes
	 * (&& rather than ||); any RX still pending when TX finishes first is
	 * not drained here.  The drop-tolerant accounting below appears to
	 * rely on this — confirm it is intentional before changing.
	 */
	while (!rx_complete && !tx_complete) {
		/* wait for TX/RX/Channel events to become available */
		error = kevent(kq, NULL, 0, evlist, N_EVENTS_MAX, &timeout);
		if (error <= 0) {
			if (errno == EAGAIN) {
				continue;
			}
			SKTC_ASSERT_ERR(error == 0);
		}
		if (error == 0) {
			/* missed seeing last few packets */
			if (!errors_ok) {
				T_LOG("child %d: timed out, TX %s "
				    "RX %s\n", child,
				    tx_complete ? "complete" :"incomplete",
				    rx_complete ? "complete" :"incomplete");
			}
			break;
		}
		for (int i = 0; i < error; i++) {
			if (evlist[i].flags & EV_ERROR) {
				/* per-event error code is carried in data */
				int err = evlist[i].data;

				T_LOG("child %d: ev_filter %d, "
				    "flags 0x%u fflags 0x%u data %"
				    PRIxPTR "\n", child, evlist[i].filter,
				    evlist[i].flags, evlist[i].fflags,
				    evlist[i].data);
				if (err == EAGAIN) {
					break;
				}
				SKTC_ASSERT_ERR(err == 0);
			}

			switch (evlist[i].filter) {
			case EVFILT_NW_CHANNEL: {
				if ((evlist[i].fflags & NOTE_IF_ADV_UPD)
				    != 0) {
					skt_process_if_adv(port->port, port->chan);
					n_ifadv_events++;
				}
				if ((evlist[i].fflags & NOTE_CHANNEL_EVENT)
				    != 0) {
					/* dispatch tx-status and tx-expired sub-events */
					skt_process_channel_event(port->chan,
					    NETIF_PKTID_PAYLOAD_TYPE, NETIF_PKTID_STREAM_ID,
					    ^(const os_channel_event_packet_transmit_status_t *pkt_ev) {
							n_tx_status_chan_events++;
							assert(pkt_ev->packet_status ==
							CHANNEL_EVENT_PKT_TRANSMIT_STATUS_ERR_RETRY_FAILED);
						},
					    ^(const os_channel_event_packet_transmit_expired_t *pkt_ev) {
							int64_t exp_notif_delay, exp_prop_delay;
							assert(
								(pkt_ev->packet_tx_expiration_status ==
								CHANNEL_EVENT_PKT_TRANSMIT_EXPIRED_ERR_EXPIRED_DROPPED) ||
								(pkt_ev->packet_tx_expiration_status ==
								CHANNEL_EVENT_PKT_TRANSMIT_EXPIRED_ERR_EXPIRED_NOT_DROPPED));
							n_tx_expired_chan_events++;

							/* notif delay: expiry timestamp -> now; prop delay: deadline -> expiry timestamp */
							exp_notif_delay = mach_absolute_time() - pkt_ev->packet_tx_expiration_timestamp;
							total_tx_exp_notif_delay += exp_notif_delay;
							exp_prop_delay = pkt_ev->packet_tx_expiration_timestamp - pkt_ev->packet_tx_expiration_deadline;
							total_tx_exp_prop_delay += exp_prop_delay;
#if SKT_COMMON_DEBUG
							T_LOG("tx_expired_event=%p "
							"code=%u "
							"deadline=%llu "
							"ts=%llu "
							"exp=%lldm %lldns "
							"notif=%lldm %lldns\n",
							pkt_ev,
							pkt_ev->packet_tx_expiration_status_code,
							pkt_ev->packet_tx_expiration_deadline,
							pkt_ev->packet_tx_expiration_timestamp,
							exp_prop_delay, SKT_NETIF_TIMESTAMP_MACH_TO_NS(exp_prop_delay),
							exp_notif_delay, SKT_NETIF_TIMESTAMP_MACH_TO_NS(exp_notif_delay));
#endif /* SKT_COMMON_DEBUG */
						},
					    NULL);
				}
				n_total_chan_events++;
				break;
			}
			case EVFILT_READ: {
				skt_netif_channel_receive(child, port,
				    how_many, &receive_packet_count,
				    &receive_packet_index, errors_ok,
				    &pkts_dropped);
				if (receive_packet_index >= how_many) {
					assert(receive_packet_index
					    == how_many);
#if SKT_NETIF_DIRECT_DEBUG
					T_LOG(
						"child %d: disable RX\n",
						child);
#endif
					/* all expected packets seen: stop watching RX */
					EV_SET(kev, port->fd, EVFILT_READ,
					    EV_DELETE, 0, 0, NULL);
					error = kevent(kq, kev, 1,
					    NULL, 0, NULL);
					SKTC_ASSERT_ERR(error == 0);
					rx_complete = TRUE;
				}
				break;
			}
			case EVFILT_WRITE: {
				uint32_t next_batch;
				void (^packet_prehook)(packet_t p) = NULL;
				if (event_flags & SKT_NETIF_DIRECT_EVFLAG_EXPIRY) {
					/* stamp each packet with a near-immediate expire time */
					packet_prehook = ^(packet_t pkt) {
						uint64_t packet_expire_time = mach_absolute_time() + expiration_deadline_mach;
						SKTC_ASSERT_ERR(os_packet_set_expire_time(pkt, packet_expire_time) == 0);
					};
				}
				next_batch = payload.packet_number + batch_size;
				if (next_batch > how_many) {
					next_batch = how_many;
				}
				skt_netif_channel_send(port, src_port, dst_mac,
				    dst_ip, dst_port, &payload, sizeof(payload),
				    next_batch, packet_prehook);
				if (payload.packet_number >= how_many) {
					assert(payload.packet_number
					    == how_many);
					T_LOG(
						"TX child %d: completed %u\n",
						child, how_many);
					tx_complete = TRUE;
#if SKT_NETIF_DIRECT_DEBUG
					T_LOG(
						"child %d: disable TX\n",
						child);
#endif
					EV_SET(kev,
					    port->fd, EVFILT_WRITE,
					    EV_DELETE, 0, 0, NULL);
					error = kevent(kq, kev, 1,
					    NULL, 0, NULL);
					SKTC_ASSERT_ERR(error == 0);
				}
				/* yield for the peer thread to read */
				usleep(1);
				break;
			}
			default:
				T_LOG("%lu event %d?\n",
				    evlist[i].ident,
				    evlist[i].filter);
				assert(0);
				break;
			}
		}
	}
	percent = 1.0 * receive_packet_count / how_many * 100.0;
	T_LOG("RX child %d: received %u (of %u) %1.02f%%\n",
	    child, receive_packet_count, how_many, percent);
	T_LOG("child %d: received %u ifadv events, %u chan events\n",
	    child, n_ifadv_events, n_total_chan_events);
	T_LOG("child %d: received %u tx status events, %u tx expired events %u total events\n",
	    child, n_tx_status_chan_events, n_tx_expired_chan_events, n_total_chan_events);

	if (n_tx_expired_chan_events > 0) {
		int64_t total_tx_exp_notif_delay_ns = SKT_NETIF_TIMESTAMP_MACH_TO_NS(total_tx_exp_notif_delay);
		int64_t total_tx_exp_prop_delay_ns = SKT_NETIF_TIMESTAMP_MACH_TO_NS(total_tx_exp_prop_delay);
		int64_t avg_tx_exp_prop_delay_ns = total_tx_exp_prop_delay_ns / n_tx_expired_chan_events;
		int64_t avg_tx_notif_delay_ns = total_tx_exp_notif_delay_ns / n_tx_expired_chan_events;
		T_LOG("child %d: expiration notification delay avg: %llu total: %llu; "
		    "expiration propagation delay avg: %llu total: %llu\n",
		    child, avg_tx_notif_delay_ns, total_tx_exp_notif_delay_ns,
		    avg_tx_exp_prop_delay_ns, total_tx_exp_prop_delay_ns);
	}

	if (!errors_ok) {
		assert(receive_packet_count > 0);
	}
	if ((event_flags & SKT_NETIF_DIRECT_EVFLAG_IFADV) != 0) {
		if (ifadv_enabled) {
			assert(n_ifadv_events != 0);
		} else {
			assert(n_ifadv_events == 0);
		}
	}
	/*
	 * If we are testing expiry events, we may face the possibility
	 * that all events were sent within the
	 * SKT_NETIF_DIRECT_TEST_EXPIRY_DEADLINE_NS interval,
	 * and therefore no expiry events have arrived.
	 * For this reason, the check for expiry events
	 * is first checking whether any event was received at all.
	 *
	 * On the other hand, the transmission status events
	 * are more deterministic, and we are not predicating the check.
	 */
	if ((event_flags & SKT_NETIF_DIRECT_EVFLAG_CHANNEL) != 0) {
		if ((event_flags & SKT_NETIF_DIRECT_EVFLAG_EXPIRY) != 0) {
			/*
			 * Check whether any events were received at all.
			 */
			if (n_total_chan_events) {
				/* We expect expiry events. */
				assert(n_tx_expired_chan_events != 0);
			}
		} else {
			/*
			 * More stringent testing for transmission status events.
			 */
			assert(n_total_chan_events != 0);
			assert(n_tx_status_chan_events != 0);
		}
	}
	close(kq);
}
745 
/*
 * Per-child test body.  Child 0 uses feth0, child 1 uses feth1; each
 * opens a direct channel on its interface's netif nexus, synchronizes
 * with the other process over MPTEST_SEQ_FILENO (write one byte, then
 * block reading the go signal), configures the per-test event options,
 * and runs the send/receive loop.  Returns 0 on success, 1 on
 * handshake I/O failure.
 */
int
skt_netifdirect_xfer_ipv6(int child, uint32_t test_id)
{
	struct ether_addr our_mac, peer_mac;
	struct in6_addr our_ip6, peer_ip6;
	boolean_t ifadv_enabled = FALSE;
	uint16_t our_port, peer_port;
	uint32_t event_flags = 0;
	uuid_string_t uuidstr;
	nexus_port_t nx_port;
	char buf[1] = { 0 };
	channel_port port;
	char *ifname;
	ssize_t ret;

	if (child == 0) {
		ifname = FETH0_NAME;
		sktc_get_mac_addr(FETH0_NAME, our_mac.octet);
		sktc_get_mac_addr(FETH1_NAME, peer_mac.octet);
		sktc_feth0_inet6_addr(&our_ip6);
		sktc_feth1_inet6_addr(&peer_ip6);
		our_port = FETH0_UDP_PORT;
		peer_port = FETH1_UDP_PORT;
	} else {
		assert(child == 1);
		ifname = FETH1_NAME;
		sktc_get_mac_addr(FETH1_NAME, our_mac.octet);
		sktc_get_mac_addr(FETH0_NAME, peer_mac.octet);
		sktc_feth1_inet6_addr(&our_ip6);
		sktc_feth0_inet6_addr(&peer_ip6);
		our_port = FETH1_UDP_PORT;
		peer_port = FETH0_UDP_PORT;
	}

	skt_setup_netif_with_ipv6_flow(&handles, ifname, &our_ip6, &peer_ip6,
	    &nx_port);
	/* event tests need the user packet pool (third flag) enabled */
	sktu_channel_port_init(&port, handles.netif_nx_uuid,
	    nx_port, true,
	    (test_id == SKT_NETIF_DIRECT_TEST_CHANNEL_EVENTS || test_id == SKT_NETIF_DIRECT_TEST_EXPIRY_EVENTS) ? true : false,
	    false);
	assert(port.chan != NULL);

	/* signal readiness to the test driver */
	if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
		SKT_LOG("write fail: %s\n", strerror(errno));
		return 1;
	}
	assert(ret == 1);
#if SKT_NETIF_DIRECT_DEBUG
	T_LOG("child %d signaled\n", child);
#endif

	/* Wait for go signal */
	if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
		SKT_LOG("read fail: %s\n", strerror(errno));
		return 1;
	}
	assert(ret == 1);
	port.ip6_addr = our_ip6;
	port.mac_addr = our_mac;

	uuid_unparse(handles.netif_nx_uuid, uuidstr);
	T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
	fflush(stderr);
	T_LOG("child %d: Test Start\n", child);
	/* translate the test id into event flags / advisory configuration */
	switch (test_id) {
	case SKT_NETIF_DIRECT_TEST_TXRX: {
		break;
	}
	case SKT_NETIF_DIRECT_TEST_IF_ADV_ENABLED: {
		event_flags |= SKT_NETIF_DIRECT_EVFLAG_IFADV;
		assert(os_channel_configure_interface_advisory(port.chan, TRUE) == 0);
		ifadv_enabled = TRUE;
		break;
	}
	case SKT_NETIF_DIRECT_TEST_IF_ADV_DISABLED: {
		event_flags |= SKT_NETIF_DIRECT_EVFLAG_IFADV;
		assert(os_channel_configure_interface_advisory(port.chan, FALSE) == 0);
		break;
	}
	case SKT_NETIF_DIRECT_TEST_CHANNEL_EVENTS: {
		event_flags |= SKT_NETIF_DIRECT_EVFLAG_CHANNEL;
		break;
	}
	case SKT_NETIF_DIRECT_TEST_EXPIRY_EVENTS: {
		event_flags |= (SKT_NETIF_DIRECT_EVFLAG_EXPIRY | SKT_NETIF_DIRECT_EVFLAG_CHANNEL);
		break;
	}
	default:
		T_LOG("unknown test id %d\n", test_id);
		assert(0);
		break;
	}
	skt_netif_send_and_receive(&port, our_port, &peer_mac, &peer_ip6,
	    peer_port, NETIF_TXRX_PACKET_COUNT, NETIF_TXRX_BATCH_COUNT,
	    child, event_flags, ifadv_enabled);
	return 0;
}
843 
/*
 * Test entry point invoked by the skywalk test driver.
 * argv[3] must be "--child"; argv[4] is the child index (0 or 1) and
 * argv[5] the test id.  Returns the child's exit status.
 *
 * Fixes: validate argc before indexing argv, and propagate the return
 * value of skt_netifdirect_xfer_ipv6() (it was previously discarded,
 * so handshake failures reported success).
 */
static int
skt_netifdirect_main(int argc, char *argv[])
{
	int child, test_id;

	assert(argc >= 6);
	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);
	test_id = atoi(argv[5]);

	init_expiration_deadline_mach();

	return skt_netifdirect_xfer_ipv6(child, test_id);
}
858 
/* previous value of kern.skywalk.disable_nxctl_check; restored in fini */
static uint32_t skt_netif_nxctl_check;
/*
 * Disable the nexus-controller privilege check (saving the old sysctl
 * value) and create a native fake-ethernet pair with user access, low
 * latency and logical-link support.
 */
static void
skt_netifdirect_init_native_user_access(void)
{
	uint32_t nxctl_check = 1;
	size_t len = sizeof(skt_netif_nxctl_check);

	assert(sysctlbyname("kern.skywalk.disable_nxctl_check",
	    &skt_netif_nxctl_check, &len,
	    &nxctl_check, sizeof(nxctl_check)) == 0);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE |
	    FETH_FLAGS_USER_ACCESS | FETH_FLAGS_LOW_LATENCY | FETH_FLAGS_LLINK);
}
872 
static void
skt_netifdirect_init_native_user_access_splitpools(void)
{
	uint32_t nxctl_check = 1;
	size_t len = sizeof(skt_netif_nxctl_check);
	int ret;

	/*
	 * Same setup as skt_netifdirect_init_native_user_access(), but the
	 * feth pair additionally uses non-shared split rx/tx pools.  The
	 * sysctl call is kept outside assert() so it survives NDEBUG builds.
	 */
	ret = sysctlbyname("kern.skywalk.disable_nxctl_check",
	    &skt_netif_nxctl_check, &len,
	    &nxctl_check, sizeof(nxctl_check));
	assert(ret == 0);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE |
	    FETH_FLAGS_USER_ACCESS | FETH_FLAGS_LOW_LATENCY |
	    FETH_FLAGS_LLINK | FETH_FLAGS_NONSHAREDSPLITPOOLS);
}
886 
static void
skt_netifdirect_init_ifadv(void)
{
	int intvl = NETIF_IFADV_INTERVAL; /* in milliseconds */
	int ret;

	/*
	 * Enable periodic interface advisories on the fake ethernet driver.
	 * Call hoisted out of assert() so it is not dropped under NDEBUG.
	 */
	ret = sysctlbyname("net.link.fake.if_adv_intvl",
	    NULL, 0, &intvl, sizeof(intvl));
	assert(ret == 0);
	skt_netifdirect_init_native_user_access();
}
895 
static void
skt_netifdirect_init_chan_events(void)
{
	int drops = NETIF_TX_PKT_DROP_RATE;
	int ret;

	/*
	 * Make the fake driver drop TX packets at the configured rate so the
	 * test can observe channel (drop) events.  Call hoisted out of
	 * assert() so it is not dropped under NDEBUG.
	 */
	ret = sysctlbyname("net.link.fake.tx_drops",
	    NULL, 0, &drops, sizeof(drops));
	assert(ret == 0);
	skt_netifdirect_init_native_user_access();
}
904 
static void
skt_netifdirect_init_expiry_events(void)
{
	int tx_exp_policy = 1; /* IFF_TX_EXP_POLICY_DROP_AND_NOTIFY */
	int ret;

	/*
	 * Have the fake driver drop expired TX packets and notify, so the
	 * test can observe expiry events.  Call hoisted out of assert() so
	 * it is not dropped under NDEBUG.
	 */
	ret = sysctlbyname("net.link.fake.tx_exp_policy",
	    NULL, 0, &tx_exp_policy, sizeof(tx_exp_policy));
	assert(ret == 0);
	skt_netifdirect_init_native_user_access();
}
913 
static void
skt_netifdirect_fini(void)
{
	int ret;

	/*
	 * Restore the nxctl check to the value saved by the init routine and
	 * tear down the feth pair.  The sysctl call is hoisted out of
	 * assert() so the restore still happens under NDEBUG.
	 */
	ret = sysctlbyname("kern.skywalk.disable_nxctl_check",
	    NULL, NULL,
	    &skt_netif_nxctl_check, sizeof(skt_netif_nxctl_check));
	assert(ret == 0);
	sktc_ifnet_feth_pair_destroy();
}
922 
static void
skt_netifdirect_fini_ifadv(void)
{
	int intvl = 0; /* disable */
	int ret;

	/*
	 * Turn interface advisories back off, then run the common teardown.
	 * Call hoisted out of assert() so it is not dropped under NDEBUG.
	 */
	ret = sysctlbyname("net.link.fake.if_adv_intvl",
	    NULL, 0, &intvl, sizeof(intvl));
	assert(ret == 0);
	skt_netifdirect_fini();
}
931 
static void
skt_netifdirect_fini_chan_events(void)
{
	int drops = 0; /* stop injecting TX drops */
	int ret;

	/*
	 * Reset the fake driver's TX drop rate, then run the common teardown.
	 * Call hoisted out of assert() so it is not dropped under NDEBUG.
	 */
	ret = sysctlbyname("net.link.fake.tx_drops",
	    NULL, 0, &drops, sizeof(drops));
	assert(ret == 0);
	skt_netifdirect_fini();
}
940 
static void
skt_netifdirect_fini_expiry_events(void)
{
	/*
	 * 0 disables the expiry policy (counterpart of the value 1 set in
	 * skt_netifdirect_init_expiry_events()).
	 */
	int tx_exp_policy = 0; /* IFF_TX_EXP_POLICY_DISABLED */
	int ret;

	/* Call hoisted out of assert() so it is not dropped under NDEBUG. */
	ret = sysctlbyname("net.link.fake.tx_exp_policy",
	    NULL, 0, &tx_exp_policy, sizeof(tx_exp_policy));
	assert(ret == 0);
	skt_netifdirect_fini();
}
949 
static void
skt_netifdirect_init_native_copy_mode(void)
{
	uint32_t nxctl_check = 1;
	size_t len = sizeof(skt_netif_nxctl_check);
	int ret;

	/*
	 * Like skt_netifdirect_init_native_user_access() but without
	 * FETH_FLAGS_USER_ACCESS, forcing copy mode.  The sysctl call is
	 * hoisted out of assert() so it is not dropped under NDEBUG.
	 */
	ret = sysctlbyname("kern.skywalk.disable_nxctl_check",
	    &skt_netif_nxctl_check, &len,
	    &nxctl_check, sizeof(nxctl_check));
	assert(ret == 0);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_LOW_LATENCY |
	    FETH_FLAGS_LLINK);
}
962 
/*
 * Basic netif direct TX/RX test.  The argv slot at index 5 carries the
 * test id parsed by skt_netifdirect_main(); earlier slots are presumably
 * filled in by the mptest framework — TODO confirm against the framework.
 */
struct skywalk_mptest skt_netifdirecttxrx = {
	"netifdirecttxrx",
	"netif direct send receive test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG,
	2, skt_netifdirect_main,
	{ NULL, NULL, NULL, NULL, NULL, STR(SKT_NETIF_DIRECT_TEST_TXRX)},
	skt_netifdirect_init_native_user_access, skt_netifdirect_fini, {},
};
971 
/*
 * Same TX/RX test as skt_netifdirecttxrx, but the feth pair is created
 * without FETH_FLAGS_USER_ACCESS (copy mode).
 */
struct skywalk_mptest skt_netifdirecttxrxcopymode = {
	"netifdirecttxrxcopymode",
	"netif direct send receive in copy mode test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG,
	2, skt_netifdirect_main,
	{ NULL, NULL, NULL, NULL, NULL, STR(SKT_NETIF_DIRECT_TEST_TXRX)},
	skt_netifdirect_init_native_copy_mode, skt_netifdirect_fini, {},
};
980 
/*
 * TX/RX test variant using non-shared split rx/tx packet pools
 * (FETH_FLAGS_NONSHAREDSPLITPOOLS in its init routine).
 */
struct skywalk_mptest skt_netifdirecttxrxsp = {
	"netifdirecttxrxsp",
	"netif direct send receive test with split rx/tx pools",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG,
	2, skt_netifdirect_main,
	{ NULL, NULL, NULL, NULL, NULL, STR(SKT_NETIF_DIRECT_TEST_TXRX)},
	skt_netifdirect_init_native_user_access_splitpools,
	skt_netifdirect_fini, {},
};
990 
/*
 * Interface-advisory test with advisories enabled on the channel; the
 * init routine turns on the fake driver's periodic advisory interval.
 */
struct skywalk_mptest skt_netifdirectifadvenable = {
	"netifdirectifadvenable",
	"netif interface advisory enabled test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG,
	2, skt_netifdirect_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_NETIF_DIRECT_TEST_IF_ADV_ENABLED)},
	skt_netifdirect_init_ifadv, skt_netifdirect_fini_ifadv, {},
};
1000 
/*
 * Counterpart of skt_netifdirectifadvenable: advisories are configured
 * off on the channel (FALSE in skt_netifdirect_xfer_ipv6's switch).
 */
struct skywalk_mptest skt_netifdirectifadvdisable = {
	"netifdirectifadvdisable",
	"netif interface advisory disabled test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG,
	2, skt_netifdirect_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_NETIF_DIRECT_TEST_IF_ADV_DISABLED)},
	skt_netifdirect_init_ifadv, skt_netifdirect_fini_ifadv, {},
};
1010 
/*
 * Channel-events test: the init routine configures TX packet drops in
 * the fake driver so drop events are generated on the channel.
 */
struct skywalk_mptest skt_netifdirectchanevents = {
	"netifdirectchanevents",
	"netif interface channel events test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG,
	2, skt_netifdirect_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_NETIF_DIRECT_TEST_CHANNEL_EVENTS)},
	skt_netifdirect_init_chan_events, skt_netifdirect_fini_chan_events, {},
};
1020 
/*
 * Expiry-events test: the init routine sets the fake driver's TX expiry
 * policy to drop-and-notify so expiry (and channel) events fire.
 */
struct skywalk_mptest skt_netifdirectexpiryevents = {
	"netifdirectexpiryevents",
	"netif interface expiry events test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG,
	2, skt_netifdirect_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_NETIF_DIRECT_TEST_EXPIRY_EVENTS)},
	skt_netifdirect_init_expiry_events, skt_netifdirect_fini_expiry_events, {},
};
1030