1 /*
2 * Copyright (c) 2017-2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* -*- Mode: c; tab-width: 8; indent-tabs-mode: 1; c-basic-offset: 8; -*- */
30
31 #include <stddef.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <unistd.h>
35 #include <assert.h>
36 #include <stdbool.h>
37 #include <sys/event.h>
38 #include <sys/ioctl.h>
39 #include <sys/sockio.h>
40 #include <sys/sysctl.h>
41 #include <net/if.h>
42 #include <netinet/in.h>
43 #include <netinet/ip.h>
44 #include <netinet/udp.h>
45 #include <netinet/tcp.h>
46 #include <TargetConditionals.h>
47 #include <arpa/inet.h>
48 #include <mach/mach.h>
49 #include <mach/mach_time.h>
50 #include <os/log.h>
51 #include <darwintest.h>
52 #include "skywalk_test_driver.h"
53 #include "skywalk_test_utils.h"
54 #include "skywalk_test_common.h"
55
56 #define STR(x) _STR(x)
57 #define _STR(x) #x
58
59 #define ENABLE_UPP true /* channel in user packet pool mode */
60
61 #define OUR_FLOWSWITCH_PORT (NEXUS_PORT_FLOW_SWITCH_CLIENT + 1)
62
63 #define FETH0_PORT 0x1234
64 #define FETH1_PORT 0x5678
65
66 #if TARGET_OS_WATCH
67 #define XFER_TXRX_PACKET_COUNT 20000
68 #define XFER_TXRX_BATCH_COUNT 4
69 #define XFER_TXRX_TIMEOUT_SECS 0
70 #define XFER_TXRX_TIMEOUT_NSECS (100 * 1000 * 1000)
71
72 #define XFER_PING_PACKET_COUNT 10000
73 #define XFER_PING_BATCH_COUNT 64
74 #define XFER_PING_TIMEOUT_SECS 4
75 #define XFER_PING_TIMEOUT_NSECS (500 * 1000 * 1000)
76 #define XFER_PING_CHILD1_RX_TIMEOUT_SECS 4
77 #define XFER_PING_CHILD1_RX_TIMEOUT_NSECS 0
78 #define XFER_PING_FADV_TIMEOUT_SECS 2
79 #define XFER_PING_FADV_TIMEOUT_NSECS 0
80
81 #else /* TARGET_OS_WATCH */
82 #define XFER_TXRX_PACKET_COUNT (250 * 1000)
83 #define XFER_TXRX_BATCH_COUNT 8
84 #define XFER_TXRX_TIMEOUT_SECS 0
85 #define XFER_TXRX_TIMEOUT_NSECS (100 * 1000 * 1000)
86
87 #define XFER_PING_PACKET_COUNT (125 * 1000)
88 #define XFER_PING_BATCH_COUNT 128
89 #define XFER_PING_TIMEOUT_SECS 4
90 #define XFER_PING_TIMEOUT_NSECS (500 * 1000 * 1000)
91 #define XFER_PING_CHILD1_RX_TIMEOUT_SECS 4
92 #define XFER_PING_CHILD1_RX_TIMEOUT_NSECS 0
93 #define XFER_PING_FADV_TIMEOUT_SECS 2
94 #define XFER_PING_FADV_TIMEOUT_NSECS 0
95 #endif /* TARGET_OS_WATCH */
96
97 #define XFER_IFADV_INTERVAL 30
98 #define XFER_TXRX_PACKET_COUNT_LONG (XFER_TXRX_PACKET_COUNT * 10)
99 #define XFER_PING_PACKET_COUNT_LONG (XFER_PING_PACKET_COUNT * 5)
100 #define XFER_AQM_PING_BATCH_COUNT 8
101 #define XFER_AQM_PING_PACKET_COUNT (XFER_AQM_PING_BATCH_COUNT * 4)
102 #define XFER_CLASSQ_UPDATE_INTERVAL 100 /* milliseconds */
103 /*
104 * Delay to wait for the classq update interval to elapse.
105 * Some extra time is added on top of the update interval to account for timer drift.
106 */
107 #define XFER_CLASSQ_UPDATE_INTERVAL_ELAPSE_DELAY \
108 (XFER_CLASSQ_UPDATE_INTERVAL + 500) /* milliseconds */
109
110 #define XFER_TXRX_PACKET_COUNT_SHORT (XFER_TXRX_PACKET_COUNT / 10)
111
112 /*
113 * For the overwhelm transfer tests we try to send a batch of packets
114 * that is larger than the ring size.
115 */
116 #define XFER_TXRX_OVERWHELM_BATCH_COUNT 2048
117 #define XFER_TXRX_OVERWHELM_FSW_TX_RING_SIZE \
118 (XFER_TXRX_OVERWHELM_BATCH_COUNT / 2)
119 #define XFER_TXRX_OVERWHELM_FSW_RX_RING_SIZE \
120 XFER_TXRX_OVERWHELM_FSW_TX_RING_SIZE
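/*
 * With a batch of 2048 packets and flowswitch TX/RX rings of half that
 * size (1024 slots), every batch necessarily exceeds ring capacity,
 * which is the overload condition these tests exercise.
 */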
121
122 #define XFER_TXRX_MULTI_BUFLET_BUF_SIZE 512
123 #define XFER_TXRX_MULTI_BUFLET_MAX_FRAGS 4 /* packet size = 2048 */
124
125 #define XFER_RECV_END_PAYLOAD "DEADBEEF" /* receiver end payload */
126 #define XFER_QOSMARKING_FASTLANE_PREFIX "FASTLANE."
127 #define XFER_QOSMARKING_RFC4594_PREFIX "RFC4594."
128
129 #define XFER_TX_PKT_DROP_RATE 100
130
131 /* dummy packet identifier constants */
132 #define XFER_PKTID_PAYLOAD_TYPE 0xFA
133 #define XFER_PKTID_STREAM_ID 0xFB
134
135 static struct sktc_nexus_handles handles;
136 static uint32_t inject_error_rmask;
137 static uint32_t skt_disable_nxctl_check;
138
139 #define INJECT_CODE_IDX_MAX 2
140 struct fsw_inject_codes {
141 int ic_code;
142 uint32_t ic_rmask;
143 int ic_stat_idx[INJECT_CODE_IDX_MAX];
144 };
145 #define IC_RMASK_UNSPEC (-1)
146
147 #define _S1(code, a) {(code), IC_RMASK_UNSPEC, {(a), -1}}
148 #define _S2(code, a, b) {(code), IC_RMASK_UNSPEC, {(a), (b)}}
149 #define _S3(code, a, b, c) {(code), a, {(b), (c)}}
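/*
 * _S1/_S2 build an entry with an unspecified response mask and one or
 * two flowswitch stat indices (unused slots stay -1); _S3 additionally
 * takes an explicit response mask as its second argument.
 */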
150
151 static const struct fsw_inject_codes fsw_inject_codes[] = {
152 /* flow_pkt_classify() returns ENXIO */
153 _S1(1, FSW_STATS_RX_FLOW_EXTRACT_ERR),
154
155 /* ms_copy_to_dev_mbuf() sets mbuf to NULL */
156 /*_S2(11, FSW_STATS_DROP, FSW_STATS_DROP_NOMEM_MBUF), */
157
158 /* ms_copy_to_dev_pkt() sets pkt to NULL */
159 _S2(12, FSW_STATS_DROP, FSW_STATS_DROP_NOMEM_PKT),
160
161 /* ms_dev_output() QP_PACKET sets pkt_drop to TRUE */
162 _S2(14, FSW_STATS_DROP, FSW_STATS_TX_AQM_DROP),
163
164 /*
165 * Can result in a later kernel panic when the nexus is closed
166 * so do not use for now.
167 */
168
169 /* fsw_ms_user_port_flush() spkt->pkt_qum_flags set to
170 * (spkt->pkt_qum_flags | QUMF_DROPPED) */
171 _S1(20, FSW_STATS_DROP),
172
173 /* fsw_ms_user_port_flush() is_frag TRUE */
174 /*_S1(21, FSW_STATS_DROP), */
175
176 /*
177 * 31 Triggers a kernel assertion. Do not use.
178 * 32 only makes sense if 31 is also enabled.
179 */
180 /* ms_lookup() fakes flow entry not found */
181 /*_S1(31, FSW_STATS_TXLOOKUP_NOMATCH), */
182 /* ms_lookup() fakes NULL host_na */
183 /*_S1(32, FSW_STATS_HOST_NOT_ATTACHED), */
184
185 /*
186 * 33 to 43 apply to outbound (to device) or inbound-to-legacy-stack
187 * paths, so they cannot (yet) be tested. Some of them can also trigger
188 * kernel assertions.
189 */
190
191 /* fsw_resolve() returns EJUSTRETURN */
192 _S1(35, FSW_STATS_TX_RESOLV_PENDING),
193
194 /* fsw_resolve() returns error other than EJUSTRETURN but flow route has stale entry */
195 _S1(36, FSW_STATS_TX_RESOLV_STALE),
196 #if 0
197 /* ms_lookup() fails to track packet */
198 _S2(33, FSW_STATS_RXLOOKUP_TRACKERR, FSW_STATS_TXLOOKUP_TRACKERR),
199 /* ms_lookup() wrong uuid. */
200 _S2(34, FSW_STATS_RXLOOKUP_INVALID_ID, FSW_STATS_TXLOOKUP_INVALID_ID),
201
202 /* ms_dev_port_flush_enqueue_dst() kr_space_avail to zero. */
203 _S1(40, FSW_STATS_DST_KRSPACE_DROP),
204
205 /* ms_dev_port_flush_enqueue_dst() n (needed) to zero. */
206 _S1(41, FSW_STATS_DROP),
207
208 /* ms_dev_port_flush_enqueue_dst() fake pp_alloc_packet_batch()
209 * returning ENOMEM. */
210 _S1(42, FSW_STATS_NOMEM_PKT),
211
212 /* ms_dev_port_flush_enqueue_dst() fake ms_copy_packet_from_dev()
213 * returning EINVAL. */
214 _S1(43, FSW_STATS_DROP)
215 #endif
216 };
217 #define INJECT_CODE_COUNT (sizeof(fsw_inject_codes) / \
218 sizeof(fsw_inject_codes[0]))
219
220 static packet_svc_class_t packet_svc_class[] =
221 {
222 PKT_SC_BK_SYS,
223 PKT_SC_BK,
224 PKT_SC_BE,
225 PKT_SC_RD,
226 PKT_SC_OAM,
227 PKT_SC_AV,
228 PKT_SC_RV,
229 PKT_SC_VI,
230 PKT_SC_VO,
231 PKT_SC_CTL
232 };
233
234 #define NUM_SVC_CLASS \
235 (sizeof (packet_svc_class) / sizeof (packet_svc_class[0]))
236 #define XFER_WMM_PING_BATCH_COUNT 8
237 #define XFER_WMM_PING_PACKET_COUNT \
238 (XFER_WMM_PING_BATCH_COUNT * NUM_SVC_CLASS)
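/*
 * With the table above, the WMM ping budget works out to
 * 8 * 10 = 80 packets, i.e. one batch's worth per service class.
 */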
239
240 /* test identifiers for flowswitch event tests */
241 #define SKT_FSW_EVENT_TEST_NONE 0
242 #define SKT_FSW_EVENT_TEST_IF_ADV_ENABLED 1
243 #define SKT_FSW_EVENT_TEST_IF_ADV_DISABLED 2
244 #define SKT_FSW_EVENT_TEST_CHANNEL_EVENTS 3
245
246 /* flowswitch xfer test event flags */
247 #define SKT_FSW_EVFLAG_IFADV 0x1
248 #define SKT_FSW_EVFLAG_CHANNEL 0x2
249
250 /* test identifiers for ping-pong tests */
251 #define SKT_FSW_PING_PONG_TEST_DEFAULT 0
252 #define SKT_FSW_PING_PONG_TEST_LOW_LATENCY 1
253 #define SKT_FSW_PING_PONG_TEST_MULTI_LLINK 2
254 /****************************************************************/
255
256 /* Parent-child tests */
257 #define CHILD_FLOWSWITCH_PORT (OUR_FLOWSWITCH_PORT + 1)
258 #define DEMUX_PAYLOAD_OFFSET offsetof(my_payload, data)
259 #define DEMUX_PAYLOAD_VALUE 0xFFFF
260 #define MAX_DEMUX_OFFSET 900
261
262 static inline uint16_t
263 skt_xfer_fold_sum_final(uint32_t sum)
264 {
265 sum = (sum >> 16) + (sum & 0xffff); /* 17-bit */
266 sum = (sum >> 16) + (sum & 0xffff); /* 16-bit + carry */
267 sum = (sum >> 16) + (sum & 0xffff); /* final carry */
268 return ~sum & 0xffff;
269 }
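/*
 * Example: folding the 32-bit partial sum 0x1b3c5 gives
 * 0x1 + 0xb3c5 = 0xb3c6, which produces no further carries, so the
 * final Internet checksum returned is ~0xb3c6 & 0xffff = 0x4c39.
 */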
270
271 static int
272 connect_flow(nexus_controller_t ncd,
273 const uuid_t fsw, nexus_port_t nx_port, const uuid_t flow,
274 int protocol, uint16_t flags,
275 struct in_addr src_addr, in_port_t src_port,
276 struct in_addr dst_addr, in_port_t dst_port,
277 flowadv_idx_t *flowadv_idx, uint64_t qset_id)
278 {
279 struct nx_flow_req nfr;
280 int error;
281
282 memset(&nfr, 0, sizeof(nfr));
283 nfr.nfr_ip_protocol = protocol;
284 nfr.nfr_nx_port = nx_port;
285 uuid_copy(nfr.nfr_flow_uuid, flow);
286 nfr.nfr_flags = flags;
287 /* src */
288 nfr.nfr_saddr.sa.sa_len = sizeof(struct sockaddr_in);
289 nfr.nfr_saddr.sa.sa_family = AF_INET;
290 nfr.nfr_saddr.sin.sin_port = htons(src_port);
291 nfr.nfr_saddr.sin.sin_addr = src_addr;
292 /* dst */
293 nfr.nfr_daddr.sa.sa_len = sizeof(struct sockaddr_in);
294 nfr.nfr_daddr.sa.sa_family = AF_INET;
295 nfr.nfr_daddr.sin.sin_port = htons(dst_port);
296 nfr.nfr_daddr.sin.sin_addr = dst_addr;
297 nfr.nfr_flowadv_idx = FLOWADV_IDX_NONE;
298 nfr.nfr_qset_id = qset_id;
299 error = __os_nexus_flow_add(ncd, fsw, &nfr);
300
301 if (error) {
302 SKT_LOG("__os_nexus_flow_add/nsbind failed %s (%d)\n",
303 strerror(errno), errno);
304 error = errno;
305 } else if (nfr.nfr_nx_port != nx_port) {
306 T_LOG("nfr_nx_port %d != nx_port %d\n",
307 nfr.nfr_nx_port, nx_port);
308 error = EINVAL;
309 }
310 *flowadv_idx = nfr.nfr_flowadv_idx;
311 return error;
312 }
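/*
 * Illustrative sketch only (not a call taken from this file); every
 * name other than connect_flow()'s own parameters and the constants
 * defined above is hypothetical.  Adding a UDP flow on our flowswitch
 * port might look roughly like:
 *
 *	uuid_t flow;
 *	flowadv_idx_t fadv;
 *
 *	uuid_generate_random(flow);
 *	error = connect_flow(ncd, fsw_uuid, OUR_FLOWSWITCH_PORT, flow,
 *	    IPPROTO_UDP, 0, our_ip, FETH0_PORT, peer_ip, FETH1_PORT,
 *	    &fadv, 0);
 *
 * where ncd is the nexus controller, fsw_uuid the flowswitch instance
 * UUID, and our_ip/peer_ip the struct in_addr endpoints.
 */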
313
314 static int
315 connect_child_flow(nexus_controller_t ncd,
316 const uuid_t fsw, nexus_port_t nx_port, const uuid_t flow,
317 int protocol, uint16_t flags,
318 struct in_addr src_addr, in_port_t src_port,
319 struct in_addr dst_addr, in_port_t dst_port,
320 flowadv_idx_t *flowadv_idx, uint64_t qset_id, const uuid_t parent_flow,
321 struct flow_demux_pattern *demux_patterns, uint8_t demux_pattern_count)
322 {
323 struct nx_flow_req nfr;
324 int error;
325
326 memset(&nfr, 0, sizeof(nfr));
327 nfr.nfr_ip_protocol = protocol;
328 nfr.nfr_nx_port = nx_port;
329 uuid_copy(nfr.nfr_flow_uuid, flow);
330 nfr.nfr_flags = flags;
331 /* src */
332 nfr.nfr_saddr.sa.sa_len = sizeof(struct sockaddr_in);
333 nfr.nfr_saddr.sa.sa_family = AF_INET;
334 nfr.nfr_saddr.sin.sin_port = htons(src_port);
335 nfr.nfr_saddr.sin.sin_addr = src_addr;
336 /* dst */
337 nfr.nfr_daddr.sa.sa_len = sizeof(struct sockaddr_in);
338 nfr.nfr_daddr.sa.sa_family = AF_INET;
339 nfr.nfr_daddr.sin.sin_port = htons(dst_port);
340 nfr.nfr_daddr.sin.sin_addr = dst_addr;
341 nfr.nfr_flowadv_idx = FLOWADV_IDX_NONE;
342 nfr.nfr_qset_id = qset_id;
343 uuid_copy(nfr.nfr_parent_flow_uuid, parent_flow);
344
345 for (int i = 0; i < demux_pattern_count; i++) {
346 bcopy(&demux_patterns[i], &nfr.nfr_flow_demux_patterns[i],
347 sizeof(struct flow_demux_pattern));
348 }
349 nfr.nfr_flow_demux_count = demux_pattern_count;
350 error = __os_nexus_flow_add(ncd, fsw, &nfr);
351
352 if (error) {
353 SKT_LOG("__os_nexus_flow_add/nsbind failed %s (%d)\n",
354 strerror(errno), errno);
355 error = errno;
356 } else if (nfr.nfr_nx_port != nx_port) {
357 T_LOG("nfr_nx_port %d != nx_port %d\n",
358 nfr.nfr_nx_port, nx_port);
359 error = EINVAL;
360 }
361 *flowadv_idx = nfr.nfr_flowadv_idx;
362 return error;
363 }
364
365
366 static inline uint32_t
367 skt_xfer_get_chan_max_frags(const channel_t chd)
368 {
369 return (uint32_t)sktc_get_channel_attr(chd, CHANNEL_ATTR_MAX_FRAGS);
370 }
371
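/*
 * Copy payload data into a packet starting at start_offset, spilling
 * into additional buflets as each one fills.  In user packet pool mode
 * (ENABLE_UPP) new buflets are allocated from the channel and chained
 * onto the packet; otherwise the packet's pre-attached buflets are
 * walked.  When csum_offload is false, a partial checksum of the
 * copied bytes is accumulated and handed back via *partial_csum.
 */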
372 static inline void
373 sktc_xfer_copy_data_to_packet(channel_port_t port, packet_t ph,
374 const void * data, uint16_t data_len, uint16_t start_offset,
375 bool csum_offload, uint32_t *partial_csum)
376 {
377 char *baddr;
378 buflet_t buf, pbuf = NULL;
379 uint16_t clen, bdlim, blen;
380 uint16_t len = data_len;
381 uint32_t partial = 0;
382 size_t frame_length = data_len + start_offset;
383 int error;
384
385 buf = os_packet_get_next_buflet(ph, NULL);
386 assert(buf != NULL);
387 baddr = os_buflet_get_object_address(buf);
388 assert(baddr != NULL);
389 bdlim = blen = os_buflet_get_data_limit(buf);
390 baddr += start_offset;
391 blen -= start_offset;
392
393 /* copy the data */
394 while (len != 0) {
395 if (blen == 0) {
396 error = os_buflet_set_data_length(buf, bdlim);
397 SKTC_ASSERT_ERR(error == 0);
398 pbuf = buf;
399 #if ENABLE_UPP
400 error = os_channel_buflet_alloc(port->chan, &buf);
401 SKTC_ASSERT_ERR(error == 0);
402 assert(buf != NULL);
403 error = os_packet_add_buflet(ph, pbuf, buf);
404 SKTC_ASSERT_ERR(error == 0);
405 #else
406 buf = os_packet_get_next_buflet(ph, pbuf);
407 assert(buf != NULL);
408 #endif
409 error = os_buflet_set_data_offset(buf, 0);
410 SKTC_ASSERT_ERR(error == 0);
411 baddr = os_buflet_get_object_address(buf);
412 assert(baddr != NULL);
413 bdlim = blen = os_buflet_get_data_limit(buf);
414 }
415 clen = MIN(blen, len);
416 if (csum_offload) {
417 bcopy(data, baddr, clen);
418 } else {
419 partial = ~os_copy_and_inet_checksum(data, baddr, clen,
420 partial);
421 }
422 len -= clen;
423 blen -= clen;
424 data += clen;
425 baddr += clen;
426 assert(len == 0 || blen == 0);
427 }
428 if (pbuf == NULL) {
429 error = os_buflet_set_data_length(buf, frame_length);
430 } else {
431 error = os_buflet_set_data_length(buf, clen);
432 }
433 SKTC_ASSERT_ERR(error == 0);
434 if (!csum_offload) {
435 *partial_csum = partial;
436 }
437 }
438 /****************************************************************/
439
440 #if SKT_XFER_DEBUG
441 static const char *
442 inet_ptrtoa(const void * ptr)
443 {
444 struct in_addr ip;
445
446 bcopy(ptr, &ip, sizeof(ip));
447 return inet_ntoa(ip);
448 }
449
450 static void
451 ip_frame_dump(const void * buf, size_t buf_len)
452 {
453 ip_tcp_header_t * ip_tcp;
454 ip_udp_header_t * ip_udp;
455 int ip_len;
456
457 assert(buf_len >= sizeof(struct ip));
458 ip_udp = (ip_udp_header_t *)buf;
459 ip_tcp = (ip_tcp_header_t *)buf;
460 ip_len = ntohs(ip_udp->ip.ip_len);
461 T_LOG("ip src %s ", inet_ptrtoa(&ip_udp->ip.ip_src));
462 T_LOG("dst %s len %d id %d\n",
463 inet_ptrtoa(&ip_udp->ip.ip_dst), ip_len,
464 ntohs(ip_udp->ip.ip_id));
465 assert(buf_len >= ip_len);
466 assert(ip_udp->ip.ip_v == IPVERSION);
467 assert(ip_udp->ip.ip_hl == (sizeof(struct ip) >> 2));
468 switch (ip_udp->ip.ip_p) {
469 case IPPROTO_UDP: {
470 int udp_len;
471 int data_len;
472
473 assert(buf_len >= sizeof(*ip_udp));
474 udp_len = ntohs(ip_udp->udp.uh_ulen);
475 data_len = udp_len - (int)sizeof(ip_udp->udp);
476 T_LOG(
477 "UDP src 0x%x dst 0x%x len %d csum 0x%x datalen %d\n",
478 ntohs(ip_udp->udp.uh_sport),
479 ntohs(ip_udp->udp.uh_dport),
480 udp_len,
481 ntohs(ip_udp->udp.uh_sum),
482 data_len);
483 break;
484 }
485 case IPPROTO_TCP: {
486 assert(buf_len >= sizeof(*ip_tcp));
487 T_LOG(
488 "TCP src 0x%x dst 0x%x seq %u ack %u "
489 "off %d flags 0x%x win %d csum 0x%x\n",
490 ntohs(ip_tcp->tcp.th_sport),
491 ntohs(ip_tcp->tcp.th_dport),
492 ntohl(ip_tcp->tcp.th_seq),
493 ntohl(ip_tcp->tcp.th_ack),
494 ip_tcp->tcp.th_off,
495 ip_tcp->tcp.th_flags,
496 ntohs(ip_tcp->tcp.th_win),
497 ntohs(ip_tcp->tcp.th_sum));
498 break;
499 }
500 default:
501 break;
502 }
503 }
504 #endif
505
506 static int ip_id;
507
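/*
 * tcp_frame_populate()/udp_frame_populate() build a complete IPv4
 * frame in the packet: the payload is copied first, then the L4
 * header and finally the IP header are filled in.  With csum_offload
 * the L4 checksum field is seeded with the pseudo-header sum and
 * PACKET_CSUM_PARTIAL metadata tells the stack where to finish it;
 * otherwise the full checksum is computed here, using scratch space
 * just below the L4 header that the IP header overwrites afterwards.
 */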
508 static size_t
509 tcp_frame_populate(channel_port_t port, packet_t ph, struct in_addr src_ip,
510 uint16_t src_port, struct in_addr dst_ip, uint16_t dst_port,
511 const void * data, size_t data_len, bool connect,
512 bool csum_offload)
513 {
514 int error;
515 size_t frame_length;
516 ip_tcp_header_t * ip_tcp;
517 char * baddr;
518 tcp_pseudo_hdr_t * tcp_pseudo;
519 buflet_t buf;
520 uint16_t bdlim;
521 uint32_t partial = 0;
522
523 buf = os_packet_get_next_buflet(ph, NULL);
524 assert(buf != NULL);
525 error = os_buflet_set_data_offset(buf, 0);
526 SKTC_ASSERT_ERR(error == 0);
527 bdlim = os_buflet_get_data_limit(buf);
528 assert(bdlim != 0);
529 baddr = os_buflet_get_object_address(buf);
530 assert(baddr != NULL);
531
532 frame_length = sizeof(*ip_tcp) + data_len;
533 #if ENABLE_UPP
534 assert((os_packet_get_buflet_count(ph) == 1));
535 assert((skt_xfer_get_chan_max_frags(port->chan) * bdlim) >=
536 frame_length);
537 #else
538 assert((os_packet_get_buflet_count(ph) * bdlim) >= frame_length);
539 #endif
540 assert(bdlim >= sizeof(ip_tcp_header_t));
541
542 error = os_packet_set_link_header_length(ph, 0);
543 SKTC_ASSERT_ERR(error == 0);
544 /* determine frame offsets */
545 ip_tcp = (ip_tcp_header_t *)baddr;
546 tcp_pseudo = (tcp_pseudo_hdr_t *)
547 (((char *)&ip_tcp->tcp) - sizeof(*tcp_pseudo));
548 baddr += sizeof(*ip_tcp);
549
550 /* copy the data */
551 sktc_xfer_copy_data_to_packet(port, ph, data, data_len, sizeof(*ip_tcp),
552 csum_offload, &partial);
553
554 /* fill in TCP header */
555 ip_tcp->tcp.th_sport = htons(src_port);
556 ip_tcp->tcp.th_dport = htons(dst_port);
557 ip_tcp->tcp.th_flags |= (connect ? TH_SYN : TH_RST);
558 ip_tcp->tcp.th_off = (sizeof(struct tcphdr)) >> 2;
559 ip_tcp->tcp.th_sum = 0;
560 if (csum_offload) {
561 ip_tcp->tcp.th_sum = in_pseudo(src_ip.s_addr, dst_ip.s_addr,
562 htons(data_len + sizeof(ip_tcp->tcp) + IPPROTO_TCP));
563 os_packet_set_inet_checksum(ph, PACKET_CSUM_PARTIAL,
564 sizeof(struct ip),
565 sizeof(struct ip) + offsetof(struct tcphdr, th_sum));
566 } else {
567 /* fill in TCP pseudo header (overwritten by IP header below) */
568 tcp_pseudo_hdr_t * tcp_pseudo;
569 tcp_pseudo = (tcp_pseudo_hdr_t *)
570 (((char *)&ip_tcp->tcp) - sizeof(*tcp_pseudo));
571 bcopy(&src_ip, &tcp_pseudo->src_ip, sizeof(src_ip));
572 bcopy(&dst_ip, &tcp_pseudo->dst_ip, sizeof(dst_ip));
573 tcp_pseudo->zero = 0;
574 tcp_pseudo->proto = IPPROTO_TCP;
575 tcp_pseudo->length = htons(sizeof(ip_tcp->tcp) + data_len);
576 partial = os_inet_checksum(tcp_pseudo, sizeof(*tcp_pseudo)
577 + sizeof(ip_tcp->tcp), partial);
578 ip_tcp->tcp.th_sum = skt_xfer_fold_sum_final(partial);
579 }
580
581 /* fill in IP header */
582 bzero(ip_tcp, sizeof(ip_tcp->ip));
583 ip_tcp->ip.ip_v = IPVERSION;
584 ip_tcp->ip.ip_hl = sizeof(struct ip) >> 2;
585 ip_tcp->ip.ip_ttl = MAXTTL;
586 ip_tcp->ip.ip_p = IPPROTO_TCP;
587 bcopy(&src_ip, &ip_tcp->ip.ip_src, sizeof(src_ip));
588 bcopy(&dst_ip, &ip_tcp->ip.ip_dst, sizeof(dst_ip));
589 ip_tcp->ip.ip_len = htons(sizeof(*ip_tcp) + data_len);
590 ip_tcp->ip.ip_id = htons(ip_id++);
591
592 /* compute the IP checksum */
593 ip_tcp->ip.ip_sum = 0; /* needs to be zero for checksum */
594 ip_tcp->ip.ip_sum = in_cksum(&ip_tcp->ip, sizeof(ip_tcp->ip), 0);
595 return frame_length;
596 }
597
598 static size_t
599 udp_frame_populate(channel_port_t port, packet_t ph, struct in_addr src_ip,
600 uint16_t src_port, struct in_addr dst_ip, uint16_t dst_port,
601 const void * data, size_t data_len, bool csum_offload,
602 uint16_t fragment_id, size_t total_udp_len)
603 {
604 int error;
605 size_t frame_length;
606 ip_udp_header_t * ip_udp;
607 char * baddr;
608 udp_pseudo_hdr_t * udp_pseudo;
609 buflet_t buf;
610 uint16_t bdlim;
611 uint32_t partial = 0;
612
613 buf = os_packet_get_next_buflet(ph, NULL);
614 assert(buf != NULL);
615 error = os_buflet_set_data_offset(buf, 0);
616 SKTC_ASSERT_ERR(error == 0);
617 bdlim = os_buflet_get_data_limit(buf);
618 assert(bdlim != 0);
619 baddr = os_buflet_get_object_address(buf);
620 assert(baddr != NULL);
621
622 frame_length = sizeof(*ip_udp) + data_len;
623 #if ENABLE_UPP
624 assert((os_packet_get_buflet_count(ph) == 1));
625 assert((skt_xfer_get_chan_max_frags(port->chan) * bdlim) >=
626 frame_length);
627 #else
628 assert((os_packet_get_buflet_count(ph) * bdlim) >= frame_length);
629 #endif
630 assert(bdlim >= sizeof(ip_udp_header_t));
631
632 error = os_packet_set_link_header_length(ph, 0);
633 SKTC_ASSERT_ERR(error == 0);
634 /* determine frame offsets */
635 ip_udp = (ip_udp_header_t *)baddr;
636 udp_pseudo = (udp_pseudo_hdr_t *)
637 (((char *)&ip_udp->udp) - sizeof(*udp_pseudo));
638 baddr += sizeof(*ip_udp);
639
640 /* copy the data */
641 sktc_xfer_copy_data_to_packet(port, ph, data, data_len, sizeof(*ip_udp),
642 csum_offload, &partial);
643
644 /* fill in UDP header */
645 ip_udp->udp.uh_sport = htons(src_port);
646 ip_udp->udp.uh_dport = htons(dst_port);
647 ip_udp->udp.uh_ulen = htons(sizeof(ip_udp->udp) + total_udp_len);
648 ip_udp->udp.uh_sum = 0;
649 if (csum_offload) {
650 ip_udp->udp.uh_sum = in_pseudo(src_ip.s_addr, dst_ip.s_addr,
651 htons(total_udp_len + sizeof(ip_udp->udp) + IPPROTO_UDP));
652 os_packet_set_inet_checksum(ph,
653 PACKET_CSUM_PARTIAL | PACKET_CSUM_ZERO_INVERT,
654 sizeof(struct ip),
655 sizeof(struct ip) + offsetof(struct udphdr, uh_sum));
656 } else {
657 /* fill in UDP pseudo header (overwritten by IP header below) */
658 udp_pseudo_hdr_t *udp_pseudo;
659 udp_pseudo = (udp_pseudo_hdr_t *)
660 (((char *)&ip_udp->udp) - sizeof(*udp_pseudo));
661 bcopy(&src_ip, &udp_pseudo->src_ip, sizeof(src_ip));
662 bcopy(&dst_ip, &udp_pseudo->dst_ip, sizeof(dst_ip));
663 udp_pseudo->zero = 0;
664 udp_pseudo->proto = IPPROTO_UDP;
665 udp_pseudo->length = htons(sizeof(ip_udp->udp) + total_udp_len);
666 partial = os_inet_checksum(udp_pseudo, sizeof(*udp_pseudo)
667 + sizeof(ip_udp->udp), partial);
668 ip_udp->udp.uh_sum = skt_xfer_fold_sum_final(partial);
669 }
670
671 /* fill in IP header */
672 bzero(ip_udp, sizeof(ip_udp->ip));
673 ip_udp->ip.ip_v = IPVERSION;
674 ip_udp->ip.ip_hl = sizeof(struct ip) >> 2;
675 ip_udp->ip.ip_ttl = MAXTTL;
676 ip_udp->ip.ip_p = IPPROTO_UDP;
677 bcopy(&src_ip, &ip_udp->ip.ip_src, sizeof(src_ip));
678 bcopy(&dst_ip, &ip_udp->ip.ip_dst, sizeof(dst_ip));
679 ip_udp->ip.ip_len = htons(sizeof(*ip_udp) + data_len);
680 if (fragment_id != 0) {
681 ip_udp->ip.ip_id = htons(fragment_id);
682 ip_udp->ip.ip_off = htons(IP_MF);
683 } else {
684 ip_udp->ip.ip_id = htons(ip_id++);
685 }
686
687 /* compute the IP header checksum */
688 ip_udp->ip.ip_sum = 0; /* needs to be zero for checksum */
689 ip_udp->ip.ip_sum = in_cksum(&ip_udp->ip, sizeof(ip_udp->ip), 0);
690 return frame_length;
691 }
692
693 static size_t
694 ip_frame_populate(channel_port_t port, packet_t ph, uint8_t protocol,
695 struct in_addr src_ip, struct in_addr dst_ip, const void * data,
696 size_t data_len, uint16_t fragment_id, uint16_t fragment_offset,
697 bool last_fragment)
698 {
699 int error;
700 size_t frame_length;
701 struct ip *ip;
702 char * baddr;
703 buflet_t buf;
704 uint16_t bdlim;
705
706 buf = os_packet_get_next_buflet(ph, NULL);
707 assert(buf != NULL);
708 error = os_buflet_set_data_offset(buf, 0);
709 SKTC_ASSERT_ERR(error == 0);
710 bdlim = os_buflet_get_data_limit(buf);
711 assert(bdlim != 0);
712 baddr = os_buflet_get_object_address(buf);
713 assert(baddr != NULL);
714
715 frame_length = sizeof(*ip) + data_len;
716 #if ENABLE_UPP
717 assert((os_packet_get_buflet_count(ph) == 1));
718 assert((skt_xfer_get_chan_max_frags(port->chan) * bdlim) >=
719 frame_length);
720 #else
721 assert((os_packet_get_buflet_count(ph) * bdlim) >= frame_length);
722 #endif
723 assert(bdlim >= sizeof(*ip));
724
725 error = os_packet_set_link_header_length(ph, 0);
726 SKTC_ASSERT_ERR(error == 0);
727 /* determine frame offsets */
728 ip = (struct ip*)baddr;
729 baddr += sizeof(*ip);
730
731 /* fill in IP header */
732 bzero(ip, sizeof(*ip));
733 ip->ip_v = IPVERSION;
734 ip->ip_hl = sizeof(struct ip) >> 2;
735 ip->ip_ttl = MAXTTL;
736 ip->ip_p = protocol;
737 bcopy(&src_ip, &ip->ip_src, sizeof(src_ip));
738 bcopy(&dst_ip, &ip->ip_dst, sizeof(dst_ip));
739 ip->ip_len = htons(sizeof(*ip) + data_len);
740 if (fragment_id != 0) {
741 ip->ip_id = htons(fragment_id);
742 ip->ip_off = htons(last_fragment ? 0 : IP_MF) | htons(fragment_offset / 8);
743 } else {
744 ip->ip_id = htons(ip_id++);
745 }
746
747 /* compute the IP header checksum */
748 ip->ip_sum = 0; /* needs to be zero for checksum */
749 ip->ip_sum = in_cksum(ip, sizeof(*ip), 0);
750
751 /* copy the data */
752 sktc_xfer_copy_data_to_packet(port, ph, data, data_len, sizeof(*ip),
753 TRUE, NULL);
754 return frame_length;
755 }
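/*
 * Example: a non-final fragment whose data starts 1480 bytes into the
 * datagram is sent with fragment_offset == 1480, so ip_off carries
 * htons(IP_MF) | htons(1480 / 8), i.e. MF set and an offset of 185
 * eight-byte units; the last fragment carries the offset with MF clear.
 */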
756
757 static size_t
758 frame_populate(channel_port_t port, packet_t ph, int protocol,
759 struct in_addr src_ip, uint16_t src_port, struct in_addr dst_ip,
760 uint16_t dst_port, const void * data, size_t data_len, uuid_t flow_id,
761 bool connect, packet_svc_class_t svc_class, bool csum_offload,
762 uint16_t fragment_id, size_t total_data_len, uint16_t fragment_offset,
763 bool last_fragment)
764 {
765 size_t ret;
766 int error;
767
768 switch (protocol) {
769 case IPPROTO_TCP:
770 ret = tcp_frame_populate(port, ph, src_ip, src_port, dst_ip,
771 dst_port, data, data_len, connect, csum_offload);
772 break;
773 case IPPROTO_UDP:
774 assert(connect == FALSE);
775 if (fragment_offset > 0) {
776 ret = ip_frame_populate(port, ph, protocol, src_ip,
777 dst_ip, data, data_len, fragment_id,
778 fragment_offset, last_fragment);
779 } else {
780 ret = udp_frame_populate(port, ph, src_ip, src_port,
781 dst_ip, dst_port, data, data_len, csum_offload,
782 fragment_id, total_data_len);
783 }
784 break;
785 default:
786 ret = ip_frame_populate(port, ph, protocol, src_ip, dst_ip,
787 data, data_len, fragment_id, fragment_offset,
788 last_fragment);
789 break;
790 }
791 error = os_packet_set_service_class(ph, svc_class);
792 SKTC_ASSERT_ERR(error == 0);
793 os_packet_set_flow_uuid(ph, flow_id);
794 error = os_packet_finalize(ph);
795 SKTC_ASSERT_ERR(error == 0);
796 assert(ret == os_packet_get_data_length(ph));
797 return ret;
798 }
799
800 static size_t
801 tcp_frame_process(packet_t ph, void *data, size_t data_max)
802 {
803 buflet_t buflet;
804 size_t pkt_len, data_len, ip_len, buf_len;
805 uint32_t bdoff;
806 void *buf;
807 ip_tcp_header_t *ip_tcp;
808 uint16_t csum;
809
810 /**********************************************************************/
811 /* process 1st buflet which contains protocol header */
812 buflet = os_packet_get_next_buflet(ph, NULL);
813 assert(buflet != NULL);
814 buf_len = os_buflet_get_data_length(buflet);
815 buf = os_buflet_get_object_address(buflet) +
816 os_buflet_get_data_offset(buflet);
817 ip_tcp = (ip_tcp_header_t *)buf;
818
819 pkt_len = os_packet_get_data_length(ph);
820 ip_len = ntohs(ip_tcp->ip.ip_len);
821 assert(ip_len <= pkt_len);
822 data_len = ip_len - sizeof(*ip_tcp);
823 assert(data_len <= data_max);
824
825 /* IP */
826 assert(ip_tcp->ip.ip_p == IPPROTO_TCP);
827
828 /* verify IP header checksum */
829 csum = in_cksum(&ip_tcp->ip, sizeof(ip_tcp->ip), 0);
830 if (csum != 0) {
831 sktu_dump_buffer(stderr, "ip header checksum", buf, buf_len);
832 fflush(stderr);
833 assert(0);
834 }
835
836 /* starts TCP partial checksum on 1st buflet */
837 buf_len = MIN(ip_len, buf_len);
838 csum = os_inet_checksum(&ip_tcp->tcp, buf_len - sizeof(struct ip), 0);
839 if (data != NULL) { /* copy the data */
840 bcopy(buf + sizeof(*ip_tcp), data, buf_len - sizeof(*ip_tcp));
841 data += (buf_len - sizeof(*ip_tcp));
842 }
843
844 /**********************************************************************/
845 /* iterate through the rest of buflets */
846 ip_len -= buf_len;
847 while (ip_len != 0) {
848 buflet = os_packet_get_next_buflet(ph, buflet);
849 assert(buflet != NULL);
850 bdoff = os_buflet_get_data_offset(buflet);
851 buf = os_buflet_get_object_address(buflet) + bdoff;
852 assert(buf != 0);
853 buf_len = os_buflet_get_data_length(buflet);
854 assert(buf_len != 0);
855 csum = os_inet_checksum(buf, buf_len, csum);
856 if (data != NULL) { /* copy the data */
857 bcopy(buf, data, buf_len);
858 data += buf_len;
859 }
860 ip_len -= buf_len;
861 }
862
863 csum = in_pseudo(ip_tcp->ip.ip_src.s_addr, ip_tcp->ip.ip_dst.s_addr,
864 csum + htons(data_len + sizeof(struct tcphdr) + IPPROTO_TCP));
865 csum ^= 0xffff;
866 if (csum != 0) {
867 sktu_dump_buffer(stderr, "tcp packet bad checksum", buf,
868 ntohs(ip_tcp->ip.ip_len));
869 fflush(stderr);
870 assert(0);
871 }
872
873 return data_len;
874 }
875
876 static size_t
877 udp_frame_process(packet_t ph, void *data, size_t data_max)
878 {
879 buflet_t buflet;
880 size_t pkt_len, buf_len, ip_len, data_len;
881 uint32_t bdoff;
882 void *buf;
883 ip_udp_header_t *ip_udp;
884 uint16_t csum;
885
886 /**********************************************************************/
887 /* process 1st buflet which contains protocol header */
888 buflet = os_packet_get_next_buflet(ph, NULL);
889 assert(buflet != NULL);
890 buf_len = os_buflet_get_data_length(buflet);
891 buf = os_buflet_get_object_address(buflet) +
892 os_buflet_get_data_offset(buflet);
893 ip_udp = (ip_udp_header_t *)buf;
894
895 pkt_len = os_packet_get_data_length(ph);
896 ip_len = ntohs(ip_udp->ip.ip_len);
897 assert(ip_len <= pkt_len);
898 data_len = ip_len - sizeof(*ip_udp);
899 assert(data_len <= data_max);
900
901 assert(ip_udp->ip.ip_p == IPPROTO_UDP);
902
903 /* verify IP header checksum */
904 csum = in_cksum(&ip_udp->ip, sizeof(ip_udp->ip), 0);
905 if (csum != 0) {
906 sktu_dump_buffer(stderr, "ip header checksum", buf, ip_len);
907 fflush(stderr);
908 assert(0);
909 }
910
911 /* starts UDP partial checksum on 1st buflet */
912 buf_len = MIN(ip_len, buf_len);
913 csum = os_inet_checksum(&ip_udp->udp, buf_len - sizeof(struct ip), 0);
914
915 if (data != NULL) { /* copy the data */
916 bcopy(buf + sizeof(*ip_udp), data, buf_len - sizeof(*ip_udp));
917 data += (buf_len - sizeof(*ip_udp));
918 }
919
920 /**********************************************************************/
921 /* iterate through the rest of buflets */
922 ip_len -= buf_len;
923 while (ip_len != 0) {
924 buflet = os_packet_get_next_buflet(ph, buflet);
925 assert(buflet != NULL);
926 bdoff = os_buflet_get_data_offset(buflet);
927 buf = os_buflet_get_object_address(buflet) + bdoff;
928 assert(buf != 0);
929 buf_len = os_buflet_get_data_length(buflet);
930 buf_len = MIN(buf_len, ip_len);
931 assert(buf_len != 0);
932 if (ip_udp->udp.uh_sum != 0) {
933 csum = os_inet_checksum(buf, buf_len, csum);
934 }
935 if (data != NULL) { /* copy the data */
936 bcopy(buf, data, buf_len);
937 data += buf_len;
938 }
939 ip_len -= buf_len;
940 }
941
942 /* verify UDP checksum */
943 if (ip_udp->ip.ip_off == 0 &&
944 ip_udp->udp.uh_sum != 0) {
945 csum = in_pseudo(ip_udp->ip.ip_src.s_addr, ip_udp->ip.ip_dst.s_addr,
946 csum + htons(data_len + sizeof(struct udphdr) + IPPROTO_UDP));
947 csum ^= 0xffff;
948 if (csum != 0) {
949 sktu_dump_buffer(stderr, "udp packet bad checksum", buf,
950 ntohs(ip_udp->ip.ip_len));
951 fflush(stderr);
952 assert(0);
953 }
954 }
955
956 return data_len;
957 }
958
959 static size_t
960 ip_frame_process(packet_t ph, void * data, size_t data_max)
961 {
962 buflet_t buflet;
963 size_t pkt_len, buf_len, data_len;
964 uint32_t bdoff;
965 void *buf;
966 struct ip *ip;
967 uint16_t csum;
968
969 /**********************************************************************/
970 /* process 1st buflet which contains protocol header */
971 buflet = os_packet_get_next_buflet(ph, NULL);
972 assert(buflet != NULL);
973 buf_len = os_buflet_get_data_length(buflet);
974 buf = os_buflet_get_object_address(buflet) +
975 os_buflet_get_data_offset(buflet);
976 ip = (struct ip*)buf;
977
978 pkt_len = os_packet_get_data_length(ph);
979 assert(pkt_len == ntohs(ip->ip_len));
980 data_len = pkt_len - sizeof(*ip);
981 assert(data_len <= data_max);
982
983 /* verify IP header checksum */
984 csum = in_cksum(ip, sizeof(*ip), 0);
985 if (csum != 0) {
986 sktu_dump_buffer(stderr, "ip header checksum", buf, buf_len);
987 fflush(stderr);
988 assert(0);
989 }
990
991 if (data != NULL) { /* copy the data */
992 bcopy(buf + sizeof(*ip), data, buf_len - sizeof(*ip));
993 data += (buf_len - sizeof(*ip));
994 }
995
996 /**********************************************************************/
997 /* iterate through the rest of buflets */
998 pkt_len -= buf_len;
999 while (pkt_len != 0) {
1000 buflet = os_packet_get_next_buflet(ph, buflet);
1001 assert(buflet != NULL);
1002 bdoff = os_buflet_get_data_offset(buflet);
1003 buf = os_buflet_get_object_address(buflet) + bdoff;
1004 assert(buf != 0);
1005 buf_len = os_buflet_get_data_length(buflet);
1006 assert(buf_len != 0);
1007 if (data != NULL) { /* copy the data */
1008 bcopy(buf, data, buf_len);
1009 data += buf_len;
1010 }
1011 pkt_len -= buf_len;
1012 }
1013
1014 return data_len;
1015 }
1016
1017 struct qosmarking_mapping {
1018 char *svc_str;
1019 uint32_t svc;
1020 uint32_t dscp;
1021 };
1022
1023 #define QOSMARKING_MAPPINGS(X) \
1024 /*SVC_CLASS FASTLANE RFC4594 */ \
1025 X(PKT_SC_BK, _DSCP_AF11, _DSCP_CS1) \
1026 X(PKT_SC_BK_SYS, _DSCP_AF11, _DSCP_CS1) \
1027 X(PKT_SC_BE, _DSCP_DF, _DSCP_DF) \
1028 X(PKT_SC_RD, _DSCP_AF21, _DSCP_AF21) \
1029 X(PKT_SC_OAM, _DSCP_CS2, _DSCP_CS2) \
1030 X(PKT_SC_AV, _DSCP_AF31, _DSCP_AF31) \
1031 X(PKT_SC_RV, _DSCP_CS4, _DSCP_CS4) \
1032 X(PKT_SC_VI, _DSCP_AF41, _DSCP_AF41) \
1033 X(PKT_SC_SIG, _DSCP_CS3, _DSCP_CS5) \
1034 X(PKT_SC_VO, _DSCP_EF, _DSCP_EF) \
1035 X(PKT_SC_CTL, _DSCP_DF, _DSCP_CS6)
1036
1037 #define MAP_TO_FASTLANE(a, b, c) {#a, a, b},
1038 #define MAP_TO_RFC4594(a, b, c) {#a, a, c},
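/*
 * QOSMARKING_MAPPINGS is an X-macro table: expanding it with
 * MAP_TO_FASTLANE keeps the fastlane DSCP column, while MAP_TO_RFC4594
 * keeps the RFC 4594 column, producing the two lookup tables below.
 */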
1039
1040 #define QOSMARKING_SVC_MAX 11
1041
1042 struct qosmarking_mapping fastlane_mappings[] = {
1043 QOSMARKING_MAPPINGS(MAP_TO_FASTLANE)
1044 };
1045
1046 struct qosmarking_mapping rfc4594_mappings[] = {
1047 QOSMARKING_MAPPINGS(MAP_TO_RFC4594)
1048 };
1049
1050 static size_t
1051 frame_process(packet_t ph, void *data, size_t data_max, bool verify_qos)
1052 {
1053 buflet_t buflet;
1054 void *buf;
1055 struct ip *ip;
1056 size_t buf_len, ret;
1057
1058 buflet = os_packet_get_next_buflet(ph, NULL);
1059 assert(buflet != NULL);
1060 buf = os_buflet_get_object_address(buflet) +
1061 os_buflet_get_data_offset(buflet);
1062 buf_len = os_buflet_get_data_length(buflet);
1063 ip = buf;
1064
1065 switch (ip->ip_p) {
1066 case IPPROTO_TCP:
1067 ret = tcp_frame_process(ph, data, data_max);
1068 break;
1069 case IPPROTO_UDP:
1070 ret = udp_frame_process(ph, data, data_max);
1071 break;
1072 default:
1073 ret = ip_frame_process(ph, data, data_max);
1074 break;
1075 }
1076
1077 if (verify_qos) {
1078 struct qosmarking_mapping *table = NULL;
1079 int i;
1080 my_payload_t payload = data;
1081 char *svc_str = payload->data;
1082 size_t svc_str_len = data_max;
1083 packet_svc_class_t svc = os_packet_get_service_class(ph);
1084 int dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
1085 #define EXPECT(var, val) \
1086 if (var != val) { \
1087 T_LOG("expected "#var" %d got %d\n", \
1088 val, var); \
1089 sktu_dump_buffer(stderr, "packet dump", buf, buf_len); \
1090 fflush(stderr); \
1091 assert(0); \
1092 }
1093
1094 if (strncmp(svc_str, XFER_QOSMARKING_FASTLANE_PREFIX,
1095 strlen(XFER_QOSMARKING_FASTLANE_PREFIX)) == 0) {
1096 table = fastlane_mappings;
1097 svc_str += strlen(XFER_QOSMARKING_FASTLANE_PREFIX);
1098 svc_str_len -= strlen(XFER_QOSMARKING_FASTLANE_PREFIX);
1099 } else if (strncmp(svc_str, XFER_QOSMARKING_RFC4594_PREFIX,
1100 strlen(XFER_QOSMARKING_RFC4594_PREFIX)) == 0) {
1101 table = rfc4594_mappings;
1102 svc_str += strlen(XFER_QOSMARKING_RFC4594_PREFIX);
1103 svc_str_len -= strlen(XFER_QOSMARKING_RFC4594_PREFIX);
1104 } else if (strncmp(svc_str, XFER_RECV_END_PAYLOAD,
1105 strlen(XFER_RECV_END_PAYLOAD)) == 0) {
1106 return ret;
1107 } else {
1108 T_LOG("unkown qosmarking mode %s\n", svc_str);
1109 assert(0);
1110 }
1111
1112 for (i = 0; i < QOSMARKING_SVC_MAX; i++) {
1113 if (strncmp(svc_str, table[i].svc_str, svc_str_len) == 0) {
1114 EXPECT(svc, table[i].svc);
1115 EXPECT(dscp, table[i].dscp);
1116 T_LOG("verified %s\n", svc_str);
1117 break;
1118 }
1119 }
1120
1121 if (i == QOSMARKING_SVC_MAX) {
1122 T_LOG("unkown svc class %s\n", svc_str);
1123 }
1124 }
1125
1126 return ret;
1127 }
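/*
 * frame_process() dispatches on the IP protocol field and, when
 * verify_qos is set, checks that the DSCP bits of the received IP
 * header and the packet's service class match the expectation encoded
 * in the payload string ("FASTLANE.<svc>" or "RFC4594.<svc>").
 */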
1128
1129 static void
1130 channel_port_send(channel_port_t port, uuid_t flow_id,
1131 int protocol,
1132 uint16_t src_port,
1133 struct in_addr dst_ip, uint16_t dst_port,
1134 my_payload_t payload, int payload_length,
1135 uint32_t limit, bool must_complete_batch,
1136 bool connect, packet_svc_class_t svc_class,
1137 bool csum_offload,
1138 void (^packet_prehook)(packet_t p))
1139 {
1140 int error;
1141 channel_slot_t last_slot = NULL;
1142 packet_id_t pktid = {OS_PACKET_PKTID_VERSION_CURRENT,
1143 XFER_PKTID_PAYLOAD_TYPE, 0, 0, XFER_PKTID_STREAM_ID, 0};
1144
1145 assert(payload->packet_number < limit);
1146 while (1) {
1147 int frame_length;
1148 slot_prop_t prop;
1149 channel_slot_t slot;
1150 packet_t pkt = 0;
1151 void *buf;
1152 size_t buf_len;
1153 buflet_t buflet;
1154
1155 /* grab a slot and populate it */
1156 slot = os_channel_get_next_slot(port->tx_ring, last_slot,
1157 &prop);
1158 if (slot == NULL) {
1159 if (must_complete_batch &&
1160 payload->packet_number < limit) {
1161 /* couldn't complete batch */
1162 T_LOG(
1163 "TX didn't complete batch (%u < %u)\n",
1164 payload->packet_number, limit);
1165 assert(0);
1166 }
1167 break;
1168 }
1169
1170 if (port->user_packet_pool) {
1171 assert(prop.sp_buf_ptr == 0);
1172 assert(prop.sp_len == 0);
1173 error = os_channel_packet_alloc(port->chan, &pkt);
1174 SKTC_ASSERT_ERR(error == 0);
1175 } else {
1176 assert(prop.sp_buf_ptr != 0);
1177 assert(prop.sp_len != 0);
1178 pkt = os_channel_slot_get_packet(port->tx_ring, slot);
1179 }
1180 assert(pkt != 0);
1181 buflet = os_packet_get_next_buflet(pkt, NULL);
1182 assert(buflet != NULL);
1183 buf = os_buflet_get_object_address(buflet) +
1184 os_buflet_get_data_offset(buflet);
1185 assert(buf != NULL);
1186 buf_len = os_buflet_get_data_limit(buflet);
1187 assert(buf_len != 0);
1188 if (!port->user_packet_pool) {
1189 assert(buf == (void *)prop.sp_buf_ptr);
1190 assert(buf_len == prop.sp_len);
1191 }
1192
1193 frame_length = frame_populate(port, pkt, protocol,
1194 port->ip_addr, src_port, dst_ip, dst_port, (void *)payload,
1195 payload_length, flow_id, connect, svc_class, csum_offload,
1196 0, payload_length, 0, FALSE);
1197
1198 pktid.pktid_sequence_number = payload->packet_number;
1199 pktid.pktid_timestamp = pktid.pktid_sequence_number;
1200 assert(os_packet_set_packetid(pkt, &pktid) == 0);
1201
1202 if (packet_prehook != NULL) {
1203 packet_prehook(pkt);
1204 }
1205
1206 #if SKT_XFER_DEBUG
1207 T_LOG("\nPort %d transmitting %d bytes:\n",
1208 port->port, frame_length);
1209 ip_frame_dump(buf, frame_length);
1210 #endif
1211 assert(frame_length != 0);
1212 if (port->user_packet_pool) {
1213 error = os_channel_slot_attach_packet(port->tx_ring,
1214 slot, pkt);
1215 SKTC_ASSERT_ERR(error == 0);
1216 } else {
1217 prop.sp_len = frame_length;
1218 os_channel_set_slot_properties(port->tx_ring, slot,
1219 &prop);
1220 }
1221 last_slot = slot;
1222 payload->packet_number++;
1223 if (payload->packet_number >= limit) {
1224 break;
1225 }
1226 }
1227 if (last_slot != NULL) {
1228 error = os_channel_advance_slot(port->tx_ring, last_slot);
1229 SKTC_ASSERT_ERR(error == 0);
1230 error = os_channel_sync(port->chan, CHANNEL_SYNC_TX);
1231 SKTC_ASSERT_ERR(error == 0);
1232 }
1233 }
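/*
 * Note on the two TX paths above: with a user packet pool the slot
 * starts out empty, so a packet is allocated from the channel and
 * attached to the slot; otherwise the slot already carries a packet
 * and only its slot properties (length) are updated before the ring
 * is advanced and synced.
 */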
1234
1235 static void
1236 channel_port_send_fragments(channel_port_t port, uuid_t flow_id,
1237 int protocol, uint16_t src_port,
1238 struct in_addr dst_ip, uint16_t dst_port,
1239 my_payload_t payload, int payload_length,
1240 uint16_t fragment_count,
1241 packet_svc_class_t svc_class,
1242 bool csum_offload, bool error_ids)
1243 {
1244 int error = 0;
1245 channel_slot_t last_slot = NULL;
1246
1247 uint16_t fragment_id = ip_id++;
1248
1249 for (int fragment_i = 0; fragment_i < fragment_count; fragment_i++) {
1250 int frame_length = 0;
1251 slot_prop_t prop;
1252 channel_slot_t slot = NULL;
1253 packet_t pkt = 0;
1254 void *buf = NULL;
1255 size_t buf_len = 0;
1256 buflet_t buflet = NULL;
1257
1258 /* grab a slot and populate it */
1259 slot = os_channel_get_next_slot(port->tx_ring, last_slot,
1260 &prop);
1261 if (slot == NULL) {
1262 if (fragment_i < fragment_count) {
1263 /* couldn't complete fragment */
1264 T_LOG(
1265 "TX didn't complete fragment (%u < %u)\n",
1266 fragment_i, fragment_count);
1267 assert(0);
1268 }
1269 break;
1270 }
1271
1272 if (port->user_packet_pool) {
1273 assert(prop.sp_buf_ptr == 0);
1274 assert(prop.sp_len == 0);
1275 error = os_channel_packet_alloc(port->chan, &pkt);
1276 SKTC_ASSERT_ERR(error == 0);
1277 } else {
1278 assert(prop.sp_buf_ptr != 0);
1279 assert(prop.sp_len != 0);
1280 pkt = os_channel_slot_get_packet(port->tx_ring, slot);
1281 }
1282 assert(pkt != 0);
1283 buflet = os_packet_get_next_buflet(pkt, NULL);
1284 assert(buflet != NULL);
1285 buf = os_buflet_get_object_address(buflet) +
1286 os_buflet_get_data_offset(buflet);
1287 assert(buf != NULL);
1288 buf_len = os_buflet_get_data_limit(buflet);
1289 assert(buf_len != 0);
1290 if (!port->user_packet_pool) {
1291 assert(buf == (void *)prop.sp_buf_ptr);
1292 assert(buf_len == prop.sp_len);
1293 }
1294
1295 if (fragment_i == 0) {
1296 frame_length = frame_populate(port, pkt, protocol,
1297 port->ip_addr, src_port, dst_ip, dst_port,
1298 (void *)payload, payload_length, flow_id, FALSE,
1299 svc_class, csum_offload, fragment_id,
1300 fragment_count * payload_length, 0, FALSE);
1301 } else {
1302 frame_length = frame_populate(port, pkt, protocol,
1303 port->ip_addr, src_port, dst_ip, dst_port,
1304 (void *)payload, payload_length, flow_id, FALSE,
1305 svc_class, csum_offload,
1306 fragment_id, fragment_count * payload_length,
1307 fragment_i * payload_length + sizeof(struct udphdr),
1308 fragment_i == (fragment_count - 1));
1309 }
1310
1311 #if SKT_XFER_DEBUG
1312 T_LOG("\nPort %d transmitting %d bytes:\n",
1313 port->port, frame_length);
1314 ip_frame_dump(buf, frame_length);
1315 #endif
1316 assert(frame_length != 0);
1317 if (port->user_packet_pool) {
1318 error = os_channel_slot_attach_packet(port->tx_ring,
1319 slot, pkt);
1320 SKTC_ASSERT_ERR(error == 0);
1321 } else {
1322 prop.sp_len = frame_length;
1323 os_channel_set_slot_properties(port->tx_ring, slot,
1324 &prop);
1325 }
1326 last_slot = slot;
1327
1328 if (error_ids) {
1329 fragment_id = ip_id++;
1330 }
1331 }
1332 if (last_slot != NULL) {
1333 error = os_channel_advance_slot(port->tx_ring, last_slot);
1334 SKTC_ASSERT_ERR(error == 0);
1335 error = os_channel_sync(port->chan, CHANNEL_SYNC_TX);
1336 SKTC_ASSERT_ERR(error == 0);
1337 }
1338 }
1339
1340 static int
1341 channel_port_receive_payload(channel_port_t port, my_payload_t payload,
1342 bool verify_qos)
1343 {
1344 int error;
1345 slot_prop_t prop;
1346 channel_slot_t slot;
1347 packet_t pkt;
1348 void *buf;
1349 size_t frame_length;
1350 buflet_t buflet;
1351
1352 slot = os_channel_get_next_slot(port->rx_ring, NULL, &prop);
1353 if (slot == NULL) {
1354 return ENOENT;
1355 }
1356 assert(prop.sp_buf_ptr != 0);
1357
1358 pkt = os_channel_slot_get_packet(port->rx_ring, slot);
1359 assert(pkt != 0);
1360 if (port->user_packet_pool) {
1361 error = os_channel_slot_detach_packet(port->rx_ring,
1362 slot, pkt);
1363 SKTC_ASSERT_ERR(error == 0);
1364 }
1365
1366 buflet = os_packet_get_next_buflet(pkt, NULL);
1367 assert(buflet != NULL);
1368 buf = os_buflet_get_object_address(buflet) +
1369 os_buflet_get_data_offset(buflet);
1370 frame_length = os_packet_get_data_length(pkt);
1371
1372 (void) frame_process(pkt, payload, frame_length, verify_qos);
1373
1374 #if SKT_XFER_DEBUG
1375 T_LOG("\nPort %d received %lu bytes:\n",
1376 port->port, frame_length);
1377
1378 ip_frame_dump(buf, frame_length);
1379 #endif
1380 if (port->user_packet_pool) {
1381 error = os_channel_packet_free(port->chan, pkt);
1382 SKTC_ASSERT_ERR(error == 0);
1383 }
1384
1385 error = os_channel_advance_slot(port->rx_ring, slot);
1386 SKTC_ASSERT_ERR(error == 0);
1387 error = os_channel_sync(port->chan, CHANNEL_SYNC_RX);
1388 SKTC_ASSERT_ERR(error == 0);
1389
1390 return 0;
1391 }
1392
1393 static void
1394 channel_port_receive(int child, channel_port_t port, uint16_t our_port,
1395 struct in_addr peer_ip,
1396 uint32_t limit,
1397 uint32_t * receive_count,
1398 uint32_t * receive_index,
1399 bool errors_ok,
1400 uint32_t * pkts_dropped)
1401 {
1402 int error;
1403 channel_slot_t last_slot = NULL;
1404
1405 assert(*receive_index < limit);
1406
1407 *pkts_dropped = 0;
1408
1409 while (1) {
1410 int frame_length;
1411 ip_udp_header_t * ip_udp;
1412 my_payload payload;
1413 slot_prop_t prop;
1414 channel_slot_t slot;
1415 packet_t pkt;
1416 char *buf;
1417 uint16_t pkt_len;
1418 uint32_t bdoff;
1419 buflet_t buflet;
1420 uint8_t aggr_type;
1421 uint32_t buflet_cnt;
1422
1423 slot = os_channel_get_next_slot(port->rx_ring, last_slot,
1424 &prop);
1425 if (slot == NULL) {
1426 break;
1427 }
1428 assert(prop.sp_buf_ptr != 0);
1429
1430 pkt = os_channel_slot_get_packet(port->rx_ring, slot);
1431 assert(pkt != 0);
1432 if (port->user_packet_pool) {
1433 error = os_channel_slot_detach_packet(port->rx_ring,
1434 slot, pkt);
1435 SKTC_ASSERT_ERR(error == 0);
1436 }
1437 buflet = os_packet_get_next_buflet(pkt, NULL);
1438 assert(buflet != NULL);
1439 bdoff = os_buflet_get_data_offset(buflet);
1440 buf = os_buflet_get_object_address(buflet) + bdoff;
1441 pkt_len = os_packet_get_data_length(pkt);
1442 assert(buf == (void *)prop.sp_buf_ptr);
1443 assert(pkt_len == prop.sp_len);
1444
1445 frame_length = sizeof(*ip_udp) + sizeof(my_payload);
1446 assert(os_packet_get_link_header_length(pkt) == 0);
1447
1448 buflet_cnt = os_packet_get_buflet_count(pkt);
1449 aggr_type = os_packet_get_aggregation_type(pkt);
1450 assert((aggr_type == PKT_AGGR_NONE) || (buflet_cnt > 1));
1451
1452 (void) frame_process(pkt, &payload, pkt_len, FALSE);
1453
1454 #if SKT_XFER_DEBUG
1455 T_LOG("\nPort %d received %d bytes:\n",
1456 port->port, frame_length);
1457
1458 ip_frame_dump(buf, frame_length);
1459 #endif
1460 last_slot = slot;
1461 if (*receive_index != payload.packet_number) {
1462 if (!errors_ok) {
1463 assert(payload.packet_number > *receive_index);
1464 }
1465 uint32_t dropped;
1466
1467 dropped = payload.packet_number - *receive_index;
1468 *pkts_dropped += dropped;
1469 #if SKT_XFER_DEBUG
1470 T_LOG(
1471 "child %d dropped %u (received #%u, expected #%u)\n",
1472 child, dropped, payload.packet_number,
1473 *receive_index);
1474 #endif
1475 *receive_index = payload.packet_number;
1476 }
1477
1478 if (port->user_packet_pool) {
1479 error = os_channel_packet_free(port->chan, pkt);
1480 SKTC_ASSERT_ERR(error == 0);
1481 }
1482 (*receive_count)++;
1483 (*receive_index)++;
1484 if (*receive_index == limit) {
1485 break;
1486 }
1487 }
1488 if (last_slot != NULL) {
1489 error = os_channel_advance_slot(port->rx_ring, last_slot);
1490 SKTC_ASSERT_ERR(error == 0);
1491 error = os_channel_sync(port->chan, CHANNEL_SYNC_RX);
1492 SKTC_ASSERT_ERR(error == 0);
1493 }
1494 }
1495
1496 static void
1497 channel_port_receive_all(channel_port_t port, uuid_t flow_id,
1498 uint16_t src_port, struct in_addr dst_ip, uint16_t dst_port,
1499 int16_t should_receive_count, bool verify_qos)
1500 {
1501 int error;
1502 struct kevent evlist, kev;
1503 int kq;
1504 uint16_t received_count = 0;
1505
1506 kq = kqueue();
1507 assert(kq != -1);
1508
1509 EV_SET(&kev, port->fd, EVFILT_READ,
1510 EV_ADD | EV_ENABLE, 0, 0, NULL);
1511 error = kevent(kq, &kev, 1, NULL, 0, NULL);
1512 SKTC_ASSERT_ERR(error == 0);
1513 struct timespec timeout;
1514 timeout.tv_sec = 10;
1515 timeout.tv_nsec = 0;
1516
1517 for (;;) {
1518 /* wait for RX to become available */
1519 error = kevent(kq, NULL, 0, &evlist, 1, &timeout);
1520 if (error <= 0) {
1521 if (errno == EAGAIN) {
1522 continue;
1523 }
1524 SKTC_ASSERT_ERR(error == 0);
1525 }
1526 if (error == 0) {
1527 /* time out */
1528 T_LOG(
1529 "Error, timeout for final right packet\n");
1530 assert(0);
1531 }
1532 if (evlist.flags & EV_ERROR) {
1533 int err = evlist.data;
1534
1535 if (err == EAGAIN) {
1536 break;
1537 }
1538 SKTC_ASSERT_ERR(err == 0);
1539 }
1540
1541 if (evlist.filter == EVFILT_READ) {
1542 my_payload payload;
1543 channel_port_receive_payload(port, &payload, verify_qos);
1544 /* packet signaling end of test */
1545 if (strcmp(payload.data, XFER_RECV_END_PAYLOAD) == 0) {
1546 if (should_receive_count >= 0 &&
1547 received_count != should_receive_count) {
1548 T_LOG(
1549 "Error, only received %d/%d\n",
1550 received_count,
1551 should_receive_count);
1552 assert(0);
1553 }
1554 T_LOG("received EOF packet\n");
1555 break;
1556 }
1557 received_count++;
1558 T_LOG("Received [%d/%d] %s\n",
1559 received_count, should_receive_count, payload.data);
1560 if (should_receive_count >= 0 &&
1561 received_count > should_receive_count) {
1562 T_LOG("Error, rx wrong packet\n");
1563 assert(0);
1564 }
1565 } else {
1566 T_LOG("%lu event %d?\n", evlist.ident,
1567 evlist.filter);
1568 assert(0);
1569 break;
1570 }
1571 }
1572
1573 T_LOG("child exit\n");
1574 fflush(stderr);
1575
1576 close(kq);
1577 }
1578
1579 static void
1580 send_and_receive(channel_port_t port, uuid_t flow_id, uint16_t src_port,
1581 struct in_addr dst_ip, uint16_t dst_port, uint32_t how_many,
1582 uint32_t batch_size, int child, bool wrong_flow_id, bool errors_ok,
1583 uint32_t event_flags, bool ifadv_enabled)
1584 {
1585 int n_events, error;
1586 #define N_EVENTS_MAX 3
1587 struct kevent evlist[N_EVENTS_MAX];
1588 struct kevent kev[N_EVENTS_MAX];
1589 int kq;
1590 my_payload payload;
1591 double percent;
1592 uint32_t receive_packet_count;
1593 uint32_t receive_packet_index;
1594 bool rx_complete;
1595 struct timespec timeout;
1596 bool tx_complete;
1597 uint32_t pkts_dropped;
1598 uint32_t n_ifadv_events = 0, n_chan_events = 0;
1599
1600 T_LOG("Sending to %s:%d\n", inet_ntoa(dst_ip), dst_port);
1601 bzero(&payload, sizeof(payload));
1602 kq = kqueue();
1603 assert(kq != -1);
1604 rx_complete = tx_complete = FALSE;
1605 receive_packet_count = 0;
1606 receive_packet_index = 0;
1607 EV_SET(kev + 0, port->fd, EVFILT_WRITE,
1608 EV_ADD | EV_ENABLE, 0, 0, NULL);
1609 EV_SET(kev + 1, port->fd, EVFILT_READ,
1610 EV_ADD | EV_ENABLE, 0, 0, NULL);
1611 n_events = 2;
1612 if ((event_flags & SKT_FSW_EVFLAG_IFADV) != 0) {
1613 assert(n_events < N_EVENTS_MAX);
1614 EV_SET(kev + n_events, port->fd, EVFILT_NW_CHANNEL,
1615 EV_ADD | EV_ENABLE, NOTE_IF_ADV_UPD, 0, NULL);
1616 n_events++;
1617 }
1618 if ((event_flags & SKT_FSW_EVFLAG_CHANNEL) != 0) {
1619 assert(n_events < N_EVENTS_MAX);
1620 EV_SET(kev + n_events, port->fd, EVFILT_NW_CHANNEL,
1621 EV_ADD | EV_ENABLE, NOTE_CHANNEL_EVENT, 0, NULL);
1622 n_events++;
1623 }
1624 error = kevent(kq, kev, n_events, NULL, 0, NULL);
1625 SKTC_ASSERT_ERR(error == 0);
1626 timeout.tv_sec = XFER_TXRX_TIMEOUT_SECS;
1627 timeout.tv_nsec = XFER_TXRX_TIMEOUT_NSECS;
1628 while (!rx_complete || !tx_complete) {
1629 /* wait for TX/RX to become available */
1630 error = kevent(kq, NULL, 0, evlist, N_EVENTS_MAX, &timeout);
1631 if (error <= 0) {
1632 if (errno == EAGAIN) {
1633 continue;
1634 }
1635 SKTC_ASSERT_ERR(error == 0);
1636 }
1637 if (error == 0) {
1638 /* missed seeing last few packets */
1639 if (!errors_ok) {
1640 T_LOG("child %d: timed out, TX %s "
1641 "RX %s\n", child,
1642 tx_complete ? "complete" :"incomplete",
1643 rx_complete ? "complete" :"incomplete");
1644 }
1645 break;
1646 }
1647 for (int i = 0; i < error; i++) {
1648 if (evlist[i].flags & EV_ERROR) {
1649 int err = evlist[i].data;
1650
1651 if (err == EAGAIN) {
1652 break;
1653 }
1654 SKTC_ASSERT_ERR(err == 0);
1655 }
1656
1657 switch (evlist[i].filter) {
1658 case EVFILT_NW_CHANNEL: {
1659 if ((evlist[i].fflags & NOTE_IF_ADV_UPD) != 0) {
1660 skt_process_if_adv(port->port, port->chan);
1661 n_ifadv_events++;
1662 }
1663 if ((evlist[i].fflags & NOTE_CHANNEL_EVENT) != 0) {
1664 skt_process_channel_event(port->chan,
1665 XFER_PKTID_PAYLOAD_TYPE, XFER_PKTID_STREAM_ID,
1666 ^(const os_channel_event_packet_transmit_status_t *pkt_ev) {
1667 assert(pkt_ev->packet_status ==
1668 CHANNEL_EVENT_PKT_TRANSMIT_STATUS_ERR_RETRY_FAILED);
1669 }, NULL, NULL);
1670 n_chan_events++;
1671 }
1672 break;
1673 }
1674 case EVFILT_WRITE: {
1675 uint32_t next_batch;
1676
1677 next_batch = payload.packet_number
1678 + batch_size;
1679 if (next_batch > how_many) {
1680 next_batch = how_many;
1681 }
1682 channel_port_send(port, flow_id,
1683 IPPROTO_UDP,
1684 src_port,
1685 dst_ip, dst_port,
1686 &payload, sizeof(payload),
1687 next_batch, FALSE, FALSE,
1688 PKT_SC_BE, TRUE, NULL);
1689 if (payload.packet_number >= how_many) {
1690 assert(payload.packet_number
1691 == how_many);
1692 T_LOG(
1693 "TX child %d: completed %u\n",
1694 child, how_many);
1695 tx_complete = TRUE;
1696 #if SKT_XFER_DEBUG
1697 T_LOG(
1698 "child %d: disable TX\n",
1699 child);
1700 #endif
1701 EV_SET(kev,
1702 port->fd, EVFILT_WRITE,
1703 EV_DELETE, 0, 0, NULL);
1704 error = kevent(kq, kev, 1,
1705 NULL, 0, NULL);
1706 SKTC_ASSERT_ERR(error == 0);
1707 }
1708 break;
1709 }
1710 case EVFILT_READ: {
1711 channel_port_receive(child, port, src_port, dst_ip,
1712 how_many,
1713 &receive_packet_count,
1714 &receive_packet_index,
1715 errors_ok, &pkts_dropped);
1716 if (receive_packet_index >= how_many) {
1717 assert(receive_packet_index
1718 == how_many);
1719 #if SKT_XFER_DEBUG
1720 T_LOG(
1721 "child %d: disable RX\n",
1722 child);
1723 #endif
1724 EV_SET(kev, port->fd, EVFILT_READ,
1725 EV_DELETE, 0, 0, NULL);
1726 error = kevent(kq, kev, 1,
1727 NULL, 0, NULL);
1728 SKTC_ASSERT_ERR(error == 0);
1729 rx_complete = TRUE;
1730 }
1731 break;
1732 }
1733 default:
1734 T_LOG("%lu event %d?\n",
1735 evlist[i].ident,
1736 evlist[i].filter);
1737 assert(0);
1738 break;
1739 }
1740 }
1741 }
1742 percent = 1.0 * receive_packet_count / how_many * 100.0;
1743 T_LOG("RX child %d: received %u (of %u) %1.02f%%\n",
1744 child, receive_packet_count, how_many, percent);
1745 T_LOG("child %d: received %u ifadv events\n",
1746 child, n_ifadv_events);
1747
1748 if (!errors_ok) {
1749 if (wrong_flow_id) {
1750 assert(receive_packet_count == 0);
1751 } else {
1752 assert(receive_packet_count > 0);
1753 }
1754 }
1755 if ((event_flags & SKT_FSW_EVFLAG_IFADV) != 0) {
1756 if (ifadv_enabled) {
1757 assert(n_ifadv_events != 0);
1758 } else {
1759 assert(n_ifadv_events == 0);
1760 }
1761 }
1762 if ((event_flags & SKT_FSW_EVFLAG_CHANNEL) != 0) {
1763 assert(n_chan_events != 0);
1764 }
1765 close(kq);
1766 }
1767
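/*
 * ping_pong: alternate sending and receiving batches of UDP packets on
 * the flow.  Child 0 starts on EVFILT_WRITE ("ping"), child 1 starts on
 * EVFILT_READ ("pong"); only one kevent filter is armed at a time, so
 * each side strictly alternates TX and RX.  With test_aqm, child 0 also
 * stalls/resumes dequeue on feth0 to exercise flow advisories; with
 * test_wmm, the service class is rotated across batches; a demux_offset
 * within MAX_DEMUX_OFFSET stamps the demux pattern into the payload.
 */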
1768 static void
1769 ping_pong(channel_port_t port, uuid_t flow_id, uint16_t src_port,
1770 struct in_addr dst_ip, uint16_t dst_port, uint32_t how_many,
1771 uint32_t batch_size, int child, bool wrong_flow_id,
1772 flowadv_idx_t flowadv_idx, bool test_aqm, bool test_wmm,
1773 uint16_t demux_offset)
1774 {
1775 int error;
1776 #define N_EVENTS 2
1777 struct kevent evlist[N_EVENTS];
1778 bool expect_flowadv = FALSE;
1779 bool expect_stall = FALSE;
1780 struct timespec fadv_timeout;
1781 struct kevent kev[N_EVENTS];
1782 int kq;
1783 my_payload payload;
1784 double percent;
1785 uint32_t next_receive_count;
1786 uint32_t receive_packet_count;
1787 uint32_t receive_packet_index;
1788 struct timespec rcv_timeout;
1789 bool rx_complete;
1790 bool sending;
1791 struct timespec snd_timeout;
1792 int snd_batch_cnt = 0;
1793 struct timespec *timeout;
1794 bool tx_complete;
1795 packet_svc_class_t svc_class = PKT_SC_BE;
1796 uint32_t pkts_dropped;
1797
1798 if (test_aqm) {
1799 assert(how_many / batch_size == 4);
1800 }
1801 T_LOG("Sending to %s:%d\n", inet_ntoa(dst_ip), dst_port);
1802 bzero(&payload, sizeof(payload));
1803 kq = kqueue();
1804 assert(kq != -1);
1805 rx_complete = tx_complete = FALSE;
1806 receive_packet_count = 0;
1807 receive_packet_index = 0;
1808 next_receive_count = batch_size;
1809
1810 snd_timeout.tv_sec = XFER_PING_TIMEOUT_SECS;
1811 snd_timeout.tv_nsec = XFER_PING_TIMEOUT_NSECS;
1812 fadv_timeout.tv_sec = XFER_PING_FADV_TIMEOUT_SECS;
1813 fadv_timeout.tv_nsec = XFER_PING_FADV_TIMEOUT_NSECS;
1814
1815 if (test_aqm && child == 1) {
1816 /*
1817 * child-1 will not receive packets on time when
1818 * child-0's send interface is throttled, hence it
1819 * requires a larger timeout.
1820 */
1821 rcv_timeout.tv_sec = XFER_PING_CHILD1_RX_TIMEOUT_SECS;
1822 rcv_timeout.tv_nsec = XFER_PING_CHILD1_RX_TIMEOUT_NSECS;
1823 } else {
1824 rcv_timeout.tv_sec = XFER_PING_TIMEOUT_SECS;
1825 rcv_timeout.tv_nsec = XFER_PING_TIMEOUT_NSECS;
1826 }
1827
1828 if (test_aqm) {
1829 /*
1830 * the flow advisory filter always reports an initial event;
1831 * check for that here.
1832 */
1833 EV_SET(kev + 0, port->fd, EVFILT_NW_CHANNEL, EV_ADD,
1834 NOTE_FLOW_ADV_UPDATE, 0, NULL);
1835 error = kevent(kq, kev, 1, NULL, 0, NULL);
1836 SKTC_ASSERT_ERR(error == 0);
1837 timeout = &fadv_timeout;
1838 error = kevent(kq, NULL, 0, evlist, N_EVENTS, timeout);
1839 SKTC_ASSERT_ERR(error == 1);
1840 }
1841
1842 if (demux_offset <= MAX_DEMUX_OFFSET) {
1843 payload.data[demux_offset] = DEMUX_PAYLOAD_VALUE;
1844 payload.data[demux_offset + 1] = DEMUX_PAYLOAD_VALUE >> 8;
1845 }
1846
1847 if (child == 0) {
1848 sending = TRUE;
1849 EV_SET(kev, port->fd, EVFILT_WRITE,
1850 EV_ADD | EV_ENABLE, 0, 0, NULL);
1851 } else {
1852 sending = FALSE;
1853 EV_SET(kev, port->fd, EVFILT_READ,
1854 EV_ADD | EV_ENABLE, 0, 0, NULL);
1855 }
1856 error = kevent(kq, kev, 1, NULL, 0, NULL);
1857 SKTC_ASSERT_ERR(error == 0);
1858
1859 while (!rx_complete || !tx_complete) {
1860 if (expect_flowadv) {
1861 timeout = &fadv_timeout;
1862 } else if (sending) {
1863 timeout = &snd_timeout;
1864 } else {
1865 timeout = &rcv_timeout;
1866 }
1867
1868 /* wait for something to happen */
1869 error = kevent(kq, NULL, 0, evlist, N_EVENTS, timeout);
1870 if (error <= 0) {
1871 int err = errno;
1872
1873 if (err == EAGAIN) {
1874 continue;
1875 }
1876 SKTC_ASSERT_ERR(error == 0);
1877 }
1878 if (error == 0) {
1879 T_LOG(
1880 "child %d: timed out TX %s RX %s FA %s\n",
1881 child,
1882 tx_complete ? "complete" : "incomplete",
1883 rx_complete ? "complete" : "incomplete",
1884 expect_flowadv ? "incomplete" : "complete");
1885 /*
1886 * Test should fail if it times out while expecting a
1887 * channel flow advisory event.
1888 */
1889 assert(!expect_flowadv);
1890 break;
1891 }
1892 if (error != 1) {
1893 T_LOG("child %d: got %d events, expected 1\n",
1894 child, error);
1895 assert(0);
1896 } else if (evlist[0].flags & EV_ERROR) {
1897 int err = evlist[0].data;
1898
1899 if (err == EAGAIN) {
1900 continue;
1901 }
1902 SKTC_ASSERT_ERR(err == 0);
1903 }
1904
1905 /* check that the correct event fired */
1906 if (expect_flowadv) {
1907 int n_kev = 0;
1908 assert(child == 0);
1909 assert(evlist[0].filter == EVFILT_NW_CHANNEL);
1910 assert(evlist[0].fflags & NOTE_FLOW_ADV_UPDATE);
1911 error = os_channel_flow_admissible(port->tx_ring,
1912 flow_id, flowadv_idx);
1913 if (expect_stall) {
1914 /*
1915 * when flow control is enabled
1916 * os_channel_flow_admissible() should return
1917 * ENOBUFS.
1918 */
1919 SKTC_ASSERT_ERR(error == ENOBUFS);
1920 /*
1921 * Now, enable dequeuing on the interface.
1922 * This will allow the buffered 2nd batch of
1923 * packets to be sent out the interface as
1924 * well as trigger a flow advisory event
1925 * to resume send on the channel.
1926 */
1927 T_LOG("child %d, enable dequeue "
1928 "on feth0\n", child);
1929 error =
1930 sktc_ifnet_feth0_set_dequeue_stall(FALSE);
1931 SKTC_ASSERT_ERR(error == 0);
1932 expect_stall = FALSE;
1933 expect_flowadv = TRUE;
1934 #if SKT_XFER_DEBUG
1935 T_LOG("child %d: enable FA "
1936 "no stall\n", child);
1937 #endif
1938 } else {
1939 /* flow must be admissible on the channel */
1940 SKTC_ASSERT_ERR(error == 0);
1941 #if SKT_XFER_DEBUG
1942 T_LOG("child %d: Disable FA\n",
1943 child);
1944 #endif
1945 /*
1946 * Flow control has been tested, so remove the
1947 * flow advisory filter.
1948 */
1949 EV_SET(kev + 0, port->fd, EVFILT_NW_CHANNEL,
1950 EV_DELETE, 0, 0, NULL);
1951 expect_flowadv = FALSE;
1952 n_kev = 1;
1953
1954 /*
1955 * Now enable receiving acks for the 2nd batch
1956 * of packets.
1957 */
1958 assert(!rx_complete);
1959 /* enable RX */
1960 EV_SET(kev + n_kev, port->fd, EVFILT_READ,
1961 EV_ADD, 0, 0, NULL);
1962 n_kev++;
1963 #if SKT_XFER_DEBUG
1964 T_LOG("child %d: enable RX\n", child);
1965 #endif
1966 /*
1967 * child 0 should now expect acks for the 2nd
1968 * batch of packets.
1969 */
1970 sending = FALSE;
1971 timeout = &rcv_timeout;
1972 }
1973 assert(n_kev <= N_EVENTS);
1974 if (n_kev > 0) {
1975 error = kevent(kq, kev, n_kev, NULL, 0, NULL);
1976 SKTC_ASSERT_ERR(error == 0);
1977 }
1978 continue;
1979 } else {
1980 /*
1981 * verify that flow advisory event is reported
1982 * only when expected.
1983 */
1984 assert(evlist[0].filter != EVFILT_NW_CHANNEL);
1985 }
1986
1987 if (sending) {
1988 uint32_t next_batch;
1989 int n_kev = 0;
1990 bool skip_receive = FALSE;
1991
1992 assert(evlist[0].filter == EVFILT_WRITE);
1993 if (test_wmm) {
1994 svc_class = packet_svc_class[(snd_batch_cnt %
1995 NUM_SVC_CLASS)];
1996 }
1997 snd_batch_cnt++;
1998 next_batch = payload.packet_number + batch_size;
1999 if (next_batch > how_many) {
2000 next_batch = how_many;
2001 }
2002
2003 if (test_aqm && child == 0 && snd_batch_cnt == 2) {
2004 /*
2005 * disable dequeue on feth0 before sending the
2006 * 2nd batch of packets.
2007 * These UDP packets will now get buffered at the
2008 * interface AQM.
2009 */
2010 T_LOG("child %d, disable dequeue on"
2011 " feth0\n", child);
2012 error =
2013 sktc_ifnet_feth0_set_dequeue_stall(TRUE);
2014 SKTC_ASSERT_ERR(error == 0);
2015 }
2016
2017 if (test_aqm && child == 0 && snd_batch_cnt == 3) {
2018 /*
2019 * wait for interface update interval to elapse
2020 * before sending the 3rd batch of packets.
2021 * These UDP packets will be dropped by AQM.
2022 */
2023 T_LOG("child %d, sleep for update"
2024 " interval (%d ms)\n", child,
2025 XFER_CLASSQ_UPDATE_INTERVAL_ELAPSE_DELAY);
2026 usleep(
2027 XFER_CLASSQ_UPDATE_INTERVAL_ELAPSE_DELAY *
2028 1000);
2029 }
2030
2031 /* Flow should be writable */
2032 if (!wrong_flow_id) {
2033 error =
2034 os_channel_flow_admissible(port->tx_ring,
2035 flow_id, flowadv_idx);
2036 SKTC_ASSERT_ERR(error == 0);
2037 }
2038
2039 channel_port_send(port, flow_id, IPPROTO_UDP,
2040 src_port, dst_ip, dst_port, &payload,
2041 sizeof(payload), next_batch, TRUE, FALSE,
2042 svc_class, TRUE, NULL);
2043 #if SKT_XFER_DEBUG
2044 T_LOG(
2045 "TX child %d: %s %u of %u\n", child,
2046 (child == 0) ? "ping" : "pong",
2047 next_batch, how_many);
2048 #endif
2049 if (payload.packet_number >= how_many) {
2050 assert(payload.packet_number
2051 == how_many);
2052 T_LOG(
2053 "TX child %d: completed %u\n",
2054 child,
2055 how_many);
2056 tx_complete = TRUE;
2057 }
2058
2059 if (test_aqm && child == 0 && snd_batch_cnt == 2) {
2060 /* The 2nd batch of packets is not going to reach
2061 * the receiver at child 1 until dequeuing is
2062 * re-enabled on feth0.
2063 * Skip receiving and send the 3rd batch of
2064 * packets.
2065 */
2066 continue;
2067 }
2068
2069 if (test_aqm && child == 0 && snd_batch_cnt == 3) {
2070 /*
2071 * sending the 3rd batch of packets should have
2072 * triggered a flow advisory event on the channel.
2073 * The flow should not be admissible now.
2074 */
2075 expect_flowadv = TRUE;
2076 expect_stall = TRUE;
2077 timeout = &fadv_timeout;
2078 #if SKT_XFER_DEBUG
2079 T_LOG("child %d: expect stall\n",
2080 child);
2081 #endif
2082 /*
2083 * packets will not reach the receiver at child 1
2084 * until dequeuing on feth0 is re-enabled,
2085 * so skip receiving.
2086 */
2087 skip_receive = TRUE;
2088 }
2089 #if SKT_XFER_DEBUG
2090 T_LOG("child %d disable TX\n", child);
2091 #endif
2092 EV_SET(kev + n_kev, port->fd, EVFILT_WRITE, EV_DELETE,
2093 0, 0, NULL);
2094 n_kev++;
2095
2096 if (!skip_receive && !rx_complete) {
2097 /* enable RX */
2098 assert(n_kev == 1);
2099 EV_SET(kev + n_kev, port->fd, EVFILT_READ,
2100 EV_ADD, 0, 0, NULL);
2101 n_kev++;
2102 #if SKT_XFER_DEBUG
2103 T_LOG("child %d: enable RX\n", child);
2104 #endif
2105 }
2106 assert(n_kev <= N_EVENTS);
2107 if (n_kev > 0) {
2108 error = kevent(kq, kev, n_kev, NULL, 0, NULL);
2109 SKTC_ASSERT_ERR(error == 0);
2110 }
2111 sending = FALSE;
2112 } else {
2113 assert(evlist[0].filter == EVFILT_READ);
2114 pkts_dropped = 0;
2115 channel_port_receive(child, port, src_port, dst_ip,
2116 how_many,
2117 &receive_packet_count,
2118 &receive_packet_index,
2119 false, &pkts_dropped);
2120
2121 if (pkts_dropped != 0) {
2122 /*
2123 * the ping-pong test shouldn't see any packet
2124 * drops, unless intentionally induced during the AQM test.
2125 */
2126 assert(test_aqm);
2127 assert(pkts_dropped ==
2128 XFER_AQM_PING_BATCH_COUNT);
2129 }
2130 if (receive_packet_index >= how_many) {
2131 assert(receive_packet_index == how_many);
2132 rx_complete = TRUE;
2133 }
2134 if (rx_complete ||
2135 receive_packet_index >= next_receive_count) {
2136 int n_kev;
2137 #if SKT_XFER_DEBUG
2138 T_LOG(
2139 "child %d: disable RX\n", child);
2140 #endif
2141 EV_SET(kev, port->fd, EVFILT_READ, EV_DELETE,
2142 0, 0, NULL);
2143 n_kev = 1;
2144 next_receive_count = receive_packet_index +
2145 batch_size;
2146 if (next_receive_count >= how_many) {
2147 next_receive_count = how_many;
2148 }
2149 if (!tx_complete) {
2150 /* re-enable TX */
2151 EV_SET(kev + n_kev,
2152 port->fd, EVFILT_WRITE,
2153 EV_ADD, 0, 0, NULL);
2154 #if SKT_XFER_DEBUG
2155 T_LOG(
2156 "child %d: enable TX\n", child);
2157 #endif
2158 n_kev++;
2159 sending = TRUE;
2160 if (child == 1) {
2161 payload.packet_number +=
2162 pkts_dropped;
2163 }
2164 } else if (!rx_complete) {
2165 assert(tx_complete);
2166 /*
2167 * If Tx has completed and there are
2168 * packets still expected to be received,
2169 * leave Rx enabled.
2170 */
2171 #if SKT_XFER_DEBUG
2172 T_LOG(
2173 "child %d: enable RX\n", child);
2174 #endif
2175 n_kev = 0;
2176 }
2177 if (n_kev) {
2178 error = kevent(kq, kev, n_kev, NULL, 0, NULL);
2179 SKTC_ASSERT_ERR(error == 0);
2180 }
2181 }
2182 }
2183 }
2184 percent = 1.0 * receive_packet_count / how_many * 100.0;
2185 T_LOG("RX child %d: received %u (of %u) %1.02f%%\n",
2186 child, receive_packet_count, how_many, percent);
2187 /* wait to give the packet(s) a chance to make it to the other end */
2188 usleep(100 * 1000);
2189 if (test_aqm) {
2190 /*
2191 * while testing AQM functionality we should have dropped
2192 * one batch of packets out of the 4 batches.
2193 */
2194 assert(receive_packet_count == ((how_many * 3) / 4));
2195 } else if (wrong_flow_id) {
2196 assert(receive_packet_count == 0);
2197 } else {
2198 assert(receive_packet_count == how_many);
2199 }
2200 #if SKT_XFER_DEBUG
2201 if (receive_packet_count < how_many) {
2202 T_LOG("Child %d waiting", child);
2203 fflush(stdout);
2204 for (int i = 0; i < 5; i++) {
2205 sleep(1);
2206 T_LOG(".");
2207 fflush(stdout);
2208 }
2209 T_LOG("\n");
2210 assert(0);
2211 }
2212 #endif
2213 close(kq);
2214 }
2215
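/*
 * send_tcp: transmit `how_many' TCP packets on the flow in batches of
 * `batch_size', driven by a single EVFILT_WRITE kevent; there is no
 * receive side.  The `connect' flag is passed straight through to
 * channel_port_send() (skt_xfer_tcpflood() sets it for its synflood
 * variant).
 */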
2216 static void
2217 send_tcp(channel_port_t port, uuid_t flow_id, uint16_t src_port,
2218 struct in_addr dst_ip, uint16_t dst_port, uint32_t how_many,
2219 uint32_t batch_size, int child, bool connect)
2220 {
2221 int error;
2222 struct kevent ev;
2223 struct kevent kev;
2224 int kq;
2225 my_payload payload;
2226 struct timespec timeout;
2227 bool tx_complete;
2228
2229 T_LOG("Sending to %s:%d\n", inet_ntoa(dst_ip), dst_port);
2230 bzero(&payload, sizeof(payload));
2231 kq = kqueue();
2232 assert(kq != -1);
2233 tx_complete = FALSE;
2234
2235 EV_SET(&kev, port->fd, EVFILT_WRITE,
2236 EV_ADD | EV_ENABLE, 0, 0, NULL);
2237 error = kevent(kq, &kev, 1, NULL, 0, NULL);
2238 SKTC_ASSERT_ERR(error == 0);
2239 timeout.tv_sec = 1;
2240 timeout.tv_nsec = 0;
2241 while (!tx_complete) {
2242 /* wait for TX to become available */
2243 error = kevent(kq, NULL, 0, &ev, 1, &timeout);
2244 if (error <= 0) {
2245 if (errno == EAGAIN) {
2246 continue;
2247 }
2248 SKTC_ASSERT_ERR(error == 0);
2249 }
2250 if (error == 0) {
2251 /* missed seeing last few packets */
2252 T_LOG("child %d timed out, TX %s\n",
2253 child,
2254 tx_complete ? "complete" : "incomplete");
2255 break;
2256 }
2257 if (ev.flags & EV_ERROR) {
2258 int err = ev.data;
2259
2260 if (err == EAGAIN) {
2261 continue;
2262 }
2263 SKTC_ASSERT_ERR(err == 0);
2264 }
2265 switch (ev.filter) {
2266 case EVFILT_WRITE: {
2267 uint32_t next_batch;
2268
2269 next_batch = payload.packet_number
2270 + batch_size;
2271 if (next_batch > how_many) {
2272 next_batch = how_many;
2273 }
2274 channel_port_send(port, flow_id,
2275 IPPROTO_TCP,
2276 src_port,
2277 dst_ip, dst_port,
2278 &payload, sizeof(payload),
2279 next_batch, FALSE, connect,
2280 PKT_SC_BE, TRUE, NULL);
2281 if (payload.packet_number >= how_many) {
2282 assert(payload.packet_number
2283 == how_many);
2284 T_LOG(
2285 "TX child %d: completed %u\n",
2286 child, how_many);
2287 tx_complete = TRUE;
2288 #if SKT_XFER_DEBUG
2289 T_LOG(
2290 "child %d: disable TX\n",
2291 child);
2292 #endif
2293 EV_SET(&kev,
2294 port->fd, EVFILT_WRITE,
2295 EV_DELETE, 0, 0, NULL);
2296 error = kevent(kq, &kev, 1,
2297 NULL, 0, NULL);
2298 SKTC_ASSERT_ERR(error == 0);
2299 }
2300 break;
2301 }
2302 default:
2303 T_LOG("%lu event %d?\n",
2304 ev.ident,
2305 ev.filter);
2306 assert(0);
2307 break;
2308 }
2309 }
2310 close(kq);
2311 }
2312
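/*
 * set_error_inject_mask: read and optionally update the
 * kern.skywalk.flowswitch.fsw_inject_error sysctl, returning the
 * previous mask.  Passing NULL only reads the current value.  When the
 * sysctl fails and we are running under the driver, the failure is
 * logged instead of asserted.
 */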
2313 static uint64_t
2314 set_error_inject_mask(uint64_t *mask)
2315 {
2316 uint64_t old_mask = 0;
2317 size_t old_size = sizeof(old_mask);
2318 int error;
2319
2320 error =
2321 sysctlbyname("kern.skywalk.flowswitch.fsw_inject_error",
2322 &old_mask, &old_size, mask, mask ? sizeof(*mask) : 0);
2323
2324 if ((error != 0) && skywalk_in_driver) {
2325 T_LOG("sysctlbyname failed for fsw_inject_error "
2326 "error %d\n", error);
2327 } else {
2328 SKTC_ASSERT_ERR(error == 0);
2329 }
2330 return old_mask;
2331 }
2332
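/*
 * do_error_receive: keep draining packets on the flow (with errors
 * tolerated) until a receive timeout is hit while the error injection
 * mask reads back as zero, which means the sending side has finished
 * cycling through all injection codes.
 */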
2333 static void
2334 do_error_receive(int child, channel_port_t port, uuid_t flow_id, uint16_t src_port,
2335 struct in_addr dst_ip, uint16_t dst_port, uint32_t how_many)
2336 {
2337 int error;
2338 struct kevent evlist, kev;
2339 int kq;
2340 my_payload payload;
2341 uint32_t receive_packet_count;
2342 uint32_t receive_packet_index;
2343 struct timespec timeout;
2344 uint32_t pkts_dropped;
2345
2346 bzero(&payload, sizeof(payload));
2347 kq = kqueue();
2348 assert(kq != -1);
2349 receive_packet_count = 0;
2350 receive_packet_index = 0;
2351
2352 EV_SET(&kev, port->fd, EVFILT_READ,
2353 EV_ADD | EV_ENABLE, 0, 0, NULL);
2354 error = kevent(kq, &kev, 1, NULL, 0, NULL);
2355 SKTC_ASSERT_ERR(error == 0);
2356
2357 for (;;) {
2358 /* wait for RX to become available */
2359 timeout.tv_sec = 1;
2360 timeout.tv_nsec = 0;
2361 error = kevent(kq, NULL, 0, &evlist, 1, &timeout);
2362 if (error <= 0) {
2363 if (errno == EAGAIN) {
2364 continue;
2365 }
2366 SKTC_ASSERT_ERR(error == 0);
2367 }
2368 if (error == 0) {
2369 /*
2370 * Timed out. Check if the test is complete;
2371 * the mask will be zero when the parent is finished.
2372 */
2373 if (set_error_inject_mask(NULL) == 0) {
2374 break;
2375 }
2376
2377 /* Otherwise continue receiving */
2378 receive_packet_count = 0;
2379 receive_packet_index = 0;
2380 continue;
2381 }
2382 if (evlist.flags & EV_ERROR) {
2383 int err = evlist.data;
2384
2385 if (err == EAGAIN) {
2386 break;
2387 }
2388 SKTC_ASSERT_ERR(err == 0);
2389 }
2390
2391 if (evlist.filter == EVFILT_READ) {
2392 channel_port_receive(child, port, src_port, dst_ip,
2393 how_many,
2394 &receive_packet_count,
2395 &receive_packet_index,
2396 true, &pkts_dropped);
2397 } else {
2398 T_LOG("%lu event %d?\n",
2399 evlist.ident, evlist.filter);
2400 assert(0);
2401 break;
2402 }
2403 }
2404
2405 close(kq);
2406 }
2407
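/*
 * do_error_send: transmit `how_many' UDP packets in batches while an
 * error injection mask is in effect; the caller compares flowswitch
 * stats taken before and after to confirm the injected error path was
 * actually exercised.
 */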
2408 static void
2409 do_error_send(channel_port_t port, uuid_t flow_id, uint16_t src_port,
2410 struct in_addr dst_ip, uint16_t dst_port, uint32_t how_many,
2411 uint32_t batch_size)
2412 {
2413 int error;
2414 struct kevent evlist;
2415 struct kevent kev;
2416 int kq;
2417 my_payload payload;
2418 uint32_t receive_packet_count;
2419 uint32_t receive_packet_index;
2420 bool tx_complete;
2421 struct timespec timeout;
2422
2423 bzero(&payload, sizeof(payload));
2424 kq = kqueue();
2425 assert(kq != -1);
2426 receive_packet_count = 0;
2427 receive_packet_index = 0;
2428 EV_SET(&kev, port->fd, EVFILT_WRITE,
2429 EV_ADD | EV_ENABLE, 0, 0, NULL);
2430 error = kevent(kq, &kev, 1, NULL, 0, NULL);
2431 SKTC_ASSERT_ERR(error == 0);
2432 tx_complete = false;
2433
2434 while (!tx_complete) {
2435 /* wait for TX to become available */
2436 timeout.tv_sec = 5;
2437 timeout.tv_nsec = 0;
2438 error = kevent(kq, NULL, 0, &evlist, 1, &timeout);
2439 if (error < 0) {
2440 if (errno == EAGAIN) {
2441 continue;
2442 }
2443 SKTC_ASSERT_ERR(error == 0);
2444 }
2445 if (error == 0) {
2446 /* Timeout. Not supposed to happen. */
2447 break;
2448 }
2449
2450 if (evlist.flags & EV_ERROR) {
2451 int err = evlist.data;
2452
2453 if (err == EAGAIN) {
2454 break;
2455 }
2456 SKTC_ASSERT_ERR(err == 0);
2457 }
2458
2459 if (evlist.filter == EVFILT_WRITE) {
2460 uint32_t next_batch;
2461
2462 next_batch = payload.packet_number + batch_size;
2463 if (next_batch > how_many) {
2464 next_batch = how_many;
2465 }
2466 channel_port_send(port, flow_id, IPPROTO_UDP, src_port,
2467 dst_ip, dst_port, &payload, sizeof(payload),
2468 next_batch, FALSE, FALSE, PKT_SC_BE, TRUE, NULL);
2469 if (payload.packet_number >= how_many) {
2470 assert(payload.packet_number
2471 == how_many);
2472 tx_complete = true;
2473 #if SKT_XFER_DEBUG
2474 T_LOG("disable TX\n");
2475 #endif
2476 EV_SET(&kev,
2477 port->fd, EVFILT_WRITE,
2478 EV_DELETE, 0, 0, NULL);
2479 error = kevent(kq, &kev, 1,
2480 NULL, 0, NULL);
2481 SKTC_ASSERT_ERR(error == 0);
2482 }
2483 } else {
2484 T_LOG("%lu event %d?\n",
2485 evlist.ident, evlist.filter);
2486 assert(0);
2487 break;
2488 }
2489 }
2490 close(kq);
2491 }
2492
2493 #define MAX_LLINKS 256
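/*
 * get_qset_id_from_llinks: dump the logical link / queue set layout of
 * the netif nexus and pick one qset pseudo-randomly (keyed off
 * mach_absolute_time()) to be used for flow steering.
 */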
2494 static void
2495 get_qset_id_from_llinks(struct sktc_nexus_handles *handles, uint64_t *qset_id)
2496 {
2497 struct nx_llink_info_req *nlir;
2498 size_t len;
2499 uint64_t qset_select;
2500 int err, i, llink_idx, qset_idx;
2501
2502 len = sizeof(*nlir) + MAX_LLINKS * sizeof(struct nx_llink_info);
2503 nlir = malloc(len);
2504 nlir->nlir_version = NETIF_LLINK_INFO_VERSION;
2505 nlir->nlir_llink_cnt = MAX_LLINKS;
2506
2507 err = __os_nexus_get_llink_info(handles->controller,
2508 handles->netif_nx_uuid, nlir, len);
2509 if (err != 0) {
2510 T_LOG("__os_nexus_llink_info failed: %d\n", err);
2511 free(nlir);
2512 return;
2513 }
2514 qset_select = mach_absolute_time();
2515 T_LOG("\nqset_select: 0x%llx\n", qset_select);
2516 T_LOG("llink_cnt: %d\n", nlir->nlir_llink_cnt);
2517 for (i = 0; i < nlir->nlir_llink_cnt; i++) {
2518 struct nx_llink_info *nli;
2519 int j;
2520
2521 nli = &nlir->nlir_llink[i];
2522 T_LOG("\tlink_id: 0x%llx\n", nli->nli_link_id);
2523 T_LOG("\tlink_id_internal: 0x%x\n", nli->nli_link_id_internal);
2524 T_LOG("\tstate: 0x%x\n", nli->nli_state);
2525 T_LOG("\tflags: 0x%x\n", nli->nli_flags);
2526 T_LOG("\tqset_cnt: %d\n", nli->nli_qset_cnt);
2527 for (j = 0; j < nli->nli_qset_cnt; j++) {
2528 struct nx_qset_info *nqi;
2529
2530 nqi = &nli->nli_qset[j];
2531 T_LOG("\t\tqset_id: %llx\n", nqi->nqi_id);
2532 T_LOG("\t\tflags: 0x%x\n", nqi->nqi_flags);
2533 T_LOG("\t\tnum_rx_queues: %d\n", nqi->nqi_num_rx_queues);
2534 T_LOG("\t\tnum_tx_queues: %d\n", nqi->nqi_num_tx_queues);
2535
2536 /* randomly pick a qset for steering */
2537 if (((qset_select) % nlir->nlir_llink_cnt) == i &&
2538 ((qset_select >> 16) % nli->nli_qset_cnt) == j) {
2539 llink_idx = i;
2540 qset_idx = j;
2541 *qset_id = nqi->nqi_id;
2542 }
2543 }
2544 }
2545 T_LOG("chosen llink_idx: %d\n", llink_idx);
2546 T_LOG("chosen qset_idx: %d\n", qset_idx);
2547 T_LOG("chosen qset_id: 0x%llx\n\n", *qset_id);
2548 free(nlir);
2549 }
2550
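/*
 * setup_flowswitch_and_flow: create a flowswitch over `ifname', bind
 * OUR_FLOWSWITCH_PORT to `the_pid' and connect a flow for the given
 * protocol and 5-tuple.  With multi_llink, the flow is steered to a
 * qset chosen by get_qset_id_from_llinks().
 */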
2551 static int
2552 setup_flowswitch_and_flow(struct sktc_nexus_handles * handles,
2553 const char * ifname, int protocol, uint16_t flags, struct in_addr our_ip,
2554 struct in_addr our_mask, uint16_t our_port, pid_t the_pid,
2555 struct in_addr peer_ip, uint16_t peer_port, uuid_t flow_id,
2556 flowadv_idx_t *flowadv_idx, int tx_ring_size, int rx_ring_size,
2557 int buf_size, int max_frags, bool multi_llink)
2558 {
2559 int error;
2560 uint64_t qset_id = 0;
2561
2562 bzero(handles, sizeof(*handles));
2563 strlcpy(handles->netif_ifname, ifname, sizeof(handles->netif_ifname));
2564 handles->netif_addr = our_ip;
2565 handles->netif_mask = our_mask;
2566 sktc_create_flowswitch_no_address(handles, tx_ring_size,
2567 rx_ring_size, buf_size, max_frags, 0);
2568 error = os_nexus_controller_bind_provider_instance(handles->controller,
2569 handles->fsw_nx_uuid, OUR_FLOWSWITCH_PORT, the_pid, NULL, NULL, 0,
2570 NEXUS_BIND_PID);
2571 if (error != 0) {
2572 return error;
2573 }
2574
2575 if (multi_llink) {
2576 get_qset_id_from_llinks(handles, &qset_id);
2577 assert(qset_id != 0);
2578 }
2579 if (uuid_is_null(flow_id)) {
2580 uuid_generate(flow_id);
2581 }
2582 error = connect_flow(handles->controller, handles->fsw_nx_uuid,
2583 OUR_FLOWSWITCH_PORT, flow_id, protocol, flags, handles->netif_addr,
2584 our_port, peer_ip, peer_port, flowadv_idx, qset_id);
2585 return error;
2586 }
2587
2588 static void
2589 setup_flowswitch(struct sktc_nexus_handles * handles,
2590 const char * ifname, pid_t the_pid,
2591 int tx_ring_size, int rx_ring_size,
2592 int buf_size, int max_frags)
2593 {
2594 bzero(handles, sizeof(*handles));
2595 strlcpy(handles->netif_ifname, ifname, sizeof(handles->netif_ifname));
2596 sktc_create_flowswitch_no_address(handles, tx_ring_size,
2597 rx_ring_size, buf_size, max_frags, 0);
2598 return;
2599 }
2600
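/*
 * fetch_if_flowswitch_and_setup_flow: like setup_flowswitch_and_flow(),
 * but attaches to an already existing netif/flowswitch pair on `ifname'
 * instead of creating one.  When demux patterns are supplied, a child
 * flow is connected on CHILD_FLOWSWITCH_PORT under `parent_flow_id'.
 */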
2601 static int
2602 fetch_if_flowswitch_and_setup_flow(struct sktc_nexus_handles * handles,
2603 const char * ifname, int protocol, uint16_t flags, struct in_addr our_ip,
2604 struct in_addr our_mask, uint16_t our_port, pid_t the_pid,
2605 struct in_addr peer_ip, uint16_t peer_port, uuid_t flow_id,
2606 flowadv_idx_t *flowadv_idx, int tx_ring_size, int rx_ring_size,
2607 int buf_size, int max_frags, bool multi_llink, uuid_t parent_flow_id,
2608 struct flow_demux_pattern *demux_patterns, uint8_t demux_pattern_count)
2609 {
2610 int error;
2611 uint64_t qset_id = 0;
2612 bool child_flow = (demux_pattern_count > 0);
2613
2614 bzero(handles, sizeof(*handles));
2615 strlcpy(handles->netif_ifname, ifname, sizeof(handles->netif_ifname));
2616 handles->netif_addr = our_ip;
2617 handles->netif_mask = our_mask;
2618
2619 if (handles->netif_ifname[0] == '\0') {
2620 T_LOG("%s: no interface name specified\n",
2621 __func__);
2622 return EINVAL;
2623 }
2624 if (strlen(handles->netif_ifname) >= IFNAMSIZ) {
2625 T_LOG("%s: invalid interface name specified %s\n",
2626 __func__, handles->netif_ifname);
2627 return EINVAL;
2628 }
2629
2630 handles->controller = os_nexus_controller_create();
2631 if (handles->controller == NULL) {
2632 SKT_LOG(
2633 "%s: os_nexus_controller_create failed, %s (%d)\n",
2634 __func__, strerror(errno), errno);
2635 return ENOMEM;
2636 }
2637
2638 if ((sktc_get_netif_nexus(handles->netif_ifname, handles->netif_nx_uuid) &&
2639 sktc_get_flowswitch_nexus(handles->netif_ifname, handles->fsw_nx_uuid))) {
2640 if (child_flow) {
2641 error = os_nexus_controller_bind_provider_instance(handles->controller,
2642 handles->fsw_nx_uuid, CHILD_FLOWSWITCH_PORT, the_pid, NULL, NULL, 0,
2643 NEXUS_BIND_PID);
2644 } else {
2645 error = os_nexus_controller_bind_provider_instance(handles->controller,
2646 handles->fsw_nx_uuid, OUR_FLOWSWITCH_PORT, the_pid, NULL, NULL, 0,
2647 NEXUS_BIND_PID);
2648 }
2649 if (error != 0) {
2650 SKT_LOG("PID %d: nexus controller bind failed: %s\n",
2651 getpid(), strerror(errno));
2652 return error;
2653 }
2654
2655 if (multi_llink) {
2656 get_qset_id_from_llinks(handles, &qset_id);
2657 assert(qset_id != 0);
2658 }
2659 if (uuid_is_null(flow_id)) {
2660 uuid_generate(flow_id);
2661 }
2662 if (child_flow) {
2663 error = connect_child_flow(handles->controller, handles->fsw_nx_uuid,
2664 CHILD_FLOWSWITCH_PORT, flow_id, protocol, flags, handles->netif_addr,
2665 our_port, peer_ip, peer_port, flowadv_idx, qset_id, parent_flow_id,
2666 demux_patterns, demux_pattern_count);
2667 } else {
2668 error = connect_flow(handles->controller, handles->fsw_nx_uuid,
2669 OUR_FLOWSWITCH_PORT, flow_id, protocol, flags, handles->netif_addr,
2670 our_port, peer_ip, peer_port, flowadv_idx, qset_id);
2671 }
2672 } else {
2673 T_LOG(
2674 "%s: failed to find existing netif/flowswitch instance\n", __func__);
2675 return ENOENT;
2676 }
2677
2678 return error;
2679 }
2680
2681 #define FAKE_ETHER_NAME "feth"
2682 #define FAKE_ETHER_NAME_LEN (sizeof(FAKE_ETHER_NAME) - 1)
2683
2684 static void
2685 set_feth_mac_addr(struct ether_addr *feth_macaddr, uint32_t unit)
2686 {
2687 /*
2688 * FETH MAC addresses are hardcoded in if_fake.c, but they are not exposed.
2689 * We use the same hardcoded values here.
2690 */
2691 bcopy(FAKE_ETHER_NAME, feth_macaddr->octet, FAKE_ETHER_NAME_LEN);
2692 feth_macaddr->octet[ETHER_ADDR_LEN - 2] = (unit & 0xff00) >> 8;
2693 feth_macaddr->octet[ETHER_ADDR_LEN - 1] = unit & 0xff;
2694 }
2695
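/*
 * skt_xfer_udp_common: common body for the UDP transfer tests.  Each
 * child sets up a flowswitch and flow over its feth interface (or over
 * the redirect interface for test_redirect), seeds the ARP cache,
 * synchronizes with the parent over MPTEST_SEQ_FILENO, and then runs
 * either ping_pong() or send_and_receive() depending on the requested
 * mode (AQM, WMM, interface advisory or channel event tests).
 */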
2696 static int
2697 skt_xfer_udp_common(int child, uint32_t how_many, uint32_t batch_size,
2698 bool do_ping_pong, bool wrong_flow_id, bool test_aqm,
2699 bool test_wmm, int tx_ring_size, int rx_ring_size, int buf_size,
2700 int max_frags, int event_test_id, bool low_latency, bool multi_llink,
2701 bool test_redirect)
2702 {
2703 char buf[1] = { 0 };
2704 int error;
2705 const char * ifname;
2706 uuid_t flow_id = {};
2707 struct in_addr our_ip;
2708 struct in_addr our_mask;
2709 uint16_t our_port;
2710 struct in_addr peer_ip;
2711 uint16_t peer_port;
2712 channel_port port;
2713 ssize_t ret;
2714 flowadv_idx_t flowadv_idx;
2715 uint32_t event_flags = 0;
2716 bool ifadv_enabled = false;
2717 bool chan_event_enabled = false;
2718 bool errors_ok = false;
2719 uint16_t nfr_flags = 0;
2720 struct ether_addr feth0_macaddr;
2721 struct ether_addr feth1_macaddr;
2722
2723 if (test_aqm || test_wmm) {
2724 assert(do_ping_pong);
2725 assert(!wrong_flow_id);
2726 }
2727
2728 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
2729
2730 if (child == 0) {
2731 ifname = FETH0_NAME;
2732 our_ip = sktc_feth0_in_addr();
2733 peer_ip = sktc_feth1_in_addr();
2734 our_port = FETH0_PORT;
2735 peer_port = FETH1_PORT;
2736 } else {
2737 ifname = FETH1_NAME;
2738 our_ip = sktc_feth1_in_addr();
2739 peer_ip = sktc_feth0_in_addr();
2740 our_port = FETH1_PORT;
2741 peer_port = FETH0_PORT;
2742 }
2743
2744 nfr_flags |= (low_latency ? NXFLOWREQF_LOW_LATENCY : 0);
2745
2746 if (test_redirect && child == 0) {
2747 setup_flowswitch(&handles, FETH0_NAME, getpid(),
2748 tx_ring_size, rx_ring_size, buf_size, max_frags);
2749
2750 setup_flowswitch(&handles, RD0_NAME, getpid(),
2751 tx_ring_size, rx_ring_size, buf_size, max_frags);
2752
2753 error = fetch_if_flowswitch_and_setup_flow(&handles, RD0_NAME,
2754 IPPROTO_UDP, 0, our_ip, our_mask, our_port, getpid(), peer_ip,
2755 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false, NULL,
2756 NULL, 0);
2757 } else {
2758 /* set up the flowswitch over the right interface */
2759 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
2760 nfr_flags, our_ip, our_mask, our_port, getpid(), peer_ip,
2761 peer_port, flow_id, &flowadv_idx, tx_ring_size, rx_ring_size,
2762 buf_size, max_frags, multi_llink);
2763 }
2764
2765 if (error == 0) {
2766 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
2767 OUR_FLOWSWITCH_PORT, ENABLE_UPP,
2768 event_test_id == SKT_FSW_EVENT_TEST_CHANNEL_EVENTS ? true : false,
2769 low_latency);
2770 assert(port.chan != NULL);
2771 }
2772
2773 set_feth_mac_addr(&feth0_macaddr, 0);
2774 set_feth_mac_addr(&feth1_macaddr, 1);
2775
2776 /* warm up the arp cache before starting the actual test */
2777 if (child == 0) {
2778 if ((error = skt_add_arp_entry(peer_ip, &feth1_macaddr)) != 0) {
2779 T_LOG("Child 0: ARP entry add failed\n");
2780 return 1;
2781 }
2782 } else {
2783 if ((error = skt_add_arp_entry(peer_ip, &feth0_macaddr)) != 0) {
2784 T_LOG("Child 1: ARP entry add failed\n");
2785 return 1;
2786 }
2787 }
2788
2789 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
2790 SKT_LOG("write fail: %s\n", strerror(errno));
2791 return 1;
2792 }
2793 assert(ret == 1);
2794 #if SKT_XFER_DEBUG
2795 T_LOG("child %d signaled\n", child);
2796 #endif
2797 /* Wait for go signal */
2798 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
2799 SKT_LOG("read fail: %s\n", strerror(errno));
2800 return 1;
2801 }
2802 assert(ret == 1);
2803 if (error != 0) {
2804 return 1;
2805 }
2806 port.ip_addr = our_ip;
2807
2808 if (wrong_flow_id) {
2809 uuid_generate(flow_id);
2810 }
2811 if (do_ping_pong) {
2812 ping_pong(&port, flow_id, our_port, peer_ip, peer_port,
2813 how_many, batch_size, child, wrong_flow_id, flowadv_idx,
2814 test_aqm, test_wmm, MAX_DEMUX_OFFSET + 1);
2815 } else {
2816 switch (event_test_id) {
2817 case SKT_FSW_EVENT_TEST_NONE:
2818 break;
2819 case SKT_FSW_EVENT_TEST_IF_ADV_ENABLED: {
2820 event_flags |= SKT_FSW_EVFLAG_IFADV;
2821 assert(os_channel_configure_interface_advisory(port.chan, TRUE) == 0);
2822 ifadv_enabled = true;
2823 break;
2824 }
2825 case SKT_FSW_EVENT_TEST_IF_ADV_DISABLED: {
2826 event_flags |= SKT_FSW_EVFLAG_IFADV;
2827 assert(os_channel_configure_interface_advisory(port.chan, FALSE) == 0);
2828 break;
2829 }
2830 case SKT_FSW_EVENT_TEST_CHANNEL_EVENTS: {
2831 chan_event_enabled = true;
2832 event_flags |= SKT_FSW_EVFLAG_CHANNEL;
2833 errors_ok = true;
2834 break;
2835 }
2836 default:
2837 T_LOG("unknown event test id %d\n",
2838 event_test_id);
2839 assert(0);
2840 break;
2841 }
2842 send_and_receive(&port, flow_id, our_port, peer_ip, peer_port,
2843 how_many, batch_size, child, wrong_flow_id, errors_ok,
2844 event_flags, ifadv_enabled);
2845 }
2846
2847 #if SKT_XFER_DEBUG
2848 T_LOG("got input %d from parent in child %d, starting test\n",
2849 buf[0], child);
2850 #endif
2851 return 0;
2852 }
2853
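/*
 * get_fsw_stats: sum the flowswitch statistics for feth0 and feth1 from
 * the SK_STATS_FLOW_SWITCH sysctl into `result'.  Returns ENOENT if
 * neither interface shows up in the dump.
 */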
2854 static int
2855 get_fsw_stats(struct fsw_stats *result)
2856 {
2857 int i, ret;
2858 size_t length = 0;
2859 size_t width = sizeof(struct sk_stats_flow_switch);
2860 void *buffer, *scan;
2861 struct sk_stats_flow_switch *sfs;
2862
2863 ret = sysctl_buf(SK_STATS_FLOW_SWITCH, &buffer, &length, NULL, 0);
2864 if (ret != 0 || buffer == NULL || length == 0) {
2865 T_LOG("get_fsw_stats: Failed to get stats\n");
2866 return ret;
2867 }
2868
2869 assert((length % width) == 0);
2870 scan = buffer;
2871 memset(result, 0, sizeof(*result));
2872
2873 /*
2874 * XXX: I don't like pointer arithmetic on a void ptr, but
2875 * this code was lifted from skywalk_cmds and clang doesn't
2876 * seem to care.
2877 */
2878 ret = ENOENT;
2879 while (scan < (buffer + length)) {
2880 sfs = scan;
2881 scan += sizeof(*sfs);
2882
2883 if (strcmp(sfs->sfs_if_name, FETH0_NAME) != 0 &&
2884 strcmp(sfs->sfs_if_name, FETH1_NAME) != 0) {
2885 continue;
2886 }
2887 ret = 0;
2888
2889 for (i = 0;
2890 i < (sizeof(*result) / sizeof(STATS_VAL(result, 0))); i++) {
2891 STATS_ADD(result, i, STATS_VAL(&sfs->sfs_fsws, i));
2892 }
2893 }
2894
2895 free(buffer);
2896
2897 return ret;
2898 }
2899
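/*
 * skt_xfer_udp_with_errors_common: child 0 acts as a tolerant receiver
 * while the other child walks the fsw_inject_codes table, enabling one
 * injection code at a time, sending a burst of UDP packets and checking
 * that the flowswitch stats counters tied to that code increased.
 */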
2900 static int
2901 skt_xfer_udp_with_errors_common(int child, uint32_t how_many,
2902 uint32_t batch_size)
2903 {
2904 char buf[1] = { 0 };
2905 int error;
2906 const char * ifname;
2907 uuid_t flow_id = {};
2908 struct in_addr our_ip;
2909 struct in_addr our_mask;
2910 uint16_t our_port;
2911 struct in_addr peer_ip;
2912 uint16_t peer_port;
2913 channel_port port;
2914 ssize_t ret;
2915 int errbit, rv;
2916 uint64_t emask;
2917 uuid_string_t uuidstr;
2918 flowadv_idx_t flowadv_idx;
2919
2920 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
2921
2922 if (child == 0) {
2923 ifname = FETH0_NAME;
2924 our_ip = sktc_feth0_in_addr();
2925 peer_ip = sktc_feth1_in_addr();
2926 our_port = FETH0_PORT;
2927 peer_port = FETH1_PORT;
2928 } else {
2929 child = 1;
2930 ifname = FETH1_NAME;
2931 our_ip = sktc_feth1_in_addr();
2932 peer_ip = sktc_feth0_in_addr();
2933 our_port = FETH1_PORT;
2934 peer_port = FETH0_PORT;
2935 }
2936
2937 /* set up the flowswitch over the right interface */
2938 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
2939 0, our_ip, our_mask, our_port, getpid(), peer_ip,
2940 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
2941 if (error == 0) {
2942 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
2943 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
2944 assert(port.chan != NULL);
2945 }
2946 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
2947 SKT_LOG("write fail: %s\n", strerror(errno));
2948 return 1;
2949 }
2950 assert(ret == 1);
2951 #if SKT_XFER_DEBUG
2952 T_LOG("child %d signaled\n", child);
2953 #endif
2954 /* Wait for go signal */
2955 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
2956 SKT_LOG("read fail: %s\n", strerror(errno));
2957 return 1;
2958 }
2959 assert(ret == 1);
2960 if (error != 0) {
2961 return 1;
2962 }
2963 port.ip_addr = our_ip;
2964
2965 uuid_unparse(handles.fsw_nx_uuid, uuidstr);
2966 T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
2967 fflush(stderr);
2968
2969 /* warm up the arp cache before starting the actual test */
2970 T_LOG("child %d: Warm up ARP cache\n", child);
2971 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
2972 FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
2973 T_LOG("child %d: Test Start\n", child);
2974
2975 /*
2976 * Start the receiver
2977 */
2978 if (child == 0) {
2979 do_error_receive(child, &port, flow_id, our_port, peer_ip, peer_port,
2980 INJECT_CODE_COUNT * how_many);
2981 return 0;
2982 }
2983
2984 /*
2985 * For each injection code:
2986 * Take a snapshot of fsw_stats
2987 * Invoke do_error_send()
2988 * Take another snapshot of fsw_stats
2989 * Verify that the stats counters associated with the injection code increased.
2990 */
2991 for (errbit = 0; errbit < INJECT_CODE_COUNT; errbit++) {
2992 struct fsw_stats stats_before, stats_after;
2993 const struct fsw_inject_codes *ic;
2994 uint32_t error_rmask;
2995 const int *sidx;
2996 int st;
2997
2998 ic = &fsw_inject_codes[errbit];
2999
3000 T_LOG("Injecting error bit %d\n", ic->ic_code);
3001 fflush(stderr);
3002
3003 emask = (1ULL << ic->ic_code);
3004 emask = set_error_inject_mask(&emask);
3005
3006 rv = get_fsw_stats(&stats_before);
3007 assert(rv == 0);
3008
3009 if (ic->ic_rmask != IC_RMASK_UNSPEC) {
3010 error_rmask = ic->ic_rmask;
3011 error_rmask = sktu_set_inject_error_rmask(&error_rmask);
3012 }
3013
3014 do_error_send(&port, flow_id, our_port, peer_ip, peer_port,
3015 how_many, batch_size);
3016
3017 T_LOG("Tx completed for error bit %d\n", ic->ic_code);
3018
3019 rv = get_fsw_stats(&stats_after);
3020 assert(rv == 0);
3021
3022 if (ic->ic_rmask != IC_RMASK_UNSPEC) {
3023 error_rmask = sktu_set_inject_error_rmask(&error_rmask);
3024 }
3025
3026 /* random error injection could fail to inject at all */
3027 if (STATS_VAL(&stats_after, _FSW_STATS_ERROR_INJECTIONS) ==
3028 STATS_VAL(&stats_before, _FSW_STATS_ERROR_INJECTIONS)) {
3029 T_LOG("skip non-injected error bit %d\n",
3030 ic->ic_code);
3031 continue;
3032 }
3033
3034 for (sidx = ic->ic_stat_idx, st = 0;
3035 st < INJECT_CODE_IDX_MAX; st++, sidx++) {
3036 uint64_t counter;
3037
3038 if (*sidx < 0) {
3039 continue;
3040 }
3041
3042 counter = STATS_VAL(&stats_after, *sidx);
3043 counter -= STATS_VAL(&stats_before, *sidx);
3044
3045 if (counter == 0) {
3046 T_LOG("Counter idx %d didn't "
3047 "change for error %d. Before %lld, "
3048 "After %lld\n", st, ic->ic_code,
3049 STATS_VAL(&stats_before, *sidx),
3050 STATS_VAL(&stats_after, *sidx));
3051 return 1;
3052 }
3053 }
3054 }
3055
3056 emask = 0;
3057 set_error_inject_mask(&emask);
3058
3059 return 0;
3060 }
3061
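/*
 * skt_xfer_tcpflood: open a TCP flow on each child's feth interface and
 * blast `how_many' packets with send_tcp(); `synflood' is passed through
 * as the connect flag.
 */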
3062 static int
3063 skt_xfer_tcpflood(int child, uint32_t how_many, uint32_t batch_size, bool synflood)
3064 {
3065 char buf[1] = { 0 };
3066 int error;
3067 const char * ifname;
3068 uuid_t flow_id = {};
3069 struct in_addr our_ip;
3070 struct in_addr our_mask;
3071 uint16_t our_port;
3072 struct in_addr peer_ip;
3073 uint16_t peer_port;
3074 channel_port port;
3075 ssize_t ret;
3076 flowadv_idx_t flowadv_idx;
3077
3078 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3079
3080 if (child == 0) {
3081 ifname = FETH0_NAME;
3082 our_ip = sktc_feth0_in_addr();
3083 peer_ip = sktc_feth1_in_addr();
3084 our_port = FETH0_PORT;
3085 peer_port = FETH1_PORT;
3086 } else {
3087 ifname = FETH1_NAME;
3088 our_ip = sktc_feth1_in_addr();
3089 peer_ip = sktc_feth0_in_addr();
3090 our_port = FETH1_PORT;
3091 peer_port = FETH0_PORT;
3092 }
3093
3094 /* set up the flowswitch over the right interface */
3095 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_TCP,
3096 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3097 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3098 if (error == 0) {
3099 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
3100 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
3101 assert(port.chan != NULL);
3102 }
3103 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3104 SKT_LOG("write fail: %s\n", strerror(errno));
3105 return 1;
3106 }
3107 assert(ret == 1);
3108 #if SKT_XFER_DEBUG
3109 T_LOG("child %d signaled\n", child);
3110 #endif
3111 /* Wait for go signal */
3112 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3113 SKT_LOG("read fail: %s\n", strerror(errno));
3114 return 1;
3115 }
3116 assert(ret == 1);
3117 if (error != 0) {
3118 return 1;
3119 }
3120 #if SKT_XFER_DEBUG
3121 T_LOG("got input %d from parent in child %d, starting test\n",
3122 buf[0], child);
3123 #endif
3124 port.ip_addr = our_ip;
3125 send_tcp(&port, flow_id, our_port, peer_ip, peer_port,
3126 how_many, batch_size, child, synflood);
3127 return 0;
3128 }
3129
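/*
 * skt_xfer_portzero: attempt to connect a flow with a peer port of 0 and
 * verify that flow setup fails with EINVAL.
 */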
3130 static int
3131 skt_xfer_portzero(int child, int protocol)
3132 {
3133 char buf[1] = { 0 };
3134 int error;
3135 const char * ifname;
3136 uuid_t flow_id = {};
3137 struct in_addr our_ip;
3138 struct in_addr our_mask;
3139 uint16_t our_port;
3140 struct in_addr peer_ip;
3141 uint16_t peer_port;
3142 ssize_t ret;
3143 flowadv_idx_t flowadv_idx;
3144
3145 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3146
3147 if (child == 0) {
3148 ifname = "feth0";
3149 our_ip = sktc_feth0_in_addr();
3150 peer_ip = sktc_feth1_in_addr();
3151 our_port = FETH0_PORT;
3152 peer_port = 0;
3153 } else {
3154 ifname = "feth1";
3155 our_ip = sktc_feth1_in_addr();
3156 peer_ip = sktc_feth0_in_addr();
3157 our_port = FETH1_PORT;
3158 peer_port = 0;
3159 }
3160
3161 /* this should fail with EINVAL (peer port 0) */
3162 error = setup_flowswitch_and_flow(&handles, ifname, protocol,
3163 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3164 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3165
3166 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3167 SKT_LOG("write fail: %s\n", strerror(errno));
3168 return 1;
3169 }
3170 assert(ret == 1);
3171 #if SKT_XFER_DEBUG
3172 T_LOG("child %d signaled\n", child);
3173 #endif
3174 /* Wait for go signal */
3175 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3176 SKT_LOG("read fail: %s\n", strerror(errno));
3177 return 1;
3178 }
3179 assert(ret == 1);
3180
3181 if (error != EINVAL) {
3182 T_LOG("expected %d but got %s (%d)\n", EINVAL,
3183 strerror(error), error);
3184 return 1;
3185 }
3186 return 0;
3187 }
3188
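/*
 * skt_xfer_setuponly: set up the flowswitch, flow and channel, then
 * synchronize with the parent and return without transferring any data;
 * if the setup fails, the child loops forever logging that it is
 * waiting.
 */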
3189 static int
3190 skt_xfer_setuponly(int child)
3191 {
3192 char buf[1] = { 0 };
3193 int error;
3194 const char * ifname;
3195 uuid_t flow_id = {};
3196 struct in_addr our_ip;
3197 struct in_addr our_mask;
3198 uint16_t our_port;
3199 struct in_addr peer_ip;
3200 uint16_t peer_port;
3201 channel_port port;
3202 ssize_t ret;
3203 flowadv_idx_t flowadv_idx;
3204
3205 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3206
3207 if (child == 0) {
3208 ifname = FETH0_NAME;
3209 our_ip = sktc_feth0_in_addr();
3210 peer_ip = sktc_feth1_in_addr();
3211 our_port = FETH0_PORT;
3212 peer_port = FETH1_PORT;
3213 } else {
3214 ifname = FETH1_NAME;
3215 our_ip = sktc_feth1_in_addr();
3216 peer_ip = sktc_feth0_in_addr();
3217 our_port = FETH1_PORT;
3218 peer_port = FETH0_PORT;
3219 }
3220
3221 /* set up the flowswitch over the right interface */
3222 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_TCP,
3223 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3224 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3225 if (error == 0) {
3226 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
3227 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
3228 assert(port.chan != NULL);
3229 } else {
3230 while (1) {
3231 T_LOG("Child %d waiting\n", child);
3232 sleep(5);
3233 }
3234 }
3235 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3236 SKT_LOG("write fail: %s\n", strerror(errno));
3237 return 1;
3238 }
3239 assert(ret == 1);
3240 #if SKT_XFER_DEBUG
3241 T_LOG("child %d signaled\n", child);
3242 #endif
3243 /* Wait for go signal */
3244 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3245 SKT_LOG("read fail: %s\n", strerror(errno));
3246 return 1;
3247 }
3248 assert(ret == 1);
3249 if (error != 0) {
3250 return 1;
3251 }
3252 #if SKT_XFER_DEBUG
3253 T_LOG("got input %d from parent in child %d, starting test\n",
3254 buf[0], child);
3255 #endif
3256 return 0;
3257 }
3258
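/*
 * send_bad_flow: send a single packet whose flow id or tuple does not
 * match the registered flow, then assert that the flowswitch drop
 * counter (FSW_STATS_DROP) increased as a result.
 */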
3259 static void
3260 send_bad_flow(channel_port_t port, uuid_t flow_id,
3261 int protocol, uint16_t src_port, struct in_addr dst_ip, uint16_t dst_port,
3262 my_payload_t payload)
3263 {
3264 struct fsw_stats stats_before, stats_after;
3265 uint64_t counter;
3266 int ret;
3267
3268 ret = get_fsw_stats(&stats_before);
3269 assert(ret == 0);
3270
3271 channel_port_send(port, flow_id, protocol, src_port, dst_ip, dst_port,
3272 payload, sizeof(*payload), 1, FALSE, FALSE, PKT_SC_BE, FALSE, NULL);
3273
3274 ret = get_fsw_stats(&stats_after);
3275 assert(ret == 0);
3276
3277 counter = STATS_VAL(&stats_after, FSW_STATS_DROP);
3278 counter -= STATS_VAL(&stats_before, FSW_STATS_DROP);
3279
3280 if (counter == 0) {
3281 T_LOG("Flow not ours wasn't dropped");
3282 assert(0);
3283 }
3284 T_LOG("dropped %"PRIu64"\n", counter);
3285 }
3286
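/*
 * skt_xfer_flowmatch: child 0 receives everything on its flow; child 1
 * sends packets that are each wrong in exactly one dimension (flow id,
 * protocol, source port, destination IP, destination port) and expects
 * each of them to be dropped, then sends a valid packet carrying
 * XFER_RECV_END_PAYLOAD to stop the receiver.
 */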
3287 static int
3288 skt_xfer_flowmatch(int child)
3289 {
3290 char buf[1] = { 0 };
3291 int error;
3292 const char * ifname;
3293 uuid_t flow_id = {};
3294 uuid_t nowhere_flow_id;
3295 struct in_addr our_ip, peer_ip, nowhere_ip;
3296 struct in_addr our_mask;
3297 uint16_t our_port, peer_port;
3298 channel_port port;
3299 ssize_t ret;
3300 uuid_string_t uuidstr;
3301 flowadv_idx_t flowadv_idx;
3302
3303 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3304
3305 if (child == 0) {
3306 ifname = FETH0_NAME;
3307 our_ip = sktc_feth0_in_addr();
3308 peer_ip = sktc_feth1_in_addr();
3309 our_port = FETH0_PORT;
3310 peer_port = FETH1_PORT;
3311 } else {
3312 child = 1;
3313 ifname = FETH1_NAME;
3314 our_ip = sktc_feth1_in_addr();
3315 peer_ip = sktc_feth0_in_addr();
3316 our_port = FETH1_PORT;
3317 peer_port = FETH0_PORT;
3318 }
3319
3320 /* set up the flowswitch over the right interface */
3321 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
3322 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3323 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3324 if (error == 0) {
3325 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
3326 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
3327 assert(port.chan != NULL);
3328 }
3329 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3330 SKT_LOG("write fail: %s\n", strerror(errno));
3331 return 1;
3332 }
3333 assert(ret == 1);
3334 #if SKT_XFER_DEBUG
3335 T_LOG("child %d signaled\n", child);
3336 #endif
3337 /* Wait for go signal */
3338 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3339 SKT_LOG("read fail: %s\n", strerror(errno));
3340 return 1;
3341 }
3342 assert(ret == 1);
3343 if (error != 0) {
3344 return 1;
3345 }
3346 port.ip_addr = our_ip;
3347
3348 uuid_unparse(handles.fsw_nx_uuid, uuidstr);
3349 T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
3350 fflush(stderr);
3351
3352 /* warm up the arp cache before starting the actual test */
3353 T_LOG("child %d: Warm up ARP cache\n", child);
3354 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
3355 FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
3356 T_LOG("child %d: Test Start\n", child);
3357
3358 /* Start the receiver */
3359 if (child == 0) {
3360 channel_port_receive_all(&port, flow_id, our_port, peer_ip,
3361 peer_port, 0, FALSE);
3362 return 0;
3363 }
3364
3365 my_payload payload;
3366 bzero(&payload, sizeof(payload));
3367 payload.packet_number = 0;
3368
3369 nowhere_ip = sktc_nowhere_in_addr();
3370 do {
3371 uuid_generate_random(nowhere_flow_id);
3372 } while (!uuid_compare(nowhere_flow_id, flow_id));
3373
3374 /* Send with wrong flow id */
3375 T_LOG("Send with wrong flow id...\t");
3376 payload.packet_number = 0;
3377 strncpy(payload.data, "wrong flow id", sizeof(payload.data));
3378 send_bad_flow(&port, nowhere_flow_id,
3379 IPPROTO_UDP, our_port, peer_ip, peer_port, &payload);
3380
3381 /* Send with wrong protocol */
3382 T_LOG("Send with wrong protocol...\t");
3383 payload.packet_number = 0;
3384 strncpy(payload.data, "wrong protocol", sizeof(payload.data));
3385 send_bad_flow(&port, flow_id,
3386 IPPROTO_TCP, our_port, peer_ip, peer_port, &payload);
3387
3388 /* Send with wrong src port */
3389 T_LOG("Send with wrong src port...\t");
3390 payload.packet_number = 0;
3391 strncpy(payload.data, "wrong src port", sizeof(payload.data));
3392 send_bad_flow(&port, flow_id,
3393 IPPROTO_UDP, our_port + 1, peer_ip, peer_port, &payload);
3394
3395 /* Send with wrong dst IP */
3396 T_LOG("Send with wrong dst IP...\t");
3397 payload.packet_number = 0;
3398 strncpy(payload.data, "wrong dst IP", sizeof(payload.data));
3399 send_bad_flow(&port, flow_id,
3400 IPPROTO_UDP, our_port, nowhere_ip, peer_port, &payload);
3401
3402 /* Send with wrong dst port */
3403 T_LOG("Send with wrong dst port...\t");
3404 payload.packet_number = 0;
3405 strncpy(payload.data, "wrong dst port", sizeof(payload.data));
3406 send_bad_flow(&port, flow_id,
3407 IPPROTO_UDP, our_port, peer_ip, peer_port + 1, &payload);
3408
3409 /* Send something right to single receiver to end */
3410 payload.packet_number = 0;
3411 strncpy(payload.data, XFER_RECV_END_PAYLOAD, sizeof(payload.data));
3412 channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
3413 peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, PKT_SC_BE,
3414 FALSE, NULL);
3415
3416 return 0;
3417 }
3418
3419 /* see rdar://problem/38427726 for details */
3420 static int
3421 skt_xfer_flowcleanup(int child, uint32_t how_many, uint32_t batch_size)
3422 {
3423 char buf[1] = { 0 };
3424 int error;
3425 const char * ifname;
3426 uuid_t flow_id = {};
3427 struct in_addr our_ip;
3428 struct in_addr our_mask;
3429 uint16_t our_port;
3430 struct in_addr peer_ip;
3431 uint16_t peer_port;
3432 channel_port port;
3433 ssize_t ret;
3434 flowadv_idx_t flowadv_idx = FLOWADV_IDX_NONE;
3435
3436 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3437
3438 if (child == 0) {
3439 ifname = FETH0_NAME;
3440 our_ip = sktc_feth0_in_addr();
3441 peer_ip = sktc_feth1_in_addr();
3442 our_port = FETH0_PORT;
3443 peer_port = FETH1_PORT;
3444 } else {
3445 ifname = FETH1_NAME;
3446 our_ip = sktc_feth1_in_addr();
3447 peer_ip = sktc_feth0_in_addr();
3448 our_port = FETH1_PORT;
3449 peer_port = FETH0_PORT;
3450 }
3451
3452 /*
3453 * set up the flowswitch over the right interface and bind a
3454 * 5 tuple flow.
3455 */
3456 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
3457 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3458 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3459 SKTC_ASSERT_ERR(error == 0);
3460 assert(flowadv_idx != FLOWADV_IDX_NONE);
3461
3462 /* open channel */
3463 sktu_channel_port_init(&port, handles.fsw_nx_uuid, OUR_FLOWSWITCH_PORT,
3464 ENABLE_UPP, false, false);
3465 assert(port.chan != NULL);
3466
3467 /*
3468 * Close the channel. This also triggers the closure of the flow
3469 * created above and the removal of nexus port binding.
3470 */
3471 os_channel_destroy(port.chan);
3472
3473 /* bind again to the same port */
3474 error = os_nexus_controller_bind_provider_instance(handles.controller,
3475 handles.fsw_nx_uuid, OUR_FLOWSWITCH_PORT, getpid(),
3476 NULL, NULL, 0, NEXUS_BIND_PID);
3477 SKTC_ASSERT_ERR(!error);
3478
3479 /* open a new flow */
3480 uuid_generate(flow_id);
3481 flowadv_idx = FLOWADV_IDX_NONE;
3482 error = connect_flow(handles.controller, handles.fsw_nx_uuid,
3483 OUR_FLOWSWITCH_PORT, flow_id, IPPROTO_UDP, 0,
3484 handles.netif_addr, our_port, peer_ip, peer_port, &flowadv_idx, 0);
3485 SKTC_ASSERT_ERR(!error);
3486 assert(flowadv_idx != FLOWADV_IDX_NONE);
3487
3488 /* re-open channel on the same port */
3489 sktu_channel_port_init(&port, handles.fsw_nx_uuid, OUR_FLOWSWITCH_PORT,
3490 ENABLE_UPP, false, false);
3491 assert(port.chan != NULL);
3492 port.ip_addr = our_ip;
3493
3494 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3495 SKT_LOG("write fail: %s\n", strerror(errno));
3496 return 1;
3497 }
3498 assert(ret == 1);
3499 /* Wait for go signal */
3500 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3501 SKT_LOG("read fail: %s\n", strerror(errno));
3502 return 1;
3503 }
3504 assert(ret == 1);
3505 if (error != 0) {
3506 return 1;
3507 }
3508
3509 /* warm up the arp cache before starting the actual test */
3510 T_LOG("child %d: Warm up ARP cache\n", child);
3511 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
3512 FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
3513 T_LOG("child %d: Test Start\n", child);
3514
3515 /* perform ping pong test */
3516 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, how_many,
3517 batch_size, child, FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
3518
3519 return 0;
3520 }
3521
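/*
 * skt_xfer_csumoffload: child 1 sends one packet with checksum offload
 * requested and one without, followed by the end-of-test payload;
 * child 0 receives them via channel_port_receive_all(), so the receive
 * path sees both checksum variants.
 */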
3522 static int
3523 skt_xfer_csumoffload(int child, int protocol)
3524 {
3525 char buf[1] = { 0 };
3526 int error;
3527 const char * ifname;
3528 uuid_t flow_id = {};
3529 uuid_t nowhere_flow_id;
3530 struct in_addr our_ip, peer_ip;
3531 struct in_addr our_mask;
3532 uint16_t our_port, peer_port;
3533 channel_port port;
3534 ssize_t ret;
3535 uuid_string_t uuidstr;
3536 flowadv_idx_t flowadv_idx;
3537
3538 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3539
3540 if (child == 0) {
3541 ifname = FETH0_NAME;
3542 our_ip = sktc_feth0_in_addr();
3543 peer_ip = sktc_feth1_in_addr();
3544 our_port = FETH0_PORT;
3545 peer_port = FETH1_PORT;
3546 } else {
3547 ifname = FETH1_NAME;
3548 our_ip = sktc_feth1_in_addr();
3549 peer_ip = sktc_feth0_in_addr();
3550 our_port = FETH1_PORT;
3551 peer_port = FETH0_PORT;
3552 }
3553
3554 /* set up the flowswitch over the right interface */
3555 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
3556 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3557 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3558 if (error == 0) {
3559 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
3560 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
3561 assert(port.chan != NULL);
3562 }
3563 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3564 SKT_LOG("write fail: %s\n", strerror(errno));
3565 return 1;
3566 }
3567 assert(ret == 1);
3568 #if SKT_XFER_DEBUG
3569 T_LOG("child %d signaled\n", child);
3570 #endif
3571 /* Wait for go signal */
3572 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3573 SKT_LOG("read fail: %s\n", strerror(errno));
3574 return 1;
3575 }
3576 assert(ret == 1);
3577 if (error != 0) {
3578 return 1;
3579 }
3580 port.ip_addr = our_ip;
3581
3582 uuid_unparse(handles.fsw_nx_uuid, uuidstr);
3583 T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
3584 fflush(stderr);
3585
3586 /* warm up the arp cache before starting the actual test */
3587 T_LOG("child %d: Warm up ARP cache\n", child);
3588 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
3589 FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
3590 T_LOG("child %d: Test Start\n", child);
3591
3592 /* Start the receiver */
3593 if (child == 0) {
3594 channel_port_receive_all(&port, flow_id, our_port, peer_ip,
3595 peer_port, 2, FALSE);
3596 return 0;
3597 }
3598
3599 my_payload payload;
3600 bzero(&payload, sizeof(payload));
3601 payload.packet_number = 0;
3602
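	/* pick a random flow ID that is guaranteed not to match the real flow */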
3603 do {
3604 uuid_generate_random(nowhere_flow_id);
3605 } while (!uuid_compare(nowhere_flow_id, flow_id));
3606
3607 /* send with checksum offloading */
3608 payload.packet_number = 0;
3609 strlcpy(payload.data, "any", sizeof(payload.data));
3610 channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
3611 peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, PKT_SC_BE,
3612 TRUE, NULL);
3613
3614 /* send without checksum offloading */
3615 payload.packet_number = 0;
3616 strlcpy(payload.data, "any", sizeof(payload.data));
3617 channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
3618 peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, PKT_SC_BE,
3619 FALSE, NULL);
3620
3621 /* signal receiver to stop */
3622 payload.packet_number = 0;
3623 strlcpy(payload.data, XFER_RECV_END_PAYLOAD, sizeof(payload.data));
3624 channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
3625 peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, PKT_SC_BE,
3626 FALSE, NULL);
3627 return 0;
3628 }
3629
3630 static void
3631 skt_xfer_enable_qos_marking_interface(const char *ifname, uint32_t mode)
3632 {
3633 /* set up ifnet for QoS marking */
3634 int s;
3635 struct ifreq ifr;
3636 unsigned long ioc;
3637
3638 assert(mode != IFRTYPE_QOSMARKING_MODE_NONE);
3639
3640 assert((s = socket(AF_INET, SOCK_DGRAM, 0)) >= 0);
3641
3642 bzero(&ifr, sizeof(ifr));
3643 strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
3644 ioc = SIOCSQOSMARKINGMODE;
3645 ifr.ifr_qosmarking_mode = mode;
3646 assert(ioctl(s, ioc, (caddr_t)&ifr) == 0);
3647
3648 bzero(&ifr, sizeof(ifr));
3649 strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
3650 ioc = SIOCSQOSMARKINGENABLED;
3651 ifr.ifr_qosmarking_enabled = 1;
3652 assert(ioctl(s, ioc, (caddr_t)&ifr) == 0);
3653 }
3654
3655 static int
3656 skt_xfer_qosmarking(int child, uint32_t mode)
3657 {
3658 char buf[1] = { 0 };
3659 int error;
3660 const char * ifname;
3661 uuid_t flow_id = {};
3662 uuid_t nowhere_flow_id;
3663 struct in_addr our_ip, peer_ip;
3664 struct in_addr our_mask;
3665 uint16_t our_port, peer_port;
3666 channel_port port;
3667 ssize_t ret;
3668 uuid_string_t uuidstr;
3669 flowadv_idx_t flowadv_idx;
3670
3671 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3672
3673 if (child == 0) {
3674 ifname = FETH0_NAME;
3675 our_ip = sktc_feth0_in_addr();
3676 peer_ip = sktc_feth1_in_addr();
3677 our_port = FETH0_PORT;
3678 peer_port = FETH1_PORT;
3679 skt_xfer_enable_qos_marking_interface(ifname, mode);
3680 } else {
3681 ifname = FETH1_NAME;
3682 our_ip = sktc_feth1_in_addr();
3683 peer_ip = sktc_feth0_in_addr();
3684 our_port = FETH1_PORT;
3685 peer_port = FETH0_PORT;
3686 skt_xfer_enable_qos_marking_interface(ifname, mode);
3687 }
3688
3689 /* set up the flowswitch over the right interface */
3690 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
3691 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3692 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3693 if (error == 0) {
3694 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
3695 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
3696 assert(port.chan != NULL);
3697 }
3698 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3699 SKT_LOG("write fail: %s\n", strerror(errno));
3700 return 1;
3701 }
3702 assert(ret == 1);
3703 #if SKT_XFER_DEBUG
3704 T_LOG("child %d signaled\n", child);
3705 #endif
3706 /* Wait for go signal */
3707 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3708 SKT_LOG("read fail: %s\n", strerror(errno));
3709 return 1;
3710 }
3711 assert(ret == 1);
3712 if (error != 0) {
3713 return 1;
3714 }
3715 port.ip_addr = our_ip;
3716
3717 uuid_unparse(handles.fsw_nx_uuid, uuidstr);
3718 T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
3719 fflush(stderr);
3720
3721 /* warm up the arp cache before starting the actual test */
3722 T_LOG("child %d: Warm up ARP cache\n", child);
3723 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
3724 FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
3725 T_LOG("child %d: Test Start\n", child);
3726
3727 /* Start the receiver, which verifies the QoS marking */
3728 if (child == 0) {
3729 channel_port_receive_all(&port, flow_id, our_port, peer_ip,
3730 peer_port, -1, TRUE);
3731 return 0;
3732 }
3733
3734 my_payload payload;
3735
3736 do {
3737 uuid_generate_random(nowhere_flow_id);
3738 } while (!uuid_compare(nowhere_flow_id, flow_id));
3739
3740 /* test qos marking with and without checksum offload */
3741
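/*
 * __SEND_SC embeds the marking-mode prefix and the service-class name in the
 * payload; SEND_SC sends each service class twice, first without and then
 * with checksum offload.
 */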
3742 #define __SEND_SC(svc, csum_offload) \
3743 bzero(&payload, sizeof(payload));\
3744 payload.packet_number = 0;\
3745 if (mode == IFRTYPE_QOSMARKING_FASTLANE) { \
3746 strlcpy(payload.data, XFER_QOSMARKING_FASTLANE_PREFIX, sizeof(payload.data)); \
3747 } else if (mode == IFRTYPE_QOSMARKING_RFC4594) { \
3748 strlcpy(payload.data, XFER_QOSMARKING_RFC4594_PREFIX, sizeof(payload.data)); \
3749 } \
3750 strlcat(payload.data, #svc, sizeof(payload.data));\
3751 channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,\
3752 peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, svc, csum_offload, NULL); \
3753
3754 #define SEND_SC(svc) \
3755 __SEND_SC(svc, FALSE); \
3756 __SEND_SC(svc, TRUE);
3757
3758 SEND_SC(PKT_SC_BK);
3759 SEND_SC(PKT_SC_BK_SYS);
3760 SEND_SC(PKT_SC_BE);
3761 SEND_SC(PKT_SC_RD);
3762 SEND_SC(PKT_SC_OAM);
3763 SEND_SC(PKT_SC_AV);
3764 SEND_SC(PKT_SC_RV);
3765 SEND_SC(PKT_SC_VI);
3766 SEND_SC(PKT_SC_SIG);
3767 SEND_SC(PKT_SC_VO);
3768 SEND_SC(PKT_SC_CTL);
3769
3770 #undef SEND_SC
3771 #undef __SEND_SC
3772
3773 /* signal receiver to stop */
3774 payload.packet_number = 0;
3775 strlcpy(payload.data, XFER_RECV_END_PAYLOAD, sizeof(payload.data));
3776 channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
3777 peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, PKT_SC_BE,
3778 FALSE, NULL);
3779
3780 return 0;
3781 }
3782
3783 static int
3784 skt_xfer_listener_tcp_rst(int child)
3785 {
3786 char buf[1] = { 0 };
3787 int error;
3788 const char * ifname;
3789 uuid_t flow_id = {};
3790 uuid_t listener_flow_id, connecting_flow_id;
3791 struct in_addr our_ip, peer_ip, zero_ip;
3792 struct in_addr our_mask;
3793 uint16_t our_port, peer_port, listener_port;
3794 channel_port port;
3795 ssize_t ret;
3796 uuid_string_t uuidstr;
3797 flowadv_idx_t flowadv_idx;
3798
3799 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3800
3801 if (child == 0) {
3802 /* connector and RST receiver */
3803 ifname = FETH0_NAME;
3804 our_ip = sktc_feth0_in_addr();
3805 peer_ip = sktc_feth1_in_addr();
3806 our_port = FETH0_PORT;
3807 peer_port = FETH1_PORT;
3808 listener_port = FETH0_PORT + 1;
3809 } else {
3810 /* listener */
3811 ifname = FETH1_NAME;
3812 our_ip = sktc_feth1_in_addr();
3813 peer_ip = sktc_feth0_in_addr();
3814 our_port = FETH1_PORT;
3815 peer_port = FETH0_PORT;
3816 listener_port = FETH0_PORT + 1;
3817 }
3818
3819 zero_ip = (struct in_addr){.s_addr = htonl(INADDR_ANY)};
3820
3821 /* set up the flowswitch over the right interface */
3822 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
3823 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3824 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3825 if (error == 0) {
3826 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
3827 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
3828 assert(port.chan != NULL);
3829 }
3830 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3831 SKT_LOG("write fail: %s\n", strerror(errno));
3832 return 1;
3833 }
3834 assert(ret == 1);
3835 #if SKT_XFER_DEBUG
3836 T_LOG("child %d signaled\n", child);
3837 #endif
3838
3839 if (child == 0) {
3840 do {
3841 uuid_generate_random(connecting_flow_id);
3842 } while (!uuid_compare(connecting_flow_id, flow_id));
3843 flowadv_idx_t tmp_flowadv_idx = FLOWADV_IDX_NONE;
3844 error = connect_flow(handles.controller, handles.fsw_nx_uuid,
3845 OUR_FLOWSWITCH_PORT, connecting_flow_id, IPPROTO_TCP, 0,
3846 our_ip, our_port, peer_ip, listener_port, &tmp_flowadv_idx,
3847 0);
3848 SKTC_ASSERT_ERR(!error);
3849 assert(tmp_flowadv_idx != FLOWADV_IDX_NONE);
3850 } else {
3851 do {
3852 uuid_generate_random(listener_flow_id);
3853 } while (!uuid_compare(listener_flow_id, flow_id));
3854 flowadv_idx_t tmp_flowadv_idx = FLOWADV_IDX_NONE;
3855 error = connect_flow(handles.controller, handles.fsw_nx_uuid,
3856 OUR_FLOWSWITCH_PORT, listener_flow_id, IPPROTO_TCP, 0,
3857 our_ip, listener_port, zero_ip, 0, &tmp_flowadv_idx, 0);
3858 SKTC_ASSERT_ERR(!error);
3859 assert(tmp_flowadv_idx != FLOWADV_IDX_NONE);
3860 }
3861
3862 /* Wait for go signal */
3863 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3864 SKT_LOG("read fail: %s\n", strerror(errno));
3865 return 1;
3866 }
3867 assert(ret == 1);
3868 if (error != 0) {
3869 return 1;
3870 }
3871 port.ip_addr = our_ip;
3872
3873 uuid_unparse(handles.fsw_nx_uuid, uuidstr);
3874 T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
3875 fflush(stderr);
3876
3877 /* warm up the arp cache before starting the actual test */
3878 T_LOG("child %d: Warm up ARP cache\n", child);
3879 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
3880 FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
3881 T_LOG("child %d: Test Start\n", child);
3882
3883 /* Start the receiver */
3884 if (child == 0) {
3885 channel_port_receive_all(&port, flow_id, listener_port, peer_ip,
3886 peer_port, 1, FALSE);
3887 return 0;
3888 }
3889
3890 my_payload payload;
3891 bzero(&payload, sizeof(payload));
3892 payload.packet_number = 0;
3893 channel_port_send(&port, listener_flow_id, IPPROTO_TCP, listener_port,
3894 peer_ip, peer_port, &payload, sizeof(payload), 1, FALSE, FALSE,
3895 PKT_SC_BE, TRUE, NULL);
3896
3897 sleep(1);
3898
3899 /* Send the end-of-test payload over the UDP flow so the receiver stops */
3900 payload.packet_number = 1;
3901 strncpy(payload.data, XFER_RECV_END_PAYLOAD, sizeof(payload.data));
3902 channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
3903 peer_port, &payload, sizeof(payload), 2, FALSE, FALSE, PKT_SC_BE,
3904 FALSE, NULL);
3905
3906 return 0;
3907 }
3908
3909 int
3910 skt_xfer_udp_frags(int child, bool error_ids)
3911 {
3912 char buf[1] = { 0 };
3913 int error;
3914 const char * ifname;
3915 uuid_t flow_id = {};
3916 struct in_addr our_ip, peer_ip;
3917 struct in_addr our_mask;
3918 uint16_t our_port, peer_port;
3919 channel_port port;
3920 ssize_t ret;
3921 uuid_string_t uuidstr;
3922 flowadv_idx_t flowadv_idx;
3923
3924 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3925
3926 if (child == 0) {
3927 ifname = FETH0_NAME;
3928 our_ip = sktc_feth0_in_addr();
3929 peer_ip = sktc_feth1_in_addr();
3930 our_port = FETH0_PORT;
3931 peer_port = FETH1_PORT;
3932 } else {
3933 child = 1;
3934 ifname = FETH1_NAME;
3935 our_ip = sktc_feth1_in_addr();
3936 peer_ip = sktc_feth0_in_addr();
3937 our_port = FETH1_PORT;
3938 peer_port = FETH0_PORT;
3939 }
3940
3941 /* set up the flowswitch over the right interface */
3942 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
3943 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3944 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3945 if (error == 0) {
3946 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
3947 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
3948 assert(port.chan != NULL);
3949 }
3950 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3951 SKT_LOG("write fail: %s\n", strerror(errno));
3952 return 1;
3953 }
3954 assert(ret == 1);
3955 #if SKT_XFER_DEBUG
3956 T_LOG("child %d signaled\n", child);
3957 #endif
3958
3959 /* Wait for go signal */
3960 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3961 SKT_LOG("read fail: %s\n", strerror(errno));
3962 return 1;
3963 }
3964 assert(ret == 1);
3965 if (error != 0) {
3966 return 1;
3967 }
3968 port.ip_addr = our_ip;
3969
3970 uuid_unparse(handles.fsw_nx_uuid, uuidstr);
3971 T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
3972 fflush(stderr);
3973
3974 /* warm up the arp cache before starting the actual test */
3975 T_LOG("child %d: Warm up ARP cache\n", child);
3976 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
3977 FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
3978 T_LOG("child %d: Test Start\n", child);
3979
3980 /* Start the receiver */
3981 if (child == 0) {
3982 channel_port_receive_all(&port, flow_id, our_port, peer_ip,
3983 peer_port, error_ids ? 0 : 4, FALSE);
3984 return 0;
3985 }
3986
3987 my_payload payload;
3988 bzero(&payload, sizeof(payload));
3989 payload.packet_number = 0;
3990
3991 channel_port_send_fragments(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
3992 peer_port, &payload, (sizeof(payload.data) & ~0x7), 4,
3993 PKT_SC_BE, FALSE, error_ids);
3994
3995 bzero(&payload, sizeof(payload));
3996 payload.packet_number = 0;
3997 strncpy(payload.data, XFER_RECV_END_PAYLOAD, sizeof(payload.data));
3998 channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
3999 peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, PKT_SC_BE,
4000 FALSE, NULL);
4001
4002 return 0;
4003 }
4004
4005 static int
4006 skt_xfer_udp_parent_child(int id, uint16_t demux_offset)
4007 {
4008 #define CHILD_ID 0
4009 #define REMOTE_ID 1
4010 #define PARENT_ID 2
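/*
 * Roles: PARENT_ID owns the flowswitch and the parent flow on feth0,
 * CHILD_ID attaches a child flow demuxed off that parent flow, and
 * REMOTE_ID is the peer endpoint on feth1.
 */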
4011
4012 #define PARENT_FLOW_UUID "1B4E28BA-2FA1-11D2-883F-B9A761BDE3FB"
4013 #define CHILD_FLOW_UUID "1B4E28BA-2FA1-11D2-883F-B9A761BDE3FD"
4014
4015 char buf[1] = { 0 };
4016 int error = 0;
4017 const char * ifname;
4018 uuid_t flow_id = {};
4019 struct in_addr our_ip;
4020 struct in_addr our_mask;
4021 uint16_t our_port;
4022 struct in_addr peer_ip;
4023 uint16_t peer_port;
4024 channel_port port;
4025 ssize_t ret;
4026 flowadv_idx_t flowadv_idx;
4027 nexus_port_t nx_port;
4028 uuid_t parent_flow_id = {};
4029 uint16_t flags = 0;
4030
4031 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
4032
4033 if (id == PARENT_ID) {
4034 ifname = FETH0_NAME;
4035 our_ip = sktc_feth0_in_addr();
4036 peer_ip = sktc_feth1_in_addr();
4037 our_port = FETH0_PORT;
4038 peer_port = FETH1_PORT;
4039 nx_port = OUR_FLOWSWITCH_PORT;
4040 flags = NXFLOWREQF_PARENT;
4041 uuid_parse(PARENT_FLOW_UUID, flow_id);
4042 } else if (id == CHILD_ID) {
4043 ifname = FETH0_NAME;
4044 our_ip = sktc_feth0_in_addr();
4045 peer_ip = sktc_feth1_in_addr();
4046 our_port = FETH0_PORT;
4047 peer_port = FETH1_PORT;
4048 nx_port = CHILD_FLOWSWITCH_PORT;
4049 uuid_parse(CHILD_FLOW_UUID, flow_id);
4050 uuid_parse(PARENT_FLOW_UUID, parent_flow_id);
4051 // Wait for the parent to set up the flowswitch
4052 sleep(1);
4053 } else if (id == REMOTE_ID) {
4054 ifname = FETH1_NAME;
4055 our_ip = sktc_feth1_in_addr();
4056 peer_ip = sktc_feth0_in_addr();
4057 our_port = FETH1_PORT;
4058 peer_port = FETH0_PORT;
4059 nx_port = OUR_FLOWSWITCH_PORT;
4060 }
4061
4062 if (id == PARENT_ID || id == REMOTE_ID) {
4063 // set up the flowswitch
4064 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
4065 flags, our_ip, our_mask, our_port, getpid(), peer_ip,
4066 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
4067 } else if (id == CHILD_ID) {
4068 // child will reuse parent interface and flowswitch
4069 struct flow_demux_pattern demux_patterns[1];
4070 memset(demux_patterns, 0, sizeof(struct flow_demux_pattern));
4071
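	/*
	 * Demux on a 2-byte payload value (DEMUX_PAYLOAD_VALUE) located at
	 * DEMUX_PAYLOAD_OFFSET + demux_offset, so matching packets are steered
	 * to the child flow.
	 */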
4072 uint16_t payload_byte = DEMUX_PAYLOAD_VALUE;
4073 demux_patterns[0].fdp_offset = DEMUX_PAYLOAD_OFFSET + demux_offset;
4074 demux_patterns[0].fdp_mask[0] = 0xFF;
4075 demux_patterns[0].fdp_mask[1] = 0xFF;
4076 demux_patterns[0].fdp_value[0] = payload_byte;
4077 demux_patterns[0].fdp_value[1] = payload_byte >> 8;
4078 demux_patterns[0].fdp_len = sizeof(payload_byte);
4079 error = fetch_if_flowswitch_and_setup_flow(&handles, ifname,
4080 IPPROTO_UDP, 0, our_ip, our_mask, our_port, getpid(), peer_ip,
4081 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false, parent_flow_id,
4082 demux_patterns, 1);
4083 }
4084 if (error == 0) {
4085 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
4086 nx_port, ENABLE_UPP, false, false);
4087 assert(port.chan != NULL);
4088 }
4089
4090 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
4091 SKT_LOG("write fail: %s\n", strerror(errno));
4092 return 1;
4093 }
4094 assert(ret == 1);
4095 #if SKT_XFER_DEBUG
4096 T_LOG("ID %d signaled\n", id);
4097 #endif
4098 /* Wait for go signal */
4099 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
4100 SKT_LOG("read fail: %s\n", strerror(errno));
4101 return 1;
4102 }
4103 assert(ret == 1);
4104 if (error != 0) {
4105 return 1;
4106 }
4107 port.ip_addr = our_ip;
4108
4109 /* warm up the arp cache before starting the actual test */
4110 if (id == CHILD_ID || id == REMOTE_ID) {
4111 T_LOG("child %d: Warm up ARP cache\n", id);
4112 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, id,
4113 FALSE, flowadv_idx, FALSE, FALSE, demux_offset);
4114
4115 T_LOG("child %d: Test Start\n", id);
4116 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 5, 5, id,
4117 FALSE, flowadv_idx, FALSE, FALSE, demux_offset);
4118 } else if (id == PARENT_ID) {
4119 // Wait for the child ping-pong to complete
4120 sleep(1);
4121 }
4122
4123 return 0;
4124 }
4125
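/*
 * Per-test entry points: the multiprocess test driver invokes each main with
 * "--child" in argv[3] and the child index in argv[4] (plus a test id in
 * argv[5] where applicable).
 */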
4126 static int
4127 skt_xfer_udp_main(int argc, char *argv[])
4128 {
4129 int child;
4130
4131 assert(!strcmp(argv[3], "--child"));
4132 child = atoi(argv[4]);
4133
4134 return skt_xfer_udp_common(child, XFER_TXRX_PACKET_COUNT,
4135 XFER_TXRX_BATCH_COUNT, FALSE, FALSE, FALSE, FALSE, -1, -1,
4136 -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
4137 }
4138
4139 static int
4140 skt_xfer_udp_long_main(int argc, char *argv[])
4141 {
4142 int child;
4143
4144 assert(!strcmp(argv[3], "--child"));
4145 child = atoi(argv[4]);
4146
4147 return skt_xfer_udp_common(child, XFER_TXRX_PACKET_COUNT_LONG,
4148 XFER_TXRX_BATCH_COUNT, FALSE, FALSE, FALSE, FALSE, -1, -1,
4149 -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
4150 }
4151
4152 static int
4153 skt_xfer_udp_overwhelm_main(int argc, char *argv[])
4154 {
4155 int child;
4156
4157 assert(!strcmp(argv[3], "--child"));
4158 child = atoi(argv[4]);
4159
4160 return skt_xfer_udp_common(child, XFER_TXRX_PACKET_COUNT,
4161 XFER_TXRX_OVERWHELM_BATCH_COUNT, FALSE, FALSE, FALSE, FALSE,
4162 XFER_TXRX_OVERWHELM_FSW_TX_RING_SIZE,
4163 XFER_TXRX_OVERWHELM_FSW_RX_RING_SIZE, -1, -1,
4164 SKT_FSW_EVENT_TEST_NONE, false, false, false);
4165 }
4166
4167 static int
4168 skt_xfer_udp_overwhelm_long_main(int argc, char *argv[])
4169 {
4170 int child;
4171
4172 assert(!strcmp(argv[3], "--child"));
4173 child = atoi(argv[4]);
4174
4175 return skt_xfer_udp_common(child, XFER_TXRX_PACKET_COUNT_LONG,
4176 XFER_TXRX_OVERWHELM_BATCH_COUNT, FALSE, FALSE, FALSE, FALSE,
4177 XFER_TXRX_OVERWHELM_FSW_TX_RING_SIZE,
4178 XFER_TXRX_OVERWHELM_FSW_RX_RING_SIZE, -1, -1,
4179 SKT_FSW_EVENT_TEST_NONE, false, false, false);
4180 }
4181
4182 static int
4183 skt_xfer_udp_ping_pong_main(int argc, char *argv[])
4184 {
4185 int child, test_id;
4186 bool low_latency;
4187 bool multi_llink;
4188
4189 assert(!strcmp(argv[3], "--child"));
4190 child = atoi(argv[4]);
4191 test_id = atoi(argv[5]);
4192
4193 low_latency = (test_id == SKT_FSW_PING_PONG_TEST_LOW_LATENCY);
4194 multi_llink = (test_id == SKT_FSW_PING_PONG_TEST_MULTI_LLINK);
4195 return skt_xfer_udp_common(child, XFER_PING_PACKET_COUNT,
4196 XFER_PING_BATCH_COUNT, TRUE, FALSE, FALSE, FALSE, -1, -1,
4197 -1, -1, SKT_FSW_EVENT_TEST_NONE, low_latency, multi_llink,
4198 false);
4199 }
4200
4201 static int
4202 skt_xfer_rd_udp_ping_pong_main(int argc, char *argv[])
4203 {
4204 int child, test_id;
4205
4206 assert(!strcmp(argv[3], "--child"));
4207 child = atoi(argv[4]);
4208 test_id = atoi(argv[5]);
4209
4210 return skt_xfer_udp_common(child, XFER_PING_PACKET_COUNT,
4211 XFER_PING_BATCH_COUNT, TRUE, FALSE, FALSE, FALSE, -1, -1,
4212 -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, true);
4213 }
4214
4215 static int
4216 skt_xfer_udp_ping_pong_one_main(int argc, char *argv[])
4217 {
4218 int child;
4219
4220 assert(!strcmp(argv[3], "--child"));
4221 child = atoi(argv[4]);
4222
4223 return skt_xfer_udp_common(child, 1, 1, TRUE, FALSE, FALSE, FALSE,
4224 -1, -1, -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
4225 }
4226
4227 static int
4228 skt_xfer_udp_ping_pong_long_main(int argc, char *argv[])
4229 {
4230 int child;
4231
4232 assert(!strcmp(argv[3], "--child"));
4233 child = atoi(argv[4]);
4234
4235 return skt_xfer_udp_common(child, XFER_PING_PACKET_COUNT_LONG,
4236 XFER_PING_BATCH_COUNT, TRUE, FALSE, FALSE, FALSE, -1, -1,
4237 -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
4238 }
4239
4240 static int
4241 skt_xfer_udp_ping_pong_one_wrong_main(int argc, char *argv[])
4242 {
4243 int child;
4244
4245 assert(!strcmp(argv[3], "--child"));
4246 child = atoi(argv[4]);
4247
4248 return skt_xfer_udp_common(child, 1, 1, TRUE, TRUE, FALSE, FALSE,
4249 -1, -1, -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
4250 }
4251
4252 static int
4253 skt_xfer_tcp_syn_flood_main(int argc, char *argv[])
4254 {
4255 int child;
4256
4257 assert(!strcmp(argv[3], "--child"));
4258 child = atoi(argv[4]);
4259
4260 return skt_xfer_tcpflood(child, 10000, 64, TRUE);
4261 }
4262
4263 static int
4264 skt_xfer_tcp_rst_flood_main(int argc, char *argv[])
4265 {
4266 int child;
4267
4268 assert(!strcmp(argv[3], "--child"));
4269 child = atoi(argv[4]);
4270
4271 return skt_xfer_tcpflood(child, 10000, 64, FALSE);
4272 }
4273
4274 static int
4275 skt_xfer_udp_ping_pong_aqm_main(int argc, char *argv[])
4276 {
4277 int child;
4278
4279 assert(!strcmp(argv[3], "--child"));
4280 child = atoi(argv[4]);
4281
4282 return skt_xfer_udp_common(child, XFER_AQM_PING_PACKET_COUNT,
4283 XFER_AQM_PING_BATCH_COUNT, TRUE, FALSE, TRUE, FALSE, -1, -1,
4284 -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
4285 }
4286
4287 static int
4288 skt_xfer_udp_with_errors_main(int argc, char *argv[])
4289 {
4290 int child;
4291
4292 assert(!strcmp(argv[3], "--child"));
4293 child = atoi(argv[4]);
4294
4295 return skt_xfer_udp_with_errors_common(child,
4296 XFER_TXRX_PACKET_COUNT, XFER_TXRX_BATCH_COUNT);
4297 }
4298
4299 static int
4300 skt_xfer_tcp_port_zero_main(int argc, char *argv[])
4301 {
4302 int child;
4303
4304 assert(!strcmp(argv[3], "--child"));
4305 child = atoi(argv[4]);
4306
4307 return skt_xfer_portzero(child, IPPROTO_TCP);
4308 }
4309
4310 static int
4311 skt_xfer_udp_port_zero_main(int argc, char *argv[])
4312 {
4313 int child;
4314
4315 assert(!strcmp(argv[3], "--child"));
4316 child = atoi(argv[4]);
4317
4318 return skt_xfer_portzero(child, IPPROTO_UDP);
4319 }
4320
4321 static int
4322 skt_xfer_setuponly_main(int argc, char *argv[])
4323 {
4324 int child;
4325
4326 assert(!strcmp(argv[3], "--child"));
4327 child = atoi(argv[4]);
4328 return skt_xfer_setuponly(child);
4329 }
4330
4331 static int
4332 skt_xfer_udp_ping_pong_wmm_main(int argc, char *argv[])
4333 {
4334 int child;
4335
4336 assert(!strcmp(argv[3], "--child"));
4337 child = atoi(argv[4]);
4338
4339 return skt_xfer_udp_common(child, XFER_WMM_PING_PACKET_COUNT,
4340 XFER_WMM_PING_BATCH_COUNT, TRUE, FALSE, FALSE, TRUE, -1, -1,
4341 -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
4342 }
4343
4344 int
4345 skt_xfer_flowmatch_main(int argc, char *argv[])
4346 {
4347 int child;
4348
4349 assert(!strcmp(argv[3], "--child"));
4350 child = atoi(argv[4]);
4351
4352 return skt_xfer_flowmatch(child);
4353 }
4354
4355 static int
4356 skt_xfer_flowcleanup_main(int argc, char *argv[])
4357 {
4358 int child;
4359
4360 assert(!strcmp(argv[3], "--child"));
4361 child = atoi(argv[4]);
4362
4363 return skt_xfer_flowcleanup(child, 128, 8);
4364 }
4365
4366 static int
4367 skt_xfer_udp_ping_pong_multi_buflet_main(int argc, char *argv[])
4368 {
4369 int child;
4370
4371 assert(!strcmp(argv[3], "--child"));
4372 child = atoi(argv[4]);
4373
4374 return skt_xfer_udp_common(child, XFER_PING_PACKET_COUNT,
4375 XFER_PING_BATCH_COUNT, TRUE, FALSE, FALSE, FALSE, -1, -1,
4376 XFER_TXRX_MULTI_BUFLET_BUF_SIZE,
4377 XFER_TXRX_MULTI_BUFLET_MAX_FRAGS, SKT_FSW_EVENT_TEST_NONE,
4378 false, false, false);
4379 }
4380
4381 static int
4382 skt_xfer_csumoffload_main(int argc, char *argv[])
4383 {
4384 int child;
4385
4386 assert(!strcmp(argv[3], "--child"));
4387 child = atoi(argv[4]);
4388 skt_xfer_csumoffload(child, IPPROTO_UDP);
4389
4390 return 0;
4391 }
4392
4393 static int
4394 skt_xfer_fastlane_main(int argc, char *argv[])
4395 {
4396 int child;
4397
4398 assert(!strcmp(argv[3], "--child"));
4399 child = atoi(argv[4]);
4400
4401 skt_xfer_qosmarking(child, IFRTYPE_QOSMARKING_FASTLANE);
4402
4403 return 0;
4404 }
4405
4406 static int
4407 skt_xfer_rfc4594_main(int argc, char *argv[])
4408 {
4409 int child;
4410
4411 assert(!strcmp(argv[3], "--child"));
4412 child = atoi(argv[4]);
4413
4414 skt_xfer_qosmarking(child, IFRTYPE_QOSMARKING_RFC4594);
4415
4416 return 0;
4417 }
4418
4419 static int
4420 skt_xfer_listener_tcp_rst_main(int argc, char *argv[])
4421 {
4422 int child;
4423
4424 assert(!strcmp(argv[3], "--child"));
4425 child = atoi(argv[4]);
4426
4427 skt_xfer_listener_tcp_rst(child);
4428
4429 return 0;
4430 }
4431
4432 static int
4433 skt_xfer_udp_frags_main(int argc, char *argv[])
4434 {
4435 int child;
4436
4437 assert(!strcmp(argv[3], "--child"));
4438 child = atoi(argv[4]);
4439
4440 skt_xfer_udp_frags(child, FALSE);
4441
4442 return 0;
4443 }
4444
4445 static int
4446 skt_xfer_udp_bad_frags_main(int argc, char *argv[])
4447 {
4448 int child;
4449
4450 assert(!strcmp(argv[3], "--child"));
4451 child = atoi(argv[4]);
4452
4453 skt_xfer_udp_frags(child, TRUE);
4454
4455 return 0;
4456 }
4457
4458 static int
4459 skt_xfer_udp_ifadv_main(int argc, char *argv[])
4460 {
4461 int child, test_id;
4462
4463 assert(!strcmp(argv[3], "--child"));
4464 child = atoi(argv[4]);
4465 test_id = atoi(argv[5]);
4466
4467 return skt_xfer_udp_common(child, XFER_TXRX_PACKET_COUNT,
4468 XFER_TXRX_BATCH_COUNT, FALSE, FALSE, FALSE, FALSE,
4469 -1, -1, -1, -1, test_id, false, false, false);
4470 }
4471
4472 static int
4473 skt_xfer_parent_child_flow_main(int argc, char *argv[])
4474 {
4475 int child, test_id;
4476
4477 assert(!strcmp(argv[3], "--child"));
4478 child = atoi(argv[4]);
4479 test_id = 0;
4480
4481 return skt_xfer_udp_parent_child(child, 0);
4482 }
4483
4484 static int
4485 skt_xfer_parent_child_flow_main_offset_400(int argc, char *argv[])
4486 {
4487 int child, test_id;
4488
4489 assert(!strcmp(argv[3], "--child"));
4490 child = atoi(argv[4]);
4491 test_id = 0;
4492
4493 return skt_xfer_udp_parent_child(child, 400);
4494 }
4495
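/*
 * Test setup helpers: each creates the fake ethernet pair with the desired
 * flags, temporarily overriding the classq update interval around creation,
 * enabling IP reassembly, and setting flowswitch RX TCP aggregation to 0.
 */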
4496 static void
4497 skt_xfer_init_txstart(void)
4498 {
4499 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4500 1000 * 1000);
4501 sktc_ifnet_feth_pair_create(FETH_FLAGS_TXSTART);
4502 sktc_reset_classq_update_intervals();
4503 sktc_enable_ip_reass();
4504 sktc_config_fsw_rx_agg_tcp(0);
4505 }
4506
4507 static void
4508 skt_xfer_init_native(void)
4509 {
4510 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4511 1000 * 1000);
4512 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE);
4513 sktc_reset_classq_update_intervals();
4514 sktc_enable_ip_reass();
4515 sktc_config_fsw_rx_agg_tcp(0);
4516 }
4517
4518 static void
4519 skt_xfer_rd_init(void)
4520 {
4521 int err;
4522 uint32_t disable_nxctl_check = 1;
4523 size_t len = sizeof(skt_disable_nxctl_check);
4524
4525 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4526 1000 * 1000);
4527 err = sysctlbyname("kern.skywalk.disable_nxctl_check",
4528 &skt_disable_nxctl_check, &len, &disable_nxctl_check,
4529 sizeof(disable_nxctl_check));
4530 assert(err == 0);
4531 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE);
4532 sktc_ifnet_rd_create();
4533 sktc_reset_classq_update_intervals();
4534 sktc_enable_ip_reass();
4535 sktc_config_fsw_rx_agg_tcp(0);
4536 }
4537
4538 /* QoS Marking App Policy needs to be set before child is launched */
4539 static int restricted_old;
4540 static void
4541 skt_xfer_init_enable_qos_marking_policy(void)
4542 {
4543 int zero = 0;
4544 size_t restricted_old_size = sizeof(restricted_old);
4545 assert(sysctlbyname("net.qos.policy.restricted",
4546 &restricted_old, &restricted_old_size,
4547 &zero, sizeof(zero)) == 0);
4548 }
4549
4550 static void
4551 skt_xfer_init_txstart_fastlane(void)
4552 {
4553 skt_xfer_init_txstart();
4554 skt_xfer_init_enable_qos_marking_policy();
4555 }
4556
4557 static void
4558 skt_xfer_init_txstart_fcs(void)
4559 {
4560 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4561 1000 * 1000);
4562 sktc_ifnet_feth_pair_create(FETH_FLAGS_TXSTART |
4563 FETH_FLAGS_FCS);
4564 sktc_reset_classq_update_intervals();
4565 sktc_enable_ip_reass();
4566 sktc_config_fsw_rx_agg_tcp(0);
4567 }
4568
4569 static void
4570 skt_xfer_init_txstart_trailer(void)
4571 {
4572 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4573 1000 * 1000);
4574 sktc_ifnet_feth_pair_create(FETH_FLAGS_TXSTART |
4575 FETH_FLAGS_TRAILER);
4576 sktc_reset_classq_update_intervals();
4577 sktc_enable_ip_reass();
4578 sktc_config_fsw_rx_agg_tcp(0);
4579 }
4580
4581 static void
4582 skt_xfer_init_native_fastlane(void)
4583 {
4584 skt_xfer_init_native();
4585 skt_xfer_init_enable_qos_marking_policy();
4586 }
4587
4588 static void
4589 skt_xfer_init_native_split_pools(void)
4590 {
4591 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4592 1000 * 1000);
4593 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE |
4594 FETH_FLAGS_NONSHAREDSPLITPOOLS);
4595 sktc_reset_classq_update_intervals();
4596 sktc_enable_ip_reass();
4597 sktc_config_fsw_rx_agg_tcp(0);
4598 }
4599
4600 static void
4601 skt_xfer_init_native_fcs(void)
4602 {
4603 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4604 1000 * 1000);
4605 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE |
4606 FETH_FLAGS_FCS);
4607 sktc_reset_classq_update_intervals();
4608 sktc_enable_ip_reass();
4609 sktc_config_fsw_rx_agg_tcp(0);
4610 }
4611
4612 static void
4613 skt_xfer_init_native_trailer(void)
4614 {
4615 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4616 1000 * 1000);
4617 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE |
4618 FETH_FLAGS_TRAILER);
4619 sktc_reset_classq_update_intervals();
4620 sktc_enable_ip_reass();
4621 sktc_config_fsw_rx_agg_tcp(0);
4622 }
4623
4624 static void
4625 skt_xfer_init_llink(void)
4626 {
4627 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4628 1000 * 1000);
4629 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_LLINK);
4630 sktc_reset_classq_update_intervals();
4631 sktc_enable_ip_reass();
4632 sktc_config_fsw_rx_agg_tcp(0);
4633 }
4634
4635 static void
4636 skt_xfer_init_llink_wmm(void)
4637 {
4638 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4639 1000 * 1000);
4640 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_LLINK |
4641 FETH_FLAGS_WMM);
4642 sktc_reset_classq_update_intervals();
4643 sktc_enable_ip_reass();
4644 sktc_config_fsw_rx_agg_tcp(0);
4645 }
4646
4647 static void
4648 skt_xfer_init_llink_multi(void)
4649 {
4650 int err;
4651 uint32_t disable_nxctl_check = 1;
4652 size_t len = sizeof(skt_disable_nxctl_check);
4653
4654 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4655 1000 * 1000);
4656 err = sysctlbyname("kern.skywalk.disable_nxctl_check",
4657 &skt_disable_nxctl_check, &len, &disable_nxctl_check,
4658 sizeof(disable_nxctl_check));
4659 assert(err == 0);
4660 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_MULTI_LLINK);
4661 sktc_reset_classq_update_intervals();
4662 sktc_enable_ip_reass();
4663 sktc_config_fsw_rx_agg_tcp(0);
4664 }
4665
4666 static void
4667 skt_xfer_fini(void)
4668 {
4669 #if SKT_XFER_DEBUG
4670 T_LOG("Waiting");
4671 fflush(stdout);
4672 for (int i = 0; i < 5; i++) {
4673 sleep(1);
4674 T_LOG(".");
4675 fflush(stdout);
4676 }
4677 T_LOG("\n");
4678 #endif
4679 sktc_ifnet_feth0_set_dequeue_stall(FALSE);
4680 sktc_ifnet_feth1_set_dequeue_stall(FALSE);
4681 sktc_ifnet_feth_pair_destroy();
4682 sktc_restore_ip_reass();
4683 sktc_restore_fsw_rx_agg_tcp();
4684 }
4685
4686 static void
4687 skt_xfer_rd_fini(void)
4688 {
4689 #if SKT_XFER_DEBUG
4690 T_LOG("Waiting");
4691 fflush(stdout);
4692 for (int i = 0; i < 5; i++) {
4693 sleep(1);
4694 T_LOG(".");
4695 fflush(stdout);
4696 }
4697 T_LOG("\n");
4698 #endif
4699 int err;
4700
4701 err = sysctlbyname("kern.skywalk.disable_nxctl_check",
4702 NULL, NULL, &skt_disable_nxctl_check,
4703 sizeof(skt_disable_nxctl_check));
4704 assert(err == 0);
4705 sktc_ifnet_feth0_set_dequeue_stall(FALSE);
4706 sktc_ifnet_feth1_set_dequeue_stall(FALSE);
4707 sktc_ifnet_feth_pair_destroy();
4708 sktc_ifnet_rd_destroy();
4709 sktc_restore_ip_reass();
4710 sktc_restore_fsw_rx_agg_tcp();
4711 }
4712
4713 static void
4714 skt_xfer_fini_fastlane(void)
4715 {
4716 /* restore sysctl */
4717 assert(sysctlbyname("net.qos.policy.restricted", NULL, NULL,
4718 &restricted_old, sizeof(restricted_old)) == 0);
4719
4720 skt_xfer_fini();
4721 }
4722
4723 static void
4724 skt_xfer_fini_llink_multi(void)
4725 {
4726 int err;
4727
4728 err = sysctlbyname("kern.skywalk.disable_nxctl_check",
4729 NULL, NULL, &skt_disable_nxctl_check,
4730 sizeof(skt_disable_nxctl_check));
4731 assert(err == 0);
4732 skt_xfer_fini();
4733 }
4734
4735 static void
4736 skt_xfer_errors_init(void)
4737 {
4738 uint64_t emask = (1ull << 63);
4739 uint32_t rmask = 0x7ff;
4740
4741 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE);
4742 set_error_inject_mask(&emask);
4743 inject_error_rmask = sktu_set_inject_error_rmask(&rmask);
4744 sktc_config_fsw_rx_agg_tcp(0);
4745 }
4746
4747 static void
4748 skt_xfer_errors_compat_init(void)
4749 {
4750 uint64_t emask = (1ull << 63);
4751 uint32_t rmask = 0x7ff;
4752
4753 sktc_ifnet_feth_pair_create(FETH_FLAGS_TXSTART);
4754 set_error_inject_mask(&emask);
4755 inject_error_rmask = sktu_set_inject_error_rmask(&rmask);
4756 sktc_config_fsw_rx_agg_tcp(0);
4757 }
4758
4759 static void
4760 skt_xfer_errors_fini(void)
4761 {
4762 uint64_t emask = 0;
4763
4764 set_error_inject_mask(&emask);
4765 (void) sktu_set_inject_error_rmask(&inject_error_rmask);
4766 sktc_ifnet_feth_pair_destroy();
4767 sktc_restore_fsw_rx_agg_tcp();
4768 }
4769
4770 static void
4771 skt_xfer_multi_buflet_fini()
4772 {
4773 sktc_restore_channel_buflet_alloc();
4774 skt_xfer_fini();
4775 }
4776
4777 static void
4778 skt_xfer_init_native_wmm(void)
4779 {
4780 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_WMM);
4781 sktc_enable_ip_reass();
4782 sktc_config_fsw_rx_agg_tcp(0);
4783 }
4784
4785 static void
4786 skt_xfer_init_native_multi_buflet(void)
4787 {
4788 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_MULTI_BUFLET |
4789 FETH_FLAGS_TX_HEADROOM);
4790 sktc_enable_channel_buflet_alloc();
4791 sktc_enable_ip_reass();
4792 sktc_config_fsw_rx_agg_tcp(0);
4793 }
4794
4795 static void
4796 skt_xfer_init_native_multi_buflet_copy(void)
4797 {
4798 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_MULTI_BUFLET |
4799 FETH_FLAGS_NONSHAREDPOOL | FETH_FLAGS_TX_HEADROOM);
4800 sktc_enable_ip_reass();
4801 sktc_enable_channel_buflet_alloc();
4802 sktc_config_fsw_rx_agg_tcp(0);
4803 }
4804
4805 static void
4806 skt_xfer_init_ifadv(void)
4807 {
4808 int intvl = XFER_IFADV_INTERVAL; /* in milliseconds */
4809
4810 assert(sysctlbyname("net.link.fake.if_adv_intvl",
4811 NULL, 0, &intvl, sizeof(intvl)) == 0);
4812 skt_xfer_init_native();
4813 }
4814
4815 static void
4816 skt_xfer_fini_ifadv(void)
4817 {
4818 int intvl = 0; /* disable */
4819
4820 skt_xfer_fini();
4821 assert(sysctlbyname("net.link.fake.if_adv_intvl",
4822 NULL, 0, &intvl, sizeof(intvl)) == 0);
4823 }
4824
4825 static void
4826 skt_xfer_init_chan_event(void)
4827 {
4828 int drops = XFER_TX_PKT_DROP_RATE;
4829 assert(sysctlbyname("net.link.fake.tx_drops",
4830 NULL, 0, &drops, sizeof(drops)) == 0);
4831
4832 skt_xfer_init_native();
4833 }
4834
4835 static void
4836 skt_xfer_fini_chan_event(void)
4837 {
4838 skt_xfer_fini();
4839 int drops = 0;
4840 assert(sysctlbyname("net.link.fake.tx_drops",
4841 NULL, 0, &drops, sizeof(drops)) == 0);
4842 }
4843
4844 static void
4845 skt_xfer_init_chan_event_async(void)
4846 {
4847 int tx_compl_mode = 1; /* async mode */
4848 assert(sysctlbyname("net.link.fake.tx_completion_mode",
4849 NULL, 0, &tx_compl_mode, sizeof(tx_compl_mode)) == 0);
4850 skt_xfer_init_chan_event();
4851 }
4852
4853 static void
4854 skt_xfer_fini_chan_event_async(void)
4855 {
4856 int tx_compl_mode = 0; /* sync mode (default) */
4857 skt_xfer_fini_chan_event();
4858 assert(sysctlbyname("net.link.fake.tx_completion_mode",
4859 NULL, 0, &tx_compl_mode, sizeof(tx_compl_mode)) == 0);
4860 }
4861
4862 static void
4863 skt_xfer_init_parent_child_flow(void)
4864 {
4865 int err;
4866 uint32_t disable_nxctl_check = 1;
4867 size_t len = sizeof(skt_disable_nxctl_check);
4868
4869 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4870 1000 * 1000);
4871 err = sysctlbyname("kern.skywalk.disable_nxctl_check",
4872 &skt_disable_nxctl_check, &len, &disable_nxctl_check,
4873 sizeof(disable_nxctl_check));
4874 assert(err == 0);
4875 sktc_ifnet_feth_pair_create(FETH_FLAGS_TXSTART);
4876 sktc_reset_classq_update_intervals();
4877 sktc_enable_ip_reass();
4878 sktc_config_fsw_rx_agg_tcp(0);
4879 }
4880
4881 static void
4882 skt_xfer_init_parent_child_flow_native(void)
4883 {
4884 int err;
4885 uint32_t disable_nxctl_check = 1;
4886 size_t len = sizeof(skt_disable_nxctl_check);
4887
4888 sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
4889 1000 * 1000);
4890 err = sysctlbyname("kern.skywalk.disable_nxctl_check",
4891 &skt_disable_nxctl_check, &len, &disable_nxctl_check,
4892 sizeof(disable_nxctl_check));
4893 assert(err == 0);
4894 sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE);
4895 sktc_reset_classq_update_intervals();
4896 sktc_enable_ip_reass();
4897 sktc_config_fsw_rx_agg_tcp(0);
4898 }
4899
4900 static void
4901 skt_xfer_fini_parent_child_flow(void)
4902 {
4903 #if SKT_XFER_DEBUG
4904 T_LOG("Waiting");
4905 fflush(stdout);
4906 for (int i = 0; i < 5; i++) {
4907 sleep(1);
4908 T_LOG(".");
4909 fflush(stdout);
4910 }
4911 T_LOG("\n");
4912 #endif
4913 int err;
4914
4915 err = sysctlbyname("kern.skywalk.disable_nxctl_check",
4916 NULL, NULL, &skt_disable_nxctl_check,
4917 sizeof(skt_disable_nxctl_check));
4918 assert(err == 0);
4919 sktc_ifnet_feth0_set_dequeue_stall(FALSE);
4920 sktc_ifnet_feth1_set_dequeue_stall(FALSE);
4921 sktc_ifnet_feth_pair_destroy();
4922 sktc_restore_ip_reass();
4923 sktc_restore_fsw_rx_agg_tcp();
4924 }
4925
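/*
 * Test descriptors consumed by the skywalk test driver: name, description,
 * required features, child-process count, entry point, extra argv slots, and
 * init/fini hooks (field roles inferred from usage).
 */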
4926 struct skywalk_mptest skt_xferudp = {
4927 "xferudp", "UDP bi-directional transfer over fake ethernet pair",
4928 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
4929 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
4930 2, skt_xfer_udp_main,
4931 { NULL, NULL, NULL, NULL, NULL, NULL },
4932 skt_xfer_init_txstart, skt_xfer_fini, {},
4933 };
4934
4935 struct skywalk_mptest skt_xferudpn = {
4936 "xferudpn",
4937 "UDP bi-directional transfer over native fake ethernet pair",
4938 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
4939 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
4940 2, skt_xfer_udp_main,
4941 { NULL, NULL, NULL, NULL, NULL, NULL },
4942 skt_xfer_init_native, skt_xfer_fini, {},
4943 };
4944
4945 struct skywalk_mptest skt_xferudpnsp = {
4946 "xferudpnsp",
4947 "UDP bi-directional transfer over native fake ethernet pair"
4948 " with split rx/tx pools",
4949 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
4950 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
4951 2, skt_xfer_udp_main,
4952 { NULL, NULL, NULL, NULL, NULL, NULL },
4953 skt_xfer_init_native_split_pools, skt_xfer_fini, {},
4954 };
4955
4956 struct skywalk_mptest skt_xferudpfcs = {
4957 "xferudpfcs",
4958 "UDP bi-directional transfer over fake ethernet pair"
4959 " with link frame check sequence",
4960 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
4961 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
4962 2, skt_xfer_udp_main,
4963 { NULL, NULL, NULL, NULL, NULL, NULL },
4964 skt_xfer_init_txstart_fcs, skt_xfer_fini, {},
4965 };
4966
4967 struct skywalk_mptest skt_xferudptrailer = {
4968 "xferudptrailer",
4969 "UDP bi-directional transfer over fake ethernet pair"
4970 " with link trailer",
4971 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
4972 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
4973 2, skt_xfer_udp_main,
4974 { NULL, NULL, NULL, NULL, NULL, NULL },
4975 skt_xfer_init_txstart_trailer, skt_xfer_fini, {},
4976 };
4977
4978 struct skywalk_mptest skt_xferudpnfcs = {
4979 "xferudpnfcs",
4980 "UDP bi-directional transfer over native fake ethernet pair"
4981 " with link frame check sequence",
4982 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
4983 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
4984 2, skt_xfer_udp_main,
4985 { NULL, NULL, NULL, NULL, NULL, NULL },
4986 skt_xfer_init_native_fcs, skt_xfer_fini, {},
4987 };
4988
4989 struct skywalk_mptest skt_xferudpntrailer = {
4990 "xferudpntrailer",
4991 "UDP bi-directional transfer over native fake ethernet pair"
4992 " with link trailer",
4993 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
4994 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
4995 2, skt_xfer_udp_main,
4996 { NULL, NULL, NULL, NULL, NULL, NULL },
4997 skt_xfer_init_native_trailer, skt_xfer_fini, {},
4998 };
4999
5000 struct skywalk_mptest skt_xferudplong = {
5001 "xferudplong",
5002 "UDP bi-directional transfer over fake ethernet pair longer duration",
5003 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5004 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5005 2, skt_xfer_udp_long_main,
5006 { NULL, NULL, NULL, NULL, NULL, NULL },
5007 skt_xfer_init_txstart, skt_xfer_fini, {},
5008 };
5009
5010 struct skywalk_mptest skt_xferudplongn = {
5011 "xferudplongn",
5012 "UDP bi-directional transfer over"
5013 " native fake ethernet pair longer duration",
5014 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5015 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5016 2, skt_xfer_udp_long_main,
5017 { NULL, NULL, NULL, NULL, NULL, NULL },
5018 skt_xfer_init_native, skt_xfer_fini, {},
5019 };
5020
5021 struct skywalk_mptest skt_xferudpoverwhelm = {
5022 "xferudpoverwhelm",
5023 "UDP bi-directional transfer over fake ethernet pair overwhelm",
5024 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5025 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5026 2, skt_xfer_udp_overwhelm_main,
5027 { NULL, NULL, NULL, NULL, NULL, NULL },
5028 skt_xfer_init_txstart, skt_xfer_fini, {},
5029 };
5030
5031 struct skywalk_mptest skt_xferudpoverwhelmn = {
5032 "xferudpoverwhelmn",
5033 "UDP bi-directional transfer over native fake ethernet pair overwhelm",
5034 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5035 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5036 2, skt_xfer_udp_overwhelm_main,
5037 { NULL, NULL, NULL, NULL, NULL, NULL },
5038 skt_xfer_init_native, skt_xfer_fini, {},
5039 };
5040
5041 struct skywalk_mptest skt_xferudpoverwhelmnsp = {
5042 "xferudpoverwhelmnsp",
5043 "UDP bi-directional transfer over native fake ethernet pair overwhelm"
5044 " with split rx/tx pools",
5045 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5046 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5047 2, skt_xfer_udp_overwhelm_main,
5048 { NULL, NULL, NULL, NULL, NULL, NULL },
5049 skt_xfer_init_native_split_pools, skt_xfer_fini, {},
5050 };
5051
5052 struct skywalk_mptest skt_xferudpoverwhelmlong = {
5053 "xferudpoverwhelmlong",
5054 "UDP bi-directional transfer over fake ethernet pair overwhelm long",
5055 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5056 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5057 2, skt_xfer_udp_overwhelm_long_main,
5058 { NULL, NULL, NULL, NULL, NULL, NULL },
5059 skt_xfer_init_txstart, skt_xfer_fini, {},
5060 };
5061
5062 struct skywalk_mptest skt_xferudpoverwhelmlongn = {
5063 "xferudpoverwhelmlongn",
5064 "UDP bi-directional transfer over"
5065 " native fake ethernet pair overwhelm long",
5066 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5067 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5068 2, skt_xfer_udp_overwhelm_long_main,
5069 { NULL, NULL, NULL, NULL, NULL, NULL },
5070 skt_xfer_init_native, skt_xfer_fini, {},
5071 };
5072
5073 struct skywalk_mptest skt_xferudpping = {
5074 "xferudpping", "UDP ping-pong over fake ethernet pair",
5075 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5076 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5077 2, skt_xfer_udp_ping_pong_main,
5078 { NULL, NULL, NULL, NULL, NULL,
5079 STR(SKT_FSW_PING_PONG_TEST_DEFAULT)},
5080 skt_xfer_init_txstart, skt_xfer_fini, {},
5081 };
5082
5083 struct skywalk_mptest skt_xferudppingn = {
5084 "xferudppingn", "UDP ping-pong over native fake ethernet pair",
5085 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5086 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5087 2, skt_xfer_udp_ping_pong_main,
5088 { NULL, NULL, NULL, NULL, NULL,
5089 STR(SKT_FSW_PING_PONG_TEST_DEFAULT)},
5090 skt_xfer_init_native, skt_xfer_fini, {},
5091 };
5092
5093 struct skywalk_mptest skt_xferudpping1 = {
5094 "xferudpping1", "UDP ping-pong once over fake ethernet pair",
5095 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5096 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5097 2, skt_xfer_udp_ping_pong_one_main,
5098 { NULL, NULL, NULL, NULL, NULL, NULL },
5099 skt_xfer_init_txstart, skt_xfer_fini, {},
5100 };
5101
5102 struct skywalk_mptest skt_xferudpping1n = {
5103 "xferudpping1n", "UDP ping-pong once over native fake ethernet pair",
5104 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5105 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5106 2, skt_xfer_udp_ping_pong_one_main,
5107 { NULL, NULL, NULL, NULL, NULL, NULL },
5108 skt_xfer_init_native, skt_xfer_fini, {},
5109 };
5110
5111 struct skywalk_mptest skt_xferudppinglong = {
5112 "xferudppinglong",
5113 "UDP ping-pong over fake ethernet pair longer duration",
5114 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5115 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5116 2, skt_xfer_udp_ping_pong_long_main,
5117 { NULL, NULL, NULL, NULL, NULL, NULL },
5118 skt_xfer_init_txstart, skt_xfer_fini, {},
5119 };
5120
5121 struct skywalk_mptest skt_xferudppinglongn = {
5122 "xferudppinglongn",
5123 "UDP ping-pong over native fake ethernet pair longer duration",
5124 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5125 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5126 2, skt_xfer_udp_ping_pong_long_main,
5127 { NULL, NULL, NULL, NULL, NULL, NULL },
5128 skt_xfer_init_native, skt_xfer_fini, {},
5129 };
5130
5131 struct skywalk_mptest skt_xferudpping1wrong = {
5132 "xferudpping1wrong",
5133 "UDP ping-pong once over fake ethernet pair with wrong flow IDs",
5134 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5135 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5136 2, skt_xfer_udp_ping_pong_one_wrong_main,
5137 { NULL, NULL, NULL, NULL, NULL, NULL },
5138 skt_xfer_init_txstart, skt_xfer_fini, {},
5139 };
5140
5141 struct skywalk_mptest skt_xferrdudpping = {
5142 "xferrdudpping",
5143 "UDP ping-pong between redirect and fake ethernet interface",
5144 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5145 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5146 2, skt_xfer_rd_udp_ping_pong_main,
5147 { NULL, NULL, NULL, NULL, NULL,
5148 STR(SKT_FSW_PING_PONG_TEST_DEFAULT)},
5149 skt_xfer_rd_init, skt_xfer_rd_fini, {},
5150 };
5151
5152 struct skywalk_mptest skt_xfertcpsynflood = {
5153 "xfertcpsynflood",
5154 "TCP SYN flood",
5155 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5156 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5157 1, skt_xfer_tcp_syn_flood_main,
5158 { NULL, NULL, NULL, NULL, NULL, NULL },
5159 skt_xfer_init_txstart, skt_xfer_fini, {},
5160 };
5161
5162 struct skywalk_mptest skt_xfertcprstflood = {
5163 "xfertcprstflood",
5164 "TCP RST flood",
5165 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5166 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5167 1, skt_xfer_tcp_rst_flood_main,
5168 { NULL, NULL, NULL, NULL, NULL, NULL },
5169 skt_xfer_init_txstart, skt_xfer_fini, {},
5170 };
5171
5172 struct skywalk_mptest skt_xferudpping_aqm = {
5173 "xferudppingaqm", "UDP ping-pong over fake ethernet pair with AQM",
5174 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5175 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5176 2, skt_xfer_udp_ping_pong_aqm_main,
5177 { NULL, NULL, NULL, NULL, NULL, NULL },
5178 skt_xfer_init_txstart, skt_xfer_fini, {},
5179 };
5180
5181 struct skywalk_mptest skt_xferudppingn_aqm = {
5182 "xferudppingnaqm", "UDP ping-pong over native fake ethernet pair with"
5183 " AQM",
5184 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5185 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5186 2, skt_xfer_udp_ping_pong_aqm_main,
5187 { NULL, NULL, NULL, NULL, NULL, NULL },
5188 skt_xfer_init_native, skt_xfer_fini, {},
5189 };
5190
5191 struct skywalk_mptest skt_xferudpwitherrors = {
5192 "xferudpwitherrors",
5193 "UDP bi-directional transfer over"
5194 " native fake ethernet pair with injected errors",
5195 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5196 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS |
5197 SK_FEATURE_DEV_OR_DEBUG,
5198 2, skt_xfer_udp_with_errors_main,
5199 { NULL, NULL, NULL, NULL, NULL, NULL },
5200 skt_xfer_errors_init, skt_xfer_errors_fini, {},
5201 };
5202
5203 struct skywalk_mptest skt_xferudpwitherrorscompat = {
5204 "xferudpwitherrorscompat",
5205 "UDP bi-directional transfer over"
5206 " compat fake ethernet pair with injected errors",
5207 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5208 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS |
5209 SK_FEATURE_DEV_OR_DEBUG,
5210 2, skt_xfer_udp_with_errors_main,
5211 { NULL, NULL, NULL, NULL, NULL, NULL },
5212 skt_xfer_errors_compat_init, skt_xfer_errors_fini, {},
5213 };
5214
5215 struct skywalk_mptest skt_xfertcpportzero = {
5216 "xfertcpportzero",
5217 "TCP connect to port 0",
5218 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5219 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5220 1, skt_xfer_tcp_port_zero_main,
5221 { NULL, NULL, NULL, NULL, NULL, NULL },
5222 skt_xfer_init_txstart, skt_xfer_fini, {},
5223 };
5224
5225 struct skywalk_mptest skt_xferudpportzero = {
5226 "xferudpportzero",
5227 "UDP connect to port 0",
5228 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5229 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5230 1, skt_xfer_udp_port_zero_main,
5231 { NULL, NULL, NULL, NULL, NULL, NULL },
5232 skt_xfer_init_txstart, skt_xfer_fini, {},
5233 };
5234
5235 struct skywalk_mptest skt_xfersetuponly = {
5236 "xfersetuponly", "setup fake ethernet pair only",
5237 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5238 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5239 2, skt_xfer_setuponly_main,
5240 { NULL, NULL, NULL, NULL, NULL, NULL },
5241 skt_xfer_init_txstart, skt_xfer_fini, {},
5242 };
5243
5244 struct skywalk_mptest skt_xfersetuponlyn = {
5245 "xfersetuponlyn", "setup native fake ethernet pair only",
5246 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5247 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5248 2, skt_xfer_setuponly_main,
5249 { NULL, NULL, NULL, NULL, NULL, NULL },
5250 skt_xfer_init_native, skt_xfer_fini, {},
5251 };
5252
5253 struct skywalk_mptest skt_xferudppingn_wmm = {
5254 "xferudppingnwmm", "UDP ping-pong over native fake ethernet pair in wmm"
5255 " mode",
5256 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5257 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5258 2, skt_xfer_udp_ping_pong_wmm_main,
5259 { NULL, NULL, NULL, NULL, NULL, NULL },
5260 skt_xfer_init_native_wmm, skt_xfer_fini, {},
5261 };
5262
5263 struct skywalk_mptest skt_xferflowmatch = {
5264 "xferflowmatch",
5265 "Packets not matching registered flow tuple should be dropped",
5266 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5267 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5268 2, skt_xfer_flowmatch_main,
5269 { NULL, NULL, NULL, NULL, NULL, NULL },
5270 skt_xfer_init_native, skt_xfer_fini, {},
5271 };
5272
5273 struct skywalk_mptest skt_xferflowcleanup = {
5274 "xferflowcleanup",
5275 "verification of flow cleanup on channel close",
5276 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5277 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5278 2, skt_xfer_flowcleanup_main,
5279 { NULL, NULL, NULL, NULL, NULL, NULL },
5280 skt_xfer_init_native, skt_xfer_fini, {},
5281 };

struct skywalk_mptest skt_xferudppingn_mb = {
	"xferudppingnmb", "UDP ping-pong over native fake ethernet pair with"
	" multi-buflet packet",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS |
	SK_FEATURE_DEV_OR_DEBUG,
	2, skt_xfer_udp_ping_pong_multi_buflet_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native_multi_buflet, skt_xfer_multi_buflet_fini, {},
};

struct skywalk_mptest skt_xferudppingn_mbc = {
	"xferudppingnmbc", "UDP ping-pong over native fake ethernet pair with"
	" multi-buflet packet in copy packet mode",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS |
	SK_FEATURE_DEV_OR_DEBUG,
	2, skt_xfer_udp_ping_pong_multi_buflet_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native_multi_buflet_copy, skt_xfer_multi_buflet_fini, {},
};

struct skywalk_mptest skt_xfercsumoffload = {
	"xfercsumoffload",
	"Packet checksum offload",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_csumoffload_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_txstart, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xfercsumoffloadn = {
	"xfercsumoffloadn",
	"Packet checksum offload over native",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_csumoffload_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferfastlane = {
	"xferqosmarking_fastlane",
	"fastlane qos marking",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_fastlane_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_txstart_fastlane, skt_xfer_fini_fastlane, {},
};

struct skywalk_mptest skt_xferfastlanen = {
	"xferqosmarking_fastlanen",
	"fastlane qos marking over native",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_fastlane_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native_fastlane, skt_xfer_fini_fastlane, {},
};

struct skywalk_mptest skt_xferrfc4594 = {
	"xferqosmarking_rfc4594",
	"rfc4594 qos marking",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_rfc4594_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_txstart_fastlane, skt_xfer_fini_fastlane, {},
};

struct skywalk_mptest skt_xferrfc4594n = {
	"xferqosmarking_rfc4594n",
	"rfc4594 qos marking over native",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_rfc4594_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native_fastlane, skt_xfer_fini_fastlane, {},
};

struct skywalk_mptest skt_xferlistenertcprst = {
	"xferlistenertcprst",
5367 "TCP Listner should be able to send RST",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_listener_tcp_rst_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferudpfrags = {
	"xferudpfrags",
	"UDP fragmentation test (channel flow Tx)",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_frags_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferudpbadfrags = {
	"xferudpbadfrags",
5387 "UDP fragmentation test (channel flow Tx)",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_bad_frags_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native, skt_xfer_fini, {},
};
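
/*
 * Several of the tests below pass a test-mode constant in the last argv slot.
 * STR()/_STR() is a two-level stringification, so the constant is
 * macro-expanded before being stringified; the child test's main() presumably
 * parses the resulting string to select the test flavor to run.
 */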

struct skywalk_mptest skt_xferudpifadvenable = {
	"xferudpifadvenable",
	"flowswitch interface advisory enabled test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ifadv_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_EVENT_TEST_IF_ADV_ENABLED)},
	skt_xfer_init_ifadv, skt_xfer_fini_ifadv, {},
};

struct skywalk_mptest skt_xferudpifadvdisable = {
	"xferudpifadvdisable",
	"flowswitch interface advisory disabled test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ifadv_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_EVENT_TEST_IF_ADV_DISABLED)},
	skt_xfer_init_ifadv, skt_xfer_fini_ifadv, {},
};

struct skywalk_mptest skt_xferudppingnll = {
	"xferudppingnll",
	"UDP ping-pong over low latency channel on native fake ethernet pair",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ping_pong_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_PING_PONG_TEST_LOW_LATENCY)},
	skt_xfer_init_native, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferudppingllink = {
	"xferudppingllink", "UDP ping-pong over fake ethernet pair in llink mode",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ping_pong_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_PING_PONG_TEST_DEFAULT)},
	skt_xfer_init_llink, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferudppingllink_wmm = {
	"xferudppingllinkwmm", "UDP ping-pong over fake ethernet pair in llink & wmm mode",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ping_pong_wmm_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_llink_wmm, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferudppingllink_multi = {
	"xferudppingllinkmulti", "UDP ping-pong over fake ethernet pair in multi llink mode",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ping_pong_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_PING_PONG_TEST_MULTI_LLINK)},
	skt_xfer_init_llink_multi, skt_xfer_fini_llink_multi, {},
};

struct skywalk_mptest skt_xferudpchanevents = {
	"skt_xferudpchanevents",
	"flowswitch channel events test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ifadv_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_EVENT_TEST_CHANNEL_EVENTS)},
	skt_xfer_init_chan_event, skt_xfer_fini_chan_event, {},
};

struct skywalk_mptest skt_xferudpchaneventsasync = {
	"skt_xferudpchaneventsasync",
	"flowswitch channel events in async mode test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ifadv_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_EVENT_TEST_CHANNEL_EVENTS)},
	skt_xfer_init_chan_event_async, skt_xfer_fini_chan_event_async, {},
};

struct skywalk_mptest skt_xferparentchildflow = {
	"skt_xferparentchild",
	"flowswitch parent child flows test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	3, skt_xfer_parent_child_flow_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_parent_child_flow, skt_xfer_fini_parent_child_flow, {},
};
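
/*
 * "demux offset 400" presumably refers to the byte offset within the payload
 * at which the child-flow demux pattern is matched; the variant above is
 * assumed to use the default offset.
 */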

struct skywalk_mptest skt_xferparentchildflow_offset_400 = {
	"skt_xferparentchild_offset_400",
	"flowswitch parent child flows test with demux offset 400",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	3, skt_xfer_parent_child_flow_main_offset_400,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_parent_child_flow, skt_xfer_fini_parent_child_flow, {},
};

struct skywalk_mptest skt_xferparentchildflown = {
	"skt_xferparentchildn",
	"flowswitch parent child flows on native fake ethernet interface test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	3, skt_xfer_parent_child_flow_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_parent_child_flow_native, skt_xfer_fini_parent_child_flow, {},
};

struct skywalk_mptest skt_xferparentchildflown_offset_400 = {
	"skt_xferparentchildn_offset_400",
	"flowswitch parent child flows on native fake ethernet interface test with demux offset 400",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	3, skt_xfer_parent_child_flow_main_offset_400,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_parent_child_flow_native, skt_xfer_fini_parent_child_flow, {},
};
