/*
 * Copyright (c) 2017-2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* -*- Mode: c; tab-width: 8; indent-tabs-mode: 1; c-basic-offset: 8; -*- */

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <stdbool.h>
#include <sys/event.h>
#include <sys/ioctl.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <TargetConditionals.h>
#include <arpa/inet.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <os/log.h>
#include <darwintest.h>
#include "skywalk_test_driver.h"
#include "skywalk_test_utils.h"
#include "skywalk_test_common.h"

#define STR(x) _STR(x)
#define _STR(x) #x

#define ENABLE_UPP true /* channel in user packet pool mode */

#define OUR_FLOWSWITCH_PORT (NEXUS_PORT_FLOW_SWITCH_CLIENT + 1)

#define FETH0_PORT 0x1234
#define FETH1_PORT 0x5678

#if TARGET_OS_WATCH
#define XFER_TXRX_PACKET_COUNT 20000
#define XFER_TXRX_BATCH_COUNT 4
#define XFER_TXRX_TIMEOUT_SECS 0
#define XFER_TXRX_TIMEOUT_NSECS (100 * 1000 * 1000)

#define XFER_PING_PACKET_COUNT 10000
#define XFER_PING_BATCH_COUNT 64
#define XFER_PING_TIMEOUT_SECS 4
#define XFER_PING_TIMEOUT_NSECS (500 * 1000 * 1000)
#define XFER_PING_CHILD1_RX_TIMEOUT_SECS 4
#define XFER_PING_CHILD1_RX_TIMEOUT_NSECS 0
#define XFER_PING_FADV_TIMEOUT_SECS 2
#define XFER_PING_FADV_TIMEOUT_NSECS 0

#else /* TARGET_OS_WATCH */
#define XFER_TXRX_PACKET_COUNT (250 * 1000)
#define XFER_TXRX_BATCH_COUNT 8
#define XFER_TXRX_TIMEOUT_SECS 0
#define XFER_TXRX_TIMEOUT_NSECS (100 * 1000 * 1000)

#define XFER_PING_PACKET_COUNT (125 * 1000)
#define XFER_PING_BATCH_COUNT 128
#define XFER_PING_TIMEOUT_SECS 4
#define XFER_PING_TIMEOUT_NSECS (500 * 1000 * 1000)
#define XFER_PING_CHILD1_RX_TIMEOUT_SECS 4
#define XFER_PING_CHILD1_RX_TIMEOUT_NSECS 0
#define XFER_PING_FADV_TIMEOUT_SECS 2
#define XFER_PING_FADV_TIMEOUT_NSECS 0
#endif /* TARGET_OS_WATCH */

#define XFER_IFADV_INTERVAL 30
#define XFER_TXRX_PACKET_COUNT_LONG (XFER_TXRX_PACKET_COUNT * 10)
#define XFER_PING_PACKET_COUNT_LONG (XFER_PING_PACKET_COUNT * 5)
#define XFER_AQM_PING_BATCH_COUNT 8
#define XFER_AQM_PING_PACKET_COUNT (XFER_AQM_PING_BATCH_COUNT * 4)
#define XFER_CLASSQ_UPDATE_INTERVAL 100 /* milliseconds */
/*
 * Delay to let the classq update interval elapse.  We add some extra
 * time to the update interval to account for timer drift.
 */
#define XFER_CLASSQ_UPDATE_INTERVAL_ELAPSE_DELAY \
	(XFER_CLASSQ_UPDATE_INTERVAL + 500) /* milliseconds */
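/*
 * A minimal usage sketch: a test that must let the classq update
 * interval elapse can simply sleep for the padded interval, e.g.
 *
 *	usleep(XFER_CLASSQ_UPDATE_INTERVAL_ELAPSE_DELAY * 1000);
 *
 * (usleep() takes microseconds; the macros above are in milliseconds.)
 */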

#define XFER_TXRX_PACKET_COUNT_SHORT (XFER_TXRX_PACKET_COUNT / 10)

/*
 * For overwhelm transfer tests we try to send a large batch of packets
 * over a smaller ring size.
 */
#define XFER_TXRX_OVERWHELM_BATCH_COUNT 2048
#define XFER_TXRX_OVERWHELM_FSW_TX_RING_SIZE \
	(XFER_TXRX_OVERWHELM_BATCH_COUNT / 2)
#define XFER_TXRX_OVERWHELM_FSW_RX_RING_SIZE \
	XFER_TXRX_OVERWHELM_FSW_TX_RING_SIZE

#define XFER_TXRX_MULTI_BUFLET_BUF_SIZE 512
#define XFER_TXRX_MULTI_BUFLET_MAX_FRAGS 4 /* packet size = 2048 */

#define XFER_RECV_END_PAYLOAD "DEADBEEF" /* receiver end payload */
#define XFER_QOSMARKING_FASTLANE_PREFIX "FASTLANE."
#define XFER_QOSMARKING_RFC4594_PREFIX "RFC4594."

#define XFER_TX_PKT_DROP_RATE 100

/* dummy packet identifier constants */
#define XFER_PKTID_PAYLOAD_TYPE 0xFA
#define XFER_PKTID_STREAM_ID 0xFB

static struct sktc_nexus_handles handles;
static uint32_t inject_error_rmask;
static uint32_t skt_disable_nxctl_check;

#define INJECT_CODE_IDX_MAX 2
struct fsw_inject_codes {
	int ic_code;
	uint32_t ic_rmask;
	int ic_stat_idx[INJECT_CODE_IDX_MAX];
};
#define IC_RMASK_UNSPEC (-1)

#define _S1(code, a) {(code), IC_RMASK_UNSPEC, {(a), -1}}
#define _S2(code, a, b) {(code), IC_RMASK_UNSPEC, {(a), (b)}}
#define _S3(code, a, b, c) {(code), a, {(b), (c)}}
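/*
 * For example, _S2(11, FSW_STATS_DROP, FSW_STATS_DROP_NOMEM_MBUF)
 * expands to {11, IC_RMASK_UNSPEC, {FSW_STATS_DROP,
 * FSW_STATS_DROP_NOMEM_MBUF}}: inject error code 11 and expect both
 * of those flowswitch statistics to account for the drop.
 */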

static const struct fsw_inject_codes fsw_inject_codes[] = {
	/* flow_pkt_classify() returns ENXIO */
	_S1(1, FSW_STATS_RX_FLOW_EXTRACT_ERR),

	/* ms_copy_to_dev_mbuf() sets mbuf to NULL */
	_S2(11, FSW_STATS_DROP, FSW_STATS_DROP_NOMEM_MBUF),

	/* ms_copy_to_dev_pkt() sets pkt to NULL */
	_S2(12, FSW_STATS_DROP, FSW_STATS_DROP_NOMEM_PKT),

	/* ms_dev_output() QP_PACKET sets pkt_drop to TRUE */
	_S2(14, FSW_STATS_DROP, FSW_STATS_TX_AQM_DROP),

	/*
	 * Can result in a later kernel panic when the nexus is closed,
	 * so do not use for now.
	 */

	/* fsw_ms_user_port_flush() sets spkt->pkt_qum_flags to
	 * (spkt->pkt_qum_flags | QUMF_DROPPED) */
	_S1(20, FSW_STATS_DROP),

	/* fsw_ms_user_port_flush() is_frag TRUE */
	/*_S1(21, FSW_STATS_DROP), */

	/*
	 * 31 triggers a kernel assertion. Do not use.
	 * 32 only makes sense if 31 is also enabled.
	 */
	/* ms_lookup() fakes flow entry not found */
	/*_S1(31, FSW_STATS_TXLOOKUP_NOMATCH), */
	/* ms_lookup() fakes NULL host_na */
	/*_S1(32, FSW_STATS_HOST_NOT_ATTACHED), */

	/*
	 * 33 to 43 apply to outbound (to device) or inbound to legacy
	 * stack traffic, so they cannot (yet) be tested.  Some of them
	 * can also trigger kernel assertions.
	 */

	/* fsw_resolve() returns EJUSTRETURN */
	_S1(35, FSW_STATS_TX_RESOLV_PENDING),

	/*
	 * fsw_resolve() returns an error other than EJUSTRETURN, but the
	 * flow route has a stale entry
	 */
	_S1(36, FSW_STATS_TX_RESOLV_STALE),
#if 0
	/* ms_lookup() fails to track packet */
	_S2(33, FSW_STATS_RXLOOKUP_TRACKERR, FSW_STATS_TXLOOKUP_TRACKERR),
	/* ms_lookup() wrong uuid. */
	_S2(34, FSW_STATS_RXLOOKUP_INVALID_ID, FSW_STATS_TXLOOKUP_INVALID_ID),

	/* ms_dev_port_flush_enqueue_dst() sets kr_space_avail to zero. */
	_S1(40, FSW_STATS_DST_KRSPACE_DROP),

	/* ms_dev_port_flush_enqueue_dst() sets n (needed) to zero. */
	_S1(41, FSW_STATS_DROP),

	/* ms_dev_port_flush_enqueue_dst() fakes pp_alloc_packet_batch()
	 * returning ENOMEM. */
	_S1(42, FSW_STATS_NOMEM_PKT),

	/* ms_dev_port_flush_enqueue_dst() fakes ms_copy_packet_from_dev()
	 * returning EINVAL. */
	_S1(43, FSW_STATS_DROP)
#endif
};
#define INJECT_CODE_COUNT (sizeof(fsw_inject_codes) / \
    sizeof(fsw_inject_codes[0]))

static packet_svc_class_t packet_svc_class[] =
{
	PKT_SC_BK_SYS,
	PKT_SC_BK,
	PKT_SC_BE,
	PKT_SC_RD,
	PKT_SC_OAM,
	PKT_SC_AV,
	PKT_SC_RV,
	PKT_SC_VI,
	PKT_SC_VO,
	PKT_SC_CTL
};

#define NUM_SVC_CLASS \
    (sizeof (packet_svc_class) / sizeof (packet_svc_class[0]))
#define XFER_WMM_PING_BATCH_COUNT 8
#define XFER_WMM_PING_PACKET_COUNT \
    (XFER_WMM_PING_BATCH_COUNT * NUM_SVC_CLASS)

/* test identifiers for flowswitch event tests */
#define SKT_FSW_EVENT_TEST_NONE 0
#define SKT_FSW_EVENT_TEST_IF_ADV_ENABLED 1
#define SKT_FSW_EVENT_TEST_IF_ADV_DISABLED 2
#define SKT_FSW_EVENT_TEST_CHANNEL_EVENTS 3

/* flowswitch xfer test event flags */
#define SKT_FSW_EVFLAG_IFADV 0x1
#define SKT_FSW_EVFLAG_CHANNEL 0x2

/* test identifiers for ping-pong tests */
#define SKT_FSW_PING_PONG_TEST_DEFAULT 0
#define SKT_FSW_PING_PONG_TEST_LOW_LATENCY 1
#define SKT_FSW_PING_PONG_TEST_MULTI_LLINK 2
/****************************************************************/

/* Parent-child tests */
#define CHILD_FLOWSWITCH_PORT (OUR_FLOWSWITCH_PORT + 1)
#define DEMUX_PAYLOAD_OFFSET offsetof(my_payload, data)
#define DEMUX_PAYLOAD_VALUE 0xFFFF
#define MAX_DEMUX_OFFSET 900

static inline uint16_t
skt_xfer_fold_sum_final(uint32_t sum)
{
	sum = (sum >> 16) + (sum & 0xffff);	/* 17-bit */
	sum = (sum >> 16) + (sum & 0xffff);	/* 16-bit + carry */
	sum = (sum >> 16) + (sum & 0xffff);	/* final carry */
	return ~sum & 0xffff;
}
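/*
 * Worked example: folding sum = 0x1fffe gives (0x1 + 0xfffe) = 0xffff;
 * the next two folds leave it unchanged, and ~0xffff & 0xffff == 0.
 * A correctly checksummed header therefore folds to zero.
 */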

static int
connect_flow(nexus_controller_t ncd,
    const uuid_t fsw, nexus_port_t nx_port, const uuid_t flow,
    int protocol, uint16_t flags,
    struct in_addr src_addr, in_port_t src_port,
    struct in_addr dst_addr, in_port_t dst_port,
    flowadv_idx_t *flowadv_idx, uint64_t qset_id)
{
	struct nx_flow_req nfr;
	int error;

	memset(&nfr, 0, sizeof(nfr));
	nfr.nfr_ip_protocol = protocol;
	nfr.nfr_nx_port = nx_port;
	uuid_copy(nfr.nfr_flow_uuid, flow);
	nfr.nfr_flags = flags;
	/* src */
	nfr.nfr_saddr.sa.sa_len = sizeof(struct sockaddr_in);
	nfr.nfr_saddr.sa.sa_family = AF_INET;
	nfr.nfr_saddr.sin.sin_port = htons(src_port);
	nfr.nfr_saddr.sin.sin_addr = src_addr;
	/* dst */
	nfr.nfr_daddr.sa.sa_len = sizeof(struct sockaddr_in);
	nfr.nfr_daddr.sa.sa_family = AF_INET;
	nfr.nfr_daddr.sin.sin_port = htons(dst_port);
	nfr.nfr_daddr.sin.sin_addr = dst_addr;
	nfr.nfr_flowadv_idx = FLOWADV_IDX_NONE;
	nfr.nfr_qset_id = qset_id;

	error = __os_nexus_flow_add(ncd, fsw, &nfr);

	if (error) {
		SKT_LOG("__os_nexus_flow_add/nsbind failed %s (%d)\n",
		    strerror(errno), errno);
		error = errno;
	} else if (nfr.nfr_nx_port != nx_port) {
		T_LOG("nfr_nx_port %d != nx_port %d\n",
		    nfr.nfr_nx_port, nx_port);
		error = EINVAL;
	}
	*flowadv_idx = nfr.nfr_flowadv_idx;
	return error;
}
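
/*
 * A minimal usage sketch (controller handle and address variables are
 * hypothetical, for illustration only):
 *
 *	flowadv_idx_t fadv_idx;
 *	uuid_t flow_id;
 *
 *	uuid_generate_random(flow_id);
 *	error = connect_flow(ncd, fsw_uuid, OUR_FLOWSWITCH_PORT, flow_id,
 *	    IPPROTO_UDP, 0, our_ip, FETH0_PORT, peer_ip, FETH1_PORT,
 *	    &fadv_idx, 0);
 *	SKTC_ASSERT_ERR(error == 0);
 *
 * The returned flow-advisory index can later be used for flow-control
 * checks against the channel's flow advisory table.
 */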

static int
connect_child_flow(nexus_controller_t ncd,
    const uuid_t fsw, nexus_port_t nx_port, const uuid_t flow,
    int protocol, uint16_t flags,
    struct in_addr src_addr, in_port_t src_port,
    struct in_addr dst_addr, in_port_t dst_port,
    flowadv_idx_t *flowadv_idx, uint64_t qset_id, const uuid_t parent_flow,
    struct flow_demux_pattern *demux_patterns, uint8_t demux_pattern_count)
{
	struct nx_flow_req nfr;
	int error;

	memset(&nfr, 0, sizeof(nfr));
	nfr.nfr_ip_protocol = protocol;
	nfr.nfr_nx_port = nx_port;
	uuid_copy(nfr.nfr_flow_uuid, flow);
	nfr.nfr_flags = flags;
	/* src */
	nfr.nfr_saddr.sa.sa_len = sizeof(struct sockaddr_in);
	nfr.nfr_saddr.sa.sa_family = AF_INET;
	nfr.nfr_saddr.sin.sin_port = htons(src_port);
	nfr.nfr_saddr.sin.sin_addr = src_addr;
	/* dst */
	nfr.nfr_daddr.sa.sa_len = sizeof(struct sockaddr_in);
	nfr.nfr_daddr.sa.sa_family = AF_INET;
	nfr.nfr_daddr.sin.sin_port = htons(dst_port);
	nfr.nfr_daddr.sin.sin_addr = dst_addr;
	nfr.nfr_flowadv_idx = FLOWADV_IDX_NONE;
	nfr.nfr_qset_id = qset_id;
	uuid_copy(nfr.nfr_parent_flow_uuid, parent_flow);

	for (int i = 0; i < demux_pattern_count; i++) {
		bcopy(&demux_patterns[i], &nfr.nfr_flow_demux_patterns[i],
		    sizeof(struct flow_demux_pattern));
	}
	nfr.nfr_flow_demux_count = demux_pattern_count;
	error = __os_nexus_flow_add(ncd, fsw, &nfr);

	if (error) {
		SKT_LOG("__os_nexus_flow_add/nsbind failed %s (%d)\n",
		    strerror(errno), errno);
		error = errno;
	} else if (nfr.nfr_nx_port != nx_port) {
		T_LOG("nfr_nx_port %d != nx_port %d\n",
		    nfr.nfr_nx_port, nx_port);
		error = EINVAL;
	}
	*flowadv_idx = nfr.nfr_flowadv_idx;
	return error;
}


static inline uint32_t
skt_xfer_get_chan_max_frags(const channel_t chd)
{
	return (uint32_t)sktc_get_channel_attr(chd, CHANNEL_ATTR_MAX_FRAGS);
}

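/*
 * Copy data_len bytes of payload into the packet starting at
 * start_offset, chaining across buflets as needed.  In user packet
 * pool mode (ENABLE_UPP) additional buflets are allocated from the
 * channel and appended; otherwise the packet's existing buflet chain
 * is walked.  When csum_offload is false, a partial Internet checksum
 * of the copied payload is accumulated into *partial_csum.
 */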
static inline void
sktc_xfer_copy_data_to_packet(channel_port_t port, packet_t ph,
    const void * data, uint16_t data_len, uint16_t start_offset,
    bool csum_offload, uint32_t *partial_csum)
{
	char *baddr;
	buflet_t buf, pbuf = NULL;
	uint16_t clen, bdlim, blen;
	uint16_t len = data_len;
	uint32_t partial = 0;
	size_t frame_length = data_len + start_offset;
	int error;

	buf = os_packet_get_next_buflet(ph, NULL);
	assert(buf != NULL);
	baddr = os_buflet_get_object_address(buf);
	assert(baddr != NULL);
	bdlim = blen = os_buflet_get_data_limit(buf);
	baddr += start_offset;
	blen -= start_offset;

	/* copy the data */
	while (len != 0) {
		if (blen == 0) {
			error = os_buflet_set_data_length(buf, bdlim);
			SKTC_ASSERT_ERR(error == 0);
			pbuf = buf;
#if ENABLE_UPP
			error = os_channel_buflet_alloc(port->chan, &buf);
			SKTC_ASSERT_ERR(error == 0);
			assert(buf != NULL);
			error = os_packet_add_buflet(ph, pbuf, buf);
			SKTC_ASSERT_ERR(error == 0);
#else
			buf = os_packet_get_next_buflet(ph, pbuf);
			assert(buf != NULL);
#endif
			error = os_buflet_set_data_offset(buf, 0);
			SKTC_ASSERT_ERR(error == 0);
			baddr = os_buflet_get_object_address(buf);
			assert(baddr != NULL);
			bdlim = blen = os_buflet_get_data_limit(buf);
		}
		clen = MIN(blen, len);
		if (csum_offload) {
			bcopy(data, baddr, clen);
		} else {
			partial = ~os_copy_and_inet_checksum(data, baddr, clen,
			    partial);
		}
		len -= clen;
		blen -= clen;
		data += clen;
		baddr += clen;
		assert(len == 0 || blen == 0);
	}
	if (pbuf == NULL) {
		error = os_buflet_set_data_length(buf, frame_length);
	} else {
		error = os_buflet_set_data_length(buf, clen);
	}
	SKTC_ASSERT_ERR(error == 0);
	if (!csum_offload) {
		*partial_csum = partial;
	}
}
/****************************************************************/

#if SKT_XFER_DEBUG
static const char *
inet_ptrtoa(const void * ptr)
{
	struct in_addr ip;

	bcopy(ptr, &ip, sizeof(ip));
	return inet_ntoa(ip);
}

static void
ip_frame_dump(const void * buf, size_t buf_len)
{
	ip_tcp_header_t * ip_tcp;
	ip_udp_header_t * ip_udp;
	int ip_len;

	assert(buf_len >= sizeof(struct ip));
	ip_udp = (ip_udp_header_t *)buf;
	ip_tcp = (ip_tcp_header_t *)buf;
	ip_len = ntohs(ip_udp->ip.ip_len);
	T_LOG("ip src %s ", inet_ptrtoa(&ip_udp->ip.ip_src));
	T_LOG("dst %s len %d id %d\n",
	    inet_ptrtoa(&ip_udp->ip.ip_dst), ip_len,
	    ntohs(ip_udp->ip.ip_id));
	assert(buf_len >= ip_len);
	assert(ip_udp->ip.ip_v == IPVERSION);
	assert(ip_udp->ip.ip_hl == (sizeof(struct ip) >> 2));
	switch (ip_udp->ip.ip_p) {
	case IPPROTO_UDP: {
		int udp_len;
		int data_len;

		assert(buf_len >= sizeof(*ip_udp));
		udp_len = ntohs(ip_udp->udp.uh_ulen);
		data_len = udp_len - (int)sizeof(ip_udp->udp);
		T_LOG(
			"UDP src 0x%x dst 0x%x len %d csum 0x%x datalen %d\n",
			ntohs(ip_udp->udp.uh_sport),
			ntohs(ip_udp->udp.uh_dport),
			udp_len,
			ntohs(ip_udp->udp.uh_sum),
			data_len);
		break;
	}
	case IPPROTO_TCP: {
		assert(buf_len >= sizeof(*ip_tcp));
		T_LOG(
			"TCP src 0x%x dst 0x%x seq %u ack %u "
			"off %d flags 0x%x win %d csum 0x%x\n",
			ntohs(ip_tcp->tcp.th_sport),
			ntohs(ip_tcp->tcp.th_dport),
			ntohl(ip_tcp->tcp.th_seq),
			ntohl(ip_tcp->tcp.th_ack),
			ip_tcp->tcp.th_off,
			ip_tcp->tcp.th_flags,
			ntohs(ip_tcp->tcp.th_win),
			ntohs(ip_tcp->tcp.th_sum));
		break;
	}
	default:
		break;
	}
}
#endif

static int ip_id;

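/*
 * Build an IPv4+TCP frame in the packet.  The payload is copied first
 * (optionally accumulating a software checksum), then the TCP header
 * is filled in: with csum_offload the pseudo-header sum is seeded and
 * PACKET_CSUM_PARTIAL offload is requested; otherwise the full TCP
 * checksum over pseudo header, TCP header and payload is computed
 * here.  The IP header and its checksum are written last.
 */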
static size_t
tcp_frame_populate(channel_port_t port, packet_t ph, struct in_addr src_ip,
    uint16_t src_port, struct in_addr dst_ip, uint16_t dst_port,
    const void * data, size_t data_len, bool connect,
    bool csum_offload)
{
	int error;
	size_t frame_length;
	ip_tcp_header_t * ip_tcp;
	char * baddr;
	tcp_pseudo_hdr_t * tcp_pseudo;
	buflet_t buf;
	uint16_t bdlim;
	uint32_t partial = 0;

	buf = os_packet_get_next_buflet(ph, NULL);
	assert(buf != NULL);
	error = os_buflet_set_data_offset(buf, 0);
	SKTC_ASSERT_ERR(error == 0);
	bdlim = os_buflet_get_data_limit(buf);
	assert(bdlim != 0);
	baddr = os_buflet_get_object_address(buf);
	assert(baddr != NULL);

	frame_length = sizeof(*ip_tcp) + data_len;
#if ENABLE_UPP
	assert((os_packet_get_buflet_count(ph) == 1));
	assert((skt_xfer_get_chan_max_frags(port->chan) * bdlim) >=
	    frame_length);
#else
	assert((os_packet_get_buflet_count(ph) * bdlim) >= frame_length);
#endif
	assert(bdlim >= sizeof(ip_tcp_header_t));

	error = os_packet_set_link_header_length(ph, 0);
	SKTC_ASSERT_ERR(error == 0);
	/* determine frame offsets */
	ip_tcp = (ip_tcp_header_t *)baddr;
	tcp_pseudo = (tcp_pseudo_hdr_t *)
	    (((char *)&ip_tcp->tcp) - sizeof(*tcp_pseudo));
	baddr += sizeof(*ip_tcp);

	/* copy the data */
	sktc_xfer_copy_data_to_packet(port, ph, data, data_len, sizeof(*ip_tcp),
	    csum_offload, &partial);

	/* fill in TCP header */
	ip_tcp->tcp.th_sport = htons(src_port);
	ip_tcp->tcp.th_dport = htons(dst_port);
	ip_tcp->tcp.th_flags |= (connect ? TH_SYN : TH_RST);
	ip_tcp->tcp.th_off = (sizeof(struct tcphdr)) >> 2;
	ip_tcp->tcp.th_sum = 0;
	if (csum_offload) {
		ip_tcp->tcp.th_sum = in_pseudo(src_ip.s_addr, dst_ip.s_addr,
		    htons(data_len + sizeof(ip_tcp->tcp) + IPPROTO_TCP));
		os_packet_set_inet_checksum(ph, PACKET_CSUM_PARTIAL,
		    sizeof(struct ip),
		    sizeof(struct ip) + offsetof(struct tcphdr, th_sum));
	} else {
		/* fill in TCP pseudo header (overwritten by IP header below) */
		tcp_pseudo_hdr_t * tcp_pseudo;
		tcp_pseudo = (tcp_pseudo_hdr_t *)
		    (((char *)&ip_tcp->tcp) - sizeof(*tcp_pseudo));
		bcopy(&src_ip, &tcp_pseudo->src_ip, sizeof(src_ip));
		bcopy(&dst_ip, &tcp_pseudo->dst_ip, sizeof(dst_ip));
		tcp_pseudo->zero = 0;
		tcp_pseudo->proto = IPPROTO_TCP;
		tcp_pseudo->length = htons(sizeof(ip_tcp->tcp) + data_len);
		partial = os_inet_checksum(tcp_pseudo, sizeof(*tcp_pseudo)
		    + sizeof(ip_tcp->tcp), partial);
		ip_tcp->tcp.th_sum = skt_xfer_fold_sum_final(partial);
	}

	/* fill in IP header */
	bzero(ip_tcp, sizeof(ip_tcp->ip));
	ip_tcp->ip.ip_v = IPVERSION;
	ip_tcp->ip.ip_hl = sizeof(struct ip) >> 2;
	ip_tcp->ip.ip_ttl = MAXTTL;
	ip_tcp->ip.ip_p = IPPROTO_TCP;
	bcopy(&src_ip, &ip_tcp->ip.ip_src, sizeof(src_ip));
	bcopy(&dst_ip, &ip_tcp->ip.ip_dst, sizeof(dst_ip));
	ip_tcp->ip.ip_len = htons(sizeof(*ip_tcp) + data_len);
	ip_tcp->ip.ip_id = htons(ip_id++);

	/* compute the IP checksum */
	ip_tcp->ip.ip_sum = 0;	/* needs to be zero for checksum */
	ip_tcp->ip.ip_sum = in_cksum(&ip_tcp->ip, sizeof(ip_tcp->ip), 0);
	return frame_length;
}

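/*
 * Build an IPv4+UDP frame; the layout logic mirrors
 * tcp_frame_populate().  For fragmented payloads (fragment_id != 0)
 * this builds the first fragment: uh_ulen covers total_udp_len and the
 * IP header carries IP_MF, while follow-on fragments are built by
 * ip_frame_populate().
 */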
static size_t
udp_frame_populate(channel_port_t port, packet_t ph, struct in_addr src_ip,
    uint16_t src_port, struct in_addr dst_ip, uint16_t dst_port,
    const void * data, size_t data_len, bool csum_offload,
    uint16_t fragment_id, size_t total_udp_len)
{
	int error;
	size_t frame_length;
	ip_udp_header_t * ip_udp;
	char * baddr;
	udp_pseudo_hdr_t * udp_pseudo;
	buflet_t buf;
	uint16_t bdlim;
	uint32_t partial = 0;

	buf = os_packet_get_next_buflet(ph, NULL);
	assert(buf != NULL);
	error = os_buflet_set_data_offset(buf, 0);
	SKTC_ASSERT_ERR(error == 0);
	bdlim = os_buflet_get_data_limit(buf);
	assert(bdlim != 0);
	baddr = os_buflet_get_object_address(buf);
	assert(baddr != NULL);

	frame_length = sizeof(*ip_udp) + data_len;
#if ENABLE_UPP
	assert((os_packet_get_buflet_count(ph) == 1));
	assert((skt_xfer_get_chan_max_frags(port->chan) * bdlim) >=
	    frame_length);
#else
	assert((os_packet_get_buflet_count(ph) * bdlim) >= frame_length);
#endif
	assert(bdlim >= sizeof(ip_udp_header_t));

	error = os_packet_set_link_header_length(ph, 0);
	SKTC_ASSERT_ERR(error == 0);
	/* determine frame offsets */
	ip_udp = (ip_udp_header_t *)baddr;
	udp_pseudo = (udp_pseudo_hdr_t *)
	    (((char *)&ip_udp->udp) - sizeof(*udp_pseudo));
	baddr += sizeof(*ip_udp);

	/* copy the data */
	sktc_xfer_copy_data_to_packet(port, ph, data, data_len, sizeof(*ip_udp),
	    csum_offload, &partial);

	/* fill in UDP header */
	ip_udp->udp.uh_sport = htons(src_port);
	ip_udp->udp.uh_dport = htons(dst_port);
	ip_udp->udp.uh_ulen = htons(sizeof(ip_udp->udp) + total_udp_len);
	ip_udp->udp.uh_sum = 0;
	if (csum_offload) {
		ip_udp->udp.uh_sum = in_pseudo(src_ip.s_addr, dst_ip.s_addr,
		    htons(total_udp_len + sizeof(ip_udp->udp) + IPPROTO_UDP));
		os_packet_set_inet_checksum(ph,
		    PACKET_CSUM_PARTIAL | PACKET_CSUM_ZERO_INVERT,
		    sizeof(struct ip),
		    sizeof(struct ip) + offsetof(struct udphdr, uh_sum));
	} else {
		/* fill in UDP pseudo header (overwritten by IP header below) */
		udp_pseudo_hdr_t *udp_pseudo;
		udp_pseudo = (udp_pseudo_hdr_t *)
		    (((char *)&ip_udp->udp) - sizeof(*udp_pseudo));
		bcopy(&src_ip, &udp_pseudo->src_ip, sizeof(src_ip));
		bcopy(&dst_ip, &udp_pseudo->dst_ip, sizeof(dst_ip));
		udp_pseudo->zero = 0;
		udp_pseudo->proto = IPPROTO_UDP;
		udp_pseudo->length = htons(sizeof(ip_udp->udp) + total_udp_len);
		partial = os_inet_checksum(udp_pseudo, sizeof(*udp_pseudo)
		    + sizeof(ip_udp->udp), partial);
		ip_udp->udp.uh_sum = skt_xfer_fold_sum_final(partial);
	}

	/* fill in IP header */
	bzero(ip_udp, sizeof(ip_udp->ip));
	ip_udp->ip.ip_v = IPVERSION;
	ip_udp->ip.ip_hl = sizeof(struct ip) >> 2;
	ip_udp->ip.ip_ttl = MAXTTL;
	ip_udp->ip.ip_p = IPPROTO_UDP;
	bcopy(&src_ip, &ip_udp->ip.ip_src, sizeof(src_ip));
	bcopy(&dst_ip, &ip_udp->ip.ip_dst, sizeof(dst_ip));
	ip_udp->ip.ip_len = htons(sizeof(*ip_udp) + data_len);
	if (fragment_id != 0) {
		ip_udp->ip.ip_id = htons(fragment_id);
		ip_udp->ip.ip_off = htons(IP_MF);
	} else {
		ip_udp->ip.ip_id = htons(ip_id++);
	}

	/* compute the IP header checksum */
	ip_udp->ip.ip_sum = 0;	/* needs to be zero for checksum */
	ip_udp->ip.ip_sum = in_cksum(&ip_udp->ip, sizeof(ip_udp->ip), 0);
	return frame_length;
}

static size_t
ip_frame_populate(channel_port_t port, packet_t ph, uint8_t protocol,
    struct in_addr src_ip, struct in_addr dst_ip, const void * data,
    size_t data_len, uint16_t fragment_id, uint16_t fragment_offset,
    bool last_fragment)
{
	int error;
	size_t frame_length;
	struct ip *ip;
	char * baddr;
	buflet_t buf;
	uint16_t bdlim;

	buf = os_packet_get_next_buflet(ph, NULL);
	assert(buf != NULL);
	error = os_buflet_set_data_offset(buf, 0);
	SKTC_ASSERT_ERR(error == 0);
	bdlim = os_buflet_get_data_limit(buf);
	assert(bdlim != 0);
	baddr = os_buflet_get_object_address(buf);
	assert(baddr != NULL);

	frame_length = sizeof(*ip) + data_len;
#if ENABLE_UPP
	assert((os_packet_get_buflet_count(ph) == 1));
	assert((skt_xfer_get_chan_max_frags(port->chan) * bdlim) >=
	    frame_length);
#else
	assert((os_packet_get_buflet_count(ph) * bdlim) >= frame_length);
#endif
	assert(bdlim >= sizeof(*ip));

	error = os_packet_set_link_header_length(ph, 0);
	SKTC_ASSERT_ERR(error == 0);
	/* determine frame offsets */
	ip = (struct ip *)baddr;
	baddr += sizeof(*ip);

	/* fill in IP header */
	bzero(ip, sizeof(*ip));
	ip->ip_v = IPVERSION;
	ip->ip_hl = sizeof(struct ip) >> 2;
	ip->ip_ttl = MAXTTL;
	ip->ip_p = protocol;
	bcopy(&src_ip, &ip->ip_src, sizeof(src_ip));
	bcopy(&dst_ip, &ip->ip_dst, sizeof(dst_ip));
	ip->ip_len = htons(sizeof(*ip) + data_len);
	if (fragment_id != 0) {
		ip->ip_id = htons(fragment_id);
		ip->ip_off = htons(last_fragment ? 0 : IP_MF) |
		    htons(fragment_offset / 8);
	} else {
		ip->ip_id = htons(ip_id++);
	}

	/* compute the IP header checksum */
	ip->ip_sum = 0;	/* needs to be zero for checksum */
	ip->ip_sum = in_cksum(ip, sizeof(*ip), 0);

	/* copy the data */
	sktc_xfer_copy_data_to_packet(port, ph, data, data_len, sizeof(*ip),
	    TRUE, NULL);
	return frame_length;
}

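/*
 * Dispatch to the per-protocol populate routine, then stamp the
 * service class and flow UUID and finalize the packet.  For UDP, a
 * non-zero fragment_offset selects the raw-IP path that builds a
 * continuation fragment.
 */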
static size_t
frame_populate(channel_port_t port, packet_t ph, int protocol,
    struct in_addr src_ip, uint16_t src_port, struct in_addr dst_ip,
    uint16_t dst_port, const void * data, size_t data_len, uuid_t flow_id,
    bool connect, packet_svc_class_t svc_class, bool csum_offload,
    uint16_t fragment_id, size_t total_data_len, uint16_t fragment_offset,
    bool last_fragment)
{
	size_t ret;
	int error;

	switch (protocol) {
	case IPPROTO_TCP:
		ret = tcp_frame_populate(port, ph, src_ip, src_port, dst_ip,
		    dst_port, data, data_len, connect, csum_offload);
		break;
	case IPPROTO_UDP:
		assert(connect == FALSE);
		if (fragment_offset > 0) {
			ret = ip_frame_populate(port, ph, protocol, src_ip,
			    dst_ip, data, data_len, fragment_id,
			    fragment_offset, last_fragment);
		} else {
			ret = udp_frame_populate(port, ph, src_ip, src_port,
			    dst_ip, dst_port, data, data_len, csum_offload,
			    fragment_id, total_data_len);
		}
		break;
	default:
		ret = ip_frame_populate(port, ph, protocol, src_ip, dst_ip,
		    data, data_len, fragment_id, fragment_offset,
		    last_fragment);
		break;
	}
	error = os_packet_set_service_class(ph, svc_class);
	SKTC_ASSERT_ERR(error == 0);
	os_packet_set_flow_uuid(ph, flow_id);
	error = os_packet_finalize(ph);
	SKTC_ASSERT_ERR(error == 0);
	assert(ret == os_packet_get_data_length(ph));
	return ret;
}

static size_t
tcp_frame_process(packet_t ph, void *data, size_t data_max)
{
	buflet_t buflet;
	size_t pkt_len, data_len, ip_len, buf_len;
	uint32_t bdoff;
	void *buf;
	ip_tcp_header_t *ip_tcp;
	uint16_t csum;

	/**********************************************************************/
	/* process 1st buflet which contains protocol header */
	buflet = os_packet_get_next_buflet(ph, NULL);
	assert(buflet != NULL);
	buf_len = os_buflet_get_data_length(buflet);
	buf = os_buflet_get_object_address(buflet) +
	    os_buflet_get_data_offset(buflet);
	ip_tcp = (ip_tcp_header_t *)buf;

	pkt_len = os_packet_get_data_length(ph);
	ip_len = ntohs(ip_tcp->ip.ip_len);
	assert(ip_len <= pkt_len);
	data_len = ip_len - sizeof(*ip_tcp);
	assert(data_len <= data_max);

	/* IP */
	assert(ip_tcp->ip.ip_p == IPPROTO_TCP);

	/* verify IP header checksum */
	csum = in_cksum(&ip_tcp->ip, sizeof(ip_tcp->ip), 0);
	if (csum != 0) {
		sktu_dump_buffer(stderr, "ip header checksum", buf, buf_len);
		fflush(stderr);
		assert(0);
	}

	/* start TCP partial checksum on 1st buflet */
	buf_len = MIN(ip_len, buf_len);
	csum = os_inet_checksum(&ip_tcp->tcp, buf_len - sizeof(struct ip), 0);
	if (data != NULL) {	/* copy the data */
		bcopy(buf + sizeof(*ip_tcp), data, buf_len - sizeof(*ip_tcp));
		data += (buf_len - sizeof(*ip_tcp));
	}

	/**********************************************************************/
	/* iterate through the rest of buflets */
	ip_len -= buf_len;
	while (ip_len != 0) {
		buflet = os_packet_get_next_buflet(ph, buflet);
		assert(buflet != NULL);
		bdoff = os_buflet_get_data_offset(buflet);
		buf = os_buflet_get_object_address(buflet) + bdoff;
		assert(buf != 0);
		buf_len = os_buflet_get_data_length(buflet);
		assert(buf_len != 0);
		csum = os_inet_checksum(buf, buf_len, csum);
		if (data != NULL) {	/* copy the data */
			bcopy(buf, data, buf_len);
			data += buf_len;
		}
		ip_len -= buf_len;
	}

	csum = in_pseudo(ip_tcp->ip.ip_src.s_addr, ip_tcp->ip.ip_dst.s_addr,
	    csum + htons(data_len + sizeof(struct tcphdr) + IPPROTO_TCP));
	csum ^= 0xffff;
	if (csum != 0) {
		sktu_dump_buffer(stderr, "tcp packet bad checksum", buf,
		    ntohs(ip_tcp->ip.ip_len));
		fflush(stderr);
		assert(0);
	}

	return data_len;
}

static size_t
udp_frame_process(packet_t ph, void *data, size_t data_max)
{
	buflet_t buflet;
	size_t pkt_len, buf_len, ip_len, data_len;
	uint32_t bdoff;
	void *buf;
	ip_udp_header_t *ip_udp;
	uint16_t csum;

	/**********************************************************************/
	/* process 1st buflet which contains protocol header */
	buflet = os_packet_get_next_buflet(ph, NULL);
	assert(buflet != NULL);
	buf_len = os_buflet_get_data_length(buflet);
	buf = os_buflet_get_object_address(buflet) +
	    os_buflet_get_data_offset(buflet);
	ip_udp = (ip_udp_header_t *)buf;

	pkt_len = os_packet_get_data_length(ph);
	ip_len = ntohs(ip_udp->ip.ip_len);
	assert(ip_len <= pkt_len);
	data_len = ip_len - sizeof(*ip_udp);
	assert(data_len <= data_max);

	assert(ip_udp->ip.ip_p == IPPROTO_UDP);

	/* verify IP header checksum */
	csum = in_cksum(&ip_udp->ip, sizeof(ip_udp->ip), 0);
	if (csum != 0) {
		sktu_dump_buffer(stderr, "ip header checksum", buf, ip_len);
		fflush(stderr);
		assert(0);
	}

	/* start UDP partial checksum on 1st buflet */
	buf_len = MIN(ip_len, buf_len);
	csum = os_inet_checksum(&ip_udp->udp, buf_len - sizeof(struct ip), 0);

	if (data != NULL) {	/* copy the data */
		bcopy(buf + sizeof(*ip_udp), data, buf_len - sizeof(*ip_udp));
		data += (buf_len - sizeof(*ip_udp));
	}

	/**********************************************************************/
	/* iterate through the rest of buflets */
	ip_len -= buf_len;
	while (ip_len != 0) {
		buflet = os_packet_get_next_buflet(ph, buflet);
		assert(buflet != NULL);
		bdoff = os_buflet_get_data_offset(buflet);
		buf = os_buflet_get_object_address(buflet) + bdoff;
		assert(buf != 0);
		buf_len = os_buflet_get_data_length(buflet);
		buf_len = MIN(buf_len, ip_len);
		assert(buf_len != 0);
		if (ip_udp->udp.uh_sum != 0) {
			csum = os_inet_checksum(buf, buf_len, csum);
		}
		if (data != NULL) {	/* copy the data */
			bcopy(buf, data, buf_len);
			data += buf_len;
		}
		ip_len -= buf_len;
	}

	/* verify UDP checksum */
	if (ip_udp->ip.ip_off == 0 &&
	    ip_udp->udp.uh_sum != 0) {
		csum = in_pseudo(ip_udp->ip.ip_src.s_addr, ip_udp->ip.ip_dst.s_addr,
		    csum + htons(data_len + sizeof(struct udphdr) + IPPROTO_UDP));
		csum ^= 0xffff;
		if (csum != 0) {
			sktu_dump_buffer(stderr, "udp packet bad checksum", buf,
			    ntohs(ip_udp->ip.ip_len));
			fflush(stderr);
			assert(0);
		}
	}

	return data_len;
}

static size_t
ip_frame_process(packet_t ph, void * data, size_t data_max)
{
	buflet_t buflet;
	size_t pkt_len, buf_len, data_len;
	uint32_t bdoff;
	void *buf;
	struct ip *ip;
	uint16_t csum;

	/**********************************************************************/
	/* process 1st buflet which contains protocol header */
	buflet = os_packet_get_next_buflet(ph, NULL);
	assert(buflet != NULL);
	buf_len = os_buflet_get_data_length(buflet);
	buf = os_buflet_get_object_address(buflet) +
	    os_buflet_get_data_offset(buflet);
	ip = (struct ip *)buf;

	pkt_len = os_packet_get_data_length(ph);
	assert(pkt_len == ntohs(ip->ip_len));
	data_len = pkt_len - sizeof(*ip);
	assert(data_len <= data_max);

	/* verify IP header checksum */
	csum = in_cksum(ip, sizeof(*ip), 0);
	if (csum != 0) {
		sktu_dump_buffer(stderr, "ip header checksum", buf, buf_len);
		fflush(stderr);
		assert(0);
	}

	if (data != NULL) {	/* copy the data */
		bcopy(buf + sizeof(*ip), data, buf_len - sizeof(*ip));
		data += (buf_len - sizeof(*ip));
	}

	/**********************************************************************/
	/* iterate through the rest of buflets */
	pkt_len -= buf_len;
	while (pkt_len != 0) {
		buflet = os_packet_get_next_buflet(ph, buflet);
		assert(buflet != NULL);
		bdoff = os_buflet_get_data_offset(buflet);
		buf = os_buflet_get_object_address(buflet) + bdoff;
		assert(buf != 0);
		buf_len = os_buflet_get_data_length(buflet);
		assert(buf_len != 0);
		if (data != NULL) {	/* copy the data */
			bcopy(buf, data, buf_len);
			data += buf_len;
		}
		pkt_len -= buf_len;
	}

	return data_len;
}

struct qosmarking_mapping {
	char *svc_str;
	uint32_t svc;
	uint32_t dscp;
};

#define QOSMARKING_MAPPINGS(X) \
	/*SVC_CLASS	FASTLANE	RFC4594 */ \
	X(PKT_SC_BK,	_DSCP_AF11,	_DSCP_CS1) \
	X(PKT_SC_BK_SYS, _DSCP_AF11,	_DSCP_CS1) \
	X(PKT_SC_BE,	_DSCP_DF,	_DSCP_DF) \
	X(PKT_SC_RD,	_DSCP_AF21,	_DSCP_AF21) \
	X(PKT_SC_OAM,	_DSCP_CS2,	_DSCP_CS2) \
	X(PKT_SC_AV,	_DSCP_AF31,	_DSCP_AF31) \
	X(PKT_SC_RV,	_DSCP_CS4,	_DSCP_CS4) \
	X(PKT_SC_VI,	_DSCP_AF41,	_DSCP_AF41) \
	X(PKT_SC_SIG,	_DSCP_CS3,	_DSCP_CS5) \
	X(PKT_SC_VO,	_DSCP_EF,	_DSCP_EF) \
	X(PKT_SC_CTL,	_DSCP_DF,	_DSCP_CS6)

#define MAP_TO_FASTLANE(a, b, c) {#a, a, b},
#define MAP_TO_RFC4594(a, b, c) {#a, a, c},

#define QOSMARKING_SVC_MAX 11

struct qosmarking_mapping fastlane_mappings[] = {
	QOSMARKING_MAPPINGS(MAP_TO_FASTLANE)
};

struct qosmarking_mapping rfc4594_mappings[] = {
	QOSMARKING_MAPPINGS(MAP_TO_RFC4594)
};

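/*
 * For example, the PKT_SC_VO row above expands via MAP_TO_FASTLANE to
 * {"PKT_SC_VO", PKT_SC_VO, _DSCP_EF}; MAP_TO_RFC4594 selects the third
 * column instead, so both tables share one source of truth.
 */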
static size_t
frame_process(packet_t ph, void *data, size_t data_max, bool verify_qos)
{
	buflet_t buflet;
	void *buf;
	struct ip *ip;
	size_t buf_len, ret;

	buflet = os_packet_get_next_buflet(ph, NULL);
	assert(buflet != NULL);
	buf = os_buflet_get_object_address(buflet) +
	    os_buflet_get_data_offset(buflet);
	buf_len = os_buflet_get_data_length(buflet);
	ip = buf;

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		ret = tcp_frame_process(ph, data, data_max);
		break;
	case IPPROTO_UDP:
		ret = udp_frame_process(ph, data, data_max);
		break;
	default:
		ret = ip_frame_process(ph, data, data_max);
		break;
	}

	if (verify_qos) {
		struct qosmarking_mapping *table = NULL;
		int i;
		my_payload_t payload = data;
		char *svc_str = payload->data;
		size_t svc_str_len = data_max;
		packet_svc_class_t svc = os_packet_get_service_class(ph);
		int dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
#define EXPECT(var, val) \
	if (var != val) { \
		T_LOG("expected "#var" %d got %d\n", \
		    val, var); \
		sktu_dump_buffer(stderr, "packet dump", buf, buf_len); \
		fflush(stderr); \
		assert(0); \
	}

		if (strncmp(svc_str, XFER_QOSMARKING_FASTLANE_PREFIX,
		    strlen(XFER_QOSMARKING_FASTLANE_PREFIX)) == 0) {
			table = fastlane_mappings;
			svc_str += strlen(XFER_QOSMARKING_FASTLANE_PREFIX);
			svc_str_len -= strlen(XFER_QOSMARKING_FASTLANE_PREFIX);
		} else if (strncmp(svc_str, XFER_QOSMARKING_RFC4594_PREFIX,
		    strlen(XFER_QOSMARKING_RFC4594_PREFIX)) == 0) {
			table = rfc4594_mappings;
			svc_str += strlen(XFER_QOSMARKING_RFC4594_PREFIX);
			svc_str_len -= strlen(XFER_QOSMARKING_RFC4594_PREFIX);
		} else if (strncmp(svc_str, XFER_RECV_END_PAYLOAD,
		    strlen(XFER_RECV_END_PAYLOAD)) == 0) {
			return ret;
		} else {
			T_LOG("unknown qosmarking mode %s\n", svc_str);
			assert(0);
		}

		for (i = 0; i < QOSMARKING_SVC_MAX; i++) {
			if (strncmp(svc_str, table[i].svc_str, svc_str_len) == 0) {
				EXPECT(svc, table[i].svc);
				EXPECT(dscp, table[i].dscp);
				T_LOG("verified %s\n", svc_str);
				break;
			}
		}

		if (i == QOSMARKING_SVC_MAX) {
			T_LOG("unknown svc class %s\n", svc_str);
		}
	}

	return ret;
}

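/*
 * Fill and transmit TX slots until either the ring is exhausted or
 * payload->packet_number reaches limit.  In user packet pool mode a
 * packet is allocated from the channel and attached to each slot;
 * otherwise the slot's own packet is populated in place.  Each packet
 * carries a packet ID whose sequence number and timestamp mirror the
 * payload packet number.
 */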
static void
channel_port_send(channel_port_t port, uuid_t flow_id,
    int protocol,
    uint16_t src_port,
    struct in_addr dst_ip, uint16_t dst_port,
    my_payload_t payload, int payload_length,
    uint32_t limit, bool must_complete_batch,
    bool connect, packet_svc_class_t svc_class,
    bool csum_offload,
    void (^packet_prehook)(packet_t p))
{
	int error;
	channel_slot_t last_slot = NULL;
	packet_id_t pktid = {OS_PACKET_PKTID_VERSION_CURRENT,
		             XFER_PKTID_PAYLOAD_TYPE, 0, 0, XFER_PKTID_STREAM_ID, 0};

	assert(payload->packet_number < limit);
	while (1) {
		int frame_length;
		slot_prop_t prop;
		channel_slot_t slot;
		packet_t pkt = 0;
		void *buf;
		size_t buf_len;
		buflet_t buflet;

		/* grab a slot and populate it */
		slot = os_channel_get_next_slot(port->tx_ring, last_slot,
		    &prop);
		if (slot == NULL) {
			if (must_complete_batch &&
			    payload->packet_number < limit) {
				/* couldn't complete batch */
				T_LOG(
					"TX didn't complete batch (%u < %u)\n",
					payload->packet_number, limit);
				assert(0);
			}
			break;
		}

		if (port->user_packet_pool) {
			assert(prop.sp_buf_ptr == 0);
			assert(prop.sp_len == 0);
			error = os_channel_packet_alloc(port->chan, &pkt);
			SKTC_ASSERT_ERR(error == 0);
		} else {
			assert(prop.sp_buf_ptr != 0);
			assert(prop.sp_len != 0);
			pkt = os_channel_slot_get_packet(port->tx_ring, slot);
		}
		assert(pkt != 0);
		buflet = os_packet_get_next_buflet(pkt, NULL);
		assert(buflet != NULL);
		buf = os_buflet_get_object_address(buflet) +
		    os_buflet_get_data_offset(buflet);
		assert(buf != NULL);
		buf_len = os_buflet_get_data_limit(buflet);
		assert(buf_len != 0);
		if (!port->user_packet_pool) {
			assert(buf == (void *)prop.sp_buf_ptr);
			assert(buf_len == prop.sp_len);
		}

		frame_length = frame_populate(port, pkt, protocol,
		    port->ip_addr, src_port, dst_ip, dst_port, (void *)payload,
		    payload_length, flow_id, connect, svc_class, csum_offload,
		    0, payload_length, 0, FALSE);

		pktid.pktid_sequence_number = payload->packet_number;
		pktid.pktid_timestamp = pktid.pktid_sequence_number;
		assert(os_packet_set_packetid(pkt, &pktid) == 0);

		if (packet_prehook != NULL) {
			packet_prehook(pkt);
		}

#if SKT_XFER_DEBUG
		T_LOG("\nPort %d transmitting %d bytes:\n",
		    port->port, frame_length);
		ip_frame_dump(buf, frame_length);
#endif
		assert(frame_length != 0);
		if (port->user_packet_pool) {
			error = os_channel_slot_attach_packet(port->tx_ring,
			    slot, pkt);
			SKTC_ASSERT_ERR(error == 0);
		} else {
			prop.sp_len = frame_length;
			os_channel_set_slot_properties(port->tx_ring, slot,
			    &prop);
		}
		last_slot = slot;
		payload->packet_number++;
		if (payload->packet_number >= limit) {
			break;
		}
	}
	if (last_slot != NULL) {
		error = os_channel_advance_slot(port->tx_ring, last_slot);
		SKTC_ASSERT_ERR(error == 0);
		error = os_channel_sync(port->chan, CHANNEL_SYNC_TX);
		SKTC_ASSERT_ERR(error == 0);
	}
}

static void
channel_port_send_fragments(channel_port_t port, uuid_t flow_id,
    int protocol, uint16_t src_port,
    struct in_addr dst_ip, uint16_t dst_port,
    my_payload_t payload, int payload_length,
    uint16_t fragment_count,
    packet_svc_class_t svc_class,
    bool csum_offload, bool error_ids)
{
	int error = 0;
	channel_slot_t last_slot = NULL;

	uint16_t fragment_id = ip_id++;

	for (int fragment_i = 0; fragment_i < fragment_count; fragment_i++) {
		int frame_length = 0;
		slot_prop_t prop;
		channel_slot_t slot = NULL;
		packet_t pkt = 0;
		void *buf = NULL;
		size_t buf_len = 0;
		buflet_t buflet = NULL;

		/* grab a slot and populate it */
		slot = os_channel_get_next_slot(port->tx_ring, last_slot,
		    &prop);
		if (slot == NULL) {
			if (fragment_i < fragment_count) {
				/* couldn't complete fragment */
				T_LOG(
					"TX didn't complete fragment (%u < %u)\n",
					fragment_i, fragment_count);
				assert(0);
			}
			break;
		}

		if (port->user_packet_pool) {
			assert(prop.sp_buf_ptr == 0);
			assert(prop.sp_len == 0);
			error = os_channel_packet_alloc(port->chan, &pkt);
			SKTC_ASSERT_ERR(error == 0);
		} else {
			assert(prop.sp_buf_ptr != 0);
			assert(prop.sp_len != 0);
			pkt = os_channel_slot_get_packet(port->tx_ring, slot);
		}
		assert(pkt != 0);
		buflet = os_packet_get_next_buflet(pkt, NULL);
		assert(buflet != NULL);
		buf = os_buflet_get_object_address(buflet) +
		    os_buflet_get_data_offset(buflet);
		assert(buf != NULL);
		buf_len = os_buflet_get_data_limit(buflet);
		assert(buf_len != 0);
		if (!port->user_packet_pool) {
			assert(buf == (void *)prop.sp_buf_ptr);
			assert(buf_len == prop.sp_len);
		}

		if (fragment_i == 0) {
			frame_length = frame_populate(port, pkt, protocol,
			    port->ip_addr, src_port, dst_ip, dst_port,
			    (void *)payload, payload_length, flow_id, FALSE,
			    svc_class, csum_offload, fragment_id,
			    fragment_count * payload_length, 0, FALSE);
		} else {
			frame_length = frame_populate(port, pkt, protocol,
			    port->ip_addr, src_port, dst_ip, dst_port,
			    (void *)payload, payload_length, flow_id, FALSE,
			    svc_class, csum_offload,
			    fragment_id, fragment_count * payload_length,
			    fragment_i * payload_length + sizeof(struct udphdr),
			    fragment_i == (fragment_count - 1));
		}

#if SKT_XFER_DEBUG
		T_LOG("\nPort %d transmitting %d bytes:\n",
		    port->port, frame_length);
		ip_frame_dump(buf, frame_length);
#endif
		assert(frame_length != 0);
		if (port->user_packet_pool) {
			error = os_channel_slot_attach_packet(port->tx_ring,
			    slot, pkt);
			SKTC_ASSERT_ERR(error == 0);
		} else {
			prop.sp_len = frame_length;
			os_channel_set_slot_properties(port->tx_ring, slot,
			    &prop);
		}
		last_slot = slot;

		if (error_ids) {
			fragment_id = ip_id++;
		}
	}
	if (last_slot != NULL) {
		error = os_channel_advance_slot(port->tx_ring, last_slot);
		SKTC_ASSERT_ERR(error == 0);
		error = os_channel_sync(port->chan, CHANNEL_SYNC_TX);
		SKTC_ASSERT_ERR(error == 0);
	}
}

static int
channel_port_receive_payload(channel_port_t port, my_payload_t payload,
    bool verify_qos)
{
	int error;
	slot_prop_t prop;
	channel_slot_t slot;
	packet_t pkt;
	void *buf;
	size_t frame_length;
	buflet_t buflet;

	slot = os_channel_get_next_slot(port->rx_ring, NULL, &prop);
	if (slot == NULL) {
		return ENOENT;
	}
	assert(prop.sp_buf_ptr != 0);

	pkt = os_channel_slot_get_packet(port->rx_ring, slot);
	assert(pkt != 0);
	if (port->user_packet_pool) {
		error = os_channel_slot_detach_packet(port->rx_ring,
		    slot, pkt);
		SKTC_ASSERT_ERR(error == 0);
	}

	buflet = os_packet_get_next_buflet(pkt, NULL);
	assert(buflet != NULL);
	buf = os_buflet_get_object_address(buflet) +
	    os_buflet_get_data_offset(buflet);
	frame_length = os_packet_get_data_length(pkt);

	(void) frame_process(pkt, payload, frame_length, verify_qos);

#if SKT_XFER_DEBUG
	T_LOG("\nPort %d received %lu bytes:\n",
	    port->port, frame_length);

	ip_frame_dump(buf, frame_length);
#endif
	if (port->user_packet_pool) {
		error = os_channel_packet_free(port->chan, pkt);
		SKTC_ASSERT_ERR(error == 0);
	}

	error = os_channel_advance_slot(port->rx_ring, slot);
	SKTC_ASSERT_ERR(error == 0);
	error = os_channel_sync(port->chan, CHANNEL_SYNC_RX);
	SKTC_ASSERT_ERR(error == 0);

	return 0;
}

static void
channel_port_receive(int child, channel_port_t port, uint16_t our_port,
    struct in_addr peer_ip,
    uint32_t limit,
    uint32_t * receive_count,
    uint32_t * receive_index,
    bool errors_ok,
    uint32_t * pkts_dropped)
{
	int error;
	channel_slot_t last_slot = NULL;

	assert(*receive_index < limit);

	*pkts_dropped = 0;

	while (1) {
		int frame_length;
		ip_udp_header_t * ip_udp;
		my_payload payload;
		slot_prop_t prop;
		channel_slot_t slot;
		packet_t pkt;
		char *buf;
		uint16_t pkt_len;
		uint32_t bdoff;
		buflet_t buflet;
		uint8_t aggr_type;
		uint32_t buflet_cnt;

		slot = os_channel_get_next_slot(port->rx_ring, last_slot,
		    &prop);
		if (slot == NULL) {
			break;
		}
		assert(prop.sp_buf_ptr != 0);

		pkt = os_channel_slot_get_packet(port->rx_ring, slot);
		assert(pkt != 0);
		if (port->user_packet_pool) {
			error = os_channel_slot_detach_packet(port->rx_ring,
			    slot, pkt);
			SKTC_ASSERT_ERR(error == 0);
		}
		buflet = os_packet_get_next_buflet(pkt, NULL);
		assert(buflet != NULL);
		bdoff = os_buflet_get_data_offset(buflet);
		buf = os_buflet_get_object_address(buflet) + bdoff;
		pkt_len = os_packet_get_data_length(pkt);
		assert(buf == (void *)prop.sp_buf_ptr);
		assert(pkt_len == prop.sp_len);

		frame_length = sizeof(*ip_udp) + sizeof(my_payload);
		assert(os_packet_get_link_header_length(pkt) == 0);

		buflet_cnt = os_packet_get_buflet_count(pkt);
		aggr_type = os_packet_get_aggregation_type(pkt);
		assert((aggr_type == PKT_AGGR_NONE) || (buflet_cnt > 1));

		(void) frame_process(pkt, &payload, pkt_len, FALSE);

#if SKT_XFER_DEBUG
		T_LOG("\nPort %d received %d bytes:\n",
		    port->port, frame_length);

		ip_frame_dump(buf, frame_length);
#endif
		last_slot = slot;
		if (*receive_index != payload.packet_number) {
			if (!errors_ok) {
				assert(payload.packet_number > *receive_index);
			}
			uint32_t dropped;

			dropped = payload.packet_number - *receive_index;
			*pkts_dropped += dropped;
#if SKT_XFER_DEBUG
			T_LOG(
				"child %d dropped %u (received #%u, expected #%u)\n",
				child, dropped, payload.packet_number,
				*receive_index);
#endif
			*receive_index = payload.packet_number;
		}

		if (port->user_packet_pool) {
			error = os_channel_packet_free(port->chan, pkt);
			SKTC_ASSERT_ERR(error == 0);
		}
		(*receive_count)++;
		(*receive_index)++;
		if (*receive_index == limit) {
			break;
		}
	}
	if (last_slot != NULL) {
		error = os_channel_advance_slot(port->rx_ring, last_slot);
		SKTC_ASSERT_ERR(error == 0);
		error = os_channel_sync(port->chan, CHANNEL_SYNC_RX);
		SKTC_ASSERT_ERR(error == 0);
	}
}

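/*
 * Keep reading from the RX ring (via kevent on the channel fd) until
 * the sender's XFER_RECV_END_PAYLOAD marker arrives, verifying the
 * number of packets received against should_receive_count (a negative
 * count disables that check).
 */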
1497 static void
channel_port_receive_all(channel_port_t port,uuid_t flow_id,uint16_t src_port,struct in_addr dst_ip,uint16_t dst_port,int16_t should_receive_count,bool verify_qos)1498 channel_port_receive_all(channel_port_t port, uuid_t flow_id,
1499 uint16_t src_port, struct in_addr dst_ip, uint16_t dst_port,
1500 int16_t should_receive_count, bool verify_qos)
1501 {
1502 int error;
1503 struct kevent evlist, kev;
1504 int kq;
1505 uint16_t received_count = 0;
1506
1507 kq = kqueue();
1508 assert(kq != -1);
1509
1510 EV_SET(&kev, port->fd, EVFILT_READ,
1511 EV_ADD | EV_ENABLE, 0, 0, NULL);
1512 error = kevent(kq, &kev, 1, NULL, 0, NULL);
1513 SKTC_ASSERT_ERR(error == 0);
1514 struct timespec timeout;
1515 timeout.tv_sec = 10;
1516 timeout.tv_nsec = 0;
1517
1518 for (;;) {
1519 /* wait for RX to become available */
1520 error = kevent(kq, NULL, 0, &evlist, 1, &timeout);
1521 if (error <= 0) {
1522 if (errno == EAGAIN) {
1523 continue;
1524 }
1525 SKTC_ASSERT_ERR(error == 0);
1526 }
1527 if (error == 0) {
1528 /* time out */
1529 T_LOG(
1530 "Error, timeout for final right packet\n");
1531 assert(0);
1532 }
1533 if (evlist.flags & EV_ERROR) {
1534 int err = evlist.data;
1535
1536 if (err == EAGAIN) {
1537 break;
1538 }
1539 SKTC_ASSERT_ERR(err == 0);
1540 }
1541
1542 if (evlist.filter == EVFILT_READ) {
1543 my_payload payload;
1544 channel_port_receive_payload(port, &payload, verify_qos);
1545 /* packet signaling end of test */
1546 if (strcmp(payload.data, XFER_RECV_END_PAYLOAD) == 0) {
1547 if (should_receive_count >= 0 &&
1548 received_count != should_receive_count) {
1549 T_LOG(
1550 "Error, only received %d/%d\n",
1551 received_count,
1552 should_receive_count);
1553 assert(0);
1554 }
1555 T_LOG("received EOF packet\n");
1556 break;
1557 }
1558 received_count++;
1559 T_LOG("Received [%d/%d] %s\n",
1560 received_count, should_receive_count, payload.data);
1561 if (should_receive_count >= 0 &&
1562 received_count > should_receive_count) {
1563 T_LOG("Error, rx wrong packet\n");
1564 assert(0);
1565 }
1566 } else {
1567 T_LOG("%lu event %d?\n", evlist.ident,
1568 evlist.filter);
1569 assert(0);
1570 break;
1571 }
1572 }
1573
1574 T_LOG("child exit\n");
1575 fflush(stderr);
1576
1577 close(kq);
1578 }
1579
1580 static void
send_and_receive(channel_port_t port,uuid_t flow_id,uint16_t src_port,struct in_addr dst_ip,uint16_t dst_port,uint32_t how_many,uint32_t batch_size,int child,bool wrong_flow_id,bool errors_ok,uint32_t event_flags,bool ifadv_enabled)1581 send_and_receive(channel_port_t port, uuid_t flow_id, uint16_t src_port,
1582 struct in_addr dst_ip, uint16_t dst_port, uint32_t how_many,
1583 uint32_t batch_size, int child, bool wrong_flow_id, bool errors_ok,
1584 uint32_t event_flags, bool ifadv_enabled)
1585 {
1586 int n_events, error;
1587 #define N_EVENTS_MAX 3
1588 struct kevent evlist[N_EVENTS_MAX];
1589 struct kevent kev[N_EVENTS_MAX];
1590 int kq;
1591 my_payload payload;
1592 double percent;
1593 uint32_t receive_packet_count;
1594 uint32_t receive_packet_index;
1595 bool rx_complete;
1596 struct timespec timeout;
1597 bool tx_complete;
1598 uint32_t pkts_dropped;
1599 uint32_t n_ifadv_events = 0, n_chan_events = 0;
1600
1601 T_LOG("Sending to %s:%d\n", inet_ntoa(dst_ip), dst_port);
1602 bzero(&payload, sizeof(payload));
1603 kq = kqueue();
1604 assert(kq != -1);
1605 rx_complete = tx_complete = FALSE;
1606 receive_packet_count = 0;
1607 receive_packet_index = 0;
1608 EV_SET(kev + 0, port->fd, EVFILT_WRITE,
1609 EV_ADD | EV_ENABLE, 0, 0, NULL);
1610 EV_SET(kev + 1, port->fd, EVFILT_READ,
1611 EV_ADD | EV_ENABLE, 0, 0, NULL);
1612 n_events = 2;
1613 if ((event_flags & SKT_FSW_EVFLAG_IFADV) != 0) {
1614 assert(n_events < N_EVENTS_MAX);
1615 EV_SET(kev + n_events, port->fd, EVFILT_NW_CHANNEL,
1616 EV_ADD | EV_ENABLE, NOTE_IF_ADV_UPD, 0, NULL);
1617 n_events++;
1618 }
1619 if ((event_flags & SKT_FSW_EVFLAG_CHANNEL) != 0) {
1620 assert(n_events < N_EVENTS_MAX);
1621 EV_SET(kev + n_events, port->fd, EVFILT_NW_CHANNEL,
1622 EV_ADD | EV_ENABLE, NOTE_CHANNEL_EVENT, 0, NULL);
1623 n_events++;
1624 }
1625 error = kevent(kq, kev, n_events, NULL, 0, NULL);
1626 SKTC_ASSERT_ERR(error == 0);
1627 timeout.tv_sec = XFER_TXRX_TIMEOUT_SECS;
1628 timeout.tv_nsec = XFER_TXRX_TIMEOUT_NSECS;
1629 while (!rx_complete || !tx_complete) {
1630 /* wait for TX/RX to become available */
1631 error = kevent(kq, NULL, 0, evlist, N_EVENTS_MAX, &timeout);
1632 if (error <= 0) {
1633 if (errno == EAGAIN) {
1634 continue;
1635 }
1636 SKTC_ASSERT_ERR(error == 0);
1637 }
1638 if (error == 0) {
1639 /* missed seeing last few packets */
1640 if (!errors_ok) {
1641 T_LOG("child %d: timed out, TX %s "
1642 "RX %s\n", child,
1643 tx_complete ? "complete" :"incomplete",
1644 rx_complete ? "complete" :"incomplete");
1645 }
1646 break;
1647 }
1648 for (int i = 0; i < error; i++) {
1649 if (evlist[i].flags & EV_ERROR) {
1650 int err = evlist[i].data;
1651
1652 if (err == EAGAIN) {
1653 break;
1654 }
1655 SKTC_ASSERT_ERR(err == 0);
1656 }
1657
1658 switch (evlist[i].filter) {
1659 case EVFILT_NW_CHANNEL: {
1660 if ((evlist[i].fflags & NOTE_IF_ADV_UPD) != 0) {
1661 skt_process_if_adv(port->port, port->chan);
1662 n_ifadv_events++;
1663 }
1664 if ((evlist[i].fflags & NOTE_CHANNEL_EVENT) != 0) {
1665 skt_process_channel_event(port->chan,
1666 XFER_PKTID_PAYLOAD_TYPE, XFER_PKTID_STREAM_ID,
1667 ^(const os_channel_event_packet_transmit_status_t *pkt_ev) {
1668 assert(pkt_ev->packet_status ==
1669 CHANNEL_EVENT_PKT_TRANSMIT_STATUS_ERR_RETRY_FAILED);
1670 }, NULL, NULL);
1671 n_chan_events++;
1672 }
1673 break;
1674 }
1675 case EVFILT_WRITE: {
1676 uint32_t next_batch;
1677
1678 next_batch = payload.packet_number
1679 + batch_size;
1680 if (next_batch > how_many) {
1681 next_batch = how_many;
1682 }
1683 channel_port_send(port, flow_id,
1684 IPPROTO_UDP,
1685 src_port,
1686 dst_ip, dst_port,
1687 &payload, sizeof(payload),
1688 next_batch, FALSE, FALSE,
1689 PKT_SC_BE, TRUE, NULL);
1690 if (payload.packet_number >= how_many) {
1691 assert(payload.packet_number
1692 == how_many);
1693 T_LOG(
1694 "TX child %d: completed %u\n",
1695 child, how_many);
1696 tx_complete = TRUE;
1697 #if SKT_XFER_DEBUG
1698 T_LOG(
1699 "child %d: disable TX\n",
1700 child);
1701 #endif
1702 EV_SET(kev,
1703 port->fd, EVFILT_WRITE,
1704 EV_DELETE, 0, 0, NULL);
1705 error = kevent(kq, kev, 1,
1706 NULL, 0, NULL);
1707 SKTC_ASSERT_ERR(error == 0);
1708 }
1709 break;
1710 }
1711 case EVFILT_READ: {
1712 channel_port_receive(child, port, src_port, dst_ip,
1713 how_many,
1714 &receive_packet_count,
1715 &receive_packet_index,
1716 errors_ok, &pkts_dropped);
1717 if (receive_packet_index >= how_many) {
1718 assert(receive_packet_index
1719 == how_many);
1720 #if SKT_XFER_DEBUG
1721 T_LOG(
1722 "child %d: disable RX\n",
1723 child);
1724 #endif
1725 EV_SET(kev, port->fd, EVFILT_READ,
1726 EV_DELETE, 0, 0, NULL);
1727 error = kevent(kq, kev, 1,
1728 NULL, 0, NULL);
1729 SKTC_ASSERT_ERR(error == 0);
1730 rx_complete = TRUE;
1731 }
1732 break;
1733 }
1734 default:
1735 T_LOG("%lu event %d?\n",
1736 evlist[i].ident,
1737 evlist[i].filter);
1738 assert(0);
1739 break;
1740 }
1741 }
1742 }
1743 percent = 1.0 * receive_packet_count / how_many * 100.0;
1744 T_LOG("RX child %d: received %u (of %u) %1.02f%%\n",
1745 child, receive_packet_count, how_many, percent);
1746 T_LOG("child %d: received %u ifadv events\n",
1747 child, n_ifadv_events);
1748
1749 if (!errors_ok) {
1750 if (wrong_flow_id) {
1751 assert(receive_packet_count == 0);
1752 } else {
1753 assert(receive_packet_count > 0);
1754 }
1755 }
1756 if ((event_flags & SKT_FSW_EVFLAG_IFADV) != 0) {
1757 if (ifadv_enabled) {
1758 assert(n_ifadv_events != 0);
1759 } else {
1760 assert(n_ifadv_events == 0);
1761 }
1762 }
1763 if ((event_flags & SKT_FSW_EVFLAG_CHANNEL) != 0) {
1764 assert(n_chan_events != 0);
1765 }
1766 close(kq);
1767 }
1768
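/*
 * ping_pong() bounces batches of UDP packets between the two
 * children: child 0 starts out sending, child 1 starts out
 * receiving, and each side flips between TX and RX as batches
 * complete. With test_wmm the service class rotates per batch;
 * with test_aqm child 0 stalls and resumes dequeue on feth0 to
 * force AQM drops and a channel flow advisory event.
 */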
1769 static void
1770 ping_pong(channel_port_t port, uuid_t flow_id, uint16_t src_port,
1771 struct in_addr dst_ip, uint16_t dst_port, uint32_t how_many,
1772 uint32_t batch_size, int child, bool wrong_flow_id,
1773 flowadv_idx_t flowadv_idx, bool test_aqm, bool test_wmm,
1774 uint16_t demux_offset)
1775 {
1776 int error;
1777 #define N_EVENTS 2
1778 struct kevent evlist[N_EVENTS];
1779 bool expect_flowadv = FALSE;
1780 bool expect_stall = FALSE;
1781 struct timespec fadv_timeout;
1782 struct kevent kev[N_EVENTS];
1783 int kq;
1784 my_payload payload;
1785 double percent;
1786 uint32_t next_receive_count;
1787 uint32_t receive_packet_count;
1788 uint32_t receive_packet_index;
1789 struct timespec rcv_timeout;
1790 bool rx_complete;
1791 bool sending;
1792 struct timespec snd_timeout;
1793 int snd_batch_cnt = 0;
1794 struct timespec *timeout;
1795 bool tx_complete;
1796 packet_svc_class_t svc_class = PKT_SC_BE;
1797 uint32_t pkts_dropped;
1798
1799 if (test_aqm) {
1800 assert(how_many / batch_size == 4);
1801 }
1802 T_LOG("Sending to %s:%d\n", inet_ntoa(dst_ip), dst_port);
1803 bzero(&payload, sizeof(payload));
1804 kq = kqueue();
1805 assert(kq != -1);
1806 rx_complete = tx_complete = FALSE;
1807 receive_packet_count = 0;
1808 receive_packet_index = 0;
1809 next_receive_count = batch_size;
1810
1811 snd_timeout.tv_sec = XFER_PING_TIMEOUT_SECS;
1812 snd_timeout.tv_nsec = XFER_PING_TIMEOUT_NSECS;
1813 fadv_timeout.tv_sec = XFER_PING_FADV_TIMEOUT_SECS;
1814 fadv_timeout.tv_nsec = XFER_PING_FADV_TIMEOUT_NSECS;
1815
1816 if (test_aqm && child == 1) {
1817 /*
1818 * child-1 will not receive packets on time when
1819 * child-0's send interface is throttled, hence it
1820 * requires a larger timeout.
1821 */
1822 rcv_timeout.tv_sec = XFER_PING_CHILD1_RX_TIMEOUT_SECS;
1823 rcv_timeout.tv_nsec = XFER_PING_CHILD1_RX_TIMEOUT_NSECS;
1824 } else {
1825 rcv_timeout.tv_sec = XFER_PING_TIMEOUT_SECS;
1826 rcv_timeout.tv_nsec = XFER_PING_TIMEOUT_NSECS;
1827 }
1828
1829 if (test_aqm) {
1830 /*
1831 * flow advisory filter always reports an initial event,
1832 * check for that.
1833 */
1834 EV_SET(kev + 0, port->fd, EVFILT_NW_CHANNEL, EV_ADD,
1835 NOTE_FLOW_ADV_UPDATE, 0, NULL);
1836 error = kevent(kq, kev, 1, NULL, 0, NULL);
1837 SKTC_ASSERT_ERR(error == 0);
1838 timeout = &fadv_timeout;
1839 error = kevent(kq, NULL, 0, evlist, N_EVENTS, timeout);
1840 SKTC_ASSERT_ERR(error == 1);
1841 }
1842
1843 if (demux_offset <= MAX_DEMUX_OFFSET) {
1844 payload.data[demux_offset] = (char)DEMUX_PAYLOAD_VALUE;
1845 payload.data[demux_offset + 1] = (char)(DEMUX_PAYLOAD_VALUE >> 8);
1846 }
1847
1848 if (child == 0) {
1849 sending = TRUE;
1850 EV_SET(kev, port->fd, EVFILT_WRITE,
1851 EV_ADD | EV_ENABLE, 0, 0, NULL);
1852 } else {
1853 sending = FALSE;
1854 EV_SET(kev, port->fd, EVFILT_READ,
1855 EV_ADD | EV_ENABLE, 0, 0, NULL);
1856 }
1857 error = kevent(kq, kev, 1, NULL, 0, NULL);
1858 SKTC_ASSERT_ERR(error == 0);
1859
1860 while (!rx_complete || !tx_complete) {
1861 if (expect_flowadv) {
1862 timeout = &fadv_timeout;
1863 } else if (sending) {
1864 timeout = &snd_timeout;
1865 } else {
1866 timeout = &rcv_timeout;
1867 }
1868
1869 /* wait for something to happen */
1870 error = kevent(kq, NULL, 0, evlist, N_EVENTS, timeout);
1871 if (error <= 0) {
1872 int err = errno;
1873
1874 if (err == EAGAIN) {
1875 continue;
1876 }
1877 SKTC_ASSERT_ERR(error == 0);
1878 }
1879 if (error == 0) {
1880 T_LOG(
1881 "child %d: timed out TX %s RX %s FA %s\n",
1882 child,
1883 tx_complete ? "complete" : "incomplete",
1884 rx_complete ? "complete" : "incomplete",
1885 expect_flowadv ? "incomplete" : "complete");
1886 /*
1887 * Test should fail if it times out while expecting a
1888 * channel flow advisory event.
1889 */
1890 assert(!expect_flowadv);
1891 break;
1892 }
1893 if (error != 1) {
1894 T_LOG("child %d: got %d events, expected 1\n",
1895 child, error);
1896 assert(0);
1897 } else if (evlist[0].flags & EV_ERROR) {
1898 int err = evlist[0].data;
1899
1900 if (err == EAGAIN) {
1901 continue;
1902 }
1903 SKTC_ASSERT_ERR(err == 0);
1904 }
1905
1906 /* check that the correct event fired */
1907 if (expect_flowadv) {
1908 int n_kev = 0;
1909 assert(child == 0);
1910 assert(evlist[0].filter == EVFILT_NW_CHANNEL);
1911 assert(evlist[0].fflags & NOTE_FLOW_ADV_UPDATE);
1912 error = os_channel_flow_admissible(port->tx_ring,
1913 flow_id, flowadv_idx);
1914 if (expect_stall) {
1915 /*
1916 * when flow control is enabled
1917 * os_channel_flow_admissible() should return
1918 * ENOBUFS.
1919 */
1920 SKTC_ASSERT_ERR(error == ENOBUFS);
1921 /*
1922 * Now, enable dequeuing on the interface.
1923 * This will allow the buffered 2nd batch of
1924 * packets to be sent out the interface as
1925 * well as trigger a flow advisory event
1926 * to resume send on the channel.
1927 */
1928 T_LOG("child %d, enable dequeue "
1929 "on feth0\n", child);
1930 error =
1931 sktc_ifnet_feth0_set_dequeue_stall(FALSE);
1932 SKTC_ASSERT_ERR(error == 0);
1933 expect_stall = FALSE;
1934 expect_flowadv = TRUE;
1935 #if SKT_XFER_DEBUG
1936 T_LOG("child %d: enable FA "
1937 "no stall\n", child);
1938 #endif
1939 } else {
1940 /* flow must be admissible on the channel */
1941 SKTC_ASSERT_ERR(error == 0);
1942 #if SKT_XFER_DEBUG
1943 T_LOG("child %d: Disable FA\n",
1944 child);
1945 #endif
1946 /*
1947 * Flow control tested so remove flow advisory
1948 * filter.
1949 */
1950 EV_SET(kev + 0, port->fd, EVFILT_NW_CHANNEL,
1951 EV_DELETE, 0, 0, NULL);
1952 expect_flowadv = FALSE;
1953 n_kev = 1;
1954
1955 /*
1956 * Now enable receiving of acks for the 2nd batch
1957 * of packets.
1958 */
1959 assert(!rx_complete);
1960 /* enable RX */
1961 EV_SET(kev + n_kev, port->fd, EVFILT_READ,
1962 EV_ADD, 0, 0, NULL);
1963 n_kev++;
1964 #if SKT_XFER_DEBUG
1965 T_LOG("child %d: enable RX\n", child);
1966 #endif
1967 /*
1968 * child 0 should now expect acks for the 2nd
1969 * batch of packets.
1970 */
1971 sending = FALSE;
1972 timeout = &rcv_timeout;
1973 }
1974 assert(n_kev <= N_EVENTS);
1975 if (n_kev > 0) {
1976 error = kevent(kq, kev, n_kev, NULL, 0, NULL);
1977 SKTC_ASSERT_ERR(error == 0);
1978 }
1979 continue;
1980 } else {
1981 /*
1982 * verify that flow advisory event is reported
1983 * only when expected.
1984 */
1985 assert(evlist[0].filter != EVFILT_NW_CHANNEL);
1986 }
1987
1988 if (sending) {
1989 uint32_t next_batch;
1990 int n_kev = 0;
1991 bool skip_receive = FALSE;
1992
1993 assert(evlist[0].filter == EVFILT_WRITE);
1994 if (test_wmm) {
1995 svc_class = packet_svc_class[(snd_batch_cnt %
1996 NUM_SVC_CLASS)];
1997 }
1998 snd_batch_cnt++;
1999 next_batch = payload.packet_number + batch_size;
2000 if (next_batch > how_many) {
2001 next_batch = how_many;
2002 }
2003
2004 if (test_aqm && child == 0 && snd_batch_cnt == 2) {
2005 /*
2006 * disable dequeue on feth0 before sending the
2007 * 2nd batch of packets.
2008 * These UDP packets will now get buffered at the
2009 * interface AQM.
2010 */
2011 T_LOG("child %d, disable dequeue on"
2012 " feth0\n", child);
2013 error =
2014 sktc_ifnet_feth0_set_dequeue_stall(TRUE);
2015 SKTC_ASSERT_ERR(error == 0);
2016 }
2017
2018 if (test_aqm && child == 0 && snd_batch_cnt == 3) {
2019 /*
2020 * wait for interface update interval to elapse
2021 * before sending the 3rd batch of packets.
2022 * These UDP packets will be dropped by AQM.
2023 */
2024 T_LOG("child %d, sleep for update"
2025 " interval (%d ms)\n", child,
2026 XFER_CLASSQ_UPDATE_INTERVAL_ELAPSE_DELAY);
2027 usleep(
2028 XFER_CLASSQ_UPDATE_INTERVAL_ELAPSE_DELAY *
2029 1000);
2030 }
2031
2032 /* Flow should be writable */
2033 if (!wrong_flow_id) {
2034 error =
2035 os_channel_flow_admissible(port->tx_ring,
2036 flow_id, flowadv_idx);
2037 SKTC_ASSERT_ERR(error == 0);
2038 }
2039
2040 channel_port_send(port, flow_id, IPPROTO_UDP,
2041 src_port, dst_ip, dst_port, &payload,
2042 sizeof(payload), next_batch, TRUE, FALSE,
2043 svc_class, TRUE, NULL);
2044 #if SKT_XFER_DEBUG
2045 T_LOG(
2046 "TX child %d: %s %u of %u\n", child,
2047 (child == 0) ? "ping" : "pong",
2048 next_batch, how_many);
2049 #endif
2050 if (payload.packet_number >= how_many) {
2051 assert(payload.packet_number
2052 == how_many);
2053 T_LOG(
2054 "TX child %d: completed %u\n",
2055 child,
2056 how_many);
2057 tx_complete = TRUE;
2058 }
2059
2060 if (test_aqm && child == 0 && snd_batch_cnt == 2) {
2061 /*
* The 2nd batch of packets is not going to reach
2062 * the receiver at child 1 until dequeuing is
2063 * re-enabled on feth0.
2064 * Skip receiving and send the 3rd batch of
2065 * packets.
2066 */
2067 continue;
2068 }
2069
2070 if (test_aqm && child == 0 && snd_batch_cnt == 3) {
2071 /*
2072 * sending the 3rd batch of packets should have
2073 * triggered flow advisory event on the channel.
2074 * The flow should not be admissible now.
2075 */
2076 expect_flowadv = TRUE;
2077 expect_stall = TRUE;
2078 timeout = &fadv_timeout;
2079 #if SKT_XFER_DEBUG
2080 T_LOG("child %d: expect stall\n",
2081 child);
2082 #endif
2083 /*
2084 * packets will not reach receiver at child 1,
2085 * until dequeuing on feth0 is re-enabled,
2086 * so skip receiving.
2087 */
2088 skip_receive = TRUE;
2089 }
2090 #if SKT_XFER_DEBUG
2091 T_LOG("child %d disable TX\n", child);
2092 #endif
2093 EV_SET(kev + n_kev, port->fd, EVFILT_WRITE, EV_DELETE,
2094 0, 0, NULL);
2095 n_kev++;
2096
2097 if (!skip_receive && !rx_complete) {
2098 /* enable RX */
2099 assert(n_kev == 1);
2100 EV_SET(kev + n_kev, port->fd, EVFILT_READ,
2101 EV_ADD, 0, 0, NULL);
2102 n_kev++;
2103 #if SKT_XFER_DEBUG
2104 T_LOG("child %d: enable RX\n", child);
2105 #endif
2106 }
2107 assert(n_kev <= N_EVENTS);
2108 if (n_kev > 0) {
2109 error = kevent(kq, kev, n_kev, NULL, 0, NULL);
2110 SKTC_ASSERT_ERR(error == 0);
2111 }
2112 sending = FALSE;
2113 } else {
2114 assert(evlist[0].filter == EVFILT_READ);
2115 pkts_dropped = 0;
2116 channel_port_receive(child, port, src_port, dst_ip,
2117 how_many,
2118 &receive_packet_count,
2119 &receive_packet_index,
2120 false, &pkts_dropped);
2121
2122 if (pkts_dropped != 0) {
2123 /*
2124 * ping-pong test shouldn't have any packet
2125 * drop, unless intentional during AQM test.
2126 */
2127 assert(test_aqm);
2128 assert(pkts_dropped ==
2129 XFER_AQM_PING_BATCH_COUNT);
2130 }
2131 if (receive_packet_index >= how_many) {
2132 assert(receive_packet_index == how_many);
2133 rx_complete = TRUE;
2134 }
2135 if (rx_complete ||
2136 receive_packet_index >= next_receive_count) {
2137 int n_kev;
2138 #if SKT_XFER_DEBUG
2139 T_LOG(
2140 "child %d: disable RX\n", child);
2141 #endif
2142 EV_SET(kev, port->fd, EVFILT_READ, EV_DELETE,
2143 0, 0, NULL);
2144 n_kev = 1;
2145 next_receive_count = receive_packet_index +
2146 batch_size;
2147 if (next_receive_count >= how_many) {
2148 next_receive_count = how_many;
2149 }
2150 if (!tx_complete) {
2151 /* re-enable TX */
2152 EV_SET(kev + n_kev,
2153 port->fd, EVFILT_WRITE,
2154 EV_ADD, 0, 0, NULL);
2155 #if SKT_XFER_DEBUG
2156 T_LOG(
2157 "child %d: enable TX\n", child);
2158 #endif
2159 n_kev++;
2160 sending = TRUE;
2161 if (child == 1) {
2162 payload.packet_number +=
2163 pkts_dropped;
2164 }
2165 } else if (!rx_complete) {
2166 assert(tx_complete);
2167 /*
2168 * If TX has completed and there are still
2169 * packets expected to be received, leave
2170 * RX enabled (skip the EV_DELETE above).
2171 */
2172 #if SKT_XFER_DEBUG
2173 T_LOG(
2174 "child %d: enable RX\n", child);
2175 #endif
2176 n_kev = 0;
2177 }
2178 if (n_kev) {
2179 error = kevent(kq, kev, n_kev, NULL, 0, NULL);
2180 SKTC_ASSERT_ERR(error == 0);
2181 }
2182 }
2183 }
2184 }
2185 percent = 1.0 * receive_packet_count / how_many * 100.0;
2186 T_LOG("RX child %d: received %u (of %u) %1.02f%%\n",
2187 child, receive_packet_count, how_many, percent);
2188 /* wait to give the packet(s) a chance to make it to the other end */
2189 usleep(100 * 1000);
2190 if (test_aqm) {
2191 /*
2192 * while testing AQM functionality we should have dropped
2193 * one batch of packets out of the 4 batches
2194 */
2195 assert(receive_packet_count == ((how_many * 3) / 4));
2196 } else if (wrong_flow_id) {
2197 assert(receive_packet_count == 0);
2198 } else {
2199 assert(receive_packet_count == how_many);
2200 }
2201 #if SKT_XFER_DEBUG
2202 if (receive_packet_count < how_many) {
2203 T_LOG("Child %d waiting", child);
2204 fflush(stdout);
2205 for (int i = 0; i < 5; i++) {
2206 sleep(1);
2207 T_LOG(".");
2208 fflush(stdout);
2209 }
2210 T_LOG("\n");
2211 assert(0);
2212 }
2213 #endif
2214 close(kq);
2215 }
2216
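/*
 * send_tcp() is the TX-only half of the TCP flood tests: it keeps
 * EVFILT_WRITE armed on the channel and pushes batches of TCP
 * segments until `how_many` packets have been sent, never waiting
 * for RX. The `connect` flag is passed straight through to
 * channel_port_send().
 */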
2217 static void
2218 send_tcp(channel_port_t port, uuid_t flow_id, uint16_t src_port,
2219 struct in_addr dst_ip, uint16_t dst_port, uint32_t how_many,
2220 uint32_t batch_size, int child, bool connect)
2221 {
2222 int error;
2223 struct kevent ev;
2224 struct kevent kev;
2225 int kq;
2226 my_payload payload;
2227 struct timespec timeout;
2228 bool tx_complete;
2229
2230 T_LOG("Sending to %s:%d\n", inet_ntoa(dst_ip), dst_port);
2231 bzero(&payload, sizeof(payload));
2232 kq = kqueue();
2233 assert(kq != -1);
2234 tx_complete = FALSE;
2235
2236 EV_SET(&kev, port->fd, EVFILT_WRITE,
2237 EV_ADD | EV_ENABLE, 0, 0, NULL);
2238 error = kevent(kq, &kev, 1, NULL, 0, NULL);
2239 SKTC_ASSERT_ERR(error == 0);
2240 timeout.tv_sec = 1;
2241 timeout.tv_nsec = 0;
2242 while (!tx_complete) {
2243 /* wait for TX to become available */
2244 error = kevent(kq, NULL, 0, &ev, 1, &timeout);
2245 if (error <= 0) {
2246 if (errno == EAGAIN) {
2247 continue;
2248 }
2249 SKTC_ASSERT_ERR(error == 0);
2250 }
2251 if (error == 0) {
2252 /* missed seeing last few packets */
2253 T_LOG("child %d timed out, TX %s\n",
2254 child,
2255 tx_complete ? "complete" : "incomplete");
2256 break;
2257 }
2258 if (ev.flags & EV_ERROR) {
2259 int err = ev.data;
2260
2261 if (err == EAGAIN) {
2262 continue;
2263 }
2264 SKTC_ASSERT_ERR(err == 0);
2265 }
2266 switch (ev.filter) {
2267 case EVFILT_WRITE: {
2268 uint32_t next_batch;
2269
2270 next_batch = payload.packet_number
2271 + batch_size;
2272 if (next_batch > how_many) {
2273 next_batch = how_many;
2274 }
2275 channel_port_send(port, flow_id,
2276 IPPROTO_TCP,
2277 src_port,
2278 dst_ip, dst_port,
2279 &payload, sizeof(payload),
2280 next_batch, FALSE, connect,
2281 PKT_SC_BE, TRUE, NULL);
2282 if (payload.packet_number >= how_many) {
2283 assert(payload.packet_number
2284 == how_many);
2285 T_LOG(
2286 "TX child %d: completed %u\n",
2287 child, how_many);
2288 tx_complete = TRUE;
2289 #if SKT_XFER_DEBUG
2290 T_LOG(
2291 "child %d: disable TX\n",
2292 child);
2293 #endif
2294 EV_SET(&kev,
2295 port->fd, EVFILT_WRITE,
2296 EV_DELETE, 0, 0, NULL);
2297 error = kevent(kq, &kev, 1,
2298 NULL, 0, NULL);
2299 SKTC_ASSERT_ERR(error == 0);
2300 }
2301 break;
2302 }
2303 default:
2304 T_LOG("%lu event %d?\n",
2305 ev.ident,
2306 ev.filter);
2307 assert(0);
2308 break;
2309 }
2310 }
2311 close(kq);
2312 }
2313
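/*
 * set_error_inject_mask() reads (and, when `mask` is non-NULL,
 * replaces) the kern.skywalk.flowswitch.fsw_inject_error sysctl,
 * returning the previous mask so callers can restore it later.
 */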
2314 static uint64_t
2315 set_error_inject_mask(uint64_t *mask)
2316 {
2317 uint64_t old_mask = 0;
2318 size_t old_size = sizeof(old_mask);
2319 int error;
2320
2321 error =
2322 sysctlbyname("kern.skywalk.flowswitch.fsw_inject_error",
2323 &old_mask, &old_size, mask, mask ? sizeof(*mask) : 0);
2324
2325 if ((error != 0) && skywalk_in_driver) {
2326 T_LOG("sysctlbyname failed for fsw_inject_error "
2327 "error %d\n", error);
2328 } else {
2329 SKTC_ASSERT_ERR(error == 0);
2330 }
2331 return old_mask;
2332 }
2333
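/*
 * do_error_receive() drains RX while the peer injects flowswitch
 * errors. On each receive timeout it polls the injection mask and
 * exits once the parent has cleared it (mask == 0); otherwise it
 * resets its packet counters and keeps receiving.
 */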
2334 static void
2335 do_error_receive(int child, channel_port_t port, uuid_t flow_id, uint16_t src_port,
2336 struct in_addr dst_ip, uint16_t dst_port, uint32_t how_many)
2337 {
2338 int error;
2339 struct kevent evlist, kev;
2340 int kq;
2341 my_payload payload;
2342 uint32_t receive_packet_count;
2343 uint32_t receive_packet_index;
2344 struct timespec timeout;
2345 uint32_t pkts_dropped;
2346
2347 bzero(&payload, sizeof(payload));
2348 kq = kqueue();
2349 assert(kq != -1);
2350 receive_packet_count = 0;
2351 receive_packet_index = 0;
2352
2353 EV_SET(&kev, port->fd, EVFILT_READ,
2354 EV_ADD | EV_ENABLE, 0, 0, NULL);
2355 error = kevent(kq, &kev, 1, NULL, 0, NULL);
2356 SKTC_ASSERT_ERR(error == 0);
2357
2358 for (;;) {
2359 /* wait for RX to become available */
2360 timeout.tv_sec = 1;
2361 timeout.tv_nsec = 0;
2362 error = kevent(kq, NULL, 0, &evlist, 1, &timeout);
2363 if (error <= 0) {
2364 if (errno == EAGAIN) {
2365 continue;
2366 }
2367 SKTC_ASSERT_ERR(error == 0);
2368 }
2369 if (error == 0) {
2370 /*
2371 * Timed out. Check if the test is complete;
2372 * the mask will be zero when the parent is finished.
2373 */
2374 if (set_error_inject_mask(NULL) == 0) {
2375 break;
2376 }
2377
2378 /* Otherwise continue receiving */
2379 receive_packet_count = 0;
2380 receive_packet_index = 0;
2381 continue;
2382 }
2383 if (evlist.flags & EV_ERROR) {
2384 int err = evlist.data;
2385
2386 if (err == EAGAIN) {
2387 break;
2388 }
2389 SKTC_ASSERT_ERR(err == 0);
2390 }
2391
2392 if (evlist.filter == EVFILT_READ) {
2393 channel_port_receive(child, port, src_port, dst_ip,
2394 how_many,
2395 &receive_packet_count,
2396 &receive_packet_index,
2397 true, &pkts_dropped);
2398 } else {
2399 T_LOG("%lu event %d?\n",
2400 evlist.ident, evlist.filter);
2401 assert(0);
2402 break;
2403 }
2404 }
2405
2406 close(kq);
2407 }
2408
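/*
 * do_error_send() is the TX side of the error-injection test: it
 * sends `how_many` UDP packets in batches, tolerating whatever
 * drops the currently armed injection code causes downstream.
 */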
2409 static void
2410 do_error_send(channel_port_t port, uuid_t flow_id, uint16_t src_port,
2411 struct in_addr dst_ip, uint16_t dst_port, uint32_t how_many,
2412 uint32_t batch_size)
2413 {
2414 int error;
2415 struct kevent evlist;
2416 struct kevent kev;
2417 int kq;
2418 my_payload payload;
2419 uint32_t receive_packet_count;
2420 uint32_t receive_packet_index;
2421 bool tx_complete;
2422 struct timespec timeout;
2423
2424 bzero(&payload, sizeof(payload));
2425 kq = kqueue();
2426 assert(kq != -1);
2427 receive_packet_count = 0;
2428 receive_packet_index = 0;
2429 EV_SET(&kev, port->fd, EVFILT_WRITE,
2430 EV_ADD | EV_ENABLE, 0, 0, NULL);
2431 error = kevent(kq, &kev, 1, NULL, 0, NULL);
2432 SKTC_ASSERT_ERR(error == 0);
2433 tx_complete = false;
2434
2435 while (!tx_complete) {
2436 /* wait for TX to become available */
2437 timeout.tv_sec = 5;
2438 timeout.tv_nsec = 0;
2439 error = kevent(kq, NULL, 0, &evlist, 1, &timeout);
2440 if (error < 0) {
2441 if (errno == EAGAIN) {
2442 continue;
2443 }
2444 SKTC_ASSERT_ERR(error == 0);
2445 }
2446 if (error == 0) {
2447 /* Timeout. Not supposed to happen. */
2448 break;
2449 }
2450
2451 if (evlist.flags & EV_ERROR) {
2452 int err = evlist.data;
2453
2454 if (err == EAGAIN) {
2455 break;
2456 }
2457 SKTC_ASSERT_ERR(err == 0);
2458 }
2459
2460 if (evlist.filter == EVFILT_WRITE) {
2461 uint32_t next_batch;
2462
2463 next_batch = payload.packet_number + batch_size;
2464 if (next_batch > how_many) {
2465 next_batch = how_many;
2466 }
2467 channel_port_send(port, flow_id, IPPROTO_UDP, src_port,
2468 dst_ip, dst_port, &payload, sizeof(payload),
2469 next_batch, FALSE, FALSE, PKT_SC_BE, TRUE, NULL);
2470 if (payload.packet_number >= how_many) {
2471 assert(payload.packet_number
2472 == how_many);
2473 tx_complete = true;
2474 #if SKT_XFER_DEBUG
2475 T_LOG("disable TX\n");
2476 #endif
2477 EV_SET(&kev,
2478 port->fd, EVFILT_WRITE,
2479 EV_DELETE, 0, 0, NULL);
2480 error = kevent(kq, &kev, 1,
2481 NULL, 0, NULL);
2482 SKTC_ASSERT_ERR(error == 0);
2483 }
2484 } else {
2485 T_LOG("%lu event %d?\n",
2486 evlist.ident, evlist.filter);
2487 assert(0);
2488 break;
2489 }
2490 }
2491 close(kq);
2492 }
2493
2494 #define MAX_LLINKS 256
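/*
 * get_qset_id_from_llinks() dumps the netif's logical-link and
 * queue-set layout, then pseudo-randomly (seeded from
 * mach_absolute_time()) picks one qset id for flow steering.
 */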
2495 static void
2496 get_qset_id_from_llinks(struct sktc_nexus_handles *handles, uint64_t *qset_id)
2497 {
2498 struct nx_llink_info_req *nlir;
2499 size_t len;
2500 uint64_t qset_select;
2501 int err, i, llink_idx = -1, qset_idx = -1;
2502
2503 len = sizeof(*nlir) + MAX_LLINKS * sizeof(struct nx_llink_info);
2504 nlir = malloc(len);
assert(nlir != NULL);
2505 nlir->nlir_version = NETIF_LLINK_INFO_VERSION;
2506 nlir->nlir_llink_cnt = MAX_LLINKS;
2507
2508 err = __os_nexus_get_llink_info(handles->controller,
2509 handles->netif_nx_uuid, nlir, len);
2510 if (err != 0) {
2511 T_LOG("__os_nexus_llink_info failed: %d\n", err);
2512 free(nlir);
2513 return;
2514 }
2515 qset_select = mach_absolute_time();
2516 T_LOG("\nqset_select: 0x%llx\n", qset_select);
2517 T_LOG("llink_cnt: %d\n", nlir->nlir_llink_cnt);
2518 for (i = 0; i < nlir->nlir_llink_cnt; i++) {
2519 struct nx_llink_info *nli;
2520 int j;
2521
2522 nli = &nlir->nlir_llink[i];
2523 T_LOG("\tlink_id: 0x%llx\n", nli->nli_link_id);
2524 T_LOG("\tlink_id_internal: 0x%x\n", nli->nli_link_id_internal);
2525 T_LOG("\tstate: 0x%x\n", nli->nli_state);
2526 T_LOG("\tflags: 0x%x\n", nli->nli_flags);
2527 T_LOG("\tqset_cnt: %d\n", nli->nli_qset_cnt);
2528 for (j = 0; j < nli->nli_qset_cnt; j++) {
2529 struct nx_qset_info *nqi;
2530
2531 nqi = &nli->nli_qset[j];
2532 T_LOG("\t\tqset_id: %llx\n", nqi->nqi_id);
2533 T_LOG("\t\tflags: 0x%x\n", nqi->nqi_flags);
2534 T_LOG("\t\tnum_rx_queues: %d\n", nqi->nqi_num_rx_queues);
2535 T_LOG("\t\tnum_tx_queues: %d\n", nqi->nqi_num_tx_queues);
2536
2537 /* randomly pick a qset for steering */
2538 if (((qset_select) % nlir->nlir_llink_cnt) == i &&
2539 ((qset_select >> 16) % nli->nli_qset_cnt) == j) {
2540 llink_idx = i;
2541 qset_idx = j;
2542 *qset_id = nqi->nqi_id;
2543 }
2544 }
2545 }
2546 T_LOG("chosen llink_idx: %d\n", llink_idx);
2547 T_LOG("chosen qset_idx: %d\n", qset_idx);
2548 T_LOG("chosen qset_id: 0x%llx\n\n", *qset_id);
2549 free(nlir);
2550 }
2551
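/*
 * setup_flowswitch_and_flow() creates a flowswitch over `ifname`,
 * binds OUR_FLOWSWITCH_PORT to `the_pid`, and connects a flow for
 * the given 5-tuple; when multi_llink is set the flow is steered
 * to a qset picked by get_qset_id_from_llinks().
 */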
2552 static int
2553 setup_flowswitch_and_flow(struct sktc_nexus_handles * handles,
2554 const char * ifname, int protocol, uint16_t flags, struct in_addr our_ip,
2555 struct in_addr our_mask, uint16_t our_port, pid_t the_pid,
2556 struct in_addr peer_ip, uint16_t peer_port, uuid_t flow_id,
2557 flowadv_idx_t *flowadv_idx, int tx_ring_size, int rx_ring_size,
2558 int buf_size, int max_frags, bool multi_llink)
2559 {
2560 int error;
2561 uint64_t qset_id = 0;
2562
2563 bzero(handles, sizeof(*handles));
2564 strlcpy(handles->netif_ifname, ifname, sizeof(handles->netif_ifname));
2565 handles->netif_addr = our_ip;
2566 handles->netif_mask = our_mask;
2567 sktc_create_flowswitch_no_address(handles, tx_ring_size,
2568 rx_ring_size, buf_size, max_frags, 0);
2569 error = os_nexus_controller_bind_provider_instance(handles->controller,
2570 handles->fsw_nx_uuid, OUR_FLOWSWITCH_PORT, the_pid, NULL, NULL, 0,
2571 NEXUS_BIND_PID);
2572 if (error != 0) {
2573 return error;
2574 }
2575
2576 if (multi_llink) {
2577 get_qset_id_from_llinks(handles, &qset_id);
2578 assert(qset_id != 0);
2579 }
2580 if (uuid_is_null(flow_id)) {
2581 uuid_generate(flow_id);
2582 }
2583 error = connect_flow(handles->controller, handles->fsw_nx_uuid,
2584 OUR_FLOWSWITCH_PORT, flow_id, protocol, flags, handles->netif_addr,
2585 our_port, peer_ip, peer_port, flowadv_idx, qset_id);
2586 return error;
2587 }
2588
2589 static void
2590 setup_flowswitch(struct sktc_nexus_handles * handles,
2591 const char * ifname, pid_t the_pid,
2592 int tx_ring_size, int rx_ring_size,
2593 int buf_size, int max_frags)
2594 {
2595 bzero(handles, sizeof(*handles));
2596 strlcpy(handles->netif_ifname, ifname, sizeof(handles->netif_ifname));
2597 sktc_create_flowswitch_no_address(handles, tx_ring_size,
2598 rx_ring_size, buf_size, max_frags, 0);
2599 return;
2600 }
2601
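/*
 * fetch_if_flowswitch_and_setup_flow() is like
 * setup_flowswitch_and_flow(), except that it attaches to an
 * existing netif/flowswitch pair instead of creating one, and can
 * connect a child flow (with demux patterns) on
 * CHILD_FLOWSWITCH_PORT when demux_pattern_count > 0.
 */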
2602 static int
2603 fetch_if_flowswitch_and_setup_flow(struct sktc_nexus_handles * handles,
2604 const char * ifname, int protocol, uint16_t flags, struct in_addr our_ip,
2605 struct in_addr our_mask, uint16_t our_port, pid_t the_pid,
2606 struct in_addr peer_ip, uint16_t peer_port, uuid_t flow_id,
2607 flowadv_idx_t *flowadv_idx, int tx_ring_size, int rx_ring_size,
2608 int buf_size, int max_frags, bool multi_llink, uuid_t parent_flow_id,
2609 struct flow_demux_pattern *demux_patterns, uint8_t demux_pattern_count)
2610 {
2611 int error;
2612 uint64_t qset_id = 0;
2613 bool child_flow = (demux_pattern_count > 0);
2614
2615 bzero(handles, sizeof(*handles));
2616 strlcpy(handles->netif_ifname, ifname, sizeof(handles->netif_ifname));
2617 handles->netif_addr = our_ip;
2618 handles->netif_mask = our_mask;
2619
2620 if (handles->netif_ifname[0] == '\0') {
2621 T_LOG("%s: no interface name specified\n",
2622 __func__);
2623 return EINVAL;
2624 }
2625 if (strlen(handles->netif_ifname) >= IFNAMSIZ) {
2626 T_LOG("%s: invalid interface name specified %s\n",
2627 __func__, handles->netif_ifname);
2628 return EINVAL;
2629 }
2630
2631 handles->controller = os_nexus_controller_create();
2632 if (handles->controller == NULL) {
2633 SKT_LOG(
2634 "%s: os_nexus_controller_create failed, %s (%d)\n",
2635 __func__, strerror(errno), errno);
2636 return ENOMEM;
2637 }
2638
2639 if ((sktc_get_netif_nexus(handles->netif_ifname, handles->netif_nx_uuid) &&
2640 sktc_get_flowswitch_nexus(handles->netif_ifname, handles->fsw_nx_uuid))) {
2641 if (child_flow) {
2642 error = os_nexus_controller_bind_provider_instance(handles->controller,
2643 handles->fsw_nx_uuid, CHILD_FLOWSWITCH_PORT, the_pid, NULL, NULL, 0,
2644 NEXUS_BIND_PID);
2645 } else {
2646 error = os_nexus_controller_bind_provider_instance(handles->controller,
2647 handles->fsw_nx_uuid, OUR_FLOWSWITCH_PORT, the_pid, NULL, NULL, 0,
2648 NEXUS_BIND_PID);
2649 }
2650 if (error != 0) {
2651 SKT_LOG("PID %d: nexus controller bind failed: %s\n",
2652 getpid(), strerror(errno));
2653 return error;
2654 }
2655
2656 if (multi_llink) {
2657 get_qset_id_from_llinks(handles, &qset_id);
2658 assert(qset_id != 0);
2659 }
2660 if (uuid_is_null(flow_id)) {
2661 uuid_generate(flow_id);
2662 }
2663 if (child_flow) {
2664 error = connect_child_flow(handles->controller, handles->fsw_nx_uuid,
2665 CHILD_FLOWSWITCH_PORT, flow_id, protocol, flags, handles->netif_addr,
2666 our_port, peer_ip, peer_port, flowadv_idx, qset_id, parent_flow_id,
2667 demux_patterns, demux_pattern_count);
2668 } else {
2669 error = connect_flow(handles->controller, handles->fsw_nx_uuid,
2670 OUR_FLOWSWITCH_PORT, flow_id, protocol, flags, handles->netif_addr,
2671 our_port, peer_ip, peer_port, flowadv_idx, qset_id);
2672 }
2673 } else {
2674 T_LOG(
2675 "%s: failed to find existing netif/flowswitch instance\n", __func__);
2676 return ENOENT;
2677 }
2678
2679 return error;
2680 }
2681
2682 #define FAKE_ETHER_NAME "feth"
2683 #define FAKE_ETHER_NAME_LEN (sizeof(FAKE_ETHER_NAME) - 1)
2684
2685 static void
2686 set_feth_mac_addr(struct ether_addr *feth_macaddr, uint32_t unit)
2687 {
2688 /*
2689 * FETH MAC addresses are hardcoded in if_fake.c, but they aren't exposed.
2690 * We use the same hardcoded values here.
2691 */
2692 bcopy(FAKE_ETHER_NAME, feth_macaddr->octet, FAKE_ETHER_NAME_LEN);
2693 feth_macaddr->octet[ETHER_ADDR_LEN - 2] = (unit & 0xff00) >> 8;
2694 feth_macaddr->octet[ETHER_ADDR_LEN - 1] = unit & 0xff;
2695 }
2696
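/*
 * skt_xfer_udp_common() is the shared body of the UDP transfer
 * tests. Each child sets up a flowswitch and UDP flow (over the
 * existing RD0_NAME redirect interface when test_redirect is set
 * for child 0), seeds the ARP cache, synchronizes with its peer
 * over MPTEST_SEQ_FILENO, then runs either ping_pong() or
 * send_and_receive() with the requested event flags.
 */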
2697 static int
2698 skt_xfer_udp_common(int child, uint32_t how_many, uint32_t batch_size,
2699 bool do_ping_pong, bool wrong_flow_id, bool test_aqm,
2700 bool test_wmm, int tx_ring_size, int rx_ring_size, int buf_size,
2701 int max_frags, int event_test_id, bool low_latency, bool multi_llink,
2702 bool test_redirect)
2703 {
2704 char buf[1] = { 0 };
2705 int error;
2706 const char * ifname;
2707 uuid_t flow_id = {};
2708 struct in_addr our_ip;
2709 struct in_addr our_mask;
2710 uint16_t our_port;
2711 struct in_addr peer_ip;
2712 uint16_t peer_port;
2713 channel_port port;
2714 ssize_t ret;
2715 flowadv_idx_t flowadv_idx;
2716 uint32_t event_flags = 0;
2717 bool ifadv_enabled = false;
2718 bool chan_event_enabled = false;
2719 bool errors_ok = false;
2720 uint16_t nfr_flags = 0;
2721 struct ether_addr feth0_macaddr;
2722 struct ether_addr feth1_macaddr;
2723
2724 if (test_aqm || test_wmm) {
2725 assert(do_ping_pong);
2726 assert(!wrong_flow_id);
2727 }
2728
2729 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
2730
2731 if (child == 0) {
2732 ifname = FETH0_NAME;
2733 our_ip = sktc_feth0_in_addr();
2734 peer_ip = sktc_feth1_in_addr();
2735 our_port = FETH0_PORT;
2736 peer_port = FETH1_PORT;
2737 } else {
2738 ifname = FETH1_NAME;
2739 our_ip = sktc_feth1_in_addr();
2740 peer_ip = sktc_feth0_in_addr();
2741 our_port = FETH1_PORT;
2742 peer_port = FETH0_PORT;
2743 }
2744
2745 nfr_flags |= (low_latency ? NXFLOWREQF_LOW_LATENCY : 0);
2746
2747 if (test_redirect && child == 0) {
2748 setup_flowswitch(&handles, FETH0_NAME, getpid(),
2749 tx_ring_size, rx_ring_size, buf_size, max_frags);
2750
2751 setup_flowswitch(&handles, RD0_NAME, getpid(),
2752 tx_ring_size, rx_ring_size, buf_size, max_frags);
2753
2754 error = fetch_if_flowswitch_and_setup_flow(&handles, RD0_NAME,
2755 IPPROTO_UDP, 0, our_ip, our_mask, our_port, getpid(), peer_ip,
2756 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false, NULL,
2757 NULL, 0);
2758 } else {
2759 /* set up the flowswitch over the right interface */
2760 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
2761 nfr_flags, our_ip, our_mask, our_port, getpid(), peer_ip,
2762 peer_port, flow_id, &flowadv_idx, tx_ring_size, rx_ring_size,
2763 buf_size, max_frags, multi_llink);
2764 }
2765
2766 if (error == 0) {
2767 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
2768 OUR_FLOWSWITCH_PORT, ENABLE_UPP,
2769 event_test_id == SKT_FSW_EVENT_TEST_CHANNEL_EVENTS ? true : false,
2770 low_latency);
2771 assert(port.chan != NULL);
2772 }
2773
2774 set_feth_mac_addr(&feth0_macaddr, 0);
2775 set_feth_mac_addr(&feth1_macaddr, 1);
2776
2777 /* warm up the arp cache before starting the actual test */
2778 if (child == 0) {
2779 if ((error = skt_add_arp_entry(peer_ip, &feth1_macaddr)) != 0) {
2780 T_LOG("Child 0: ARP entry add failed\n");
2781 return 1;
2782 }
2783 } else {
2784 if ((error = skt_add_arp_entry(peer_ip, &feth0_macaddr)) != 0) {
2785 T_LOG("Child 1: ARP entry add failed\n");
2786 return 1;
2787 }
2788 }
2789
2790 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
2791 SKT_LOG("write fail: %s\n", strerror(errno));
2792 return 1;
2793 }
2794 assert(ret == 1);
2795 #if SKT_XFER_DEBUG
2796 T_LOG("child %d signaled\n", child);
2797 #endif
2798 /* Wait for go signal */
2799 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
2800 SKT_LOG("read fail: %s\n", strerror(errno));
2801 return 1;
2802 }
2803 assert(ret == 1);
2804 if (error != 0) {
2805 return 1;
2806 }
2807 port.ip_addr = our_ip;
2808
2809 if (wrong_flow_id) {
2810 uuid_generate(flow_id);
2811 }
2812 if (do_ping_pong) {
2813 ping_pong(&port, flow_id, our_port, peer_ip, peer_port,
2814 how_many, batch_size, child, wrong_flow_id, flowadv_idx,
2815 test_aqm, test_wmm, MAX_DEMUX_OFFSET + 1);
2816 } else {
2817 switch (event_test_id) {
2818 case SKT_FSW_EVENT_TEST_NONE:
2819 break;
2820 case SKT_FSW_EVENT_TEST_IF_ADV_ENABLED: {
2821 event_flags |= SKT_FSW_EVFLAG_IFADV;
2822 assert(os_channel_configure_interface_advisory(port.chan, TRUE) == 0);
2823 ifadv_enabled = true;
2824 break;
2825 }
2826 case SKT_FSW_EVENT_TEST_IF_ADV_DISABLED: {
2827 event_flags |= SKT_FSW_EVFLAG_IFADV;
2828 assert(os_channel_configure_interface_advisory(port.chan, FALSE) == 0);
2829 break;
2830 }
2831 case SKT_FSW_EVENT_TEST_CHANNEL_EVENTS: {
2832 chan_event_enabled = true;
2833 event_flags |= SKT_FSW_EVFLAG_CHANNEL;
2834 errors_ok = true;
2835 break;
2836 }
2837 default:
2838 T_LOG("unknown event test id %d\n",
2839 event_test_id);
2840 assert(0);
2841 break;
2842 }
2843 send_and_receive(&port, flow_id, our_port, peer_ip, peer_port,
2844 how_many, batch_size, child, wrong_flow_id, errors_ok,
2845 event_flags, ifadv_enabled);
2846 }
2847
2848 #if SKT_XFER_DEBUG
2849 T_LOG("got input %d from parent in child %d, starting test\n",
2850 buf[0], child);
2851 #endif
2852 return 0;
2853 }
2854
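/*
 * get_fsw_stats() sums the flowswitch statistics of feth0 and
 * feth1 from the SK_STATS_FLOW_SWITCH sysctl into `result`;
 * returns ENOENT if neither interface shows up in the dump.
 */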
2855 static int
2856 get_fsw_stats(struct fsw_stats *result)
2857 {
2858 int i, ret;
2859 size_t length = 0;
2860 size_t width = sizeof(struct sk_stats_flow_switch);
2861 void *buffer, *scan;
2862 struct sk_stats_flow_switch *sfs;
2863
2864 ret = sysctl_buf(SK_STATS_FLOW_SWITCH, &buffer, &length, NULL, 0);
2865 if (ret != 0 || buffer == NULL || length == 0) {
2866 T_LOG("get_fsw_stats: Failed to get stats\n");
2867 return ret;
2868 }
2869
2870 assert((length % width) == 0);
2871 scan = buffer;
2872 memset(result, 0, sizeof(*result));
2873
2874 /*
2875 * XXX: I don't like pointer arithmetic on a void ptr, but
2876 * this code was lifted from skywalk_cmds and clang doesn't
2877 * seem to care.
2878 */
2879 ret = ENOENT;
2880 while (scan < (buffer + length)) {
2881 sfs = scan;
2882 scan += sizeof(*sfs);
2883
2884 if (strcmp(sfs->sfs_if_name, FETH0_NAME) != 0 &&
2885 strcmp(sfs->sfs_if_name, FETH1_NAME) != 0) {
2886 continue;
2887 }
2888 ret = 0;
2889
2890 for (i = 0;
2891 i < (sizeof(*result) / sizeof(STATS_VAL(result, 0))); i++) {
2892 STATS_ADD(result, i, STATS_VAL(&sfs->sfs_fsws, i));
2893 }
2894 }
2895
2896 free(buffer);
2897
2898 return ret;
2899 }
2900
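/*
 * skt_xfer_udp_with_errors_common(): child 0 acts as a plain
 * receiver; child 1 walks the fsw_inject_codes table, arming one
 * injection code at a time, sending traffic through it, and
 * verifying that the stats counters associated with that code
 * increased across the run.
 */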
2901 static int
2902 skt_xfer_udp_with_errors_common(int child, uint32_t how_many,
2903 uint32_t batch_size)
2904 {
2905 char buf[1] = { 0 };
2906 int error;
2907 const char * ifname;
2908 uuid_t flow_id = {};
2909 struct in_addr our_ip;
2910 struct in_addr our_mask;
2911 uint16_t our_port;
2912 struct in_addr peer_ip;
2913 uint16_t peer_port;
2914 channel_port port;
2915 ssize_t ret;
2916 int errbit, rv;
2917 uint64_t emask;
2918 uuid_string_t uuidstr;
2919 flowadv_idx_t flowadv_idx;
2920
2921 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
2922
2923 if (child == 0) {
2924 ifname = FETH0_NAME;
2925 our_ip = sktc_feth0_in_addr();
2926 peer_ip = sktc_feth1_in_addr();
2927 our_port = FETH0_PORT;
2928 peer_port = FETH1_PORT;
2929 } else {
2930 child = 1;
2931 ifname = FETH1_NAME;
2932 our_ip = sktc_feth1_in_addr();
2933 peer_ip = sktc_feth0_in_addr();
2934 our_port = FETH1_PORT;
2935 peer_port = FETH0_PORT;
2936 }
2937
2938 /* set up the flowswitch over the right interface */
2939 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
2940 0, our_ip, our_mask, our_port, getpid(), peer_ip,
2941 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
2942 if (error == 0) {
2943 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
2944 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
2945 assert(port.chan != NULL);
2946 }
2947 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
2948 SKT_LOG("write fail: %s\n", strerror(errno));
2949 return 1;
2950 }
2951 assert(ret == 1);
2952 #if SKT_XFER_DEBUG
2953 T_LOG("child %d signaled\n", child);
2954 #endif
2955 /* Wait for go signal */
2956 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
2957 SKT_LOG("read fail: %s\n", strerror(errno));
2958 return 1;
2959 }
2960 assert(ret == 1);
2961 if (error != 0) {
2962 return 1;
2963 }
2964 port.ip_addr = our_ip;
2965
2966 uuid_unparse(handles.fsw_nx_uuid, uuidstr);
2967 T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
2968 fflush(stderr);
2969
2970 /* warm up the arp cache before starting the actual test */
2971 T_LOG("child %d: Warm up ARP cache\n", child);
2972 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
2973 FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
2974 T_LOG("child %d: Test Start\n", child);
2975
2976 /*
2977 * Start the receiver
2978 */
2979 if (child == 0) {
2980 do_error_receive(child, &port, flow_id, our_port, peer_ip, peer_port,
2981 INJECT_CODE_COUNT * how_many);
2982 return 0;
2983 }
2984
2985 /*
2986 * For each injection code:
2987 * Take a snapshot of fsw_stats
2988 * Invoke send_and_receive()
2989 * Take a snapshot of fsw_stats
2990 * Verify stats counter associated to injection code increased.
2991 */
2992 for (errbit = 0; errbit < INJECT_CODE_COUNT; errbit++) {
2993 struct fsw_stats stats_before, stats_after;
2994 const struct fsw_inject_codes *ic;
2995 uint32_t error_rmask;
2996 const int *sidx;
2997 int st;
2998
2999 ic = &fsw_inject_codes[errbit];
3000
3001 T_LOG("Injecting error bit %d\n", ic->ic_code);
3002 fflush(stderr);
3003
3004 emask = (1ULL << ic->ic_code);
3005 emask = set_error_inject_mask(&emask);
3006
3007 rv = get_fsw_stats(&stats_before);
3008 assert(rv == 0);
3009
3010 if (ic->ic_rmask != IC_RMASK_UNSPEC) {
3011 error_rmask = ic->ic_rmask;
3012 error_rmask = sktu_set_inject_error_rmask(&error_rmask);
3013 }
3014
3015 do_error_send(&port, flow_id, our_port, peer_ip, peer_port,
3016 how_many, batch_size);
3017
3018 T_LOG("Tx completed for error bit %d\n", ic->ic_code);
3019
3020 rv = get_fsw_stats(&stats_after);
3021 assert(rv == 0);
3022
3023 if (ic->ic_rmask != IC_RMASK_UNSPEC) {
3024 error_rmask = sktu_set_inject_error_rmask(&error_rmask);
3025 }
3026
3027 /* random error injection could fail to inject at all */
3028 if (STATS_VAL(&stats_after, _FSW_STATS_ERROR_INJECTIONS) ==
3029 STATS_VAL(&stats_before, _FSW_STATS_ERROR_INJECTIONS)) {
3030 T_LOG("skip non-injected error bit %d\n",
3031 ic->ic_code);
3032 continue;
3033 }
3034
3035 for (sidx = ic->ic_stat_idx, st = 0;
3036 st < INJECT_CODE_IDX_MAX; st++, sidx++) {
3037 uint64_t counter;
3038
3039 if (*sidx < 0) {
3040 continue;
3041 }
3042
3043 counter = STATS_VAL(&stats_after, *sidx);
3044 counter -= STATS_VAL(&stats_before, *sidx);
3045
3046 if (counter == 0) {
3047 T_LOG("Counter idx %d didn't "
3048 "change for error %d. Before %lld, "
3049 "After %lld\n", st, ic->ic_code,
3050 STATS_VAL(&stats_before, *sidx),
3051 STATS_VAL(&stats_after, *sidx));
3052 return 1;
3053 }
3054 }
3055 }
3056
3057 emask = 0;
3058 set_error_inject_mask(&emask);
3059
3060 return 0;
3061 }
3062
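/*
 * skt_xfer_tcpflood() floods the peer with TCP segments after the
 * usual flowswitch/flow setup and parent-child handshake, passing
 * `synflood` through as the connect flag to send_tcp().
 */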
3063 static int
3064 skt_xfer_tcpflood(int child, uint32_t how_many, uint32_t batch_size, bool synflood)
3065 {
3066 char buf[1] = { 0 };
3067 int error;
3068 const char * ifname;
3069 uuid_t flow_id = {};
3070 struct in_addr our_ip;
3071 struct in_addr our_mask;
3072 uint16_t our_port;
3073 struct in_addr peer_ip;
3074 uint16_t peer_port;
3075 channel_port port;
3076 ssize_t ret;
3077 flowadv_idx_t flowadv_idx;
3078
3079 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3080
3081 if (child == 0) {
3082 ifname = FETH0_NAME;
3083 our_ip = sktc_feth0_in_addr();
3084 peer_ip = sktc_feth1_in_addr();
3085 our_port = FETH0_PORT;
3086 peer_port = FETH1_PORT;
3087 } else {
3088 ifname = FETH1_NAME;
3089 our_ip = sktc_feth1_in_addr();
3090 peer_ip = sktc_feth0_in_addr();
3091 our_port = FETH1_PORT;
3092 peer_port = FETH0_PORT;
3093 }
3094
3095 /* set up the flowswitch over the right interface */
3096 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_TCP,
3097 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3098 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3099 if (error == 0) {
3100 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
3101 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
3102 assert(port.chan != NULL);
3103 }
3104 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3105 SKT_LOG("write fail: %s\n", strerror(errno));
3106 return 1;
3107 }
3108 assert(ret == 1);
3109 #if SKT_XFER_DEBUG
3110 T_LOG("child %d signaled\n", child);
3111 #endif
3112 /* Wait for go signal */
3113 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3114 SKT_LOG("read fail: %s\n", strerror(errno));
3115 return 1;
3116 }
3117 assert(ret == 1);
3118 if (error != 0) {
3119 return 1;
3120 }
3121 #if SKT_XFER_DEBUG
3122 T_LOG("got input %d from parent in child %d, starting test\n",
3123 buf[0], child);
3124 #endif
3125 port.ip_addr = our_ip;
3126 send_tcp(&port, flow_id, our_port, peer_ip, peer_port,
3127 how_many, batch_size, child, synflood);
3128 return 0;
3129 }
3130
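/*
 * skt_xfer_portzero() verifies that connecting a flow with a zero
 * destination port is rejected; the connect is expected to fail
 * with EINVAL.
 */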
3131 static int
3132 skt_xfer_portzero(int child, int protocol)
3133 {
3134 char buf[1] = { 0 };
3135 int error;
3136 const char * ifname;
3137 uuid_t flow_id = {};
3138 struct in_addr our_ip;
3139 struct in_addr our_mask;
3140 uint16_t our_port;
3141 struct in_addr peer_ip;
3142 uint16_t peer_port;
3143 ssize_t ret;
3144 flowadv_idx_t flowadv_idx;
3145
3146 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3147
3148 if (child == 0) {
3149 ifname = "feth0";
3150 our_ip = sktc_feth0_in_addr();
3151 peer_ip = sktc_feth1_in_addr();
3152 our_port = FETH0_PORT;
3153 peer_port = 0;
3154 } else {
3155 ifname = "feth1";
3156 our_ip = sktc_feth1_in_addr();
3157 peer_ip = sktc_feth0_in_addr();
3158 our_port = FETH1_PORT;
3159 peer_port = 0;
3160 }
3161
3162 /* this should fail with EINVAL (port 0) */
3163 error = setup_flowswitch_and_flow(&handles, ifname, protocol,
3164 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3165 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3166
3167 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3168 SKT_LOG("write fail: %s\n", strerror(errno));
3169 return 1;
3170 }
3171 assert(ret == 1);
3172 #if SKT_XFER_DEBUG
3173 T_LOG("child %d signaled\n", child);
3174 #endif
3175 /* Wait for go signal */
3176 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3177 SKT_LOG("read fail: %s\n", strerror(errno));
3178 return 1;
3179 }
3180 assert(ret == 1);
3181
3182 if (error != EINVAL) {
3183 T_LOG("expected %d but got %s (%d)\n", EINVAL,
3184 strerror(error), error);
3185 return 1;
3186 }
3187 return 0;
3188 }
3189
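/*
 * skt_xfer_setuponly() only performs the flowswitch/flow/channel
 * setup and the parent-child handshake; no packets are
 * transferred. If setup fails it parks in a sleep loop so the
 * failure can be observed.
 */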
3190 static int
3191 skt_xfer_setuponly(int child)
3192 {
3193 char buf[1] = { 0 };
3194 int error;
3195 const char * ifname;
3196 uuid_t flow_id = {};
3197 struct in_addr our_ip;
3198 struct in_addr our_mask;
3199 uint16_t our_port;
3200 struct in_addr peer_ip;
3201 uint16_t peer_port;
3202 channel_port port;
3203 ssize_t ret;
3204 flowadv_idx_t flowadv_idx;
3205
3206 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3207
3208 if (child == 0) {
3209 ifname = FETH0_NAME;
3210 our_ip = sktc_feth0_in_addr();
3211 peer_ip = sktc_feth1_in_addr();
3212 our_port = FETH0_PORT;
3213 peer_port = FETH1_PORT;
3214 } else {
3215 ifname = FETH1_NAME;
3216 our_ip = sktc_feth1_in_addr();
3217 peer_ip = sktc_feth0_in_addr();
3218 our_port = FETH1_PORT;
3219 peer_port = FETH0_PORT;
3220 }
3221
3222 /* set up the flowswitch over the right interface */
3223 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_TCP,
3224 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3225 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3226 if (error == 0) {
3227 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
3228 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
3229 assert(port.chan != NULL);
3230 } else {
3231 while (1) {
3232 T_LOG("Child %d waiting\n", child);
3233 sleep(5);
3234 }
3235 }
3236 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3237 SKT_LOG("write fail: %s\n", strerror(errno));
3238 return 1;
3239 }
3240 assert(ret == 1);
3241 #if SKT_XFER_DEBUG
3242 T_LOG("child %d signaled\n", child);
3243 #endif
3244 /* Wait for go signal */
3245 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3246 SKT_LOG("read fail: %s\n", strerror(errno));
3247 return 1;
3248 }
3249 assert(ret == 1);
3250 if (error != 0) {
3251 return 1;
3252 }
3253 #if SKT_XFER_DEBUG
3254 T_LOG("got input %d from parent in child %d, starting test\n",
3255 buf[0], child);
3256 #endif
3257 return 0;
3258 }
3259
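/*
 * send_bad_flow() sends one packet that deliberately does not
 * match the connected flow and asserts that the flowswitch drop
 * counter (FSW_STATS_DROP) increased as a result.
 */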
3260 static void
3261 send_bad_flow(channel_port_t port, uuid_t flow_id,
3262 int protocol, uint16_t src_port, struct in_addr dst_ip, uint16_t dst_port,
3263 my_payload_t payload)
3264 {
3265 struct fsw_stats stats_before, stats_after;
3266 uint64_t counter;
3267 int ret;
3268
3269 ret = get_fsw_stats(&stats_before);
3270 assert(ret == 0);
3271
3272 channel_port_send(port, flow_id, protocol, src_port, dst_ip, dst_port,
3273 payload, sizeof(*payload), 1, FALSE, FALSE, PKT_SC_BE, FALSE, NULL);
3274
3275 ret = get_fsw_stats(&stats_after);
3276 assert(ret == 0);
3277
3278 counter = STATS_VAL(&stats_after, FSW_STATS_DROP);
3279 counter -= STATS_VAL(&stats_before, FSW_STATS_DROP);
3280
3281 if (counter == 0) {
3282 T_LOG("Flow not ours wasn't dropped");
3283 assert(0);
3284 }
3285 T_LOG("dropped %"PRIu64"\n", counter);
3286 }
3287
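/*
 * skt_xfer_flowmatch(): child 0 receives, child 1 sends packets
 * that are each wrong in exactly one flow attribute (flow id,
 * protocol, src port, dst IP, dst port) and must all be dropped,
 * then sends XFER_RECV_END_PAYLOAD to stop the receiver.
 */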
3288 static int
3289 skt_xfer_flowmatch(int child)
3290 {
3291 char buf[1] = { 0 };
3292 int error;
3293 const char * ifname;
3294 uuid_t flow_id = {};
3295 uuid_t nowhere_flow_id;
3296 struct in_addr our_ip, peer_ip, nowhere_ip;
3297 struct in_addr our_mask;
3298 uint16_t our_port, peer_port;
3299 channel_port port;
3300 ssize_t ret;
3301 uuid_string_t uuidstr;
3302 flowadv_idx_t flowadv_idx;
3303
3304 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3305
3306 if (child == 0) {
3307 ifname = FETH0_NAME;
3308 our_ip = sktc_feth0_in_addr();
3309 peer_ip = sktc_feth1_in_addr();
3310 our_port = FETH0_PORT;
3311 peer_port = FETH1_PORT;
3312 } else {
3313 child = 1;
3314 ifname = FETH1_NAME;
3315 our_ip = sktc_feth1_in_addr();
3316 peer_ip = sktc_feth0_in_addr();
3317 our_port = FETH1_PORT;
3318 peer_port = FETH0_PORT;
3319 }
3320
3321 /* set up the flowswitch over the right interface */
3322 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
3323 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3324 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3325 if (error == 0) {
3326 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
3327 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
3328 assert(port.chan != NULL);
3329 }
3330 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3331 SKT_LOG("write fail: %s\n", strerror(errno));
3332 return 1;
3333 }
3334 assert(ret == 1);
3335 #if SKT_XFER_DEBUG
3336 T_LOG("child %d signaled\n", child);
3337 #endif
3338 /* Wait for go signal */
3339 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3340 SKT_LOG("read fail: %s\n", strerror(errno));
3341 return 1;
3342 }
3343 assert(ret == 1);
3344 if (error != 0) {
3345 return 1;
3346 }
3347 port.ip_addr = our_ip;
3348
3349 uuid_unparse(handles.fsw_nx_uuid, uuidstr);
3350 T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
3351 fflush(stderr);
3352
3353 /* warm up the arp cache before starting the actual test */
3354 T_LOG("child %d: Warm up ARP cache\n", child);
3355 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
3356 FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
3357 T_LOG("child %d: Test Start\n", child);
3358
3359 /* Start the receiver */
3360 if (child == 0) {
3361 channel_port_receive_all(&port, flow_id, our_port, peer_ip,
3362 peer_port, 0, FALSE);
3363 return 0;
3364 }
3365
3366 my_payload payload;
3367 bzero(&payload, sizeof(payload));
3368 payload.packet_number = 0;
3369
3370 nowhere_ip = sktc_nowhere_in_addr();
3371 do {
3372 uuid_generate_random(nowhere_flow_id);
3373 } while (!uuid_compare(nowhere_flow_id, flow_id));
3374
3375 /* Send with wrong flow id */
3376 T_LOG("Send with wrong flow id...\t");
3377 payload.packet_number = 0;
3378 strncpy(payload.data, "wrong flow id", sizeof(payload.data));
3379 send_bad_flow(&port, nowhere_flow_id,
3380 IPPROTO_UDP, our_port, peer_ip, peer_port, &payload);
3381
3382 /* Send with wrong protocol */
3383 T_LOG("Send with wrong protocol...\t");
3384 payload.packet_number = 0;
3385 strncpy(payload.data, "wrong protocol", sizeof(payload.data));
3386 send_bad_flow(&port, flow_id,
3387 IPPROTO_TCP, our_port, peer_ip, peer_port, &payload);
3388
3389 /* Send with wrong src port */
3390 T_LOG("Send with wrong src port...\t");
3391 payload.packet_number = 0;
3392 strncpy(payload.data, "wrong src port", sizeof(payload.data));
3393 send_bad_flow(&port, flow_id,
3394 IPPROTO_UDP, our_port + 1, peer_ip, peer_port, &payload);
3395
3396 /* Send with wrong dst IP */
3397 T_LOG("Send with wrong dst IP...\t");
3398 payload.packet_number = 0;
3399 strncpy(payload.data, "wrong dst IP", sizeof(payload.data));
3400 send_bad_flow(&port, flow_id,
3401 IPPROTO_UDP, our_port, nowhere_ip, peer_port, &payload);
3402
3403 /* Send with wrong dst port */
3404 T_LOG("Send with wrong dst port...\t");
3405 payload.packet_number = 0;
3406 strncpy(payload.data, "wrong dst port", sizeof(payload.data));
3407 send_bad_flow(&port, flow_id,
3408 IPPROTO_UDP, our_port, peer_ip, peer_port + 1, &payload);
3409
3410 /* Send something right to single receiver to end */
3411 payload.packet_number = 0;
3412 strncpy(payload.data, XFER_RECV_END_PAYLOAD, sizeof(payload.data));
3413 channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
3414 peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, PKT_SC_BE,
3415 FALSE, NULL);
3416
3417 return 0;
3418 }
3419
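/*
 * skt_xfer_flowcleanup() checks that destroying a channel also
 * cleans up its flow and nexus port binding: it binds, opens and
 * destroys a channel, then re-binds the same port, connects a new
 * flow, re-opens the channel, and runs ping_pong() over it.
 */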
3420 /* see rdar://problem/38427726 for details */
3421 static int
3422 skt_xfer_flowcleanup(int child, uint32_t how_many, uint32_t batch_size)
3423 {
3424 char buf[1] = { 0 };
3425 int error;
3426 const char * ifname;
3427 uuid_t flow_id = {};
3428 struct in_addr our_ip;
3429 struct in_addr our_mask;
3430 uint16_t our_port;
3431 struct in_addr peer_ip;
3432 uint16_t peer_port;
3433 channel_port port;
3434 ssize_t ret;
3435 flowadv_idx_t flowadv_idx = FLOWADV_IDX_NONE;
3436
3437 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3438
3439 if (child == 0) {
3440 ifname = FETH0_NAME;
3441 our_ip = sktc_feth0_in_addr();
3442 peer_ip = sktc_feth1_in_addr();
3443 our_port = FETH0_PORT;
3444 peer_port = FETH1_PORT;
3445 } else {
3446 ifname = FETH1_NAME;
3447 our_ip = sktc_feth1_in_addr();
3448 peer_ip = sktc_feth0_in_addr();
3449 our_port = FETH1_PORT;
3450 peer_port = FETH0_PORT;
3451 }
3452
3453 /*
3454 * set up the flowswitch over the right interface and bind a
3455 * 5 tuple flow.
3456 */
3457 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
3458 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3459 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3460 SKTC_ASSERT_ERR(error == 0);
3461 assert(flowadv_idx != FLOWADV_IDX_NONE);
3462
3463 /* open channel */
3464 sktu_channel_port_init(&port, handles.fsw_nx_uuid, OUR_FLOWSWITCH_PORT,
3465 ENABLE_UPP, false, false);
3466 assert(port.chan != NULL);
3467
3468 /*
3469 * Close the channel. This also triggers the closure of the flow
3470 * created above and the removal of nexus port binding.
3471 */
3472 os_channel_destroy(port.chan);
3473
3474 /* bind again to the same port */
3475 error = os_nexus_controller_bind_provider_instance(handles.controller,
3476 handles.fsw_nx_uuid, OUR_FLOWSWITCH_PORT, getpid(),
3477 NULL, NULL, 0, NEXUS_BIND_PID);
3478 SKTC_ASSERT_ERR(!error);
3479
3480 /* open a new flow */
3481 uuid_generate(flow_id);
3482 flowadv_idx = FLOWADV_IDX_NONE;
3483 error = connect_flow(handles.controller, handles.fsw_nx_uuid,
3484 OUR_FLOWSWITCH_PORT, flow_id, IPPROTO_UDP, 0,
3485 handles.netif_addr, our_port, peer_ip, peer_port, &flowadv_idx, 0);
3486 SKTC_ASSERT_ERR(!error);
3487 assert(flowadv_idx != FLOWADV_IDX_NONE);
3488
3489 /* re-open channel on the same port */
3490 sktu_channel_port_init(&port, handles.fsw_nx_uuid, OUR_FLOWSWITCH_PORT,
3491 ENABLE_UPP, false, false);
3492 assert(port.chan != NULL);
3493 port.ip_addr = our_ip;
3494
3495 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3496 SKT_LOG("write fail: %s\n", strerror(errno));
3497 return 1;
3498 }
3499 assert(ret == 1);
3500 /* Wait for go signal */
3501 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3502 SKT_LOG("read fail: %s\n", strerror(errno));
3503 return 1;
3504 }
3505 assert(ret == 1);
3506 if (error != 0) {
3507 return 1;
3508 }
3509
3510 /* warm up the arp cache before starting the actual test */
3511 T_LOG("child %d: Warm up ARP cache\n", child);
3512 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
3513 FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
3514 T_LOG("child %d: Test Start\n", child);
3515
3516 /* perform ping pong test */
3517 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, how_many,
3518 batch_size, child, FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
3519
3520 return 0;
3521 }
3522
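/*
 * skt_xfer_csumoffload(): child 0 receives exactly two packets;
 * child 1 sends one packet with and one without checksum offload,
 * then signals the receiver to stop with XFER_RECV_END_PAYLOAD.
 */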
3523 static int
3524 skt_xfer_csumoffload(int child, int protocol)
3525 {
3526 char buf[1] = { 0 };
3527 int error;
3528 const char * ifname;
3529 uuid_t flow_id = {};
3530 uuid_t nowhere_flow_id;
3531 struct in_addr our_ip, peer_ip;
3532 struct in_addr our_mask;
3533 uint16_t our_port, peer_port;
3534 channel_port port;
3535 ssize_t ret;
3536 uuid_string_t uuidstr;
3537 flowadv_idx_t flowadv_idx;
3538
3539 our_mask = sktc_make_in_addr(IN_CLASSC_NET);
3540
3541 if (child == 0) {
3542 ifname = FETH0_NAME;
3543 our_ip = sktc_feth0_in_addr();
3544 peer_ip = sktc_feth1_in_addr();
3545 our_port = FETH0_PORT;
3546 peer_port = FETH1_PORT;
3547 } else {
3548 ifname = FETH1_NAME;
3549 our_ip = sktc_feth1_in_addr();
3550 peer_ip = sktc_feth0_in_addr();
3551 our_port = FETH1_PORT;
3552 peer_port = FETH0_PORT;
3553 }
3554
3555 /* set up the flowswitch over the right interface */
3556 error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
3557 0, our_ip, our_mask, our_port, getpid(), peer_ip,
3558 peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
3559 if (error == 0) {
3560 sktu_channel_port_init(&port, handles.fsw_nx_uuid,
3561 OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
3562 assert(port.chan != NULL);
3563 }
3564 if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3565 SKT_LOG("write fail: %s\n", strerror(errno));
3566 return 1;
3567 }
3568 assert(ret == 1);
3569 #if SKT_XFER_DEBUG
3570 T_LOG("child %d signaled\n", child);
3571 #endif
3572 /* Wait for go signal */
3573 if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
3574 SKT_LOG("read fail: %s\n", strerror(errno));
3575 return 1;
3576 }
3577 assert(ret == 1);
3578 if (error != 0) {
3579 return 1;
3580 }
3581 port.ip_addr = our_ip;
3582
3583 uuid_unparse(handles.fsw_nx_uuid, uuidstr);
3584 T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
3585 fflush(stderr);
3586
3587 /* warm up the arp cache before starting the actual test */
3588 T_LOG("child %d: Warm up ARP cache\n", child);
3589 ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
3590 FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
3591 T_LOG("child %d: Test Start\n", child);
3592
3593 /* Start the receiver */
3594 if (child == 0) {
3595 channel_port_receive_all(&port, flow_id, our_port, peer_ip,
3596 peer_port, 2, FALSE);
3597 return 0;
3598 }
3599
3600 my_payload payload;
3601 bzero(&payload, sizeof(payload));
3602 payload.packet_number = 0;
3603
3604 do{
3605 uuid_generate_random(nowhere_flow_id);
3606 } while (!uuid_compare(nowhere_flow_id, flow_id));
3607
3608 /* send with checksum offloading */
3609 payload.packet_number = 0;
3610 strlcpy(payload.data, "any", sizeof(payload.data));
3611 channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
3612 peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, PKT_SC_BE,
3613 TRUE, NULL);
3614
3615 /* send without checksum offloading */
3616 payload.packet_number = 0;
3617 strlcpy(payload.data, "any", sizeof(payload.data));
3618 channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
3619 peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, PKT_SC_BE,
3620 FALSE, NULL);
3621
3622 /* signal receiver to stop */
3623 payload.packet_number = 0;
3624 strlcpy(payload.data, XFER_RECV_END_PAYLOAD, sizeof(payload.data));
3625 channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
3626 peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, PKT_SC_BE,
3627 FALSE, NULL);
3628 return 0;
3629 }
3630
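/*
 * Enable QoS marking on an interface via two ioctls:
 * SIOCSQOSMARKINGMODE selects the marking mode (e.g. fastlane or
 * RFC 4594) and SIOCSQOSMARKINGENABLED turns marking on. The socket
 * is used only as an ioctl handle and is not closed, which is
 * harmless in this short-lived test process.
 */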
static void
skt_xfer_enable_qos_marking_interface(const char *ifname, uint32_t mode)
{
	/* set up the ifnet for qos marking */
	int s;
	struct ifreq ifr;
	unsigned long ioc;

	assert(mode != IFRTYPE_QOSMARKING_MODE_NONE);

	assert((s = socket(AF_INET, SOCK_DGRAM, 0)) >= 0);

	bzero(&ifr, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	ioc = SIOCSQOSMARKINGMODE;
	ifr.ifr_qosmarking_mode = mode;
	assert(ioctl(s, ioc, (caddr_t)&ifr) == 0);

	bzero(&ifr, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	ioc = SIOCSQOSMARKINGENABLED;
	ifr.ifr_qosmarking_enabled = 1;
	assert(ioctl(s, ioc, (caddr_t)&ifr) == 0);
}

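/*
 * QoS marking test (intent inferred from the code below): both sides
 * enable marking in the requested mode; child 0 receives and verifies
 * the marking of each packet (channel_port_receive_all with
 * verification enabled), while child 1 sends one packet per service
 * class, with and without checksum offload, followed by the end
 * marker.
 */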
static int
skt_xfer_qosmarking(int child, uint32_t mode)
{
	char buf[1] = { 0 };
	int error;
	const char *ifname;
	uuid_t flow_id = {};
	uuid_t nowhere_flow_id;
	struct in_addr our_ip, peer_ip;
	struct in_addr our_mask;
	uint16_t our_port, peer_port;
	channel_port port;
	ssize_t ret;
	uuid_string_t uuidstr;
	flowadv_idx_t flowadv_idx;

	our_mask = sktc_make_in_addr(IN_CLASSC_NET);

	if (child == 0) {
		ifname = FETH0_NAME;
		our_ip = sktc_feth0_in_addr();
		peer_ip = sktc_feth1_in_addr();
		our_port = FETH0_PORT;
		peer_port = FETH1_PORT;
		skt_xfer_enable_qos_marking_interface(ifname, mode);
	} else {
		ifname = FETH1_NAME;
		our_ip = sktc_feth1_in_addr();
		peer_ip = sktc_feth0_in_addr();
		our_port = FETH1_PORT;
		peer_port = FETH0_PORT;
		skt_xfer_enable_qos_marking_interface(ifname, mode);
	}

	/* set up the flowswitch over the right interface */
	error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
	    0, our_ip, our_mask, our_port, getpid(), peer_ip,
	    peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
	if (error == 0) {
		sktu_channel_port_init(&port, handles.fsw_nx_uuid,
		    OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
		assert(port.chan != NULL);
	}
	if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
		SKT_LOG("write fail: %s\n", strerror(errno));
		return 1;
	}
	assert(ret == 1);
#if SKT_XFER_DEBUG
	T_LOG("child %d signaled\n", child);
#endif
	/* wait for the go signal */
	if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
		SKT_LOG("read fail: %s\n", strerror(errno));
		return 1;
	}
	assert(ret == 1);
	if (error != 0) {
		return 1;
	}
	port.ip_addr = our_ip;

	uuid_unparse(handles.fsw_nx_uuid, uuidstr);
	T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
	fflush(stderr);

	/* warm up the ARP cache before starting the actual test */
	T_LOG("child %d: Warm up ARP cache\n", child);
	ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
	    FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
	T_LOG("child %d: Test Start\n", child);

	/* start the receiver, which verifies the QoS marking */
	if (child == 0) {
		channel_port_receive_all(&port, flow_id, our_port, peer_ip,
		    peer_port, -1, TRUE);
		return 0;
	}

	my_payload payload;

	/* pick a random flow ID guaranteed to differ from flow_id */
	do {
		uuid_generate_random(nowhere_flow_id);
	} while (!uuid_compare(nowhere_flow_id, flow_id));

	/* test qos marking with and without checksum offload */

#define __SEND_SC(svc, csum_offload) \
	bzero(&payload, sizeof(payload)); \
	payload.packet_number = 0; \
	if (mode == IFRTYPE_QOSMARKING_FASTLANE) { \
	        strlcpy(payload.data, XFER_QOSMARKING_FASTLANE_PREFIX, sizeof(payload.data)); \
	} else if (mode == IFRTYPE_QOSMARKING_RFC4594) { \
	        strlcpy(payload.data, XFER_QOSMARKING_RFC4594_PREFIX, sizeof(payload.data)); \
	} \
	strlcat(payload.data, #svc, sizeof(payload.data)); \
	channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip, \
	    peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, svc, csum_offload, NULL);

#define SEND_SC(svc) \
	__SEND_SC(svc, FALSE); \
	__SEND_SC(svc, TRUE);

	SEND_SC(PKT_SC_BK);
	SEND_SC(PKT_SC_BK_SYS);
	SEND_SC(PKT_SC_BE);
	SEND_SC(PKT_SC_RD);
	SEND_SC(PKT_SC_OAM);
	SEND_SC(PKT_SC_AV);
	SEND_SC(PKT_SC_RV);
	SEND_SC(PKT_SC_VI);
	SEND_SC(PKT_SC_SIG);
	SEND_SC(PKT_SC_VO);
	SEND_SC(PKT_SC_CTL);

#undef SEND_SC
#undef __SEND_SC

	/* signal the receiver to stop */
	payload.packet_number = 0;
	strlcpy(payload.data, XFER_RECV_END_PAYLOAD, sizeof(payload.data));
	channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
	    peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, PKT_SC_BE,
	    FALSE, NULL);

	return 0;
}

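/*
 * Listener TCP RST test (intent inferred from the flow setup below):
 * child 0 registers a connecting TCP flow towards listener_port and
 * waits for a single packet; child 1 registers a TCP listener flow on
 * listener_port with a wildcard remote address, sends one segment
 * from that port, then reuses the UDP flow to deliver the end marker.
 */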
static int
skt_xfer_listener_tcp_rst(int child)
{
	char buf[1] = { 0 };
	int error;
	const char *ifname;
	uuid_t flow_id = {};
	uuid_t listener_flow_id, connecting_flow_id;
	struct in_addr our_ip, peer_ip, zero_ip;
	struct in_addr our_mask;
	uint16_t our_port, peer_port, listener_port;
	channel_port port;
	ssize_t ret;
	uuid_string_t uuidstr;
	flowadv_idx_t flowadv_idx;

	our_mask = sktc_make_in_addr(IN_CLASSC_NET);

	if (child == 0) {
		/* connector and RST receiver */
		ifname = FETH0_NAME;
		our_ip = sktc_feth0_in_addr();
		peer_ip = sktc_feth1_in_addr();
		our_port = FETH0_PORT;
		peer_port = FETH1_PORT;
		listener_port = FETH0_PORT + 1;
	} else {
		/* listener */
		ifname = FETH1_NAME;
		our_ip = sktc_feth1_in_addr();
		peer_ip = sktc_feth0_in_addr();
		our_port = FETH1_PORT;
		peer_port = FETH0_PORT;
		listener_port = FETH0_PORT + 1;
	}

	zero_ip = (struct in_addr){.s_addr = htonl(INADDR_ANY)};

	/* set up the flowswitch over the right interface */
	error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
	    0, our_ip, our_mask, our_port, getpid(), peer_ip,
	    peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
	if (error == 0) {
		sktu_channel_port_init(&port, handles.fsw_nx_uuid,
		    OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
		assert(port.chan != NULL);
	}
	if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
		SKT_LOG("write fail: %s\n", strerror(errno));
		return 1;
	}
	assert(ret == 1);
#if SKT_XFER_DEBUG
	T_LOG("child %d signaled\n", child);
#endif

	if (child == 0) {
		do {
			uuid_generate_random(connecting_flow_id);
		} while (!uuid_compare(connecting_flow_id, flow_id));
		flowadv_idx_t tmp_flowadv_idx = FLOWADV_IDX_NONE;
		error = connect_flow(handles.controller, handles.fsw_nx_uuid,
		    OUR_FLOWSWITCH_PORT, connecting_flow_id, IPPROTO_TCP, 0,
		    our_ip, our_port, peer_ip, listener_port, &tmp_flowadv_idx,
		    0);
		SKTC_ASSERT_ERR(!error);
		assert(tmp_flowadv_idx != FLOWADV_IDX_NONE);
	} else {
		do {
			uuid_generate_random(listener_flow_id);
		} while (!uuid_compare(listener_flow_id, flow_id));
		flowadv_idx_t tmp_flowadv_idx = FLOWADV_IDX_NONE;
		error = connect_flow(handles.controller, handles.fsw_nx_uuid,
		    OUR_FLOWSWITCH_PORT, listener_flow_id, IPPROTO_TCP, 0,
		    our_ip, listener_port, zero_ip, 0, &tmp_flowadv_idx, 0);
		SKTC_ASSERT_ERR(!error);
		assert(tmp_flowadv_idx != FLOWADV_IDX_NONE);
	}

	/* wait for the go signal */
	if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
		SKT_LOG("read fail: %s\n", strerror(errno));
		return 1;
	}
	assert(ret == 1);
	if (error != 0) {
		return 1;
	}
	port.ip_addr = our_ip;

	uuid_unparse(handles.fsw_nx_uuid, uuidstr);
	T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
	fflush(stderr);

	/* warm up the ARP cache before starting the actual test */
	T_LOG("child %d: Warm up ARP cache\n", child);
	ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
	    FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
	T_LOG("child %d: Test Start\n", child);

	/* start the receiver */
	if (child == 0) {
		channel_port_receive_all(&port, flow_id, listener_port, peer_ip,
		    peer_port, 1, FALSE);
		return 0;
	}

	my_payload payload;
	bzero(&payload, sizeof(payload));
	payload.packet_number = 0;
	channel_port_send(&port, listener_flow_id, IPPROTO_TCP, listener_port,
	    peer_ip, peer_port, &payload, sizeof(payload), 1, FALSE, FALSE,
	    PKT_SC_BE, TRUE, NULL);

	sleep(1);

	/* send the end marker over the UDP flow so the receiver stops */
	payload.packet_number = 1;
	strlcpy(payload.data, XFER_RECV_END_PAYLOAD, sizeof(payload.data));
	channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
	    peer_port, &payload, sizeof(payload), 2, FALSE, FALSE, PKT_SC_BE,
	    FALSE, NULL);

	return 0;
}

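/*
 * UDP fragment test: the sender emits one datagram split into four IP
 * fragments (the per-fragment payload size is rounded down to a
 * multiple of 8, as the IP fragment-offset encoding requires). With
 * error_ids set, the fragments carry inconsistent IP IDs, so
 * reassembly must fail and the receiver expects zero packets before
 * the end marker.
 */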
int
skt_xfer_udp_frags(int child, bool error_ids)
{
	char buf[1] = { 0 };
	int error;
	const char *ifname;
	uuid_t flow_id = {};
	struct in_addr our_ip, peer_ip;
	struct in_addr our_mask;
	uint16_t our_port, peer_port;
	channel_port port;
	ssize_t ret;
	uuid_string_t uuidstr;
	flowadv_idx_t flowadv_idx;

	our_mask = sktc_make_in_addr(IN_CLASSC_NET);

	if (child == 0) {
		ifname = FETH0_NAME;
		our_ip = sktc_feth0_in_addr();
		peer_ip = sktc_feth1_in_addr();
		our_port = FETH0_PORT;
		peer_port = FETH1_PORT;
	} else {
		child = 1;
		ifname = FETH1_NAME;
		our_ip = sktc_feth1_in_addr();
		peer_ip = sktc_feth0_in_addr();
		our_port = FETH1_PORT;
		peer_port = FETH0_PORT;
	}

	/* set up the flowswitch over the right interface */
	error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
	    0, our_ip, our_mask, our_port, getpid(), peer_ip,
	    peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
	if (error == 0) {
		sktu_channel_port_init(&port, handles.fsw_nx_uuid,
		    OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
		assert(port.chan != NULL);
	}
	if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
		SKT_LOG("write fail: %s\n", strerror(errno));
		return 1;
	}
	assert(ret == 1);
#if SKT_XFER_DEBUG
	T_LOG("child %d signaled\n", child);
#endif

	/* wait for the go signal */
	if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
		SKT_LOG("read fail: %s\n", strerror(errno));
		return 1;
	}
	assert(ret == 1);
	if (error != 0) {
		return 1;
	}
	port.ip_addr = our_ip;

	uuid_unparse(handles.fsw_nx_uuid, uuidstr);
	T_LOG("Child %d nexus uuid: '%s'\n", child, uuidstr);
	fflush(stderr);

	/* warm up the ARP cache before starting the actual test */
	T_LOG("child %d: Warm up ARP cache\n", child);
	ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, child,
	    FALSE, flowadv_idx, FALSE, FALSE, MAX_DEMUX_OFFSET + 1);
	T_LOG("child %d: Test Start\n", child);

	/* start the receiver */
	if (child == 0) {
		channel_port_receive_all(&port, flow_id, our_port, peer_ip,
		    peer_port, error_ids ? 0 : 4, FALSE);
		return 0;
	}

	my_payload payload;
	bzero(&payload, sizeof(payload));
	payload.packet_number = 0;

	channel_port_send_fragments(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
	    peer_port, &payload, (sizeof(payload.data) & ~0x7), 4,
	    PKT_SC_BE, FALSE, error_ids);

	bzero(&payload, sizeof(payload));
	payload.packet_number = 0;
	strlcpy(payload.data, XFER_RECV_END_PAYLOAD, sizeof(payload.data));
	channel_port_send(&port, flow_id, IPPROTO_UDP, our_port, peer_ip,
	    peer_port, &payload, sizeof(payload), 1, FALSE, FALSE, PKT_SC_BE,
	    FALSE, NULL);

	return 0;
}

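/*
 * Parent/child flow demux test (roles inferred from the code below):
 * the PARENT_ID process creates the flowswitch and registers a parent
 * flow (NXFLOWREQF_PARENT) under a well-known UUID; the CHILD_ID
 * process attaches to the same flowswitch and registers a child flow
 * with a demux pattern matching a two-byte value at
 * DEMUX_PAYLOAD_OFFSET plus demux_offset, so matching packets are
 * steered to the child's nexus port. REMOTE_ID is the peer that the
 * child ping-pongs with.
 */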
static int
skt_xfer_udp_parent_child(int id, uint16_t demux_offset)
{
#define CHILD_ID 0
#define REMOTE_ID 1
#define PARENT_ID 2

#define PARENT_FLOW_UUID "1B4E28BA-2FA1-11D2-883F-B9A761BDE3FB"
#define CHILD_FLOW_UUID "1B4E28BA-2FA1-11D2-883F-B9A761BDE3FD"

	char buf[1] = { 0 };
	int error = 0;
	const char *ifname;
	uuid_t flow_id = {};
	struct in_addr our_ip;
	struct in_addr our_mask;
	uint16_t our_port;
	struct in_addr peer_ip;
	uint16_t peer_port;
	channel_port port;
	ssize_t ret;
	flowadv_idx_t flowadv_idx;
	nexus_port_t nx_port;
	uuid_t parent_flow_id = {};
	uint16_t flags = 0;

	our_mask = sktc_make_in_addr(IN_CLASSC_NET);

	if (id == PARENT_ID) {
		ifname = FETH0_NAME;
		our_ip = sktc_feth0_in_addr();
		peer_ip = sktc_feth1_in_addr();
		our_port = FETH0_PORT;
		peer_port = FETH1_PORT;
		nx_port = OUR_FLOWSWITCH_PORT;
		flags = NXFLOWREQF_PARENT;
		uuid_parse(PARENT_FLOW_UUID, flow_id);
	} else if (id == CHILD_ID) {
		ifname = FETH0_NAME;
		our_ip = sktc_feth0_in_addr();
		peer_ip = sktc_feth1_in_addr();
		our_port = FETH0_PORT;
		peer_port = FETH1_PORT;
		nx_port = CHILD_FLOWSWITCH_PORT;
		uuid_parse(CHILD_FLOW_UUID, flow_id);
		uuid_parse(PARENT_FLOW_UUID, parent_flow_id);
		// Wait for the parent to set up the flowswitch
		sleep(1);
	} else if (id == REMOTE_ID) {
		ifname = FETH1_NAME;
		our_ip = sktc_feth1_in_addr();
		peer_ip = sktc_feth0_in_addr();
		our_port = FETH1_PORT;
		peer_port = FETH0_PORT;
		nx_port = OUR_FLOWSWITCH_PORT;
	}

	if (id == PARENT_ID || id == REMOTE_ID) {
		// set up the flowswitch
		error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
		    flags, our_ip, our_mask, our_port, getpid(), peer_ip,
		    peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
	} else if (id == CHILD_ID) {
		// the child reuses the parent's interface and flowswitch
		struct flow_demux_pattern demux_patterns[1];
		memset(demux_patterns, 0, sizeof(struct flow_demux_pattern));

		uint16_t payload_byte = DEMUX_PAYLOAD_VALUE;
		demux_patterns[0].fdp_offset = DEMUX_PAYLOAD_OFFSET + demux_offset;
		demux_patterns[0].fdp_mask[0] = 0xFF;
		demux_patterns[0].fdp_mask[1] = 0xFF;
		demux_patterns[0].fdp_value[0] = payload_byte;
		demux_patterns[0].fdp_value[1] = payload_byte >> 8;
		demux_patterns[0].fdp_len = sizeof(payload_byte);
		error = fetch_if_flowswitch_and_setup_flow(&handles, ifname,
		    IPPROTO_UDP, 0, our_ip, our_mask, our_port, getpid(), peer_ip,
		    peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false, parent_flow_id,
		    demux_patterns, 1);
	}
	if (error == 0) {
		sktu_channel_port_init(&port, handles.fsw_nx_uuid,
		    nx_port, ENABLE_UPP, false, false);
		assert(port.chan != NULL);
	}

	if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
		SKT_LOG("write fail: %s\n", strerror(errno));
		return 1;
	}
	assert(ret == 1);
#if SKT_XFER_DEBUG
	T_LOG("ID %d signaled\n", id);
#endif
	/* wait for the go signal */
	if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
		SKT_LOG("read fail: %s\n", strerror(errno));
		return 1;
	}
	assert(ret == 1);
	if (error != 0) {
		return 1;
	}
	port.ip_addr = our_ip;

	/* warm up the ARP cache before starting the actual test */
	if (id == CHILD_ID || id == REMOTE_ID) {
		T_LOG("child %d: Warm up ARP cache\n", id);
		ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 1, 1, id,
		    FALSE, flowadv_idx, FALSE, FALSE, demux_offset);

		T_LOG("child %d: Test Start\n", id);
		ping_pong(&port, flow_id, our_port, peer_ip, peer_port, 5, 5, id,
		    FALSE, flowadv_idx, FALSE, FALSE, demux_offset);
	} else if (id == PARENT_ID) {
		// Wait for the child ping-pong to complete
		sleep(1);
	}

	return 0;
}

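/*
 * RX flow steering drop test: one side registers its flow with
 * NXFLOWREQF_AOP_OFFLOAD, which, as exercised here, causes the
 * flowswitch to drop that direction of traffic. The test samples the
 * flowswitch statistics around a ping-pong attempt and asserts that
 * FSW_STATS_TX_DISABLED (drop_tx) or FSW_STATS_RX_DISABLED grew,
 * i.e. that offloaded packets were actually dropped.
 */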
static int
skt_xfer_rx_flow_steering_drop_packets(int child, bool drop_tx)
{
	char buf[1] = { 0 };
	int error;
	const char *ifname;
	uuid_t flow_id = {};
	struct in_addr our_ip;
	struct in_addr our_mask;
	uint16_t our_port;
	struct in_addr peer_ip;
	uint16_t peer_port;
	channel_port port;
	ssize_t ret;
	flowadv_idx_t flowadv_idx;
	struct fsw_stats stats_before, stats_after;
	uint64_t counter = 0;
	uint16_t flags = 0;

	our_mask = sktc_make_in_addr(IN_CLASSC_NET);

	if (child == 0) {
		ifname = FETH0_NAME;
		our_ip = sktc_feth0_in_addr();
		peer_ip = sktc_feth1_in_addr();
		our_port = FETH0_PORT;
		peer_port = FETH1_PORT;
		flags = drop_tx ? NXFLOWREQF_AOP_OFFLOAD : 0;
	} else {
		child = 1;
		ifname = FETH1_NAME;
		our_ip = sktc_feth1_in_addr();
		peer_ip = sktc_feth0_in_addr();
		our_port = FETH1_PORT;
		peer_port = FETH0_PORT;
		flags = !drop_tx ? NXFLOWREQF_AOP_OFFLOAD : 0;
	}

	/* set up the flowswitch over the right interface */
	error = setup_flowswitch_and_flow(&handles, ifname, IPPROTO_UDP,
	    flags, our_ip, our_mask, our_port, getpid(), peer_ip,
	    peer_port, flow_id, &flowadv_idx, -1, -1, -1, -1, false);
	if (error == 0) {
		sktu_channel_port_init(&port, handles.fsw_nx_uuid,
		    OUR_FLOWSWITCH_PORT, ENABLE_UPP, false, false);
		assert(port.chan != NULL);
	}
	if ((ret = write(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
		SKT_LOG("write fail: %s\n", strerror(errno));
		return 1;
	}
	assert(ret == 1);
#if SKT_XFER_DEBUG
	T_LOG("child %d signaled\n", child);
#endif
	/* wait for the go signal */
	if ((ret = read(MPTEST_SEQ_FILENO, buf, sizeof(buf))) == -1) {
		SKT_LOG("read fail: %s\n", strerror(errno));
		return 1;
	}
	assert(ret == 1);
	if (error != 0) {
		return 1;
	}
#if SKT_XFER_DEBUG
	T_LOG("got input %d from parent in child %d, starting test\n",
	    buf[0], child);
#endif
	port.ip_addr = our_ip;

	if (flags == NXFLOWREQF_AOP_OFFLOAD) {
		ret = get_fsw_stats(&stats_before);
		assert(ret == 0);
	}

	ping_pong(&port, flow_id, our_port, peer_ip, peer_port,
	    1, 1, child, TRUE, flowadv_idx,
	    FALSE, FALSE, MAX_DEMUX_OFFSET + 1);

	if (flags == NXFLOWREQF_AOP_OFFLOAD) {
		ret = get_fsw_stats(&stats_after);
		assert(ret == 0);

		if (drop_tx) {
			counter = STATS_VAL(&stats_after, FSW_STATS_TX_DISABLED);
			counter -= STATS_VAL(&stats_before, FSW_STATS_TX_DISABLED);
		} else {
			counter = STATS_VAL(&stats_after, FSW_STATS_RX_DISABLED);
			counter -= STATS_VAL(&stats_before, FSW_STATS_RX_DISABLED);
		}
		if (counter == 0) {
			T_LOG("Offload packets were not dropped\n");
			assert(0);
		}
		T_LOG("Offload packets dropped %"PRIu64"\n", counter);
	}
	return 0;
}

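/*
 * The *_main entry points below share one convention: the test driver
 * invokes them with argv[3] == "--child" and argv[4] set to the child
 * index (some tests also pass a test id in argv[5]); each wrapper
 * decodes those arguments and dispatches to the common test body with
 * test-specific parameters.
 */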
static int
skt_xfer_udp_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_common(child, XFER_TXRX_PACKET_COUNT,
	    XFER_TXRX_BATCH_COUNT, FALSE, FALSE, FALSE, FALSE, -1, -1,
	    -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
}

static int
skt_xfer_udp_long_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_common(child, XFER_TXRX_PACKET_COUNT_LONG,
	    XFER_TXRX_BATCH_COUNT, FALSE, FALSE, FALSE, FALSE, -1, -1,
	    -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
}

static int
skt_xfer_udp_overwhelm_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_common(child, XFER_TXRX_PACKET_COUNT,
	    XFER_TXRX_OVERWHELM_BATCH_COUNT, FALSE, FALSE, FALSE, FALSE,
	    XFER_TXRX_OVERWHELM_FSW_TX_RING_SIZE,
	    XFER_TXRX_OVERWHELM_FSW_RX_RING_SIZE, -1, -1,
	    SKT_FSW_EVENT_TEST_NONE, false, false, false);
}

static int
skt_xfer_udp_overwhelm_long_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_common(child, XFER_TXRX_PACKET_COUNT_LONG,
	    XFER_TXRX_OVERWHELM_BATCH_COUNT, FALSE, FALSE, FALSE, FALSE,
	    XFER_TXRX_OVERWHELM_FSW_TX_RING_SIZE,
	    XFER_TXRX_OVERWHELM_FSW_RX_RING_SIZE, -1, -1,
	    SKT_FSW_EVENT_TEST_NONE, false, false, false);
}

static int
skt_xfer_udp_ping_pong_main(int argc, char *argv[])
{
	int child, test_id;
	bool low_latency;
	bool multi_llink;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);
	test_id = atoi(argv[5]);

	low_latency = (test_id == SKT_FSW_PING_PONG_TEST_LOW_LATENCY);
	multi_llink = (test_id == SKT_FSW_PING_PONG_TEST_MULTI_LLINK);
	return skt_xfer_udp_common(child, XFER_PING_PACKET_COUNT,
	    XFER_PING_BATCH_COUNT, TRUE, FALSE, FALSE, FALSE, -1, -1,
	    -1, -1, SKT_FSW_EVENT_TEST_NONE, low_latency, multi_llink,
	    false);
}

static int
skt_xfer_rd_udp_ping_pong_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_common(child, XFER_PING_PACKET_COUNT,
	    XFER_PING_BATCH_COUNT, TRUE, FALSE, FALSE, FALSE, -1, -1,
	    -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, true);
}

static int
skt_xfer_udp_ping_pong_one_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_common(child, 1, 1, TRUE, FALSE, FALSE, FALSE,
	    -1, -1, -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
}

static int
skt_xfer_udp_ping_pong_long_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_common(child, XFER_PING_PACKET_COUNT_LONG,
	    XFER_PING_BATCH_COUNT, TRUE, FALSE, FALSE, FALSE, -1, -1,
	    -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
}

static int
skt_xfer_udp_ping_pong_one_wrong_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_common(child, 1, 1, TRUE, TRUE, FALSE, FALSE,
	    -1, -1, -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
}

static int
skt_xfer_tcp_syn_flood_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_tcpflood(child, 10000, 64, TRUE);
}

static int
skt_xfer_tcp_rst_flood_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_tcpflood(child, 10000, 64, FALSE);
}

static int
skt_xfer_udp_ping_pong_aqm_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_common(child, XFER_AQM_PING_PACKET_COUNT,
	    XFER_AQM_PING_BATCH_COUNT, TRUE, FALSE, TRUE, FALSE, -1, -1,
	    -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
}

static int
skt_xfer_udp_with_errors_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_with_errors_common(child,
	    XFER_TXRX_PACKET_COUNT, XFER_TXRX_BATCH_COUNT);
}

static int
skt_xfer_tcp_port_zero_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_portzero(child, IPPROTO_TCP);
}

static int
skt_xfer_udp_port_zero_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_portzero(child, IPPROTO_UDP);
}

static int
skt_xfer_setuponly_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);
	return skt_xfer_setuponly(child);
}

static int
skt_xfer_udp_ping_pong_wmm_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_common(child, XFER_WMM_PING_PACKET_COUNT,
	    XFER_WMM_PING_BATCH_COUNT, TRUE, FALSE, FALSE, TRUE, -1, -1,
	    -1, -1, SKT_FSW_EVENT_TEST_NONE, false, false, false);
}

int
skt_xfer_flowmatch_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_flowmatch(child);
}

static int
skt_xfer_flowcleanup_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_flowcleanup(child, 128, 8);
}

static int
skt_xfer_udp_ping_pong_multi_buflet_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_common(child, XFER_PING_PACKET_COUNT,
	    XFER_PING_BATCH_COUNT, TRUE, FALSE, FALSE, FALSE, -1, -1,
	    XFER_TXRX_MULTI_BUFLET_BUF_SIZE,
	    XFER_TXRX_MULTI_BUFLET_MAX_FRAGS, SKT_FSW_EVENT_TEST_NONE,
	    false, false, false);
}

static int
skt_xfer_csumoffload_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_csumoffload(child, IPPROTO_UDP);
}

static int
skt_xfer_fastlane_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_qosmarking(child, IFRTYPE_QOSMARKING_FASTLANE);
}

static int
skt_xfer_rfc4594_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_qosmarking(child, IFRTYPE_QOSMARKING_RFC4594);
}

static int
skt_xfer_listener_tcp_rst_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_listener_tcp_rst(child);
}

static int
skt_xfer_udp_frags_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_frags(child, FALSE);
}

static int
skt_xfer_udp_bad_frags_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_frags(child, TRUE);
}

static int
skt_xfer_udp_ifadv_main(int argc, char *argv[])
{
	int child, test_id;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);
	test_id = atoi(argv[5]);

	return skt_xfer_udp_common(child, XFER_TXRX_PACKET_COUNT,
	    XFER_TXRX_BATCH_COUNT, FALSE, FALSE, FALSE, FALSE,
	    -1, -1, -1, -1, test_id, false, false, false);
}

static int
skt_xfer_parent_child_flow_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_parent_child(child, 0);
}

static int
skt_xfer_parent_child_flow_main_offset_400(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_udp_parent_child(child, 400);
}

static int
skt_xfer_rx_flow_steering_drop_tx_packets_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_rx_flow_steering_drop_packets(child, true);
}

static int
skt_xfer_rx_flow_steering_drop_rx_packets_main(int argc, char *argv[])
{
	int child;

	assert(!strcmp(argv[3], "--child"));
	child = atoi(argv[4]);

	return skt_xfer_rx_flow_steering_drop_packets(child, false);
}

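/*
 * Init/fini helpers. Each init variant follows the same shape: shorten
 * the classq update interval, create the fake ethernet pair with the
 * desired FETH_FLAGS_* combination, restore the interval, enable IP
 * reassembly, and disable flowswitch RX TCP aggregation. The matching
 * fini tears the pair down and restores the sysctls it changed.
 */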
static void
skt_xfer_init_txstart(void)
{
	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_TXSTART);
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_init_native(void)
{
	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE);
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_rd_init(void)
{
	int err;
	uint32_t disable_nxctl_check = 1;
	size_t len = sizeof(skt_disable_nxctl_check);

	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	err = sysctlbyname("kern.skywalk.disable_nxctl_check",
	    &skt_disable_nxctl_check, &len, &disable_nxctl_check,
	    sizeof(disable_nxctl_check));
	assert(err == 0);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE);
	sktc_ifnet_rd_create();
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

/* QoS Marking App Policy needs to be set before the child is launched */
static int restricted_old;
static void
skt_xfer_init_enable_qos_marking_policy(void)
{
	int zero = 0;
	size_t restricted_old_size = sizeof(restricted_old);
	assert(sysctlbyname("net.qos.policy.restricted",
	    &restricted_old, &restricted_old_size,
	    &zero, sizeof(zero)) == 0);
}

static void
skt_xfer_init_txstart_fastlane(void)
{
	skt_xfer_init_txstart();
	skt_xfer_init_enable_qos_marking_policy();
}

static void
skt_xfer_init_txstart_fcs(void)
{
	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_TXSTART |
	    FETH_FLAGS_FCS);
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_init_txstart_trailer(void)
{
	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_TXSTART |
	    FETH_FLAGS_TRAILER);
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_init_native_fastlane(void)
{
	skt_xfer_init_native();
	skt_xfer_init_enable_qos_marking_policy();
}

static void
skt_xfer_init_native_split_pools(void)
{
	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE |
	    FETH_FLAGS_NONSHAREDSPLITPOOLS);
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_init_native_fcs(void)
{
	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE |
	    FETH_FLAGS_FCS);
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_init_native_trailer(void)
{
	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE |
	    FETH_FLAGS_TRAILER);
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_init_llink(void)
{
	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_LLINK);
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_init_llink_wmm(void)
{
	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_LLINK |
	    FETH_FLAGS_WMM);
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_init_llink_multi(void)
{
	int err;
	uint32_t disable_nxctl_check = 1;
	size_t len = sizeof(skt_disable_nxctl_check);

	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	err = sysctlbyname("kern.skywalk.disable_nxctl_check",
	    &skt_disable_nxctl_check, &len, &disable_nxctl_check,
	    sizeof(disable_nxctl_check));
	assert(err == 0);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_MULTI_LLINK);
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_fini(void)
{
#if SKT_XFER_DEBUG
	T_LOG("Waiting");
	fflush(stdout);
	for (int i = 0; i < 5; i++) {
		sleep(1);
		T_LOG(".");
		fflush(stdout);
	}
	T_LOG("\n");
#endif
	sktc_ifnet_feth0_set_dequeue_stall(FALSE);
	sktc_ifnet_feth1_set_dequeue_stall(FALSE);
	sktc_ifnet_feth_pair_destroy();
	sktc_restore_ip_reass();
	sktc_restore_fsw_rx_agg_tcp();
}

static void
skt_xfer_rd_fini(void)
{
#if SKT_XFER_DEBUG
	T_LOG("Waiting");
	fflush(stdout);
	for (int i = 0; i < 5; i++) {
		sleep(1);
		T_LOG(".");
		fflush(stdout);
	}
	T_LOG("\n");
#endif
	int err;

	err = sysctlbyname("kern.skywalk.disable_nxctl_check",
	    NULL, NULL, &skt_disable_nxctl_check,
	    sizeof(skt_disable_nxctl_check));
	assert(err == 0);
	sktc_ifnet_feth0_set_dequeue_stall(FALSE);
	sktc_ifnet_feth1_set_dequeue_stall(FALSE);
	sktc_ifnet_feth_pair_destroy();
	sktc_ifnet_rd_destroy();
	sktc_restore_ip_reass();
	sktc_restore_fsw_rx_agg_tcp();
}

static void
skt_xfer_fini_fastlane(void)
{
	/* restore the sysctl */
	assert(sysctlbyname("net.qos.policy.restricted", NULL, NULL,
	    &restricted_old, sizeof(restricted_old)) == 0);

	skt_xfer_fini();
}

static void
skt_xfer_fini_llink_multi(void)
{
	int err;

	err = sysctlbyname("kern.skywalk.disable_nxctl_check",
	    NULL, NULL, &skt_disable_nxctl_check,
	    sizeof(skt_disable_nxctl_check));
	assert(err == 0);
	skt_xfer_fini();
}

static void
skt_xfer_errors_init(void)
{
	uint64_t emask = (1ull << 63);
	uint32_t rmask = 0x7ff;

	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE);
	set_error_inject_mask(&emask);
	inject_error_rmask = sktu_set_inject_error_rmask(&rmask);
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_errors_compat_init(void)
{
	uint64_t emask = (1ull << 63);
	uint32_t rmask = 0x7ff;

	sktc_ifnet_feth_pair_create(FETH_FLAGS_TXSTART);
	set_error_inject_mask(&emask);
	inject_error_rmask = sktu_set_inject_error_rmask(&rmask);
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_errors_fini(void)
{
	uint64_t emask = 0;

	set_error_inject_mask(&emask);
	(void) sktu_set_inject_error_rmask(&inject_error_rmask);
	sktc_ifnet_feth_pair_destroy();
	sktc_restore_fsw_rx_agg_tcp();
}

static void
skt_xfer_multi_buflet_fini(void)
{
	sktc_restore_channel_buflet_alloc();
	skt_xfer_fini();
}

static void
skt_xfer_init_native_wmm(void)
{
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_WMM);
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_init_native_multi_buflet(void)
{
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_MULTI_BUFLET |
	    FETH_FLAGS_TX_HEADROOM);
	sktc_enable_channel_buflet_alloc();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_init_native_multi_buflet_copy(void)
{
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE | FETH_FLAGS_MULTI_BUFLET |
	    FETH_FLAGS_NONSHAREDPOOL | FETH_FLAGS_TX_HEADROOM);
	sktc_enable_ip_reass();
	sktc_enable_channel_buflet_alloc();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_init_ifadv(void)
{
	int intvl = XFER_IFADV_INTERVAL; /* in milliseconds */

	assert(sysctlbyname("net.link.fake.if_adv_intvl",
	    NULL, 0, &intvl, sizeof(intvl)) == 0);
	skt_xfer_init_native();
}

static void
skt_xfer_fini_ifadv(void)
{
	int intvl = 0; /* disable */

	skt_xfer_fini();
	assert(sysctlbyname("net.link.fake.if_adv_intvl",
	    NULL, 0, &intvl, sizeof(intvl)) == 0);
}

static void
skt_xfer_init_chan_event(void)
{
	int drops = XFER_TX_PKT_DROP_RATE;
	assert(sysctlbyname("net.link.fake.tx_drops",
	    NULL, 0, &drops, sizeof(drops)) == 0);

	skt_xfer_init_native();
}

static void
skt_xfer_fini_chan_event(void)
{
	skt_xfer_fini();
	int drops = 0;
	assert(sysctlbyname("net.link.fake.tx_drops",
	    NULL, 0, &drops, sizeof(drops)) == 0);
}

static void
skt_xfer_init_chan_event_async(void)
{
	int tx_compl_mode = 1; /* async mode */
	assert(sysctlbyname("net.link.fake.tx_completion_mode",
	    NULL, 0, &tx_compl_mode, sizeof(tx_compl_mode)) == 0);
	skt_xfer_init_chan_event();
}

static void
skt_xfer_fini_chan_event_async(void)
{
	int tx_compl_mode = 0; /* sync mode (default) */
	skt_xfer_fini_chan_event();
	assert(sysctlbyname("net.link.fake.tx_completion_mode",
	    NULL, 0, &tx_compl_mode, sizeof(tx_compl_mode)) == 0);
}

static void
skt_xfer_init_parent_child_flow(void)
{
	int err;
	uint32_t disable_nxctl_check = 1;
	size_t len = sizeof(skt_disable_nxctl_check);

	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	err = sysctlbyname("kern.skywalk.disable_nxctl_check",
	    &skt_disable_nxctl_check, &len, &disable_nxctl_check,
	    sizeof(disable_nxctl_check));
	assert(err == 0);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_TXSTART);
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_init_parent_child_flow_native(void)
{
	int err;
	uint32_t disable_nxctl_check = 1;
	size_t len = sizeof(skt_disable_nxctl_check);

	sktc_set_classq_update_intervals(XFER_CLASSQ_UPDATE_INTERVAL *
	    1000 * 1000);
	err = sysctlbyname("kern.skywalk.disable_nxctl_check",
	    &skt_disable_nxctl_check, &len, &disable_nxctl_check,
	    sizeof(disable_nxctl_check));
	assert(err == 0);
	sktc_ifnet_feth_pair_create(FETH_FLAGS_NATIVE);
	sktc_reset_classq_update_intervals();
	sktc_enable_ip_reass();
	sktc_config_fsw_rx_agg_tcp(0);
}

static void
skt_xfer_fini_parent_child_flow(void)
{
#if SKT_XFER_DEBUG
	T_LOG("Waiting");
	fflush(stdout);
	for (int i = 0; i < 5; i++) {
		sleep(1);
		T_LOG(".");
		fflush(stdout);
	}
	T_LOG("\n");
#endif
	int err;

	err = sysctlbyname("kern.skywalk.disable_nxctl_check",
	    NULL, NULL, &skt_disable_nxctl_check,
	    sizeof(skt_disable_nxctl_check));
	assert(err == 0);
	sktc_ifnet_feth0_set_dequeue_stall(FALSE);
	sktc_ifnet_feth1_set_dequeue_stall(FALSE);
	sktc_ifnet_feth_pair_destroy();
	sktc_restore_ip_reass();
	sktc_restore_fsw_rx_agg_tcp();
}

static void
skt_xfer_init_rx_flow_steering(void)
{
	int rx_flow_steering = 1;

	assert(sysctlbyname("net.link.fake.rx_flow_steering_support",
	    NULL, 0, &rx_flow_steering, sizeof(rx_flow_steering)) == 0);
	skt_xfer_init_native();
}

static void
skt_xfer_fini_rx_flow_steering(void)
{
	int rx_flow_steering = 0;

	skt_xfer_fini();
	assert(sysctlbyname("net.link.fake.rx_flow_steering_support",
	    NULL, 0, &rx_flow_steering, sizeof(rx_flow_steering)) == 0);
}

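/*
 * Test registrations. Each skywalk_mptest entry pairs a short name and
 * description with the required SK_FEATURE_* mask, the number of child
 * processes, the child entry point, optional extra argv strings, and
 * the init/fini hooks that bracket the run (field meaning inferred
 * from the initializers below).
 */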
5072 struct skywalk_mptest skt_xferudp = {
5073 "xferudp", "UDP bi-directional transfer over fake ethernet pair",
5074 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5075 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5076 2, skt_xfer_udp_main,
5077 { NULL, NULL, NULL, NULL, NULL, NULL },
5078 skt_xfer_init_txstart, skt_xfer_fini, {},
5079 };
5080
5081 struct skywalk_mptest skt_xferudpn = {
5082 "xferudpn",
5083 "UDP bi-directional transfer over native fake ethernet pair",
5084 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5085 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5086 2, skt_xfer_udp_main,
5087 { NULL, NULL, NULL, NULL, NULL, NULL },
5088 skt_xfer_init_native, skt_xfer_fini, {},
5089 };
5090
5091 struct skywalk_mptest skt_xferudpnsp = {
5092 "xferudpnsp",
5093 "UDP bi-directional transfer over native fake ethernet pair"
5094 " with split rx/tx pools",
5095 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5096 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5097 2, skt_xfer_udp_main,
5098 { NULL, NULL, NULL, NULL, NULL, NULL },
5099 skt_xfer_init_native_split_pools, skt_xfer_fini, {},
5100 };
5101
5102 struct skywalk_mptest skt_xferudpfcs = {
5103 "xferudpfcs",
5104 "UDP bi-directional transfer over fake ethernet pair"
5105 " with link frame check sequence",
5106 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5107 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5108 2, skt_xfer_udp_main,
5109 { NULL, NULL, NULL, NULL, NULL, NULL },
5110 skt_xfer_init_txstart_fcs, skt_xfer_fini, {},
5111 };
5112
5113 struct skywalk_mptest skt_xferudptrailer = {
5114 "xferudptrailer",
5115 "UDP bi-directional transfer over fake ethernet pair"
5116 " with link trailer",
5117 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5118 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5119 2, skt_xfer_udp_main,
5120 { NULL, NULL, NULL, NULL, NULL, NULL },
5121 skt_xfer_init_txstart_trailer, skt_xfer_fini, {},
5122 };
5123
5124 struct skywalk_mptest skt_xferudpnfcs = {
5125 "xferudpnfcs",
5126 "UDP bi-directional transfer over native fake ethernet pair"
5127 " with link frame check sequence",
5128 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5129 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5130 2, skt_xfer_udp_main,
5131 { NULL, NULL, NULL, NULL, NULL, NULL },
5132 skt_xfer_init_native_fcs, skt_xfer_fini, {},
5133 };
5134
5135 struct skywalk_mptest skt_xferudpntrailer = {
5136 "xferudpntrailer",
5137 "UDP bi-directional transfer over native fake ethernet pair"
5138 " with link trailer",
5139 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5140 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5141 2, skt_xfer_udp_main,
5142 { NULL, NULL, NULL, NULL, NULL, NULL },
5143 skt_xfer_init_native_trailer, skt_xfer_fini, {},
5144 };
5145
5146 struct skywalk_mptest skt_xferudplong = {
5147 "xferudplong",
5148 "UDP bi-directional transfer over fake ethernet pair longer duration",
5149 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5150 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5151 2, skt_xfer_udp_long_main,
5152 { NULL, NULL, NULL, NULL, NULL, NULL },
5153 skt_xfer_init_txstart, skt_xfer_fini, {},
5154 };
5155
5156 struct skywalk_mptest skt_xferudplongn = {
5157 "xferudplongn",
5158 "UDP bi-directional transfer over"
5159 " native fake ethernet pair longer duration",
5160 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5161 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5162 2, skt_xfer_udp_long_main,
5163 { NULL, NULL, NULL, NULL, NULL, NULL },
5164 skt_xfer_init_native, skt_xfer_fini, {},
5165 };
5166
5167 struct skywalk_mptest skt_xferudpoverwhelm = {
5168 "xferudpoverwhelm",
5169 "UDP bi-directional transfer over fake ethernet pair overwhelm",
5170 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5171 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5172 2, skt_xfer_udp_overwhelm_main,
5173 { NULL, NULL, NULL, NULL, NULL, NULL },
5174 skt_xfer_init_txstart, skt_xfer_fini, {},
5175 };
5176
5177 struct skywalk_mptest skt_xferudpoverwhelmn = {
5178 "xferudpoverwhelmn",
5179 "UDP bi-directional transfer over native fake ethernet pair overwhelm",
5180 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5181 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5182 2, skt_xfer_udp_overwhelm_main,
5183 { NULL, NULL, NULL, NULL, NULL, NULL },
5184 skt_xfer_init_native, skt_xfer_fini, {},
5185 };
5186
5187 struct skywalk_mptest skt_xferudpoverwhelmnsp = {
5188 "xferudpoverwhelmnsp",
5189 "UDP bi-directional transfer over native fake ethernet pair overwhelm"
5190 " with split rx/tx pools",
5191 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5192 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5193 2, skt_xfer_udp_overwhelm_main,
5194 { NULL, NULL, NULL, NULL, NULL, NULL },
5195 skt_xfer_init_native_split_pools, skt_xfer_fini, {},
5196 };
5197
5198 struct skywalk_mptest skt_xferudpoverwhelmlong = {
5199 "xferudpoverwhelmlong",
5200 "UDP bi-directional transfer over fake ethernet pair overwhelm long",
5201 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5202 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5203 2, skt_xfer_udp_overwhelm_long_main,
5204 { NULL, NULL, NULL, NULL, NULL, NULL },
5205 skt_xfer_init_txstart, skt_xfer_fini, {},
5206 };
5207
5208 struct skywalk_mptest skt_xferudpoverwhelmlongn = {
5209 "xferudpoverwhelmlongn",
5210 "UDP bi-directional transfer over"
5211 " native fake ethernet pair overwhelm long",
5212 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5213 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5214 2, skt_xfer_udp_overwhelm_long_main,
5215 { NULL, NULL, NULL, NULL, NULL, NULL },
5216 skt_xfer_init_native, skt_xfer_fini, {},
5217 };
5218
5219 struct skywalk_mptest skt_xferudpping = {
5220 "xferudpping", "UDP ping-pong over fake ethernet pair",
5221 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5222 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5223 2, skt_xfer_udp_ping_pong_main,
5224 { NULL, NULL, NULL, NULL, NULL,
5225 STR(SKT_FSW_PING_PONG_TEST_DEFAULT)},
5226 skt_xfer_init_txstart, skt_xfer_fini, {},
5227 };
5228
5229 struct skywalk_mptest skt_xferudppingn = {
5230 "xferudppingn", "UDP ping-pong over native fake ethernet pair",
5231 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5232 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5233 2, skt_xfer_udp_ping_pong_main,
5234 { NULL, NULL, NULL, NULL, NULL,
5235 STR(SKT_FSW_PING_PONG_TEST_DEFAULT)},
5236 skt_xfer_init_native, skt_xfer_fini, {},
5237 };
5238
5239 struct skywalk_mptest skt_xferudpping1 = {
5240 "xferudpping1", "UDP ping-pong once over fake ethernet pair",
5241 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5242 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5243 2, skt_xfer_udp_ping_pong_one_main,
5244 { NULL, NULL, NULL, NULL, NULL, NULL },
5245 skt_xfer_init_txstart, skt_xfer_fini, {},
5246 };
5247
5248 struct skywalk_mptest skt_xferudpping1n = {
5249 "xferudpping1n", "UDP ping-pong once over native fake ethernet pair",
5250 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5251 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5252 2, skt_xfer_udp_ping_pong_one_main,
5253 { NULL, NULL, NULL, NULL, NULL, NULL },
5254 skt_xfer_init_native, skt_xfer_fini, {},
5255 };
5256
5257 struct skywalk_mptest skt_xferudppinglong = {
5258 "xferudppinglong",
5259 "UDP ping-pong over fake ethernet pair longer duration",
5260 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5261 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5262 2, skt_xfer_udp_ping_pong_long_main,
5263 { NULL, NULL, NULL, NULL, NULL, NULL },
5264 skt_xfer_init_txstart, skt_xfer_fini, {},
5265 };
5266
5267 struct skywalk_mptest skt_xferudppinglongn = {
5268 "xferudppinglongn",
5269 "UDP ping-pong over native fake ethernet pair longer duration",
5270 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5271 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5272 2, skt_xfer_udp_ping_pong_long_main,
5273 { NULL, NULL, NULL, NULL, NULL, NULL },
5274 skt_xfer_init_native, skt_xfer_fini, {},
5275 };
5276
5277 struct skywalk_mptest skt_xferudpping1wrong = {
5278 "xferudpping1wrong",
5279 "UDP ping-pong once over fake ethernet pair with wrong flow IDs",
5280 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5281 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5282 2, skt_xfer_udp_ping_pong_one_wrong_main,
5283 { NULL, NULL, NULL, NULL, NULL, NULL },
5284 skt_xfer_init_txstart, skt_xfer_fini, {},
5285 };
5286
5287 struct skywalk_mptest skt_xferrdudpping = {
5288 "xferrdudpping",
5289 "UDP ping-pong between redirect and fake ethernet interface",
5290 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5291 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5292 2, skt_xfer_rd_udp_ping_pong_main,
5293 { NULL, NULL, NULL, NULL, NULL,
5294 STR(SKT_FSW_PING_PONG_TEST_DEFAULT)},
5295 skt_xfer_rd_init, skt_xfer_rd_fini, {},
5296 };
5297
5298 struct skywalk_mptest skt_xfertcpsynflood = {
5299 "xfertcpsynflood",
5300 "TCP SYN flood",
5301 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5302 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5303 1, skt_xfer_tcp_syn_flood_main,
5304 { NULL, NULL, NULL, NULL, NULL, NULL },
5305 skt_xfer_init_txstart, skt_xfer_fini, {},
5306 };
5307
5308 struct skywalk_mptest skt_xfertcprstflood = {
5309 "xfertcprstflood",
5310 "TCP RST flood",
5311 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5312 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5313 1, skt_xfer_tcp_rst_flood_main,
5314 { NULL, NULL, NULL, NULL, NULL, NULL },
5315 skt_xfer_init_txstart, skt_xfer_fini, {},
5316 };
5317
5318 struct skywalk_mptest skt_xferudpping_aqm = {
5319 "xferudppingaqm", "UDP ping-pong over fake ethernet pair with AQM",
5320 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5321 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5322 2, skt_xfer_udp_ping_pong_aqm_main,
5323 { NULL, NULL, NULL, NULL, NULL, NULL },
5324 skt_xfer_init_txstart, skt_xfer_fini, {},
5325 };
5326
5327 struct skywalk_mptest skt_xferudppingn_aqm = {
5328 "xferudppingnaqm", "UDP ping-pong over native fake ethernet pair with"
5329 " AQM",
5330 SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
5331 SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
5332 2, skt_xfer_udp_ping_pong_aqm_main,
5333 { NULL, NULL, NULL, NULL, NULL, NULL },
5334 skt_xfer_init_native, skt_xfer_fini, {},
5335 };
5336
struct skywalk_mptest skt_xferudpwitherrors = {
	"xferudpwitherrors",
	"UDP bi-directional transfer over"
	" native fake ethernet pair with injected errors",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS |
	SK_FEATURE_DEV_OR_DEBUG,
	2, skt_xfer_udp_with_errors_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_errors_init, skt_xfer_errors_fini, {},
};

struct skywalk_mptest skt_xferudpwitherrorscompat = {
	"xferudpwitherrorscompat",
	"UDP bi-directional transfer over"
	" compat fake ethernet pair with injected errors",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS |
	SK_FEATURE_DEV_OR_DEBUG,
	2, skt_xfer_udp_with_errors_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_errors_compat_init, skt_xfer_errors_fini, {},
};

struct skywalk_mptest skt_xfertcpportzero = {
	"xfertcpportzero",
	"TCP connect to port 0",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	1, skt_xfer_tcp_port_zero_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_txstart, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferudpportzero = {
	"xferudpportzero",
	"UDP connect to port 0",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	1, skt_xfer_udp_port_zero_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_txstart, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xfersetuponly = {
	"xfersetuponly", "setup fake ethernet pair only",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_setuponly_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_txstart, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xfersetuponlyn = {
	"xfersetuponlyn", "setup native fake ethernet pair only",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_setuponly_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferudppingn_wmm = {
	"xferudppingnwmm", "UDP ping-pong over native fake ethernet pair in wmm"
	" mode",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ping_pong_wmm_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native_wmm, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferflowmatch = {
	"xferflowmatch",
	"Packets not matching registered flow tuple should be dropped",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_flowmatch_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferflowcleanup = {
	"xferflowcleanup",
	"verification of flow cleanup on channel close",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_flowcleanup_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native, skt_xfer_fini, {},
};

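/*
 * Multi-buflet variants: the packet payload spans more than one buflet
 * (buffer segment) rather than a single contiguous buffer.  The "mbc"
 * entry runs the same main with the channel in copy packet mode, per its
 * init hook and description.
 */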
struct skywalk_mptest skt_xferudppingn_mb = {
	"xferudppingnmb", "UDP ping-pong over native fake ethernet pair with"
	" multi-buflet packet",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS |
	SK_FEATURE_DEV_OR_DEBUG,
	2, skt_xfer_udp_ping_pong_multi_buflet_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native_multi_buflet, skt_xfer_multi_buflet_fini, {},
};

struct skywalk_mptest skt_xferudppingn_mbc = {
	"xferudppingnmbc", "UDP ping-pong over native fake ethernet pair with"
	" multi-buflet packet in copy packet mode",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS |
	SK_FEATURE_DEV_OR_DEBUG,
	2, skt_xfer_udp_ping_pong_multi_buflet_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native_multi_buflet_copy, skt_xfer_multi_buflet_fini, {},
};

struct skywalk_mptest skt_xfercsumoffload = {
	"xfercsumoffload",
	"Packet checksum offload",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_csumoffload_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_txstart, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xfercsumoffloadn = {
	"xfercsumoffloadn",
	"Packet checksum offload over native",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_csumoffload_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferfastlane = {
	"xferqosmarking_fastlane",
	"fastlane qos marking",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_fastlane_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_txstart_fastlane, skt_xfer_fini_fastlane, {},
};

struct skywalk_mptest skt_xferfastlanen = {
	"xferqosmarking_fastlanen",
	"fastlane qos marking over native",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_fastlane_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native_fastlane, skt_xfer_fini_fastlane, {},
};

struct skywalk_mptest skt_xferrfc4594 = {
	"xferqosmarking_rfc4594",
	"rfc4594 qos marking",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_rfc4594_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_txstart_fastlane, skt_xfer_fini_fastlane, {},
};

struct skywalk_mptest skt_xferrfc4594n = {
	"xferqosmarking_rfc4594n",
	"rfc4594 qos marking over native",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_rfc4594_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native_fastlane, skt_xfer_fini_fastlane, {},
};

struct skywalk_mptest skt_xferlistenertcprst = {
	"xferlistenertcprst",
	"TCP listener should be able to send RST",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_listener_tcp_rst_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferudpfrags = {
	"xferudpfrags",
	"UDP fragmentation test (channel flow Tx)",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_frags_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferudpbadfrags = {
	"xferudpbadfrags",
	"UDP bad fragments test (channel flow Tx)",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_bad_frags_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_native, skt_xfer_fini, {},
};

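/*
 * The two interface advisory tests run the same main and differ only in
 * the last argv slot, which selects whether advisory reporting on the
 * flowswitch is enabled or disabled (see the SKT_FSW_EVENT_TEST_IF_ADV_*
 * selectors).
 */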
struct skywalk_mptest skt_xferudpifadvenable = {
	"xferudpifadvenable",
	"flowswitch interface advisory enabled test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ifadv_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_EVENT_TEST_IF_ADV_ENABLED)},
	skt_xfer_init_ifadv, skt_xfer_fini_ifadv, {},
};

struct skywalk_mptest skt_xferudpifadvdisable = {
	"xferudpifadvdisable",
	"flowswitch interface advisory disabled test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ifadv_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_EVENT_TEST_IF_ADV_DISABLED)},
	skt_xfer_init_ifadv, skt_xfer_fini_ifadv, {},
};

struct skywalk_mptest skt_xferudppingnll = {
	"xferudppingnll",
	"UDP ping-pong over low latency channel on native fake ethernet pair",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ping_pong_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_PING_PONG_TEST_LOW_LATENCY)},
	skt_xfer_init_native, skt_xfer_fini, {},
};

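/*
 * Logical-link (llink) variants: the same ping-pong mains as above, with
 * init hooks that bring the fake ethernet pair up in llink mode, optionally
 * combined with WMM or with multiple logical links.
 */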
struct skywalk_mptest skt_xferudppingllink = {
	"xferudppingllink",
	"UDP ping-pong over fake ethernet pair in llink mode",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ping_pong_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_PING_PONG_TEST_DEFAULT)},
	skt_xfer_init_llink, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferudppingllink_wmm = {
	"xferudppingllinkwmm",
	"UDP ping-pong over fake ethernet pair in llink & wmm mode",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ping_pong_wmm_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_llink_wmm, skt_xfer_fini, {},
};

struct skywalk_mptest skt_xferudppingllink_multi = {
	"xferudppingllinkmulti",
	"UDP ping-pong over fake ethernet pair in multi llink mode",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ping_pong_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_PING_PONG_TEST_MULTI_LLINK)},
	skt_xfer_init_llink_multi, skt_xfer_fini_llink_multi, {},
};

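/*
 * Channel event tests: both entries use the same main and the same
 * SKT_FSW_EVENT_TEST_CHANNEL_EVENTS selector; judging by the init/fini
 * hooks, they differ only in whether channel events are delivered
 * synchronously or asynchronously.
 */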
struct skywalk_mptest skt_xferudpchanevents = {
	"skt_xferudpchanevents",
	"flowswitch channel events test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ifadv_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_EVENT_TEST_CHANNEL_EVENTS)},
	skt_xfer_init_chan_event, skt_xfer_fini_chan_event, {},
};

struct skywalk_mptest skt_xferudpchaneventsasync = {
	"skt_xferudpchaneventsasync",
	"flowswitch channel events in async mode test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_udp_ifadv_main,
	{ NULL, NULL, NULL, NULL, NULL,
	  STR(SKT_FSW_EVENT_TEST_CHANNEL_EVENTS)},
	skt_xfer_init_chan_event_async, skt_xfer_fini_chan_event_async, {},
};

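/*
 * Parent/child flow tests: these run three processes rather than two,
 * presumably one per flow endpoint plus one for the child flow.  The
 * offset_400 variants repeat the same scenario with the flow demux offset
 * moved to byte 400, per the description strings.
 */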
struct skywalk_mptest skt_xferparentchildflow = {
	"skt_xferparentchild",
	"flowswitch parent-child flows test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	3, skt_xfer_parent_child_flow_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_parent_child_flow, skt_xfer_fini_parent_child_flow, {},
};

struct skywalk_mptest skt_xferparentchildflow_offset_400 = {
	"skt_xferparentchild_offset_400",
	"flowswitch parent-child flows test with demux offset 400",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	3, skt_xfer_parent_child_flow_main_offset_400,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_parent_child_flow, skt_xfer_fini_parent_child_flow, {},
};

struct skywalk_mptest skt_xferparentchildflown = {
	"skt_xferparentchildn",
	"flowswitch parent-child flows on native fake ethernet interface test",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	3, skt_xfer_parent_child_flow_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_parent_child_flow_native,
	skt_xfer_fini_parent_child_flow, {},
};

struct skywalk_mptest skt_xferparentchildflown_offset_400 = {
	"skt_xferparentchildn_offset_400",
	"flowswitch parent-child flows on native fake ethernet interface test"
	" with demux offset 400",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	3, skt_xfer_parent_child_flow_main_offset_400,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_parent_child_flow_native,
	skt_xfer_fini_parent_child_flow, {},
};

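/*
 * Rx flow steering tests: the flowswitch is expected to drop offloaded
 * ("aop2") packets in the Tx and Rx directions respectively.  What "aop2"
 * abbreviates is not spelled out in this table.
 */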
struct skywalk_mptest skt_xferrxflowsteeringdroptxpackets = {
	"skt_xferrxflowsteeringdroptxpackets",
	"drop aop2 offload Tx packets in flowswitch",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_rx_flow_steering_drop_tx_packets_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_rx_flow_steering, skt_xfer_fini_rx_flow_steering, {},
};

struct skywalk_mptest skt_xferrxflowsteeringdroprxpackets = {
	"skt_xferrxflowsteeringdroprxpackets",
	"drop aop2 offload Rx packets in flowswitch",
	SK_FEATURE_SKYWALK | SK_FEATURE_NEXUS_NETIF | SK_FEATURE_DEV_OR_DEBUG |
	SK_FEATURE_NEXUS_FLOWSWITCH | SK_FEATURE_NETNS,
	2, skt_xfer_rx_flow_steering_drop_rx_packets_main,
	{ NULL, NULL, NULL, NULL, NULL, NULL },
	skt_xfer_init_rx_flow_steering, skt_xfer_fini_rx_flow_steering, {},
};
