1 /* -*- compile-command: "xcrun --sdk iphoneos.internal make net_tuntests" -*- */
2
3 #include <inttypes.h>
4 #include <stdbool.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <time.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <poll.h>
11 #include <sys/types.h>
12 #include <sys/event.h>
13 #include <sys/time.h>
14 #include <uuid/uuid.h>
15 #include <arpa/inet.h>
16 #include <sys/sysctl.h>
17 #include <sys/kern_control.h>
18 #include <sys/ioctl.h>
19 #include <sys/socket.h>
20 #include <sys/kern_control.h>
21 #include <sys/sys_domain.h>
22
23 #include <net/if.h>
24 #include <net/if_ipsec.h>
25 #include <net/if_utun.h>
26 #include <netinet/in.h>
27 #include <netinet/in_var.h>
28 #include <net/pfkeyv2.h>
29 #include <netinet6/ipsec.h>
30
31 #include <darwintest.h>
32 #include <darwintest_utils.h>
33
34 #include <skywalk/os_skywalk_private.h> // for SK_FEATURE_*
35
/* All tests in this file run under the xnu.net.tun darwintest namespace. */
T_GLOBAL_META(T_META_NAMESPACE("xnu.net.tun"));

/* Disable all these test until <rdar://problem/49124468> is fixed */
T_GLOBAL_META(T_META_ENABLED(false));

/* Flip to #if 1 to neuter T_QUIET so every expectation is logged verbosely. */
#if 0
#undef T_QUIET
#define T_QUIET
#endif
45
#if 0
/*
 * Debug helper: emit a classic "OOOOOOOO: xx xx ..." hex dump of a buffer
 * via T_LOG, 16 bytes per output line.
 */
static void
log_hexdump(const void *inp, size_t len)
{
	unsigned i, off = 0;
	char buf[9 + 16 * 3 + 1];	/* "XXXXXXXX:" + 16 x " xx" + NUL */
	for (i = 0; i < len; i++) {
		if (i % 16 == 0) {
			/* Start a new line with the current offset. */
			off = (unsigned)snprintf(buf, sizeof(buf), "%08x:", i);
		}
		off += (unsigned)snprintf(buf + off, sizeof(buf) - off, " %02x", (((const uint8_t *)inp)[i]) & 0xff);
		if (i % 16 == 15) {
			T_LOG("%s", buf);
		}
	}
	/* Flush the final partial line, if any. */
	if (len % 16) {
		T_LOG("%s", buf);
	}
}
#else
/* Hex dumping is compiled out by default; this stub keeps call sites intact. */
static void
log_hexdump(const void *inp, size_t len)
{
#pragma unused(inp, len)
}
#endif
72
/*
 * Report whether netagent support is turned on by reading the
 * net.link.generic.system.enable_netagent sysctl (true when it equals 1).
 */
static bool
is_netagent_enabled(void)
{
	int value = 0;
	size_t value_len = sizeof(value);

	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(sysctlbyname("net.link.generic.system.enable_netagent", &value, &value_len, NULL, 0), NULL);
	T_QUIET; T_ASSERT_EQ(value_len, sizeof(value), NULL);

	return value == 1;
}
82
/* Selected tunnel flavor; exactly one is set by setup_{ipsec,utun}_test(). */
static bool g_is_ipsec_test;
static bool g_is_utun_test;
/* Flavor-specific kernel-control socket-option codes; -1 until configured. */
static int g_OPT_ENABLE_NETIF = -1;
static int g_OPT_ENABLE_FLOWSWITCH = -1;
static int g_OPT_ENABLE_CHANNEL = -1;
static int g_OPT_GET_CHANNEL_UUID = -1;
static int g_OPT_IFNAME = -1;
/* Kernel-control name for the chosen flavor (IPSEC_/UTUN_CONTROL_NAME). */
static char *g_CONTROL_NAME = NULL;

static int create_tunsock(int enable_netif, int enable_flowswitch, int channel_count, uuid_t uuid[]);
93
94 static void
setup_ipsec_test(void)95 setup_ipsec_test(void)
96 {
97 T_LOG("Configuring for ipsec tests");
98 g_OPT_ENABLE_NETIF = IPSEC_OPT_ENABLE_NETIF;
99 g_OPT_ENABLE_FLOWSWITCH = IPSEC_OPT_ENABLE_FLOWSWITCH;
100 g_OPT_ENABLE_CHANNEL = IPSEC_OPT_ENABLE_CHANNEL;
101 g_OPT_GET_CHANNEL_UUID = IPSEC_OPT_GET_CHANNEL_UUID;
102 g_OPT_IFNAME = IPSEC_OPT_IFNAME;
103 g_CONTROL_NAME = IPSEC_CONTROL_NAME;
104 g_is_ipsec_test = true;
105 }
106
107 static void
setup_utun_test(void)108 setup_utun_test(void)
109 {
110 T_LOG("Configuring for utun tests");
111 g_OPT_ENABLE_NETIF = UTUN_OPT_ENABLE_NETIF;
112 g_OPT_ENABLE_FLOWSWITCH = UTUN_OPT_ENABLE_FLOWSWITCH;
113 g_OPT_ENABLE_CHANNEL = UTUN_OPT_ENABLE_CHANNEL;
114 g_OPT_GET_CHANNEL_UUID = UTUN_OPT_GET_CHANNEL_UUID;
115 g_OPT_IFNAME = UTUN_OPT_IFNAME;
116 g_CONTROL_NAME = UTUN_CONTROL_NAME;
117 g_is_utun_test = true;
118 }
119
120 static bool
setblocking(int s,bool blocking)121 setblocking(int s, bool blocking)
122 {
123 int flags;
124 bool ret;
125
126 T_QUIET; T_EXPECT_POSIX_SUCCESS(flags = fcntl(s, F_GETFL, 0), NULL);
127
128 ret = !(flags & O_NONBLOCK);
129
130 if (blocking) {
131 flags &= ~O_NONBLOCK;
132 } else {
133 flags |= O_NONBLOCK;
134 }
135
136 #if 0
137 T_LOG("Setting fd %d from %s to %s\n",
138 s, ret ? "blocking" : "nonblocking",
139 blocking ? "blocking" : "nonblocking");
140 #endif
141
142 T_QUIET; T_EXPECT_POSIX_SUCCESS(flags = fcntl(s, F_SETFL, flags), NULL);
143
144 return ret;
145 }
146
147
/*
 * Read back the netif/flowswitch/channel enable state of a connected tun
 * kernel-control socket with getsockopt() and verify it matches what
 * create_tunsock() requested.  When uuid is NULL a scratch array is used;
 * otherwise the channel UUIDs are written back for the caller.
 */
static void
check_enables(int tunsock, int enable_netif, int enable_flowswitch, int channel_count, uuid_t uuid[])
{
	int scratch;
	socklen_t scratchlen, uuidlen;
	uuid_t scratchuuid[channel_count];
	if (!uuid) {
		uuid = scratchuuid;
	}

	//T_LOG("checking tunsock %d", tunsock);

	/* ipsec + channels without a netif: connect() failed, so every option
	 * read is expected to fail. */
	if (g_is_ipsec_test && channel_count && !enable_netif) {
		/* Unfortunately, the connect incorrectly unwinds the bind if it get an error.
		 * until that is fixed, expect EINVAL here
		 */
		scratchlen = sizeof(scratch);
		T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
		    &scratch, &scratchlen), EINVAL, NULL);
		T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
		    &scratch, &scratchlen), EINVAL, NULL);
		T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
		    &scratch, &scratchlen), EINVAL, NULL);
		for (int i = 0; i < channel_count; i++) {
			uuid_clear(uuid[i]);
		}
		uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
		T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
		    uuid, &uuidlen), EINVAL, NULL);
		/* The failed getsockopt must not have touched the UUIDs. */
		for (int i = 0; i < channel_count; i++) {
			T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
		}
		return;
	}


	/* Netif enable must read back exactly as requested. */
	scratchlen = sizeof(scratch);
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
	    &scratch, &scratchlen), NULL);
	T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)scratchlen, sizeof(scratch), NULL);
	T_QUIET; T_EXPECT_EQ(scratch, enable_netif, NULL);

	/* Flowswitch enable only sticks when netagent is on and a netif exists. */
	scratchlen = sizeof(scratch);
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
	    &scratch, &scratchlen), NULL);
	T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)scratchlen, sizeof(scratch), NULL);
	if (is_netagent_enabled()) {
		if (enable_netif) {
			T_QUIET; T_EXPECT_EQ(scratch, enable_flowswitch, NULL);
		} else {
			T_QUIET; T_EXPECT_EQ(scratch, 0, NULL);
		}
	} else {
		T_QUIET; T_EXPECT_EQ(scratch, 0, NULL);
	}

	/* Channel enable reads back as the channel count, except for ipsec
	 * without a netif, which doesn't support channels. */
	scratchlen = sizeof(scratch);
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
	    &scratch, &scratchlen), NULL);
	T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)scratchlen, sizeof(scratch), NULL);
	if (g_is_ipsec_test && !enable_netif) {
		T_QUIET; T_EXPECT_EQ(scratch, 0, NULL);
	} else {
		T_QUIET; T_EXPECT_EQ(scratch, (int)channel_count, NULL);
	}

	if (scratch) {
		/* Channels are enabled: every channel must report a real UUID. */
		for (int i = 0; i < channel_count; i++) {
			uuid_clear(uuid[i]);
		}
		uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
		    uuid, &uuidlen), NULL);
		T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
		for (int i = 0; i < channel_count; i++) {
			T_QUIET; T_EXPECT_FALSE(uuid_is_null(uuid[i]), NULL);
		}
	} else {
		/* No channels: fetching UUIDs must fail with ENXIO and leave
		 * the output untouched. */
		for (int i = 0; i < channel_count; i++) {
			uuid_clear(uuid[i]);
		}
		uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
		T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
		    uuid, &uuidlen), ENXIO, NULL);
		T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
		for (int i = 0; i < channel_count; i++) {
			T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
		}
	}
}
238
/*
 * Fetch the interface name (e.g. "ipsec0"/"utun0") of tun socket s via the
 * flavor's IFNAME option and sanity-check that the kernel returned a
 * properly NUL-terminated string whose optlen matches strlen + 1.
 */
static void
tunsock_get_ifname(int s, char ifname[IFXNAMSIZ])
{
	socklen_t optlen = IFXNAMSIZ;
	T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_ZERO(getsockopt(s, SYSPROTO_CONTROL, g_OPT_IFNAME, ifname, &optlen), NULL);
	T_QUIET; T_ASSERT_TRUE(optlen > 0, NULL);
	T_QUIET; T_ASSERT_TRUE(ifname[optlen - 1] == '\0', NULL);
	T_QUIET; T_ASSERT_TRUE(strlen(ifname) + 1 == optlen, "got ifname \"%s\" len %zd expected %u", ifname, strlen(ifname), optlen);
}
248
249 static short
ifnet_get_flags(int s,const char ifname[IFNAMSIZ])250 ifnet_get_flags(int s, const char ifname[IFNAMSIZ])
251 {
252 struct ifreq ifr;
253 memset(&ifr, 0, sizeof(ifr));
254 strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
255 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(ioctl(s, SIOCGIFFLAGS, (caddr_t)&ifr), NULL);
256 return ifr.ifr_flags;
257 }
258
/*
 * Assign an IPv4 address to ifname with SIOCAIFADDR.  Any of addr, mask and
 * broadaddr may be NULL to omit that component.  For point-to-point
 * interfaces the destination address slot is filled with broadaddr if
 * given, else with addr itself.
 */
static void
ifnet_add_addr4(const char ifname[IFNAMSIZ], struct in_addr *addr, struct in_addr *mask, struct in_addr *broadaddr)
{
	struct sockaddr_in *sin;
	struct in_aliasreq ifra;
	int s;

	/* Throwaway AF_INET socket just to carry the ioctl. */
	T_QUIET; T_EXPECT_POSIX_SUCCESS(s = socket(AF_INET, SOCK_DGRAM, 0), NULL);

	memset(&ifra, 0, sizeof(ifra));
	strlcpy(ifra.ifra_name, ifname, sizeof(ifra.ifra_name));

	if (addr != NULL) {
		sin = &ifra.ifra_addr;
		sin->sin_len = sizeof(*sin);
		sin->sin_family = AF_INET;
		sin->sin_addr = *addr;
	}

	if (mask != NULL) {
		sin = &ifra.ifra_mask;
		sin->sin_len = sizeof(*sin);
		sin->sin_family = AF_INET;
		sin->sin_addr = *mask;
	}

	/* ifra_broadaddr doubles as the peer (dst) address on P2P interfaces. */
	if (broadaddr != NULL || (addr != NULL &&
	    (ifnet_get_flags(s, ifname) & IFF_POINTOPOINT) != 0)) {
		sin = &ifra.ifra_broadaddr;
		sin->sin_len = sizeof(*sin);
		sin->sin_family = AF_INET;
		sin->sin_addr = (broadaddr != NULL) ? *broadaddr : *addr;
	}

	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(ioctl(s, SIOCAIFADDR, &ifra), NULL);

	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(s), NULL);
}
297
/* PF_KEY socket used to install SAs; opened lazily by create_sa(). */
static int g_pfkeyso = -1;
/* Local / peer addresses of the tunnel's point-to-point IPv4 pair. */
static struct in_addr g_addr1, g_addr2;
300
/*
 * Build and send a single PF_KEY v2 message (type is e.g. SADB_ADD) that
 * installs a null-crypto ESP SA with the given SPI between src and dst,
 * bound to the ipsec interface ifname via the SADB_X_EXT_IPSECIF extension.
 * The PF_KEY socket is opened on first use and kept in g_pfkeyso.
 */
static void
create_sa(const char ifname[IFXNAMSIZ], uint8_t type, uint32_t spi, struct in_addr *src, struct in_addr *dst)
{
	if (g_pfkeyso == -1) {
		T_QUIET; T_EXPECT_POSIX_SUCCESS(g_pfkeyso = socket(PF_KEY, SOCK_RAW, PF_KEY_V2), NULL);
	}

	/*
	 * <base, SA, (lifetime(HS),) address(SD), (address(P),)
	 * key(AE), (identity(SD),) (sensitivity)>
	 */

	/* One contiguous, 64-bit-aligned PF_KEY message: header followed by
	 * the extensions in the order key, SA, SA2, ipsecif, src, dst. */
	struct {
		struct sadb_msg msg __attribute((aligned(sizeof(uint64_t))));
		struct sadb_key key __attribute((aligned(sizeof(uint64_t))));
		struct sadb_sa sa __attribute((aligned(sizeof(uint64_t))));
		struct sadb_x_sa2 sa2 __attribute((aligned(sizeof(uint64_t))));
		struct sadb_x_ipsecif ipsecif __attribute((aligned(sizeof(uint64_t))));
		struct {
			struct sadb_address addr __attribute((aligned(sizeof(uint64_t))));
			struct sockaddr_in saddr __attribute((aligned(sizeof(uint64_t))));
		} src;
		struct {
			struct sadb_address addr __attribute((aligned(sizeof(uint64_t))));
			struct sockaddr_in saddr __attribute((aligned(sizeof(uint64_t))));
		} dst;
	} addcmd;

	memset(&addcmd, 0, sizeof(addcmd));

	/* Base header; lengths throughout are in 64-bit units (PFKEY_UNIT64). */
	addcmd.msg.sadb_msg_version = PF_KEY_V2;
	addcmd.msg.sadb_msg_type = type;
	addcmd.msg.sadb_msg_errno = 0;
	addcmd.msg.sadb_msg_satype = SADB_SATYPE_ESP;
	addcmd.msg.sadb_msg_len = PFKEY_UNIT64(sizeof(addcmd));
	addcmd.msg.sadb_msg_reserved = 0;
	addcmd.msg.sadb_msg_seq = 0;
	addcmd.msg.sadb_msg_pid = (unsigned)getpid();

	/* Zero-length encryption key (null cipher needs no key material). */
	addcmd.key.sadb_key_len = PFKEY_UNIT64(sizeof(addcmd.key));
	addcmd.key.sadb_key_exttype = SADB_EXT_KEY_ENCRYPT;
	addcmd.key.sadb_key_bits = 0;
	addcmd.key.sadb_key_reserved = 0;

	/* SA proper: no auth, null encryption, cycling sequence numbers. */
	addcmd.sa.sadb_sa_len = PFKEY_UNIT64(sizeof(addcmd.sa));
	addcmd.sa.sadb_sa_exttype = SADB_EXT_SA;
	addcmd.sa.sadb_sa_spi = htonl(spi);
	addcmd.sa.sadb_sa_replay = 0;
	addcmd.sa.sadb_sa_state = 0;
	addcmd.sa.sadb_sa_auth = SADB_AALG_NONE;
	addcmd.sa.sadb_sa_encrypt = SADB_EALG_NULL;
	addcmd.sa.sadb_sa_flags = SADB_X_EXT_CYCSEQ;

	/* SA2: tear the SA down automatically when the interface detaches. */
	addcmd.sa2.sadb_x_sa2_len = PFKEY_UNIT64(sizeof(addcmd.sa2));
	addcmd.sa2.sadb_x_sa2_exttype = SADB_X_EXT_SA2;
	addcmd.sa2.sadb_x_sa2_mode = IPSEC_MODE_ANY;
	addcmd.sa2.sadb_x_sa2_alwaysexpire = 1;
	addcmd.sa2.sadb_x_sa2_flags = SADB_X_EXT_SA2_DELETE_ON_DETACH;
	addcmd.sa2.sadb_x_sa2_sequence = 0;
	addcmd.sa2.sadb_x_sa2_reqid = 0;

	/* Bind the SA to our ipsec interface by name. */
	addcmd.ipsecif.sadb_x_ipsecif_len = PFKEY_UNIT64(sizeof(addcmd.ipsecif));
	addcmd.ipsecif.sadb_x_ipsecif_exttype = SADB_X_EXT_IPSECIF;
	memset(addcmd.ipsecif.sadb_x_ipsecif_internal_if, 0, sizeof(addcmd.ipsecif.sadb_x_ipsecif_internal_if));
	memset(addcmd.ipsecif.sadb_x_ipsecif_outgoing_if, 0, sizeof(addcmd.ipsecif.sadb_x_ipsecif_outgoing_if));
	strlcpy(addcmd.ipsecif.sadb_x_ipsecif_ipsec_if, ifname, sizeof(addcmd.ipsecif.sadb_x_ipsecif_ipsec_if));
	addcmd.ipsecif.sadb_x_ipsecif_init_disabled = 0;
	addcmd.ipsecif.reserved = 0;

	addcmd.src.addr.sadb_address_len = PFKEY_UNIT64(sizeof(addcmd.src));
	addcmd.src.addr.sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
	addcmd.src.addr.sadb_address_proto = IPSEC_ULPROTO_ANY;
	addcmd.src.addr.sadb_address_prefixlen = sizeof(struct in_addr) << 3; //XXX Why?
	addcmd.src.addr.sadb_address_reserved = 0;
	addcmd.src.saddr.sin_len = sizeof(addcmd.src.saddr);
	addcmd.src.saddr.sin_family = AF_INET;
	addcmd.src.saddr.sin_port = htons(0);
	addcmd.src.saddr.sin_addr = *src;

	addcmd.dst.addr.sadb_address_len = PFKEY_UNIT64(sizeof(addcmd.dst));
	addcmd.dst.addr.sadb_address_exttype = SADB_EXT_ADDRESS_DST;
	addcmd.dst.addr.sadb_address_proto = IPSEC_ULPROTO_ANY;
	addcmd.dst.addr.sadb_address_prefixlen = sizeof(struct in_addr) << 3; //XXX Why?
	addcmd.dst.addr.sadb_address_reserved = 0;
	addcmd.dst.saddr.sin_len = sizeof(addcmd.dst.saddr);
	addcmd.dst.saddr.sin_family = AF_INET;
	addcmd.dst.saddr.sin_port = htons(0);
	addcmd.dst.saddr.sin_addr = *dst;

	log_hexdump(&addcmd, sizeof(addcmd));

	/* The kernel must accept the whole message in one send. */
	ssize_t slen;
	T_QUIET; T_EXPECT_POSIX_SUCCESS(slen = send(g_pfkeyso, &addcmd, sizeof(addcmd), 0), NULL);
	T_QUIET; T_EXPECT_EQ(slen, (ssize_t)sizeof(addcmd), NULL);
}
396
/* This version of the test expects channels to be enabled before connect.
 *
 * Creates a tun kernel-control socket of the configured flavor, verifying
 * at every stage (pre-bind, post-bind, post-connect) that each enable
 * option succeeds or fails exactly as the kernel contract requires.
 * Returns the connected socket; channel UUIDs are written to uuid[] when
 * non-NULL (a scratch array is used otherwise).
 */
static int
create_tunsock(int enable_netif, int enable_flowswitch, int channel_count, uuid_t uuid[])
{
	int tunsock;
	struct ctl_info kernctl_info;
	struct sockaddr_ctl kernctl_addr;
	uuid_t scratchuuid[channel_count];
	if (!uuid) {
		uuid = scratchuuid;
	}
	socklen_t uuidlen;

startover:

	T_QUIET; T_EXPECT_POSIX_SUCCESS(tunsock = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL), NULL);

	/* Resolve the control name to its dynamic control id. */
	memset(&kernctl_info, 0, sizeof(kernctl_info));
	strlcpy(kernctl_info.ctl_name, g_CONTROL_NAME, sizeof(kernctl_info.ctl_name));
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(ioctl(tunsock, CTLIOCGINFO, &kernctl_info), NULL);

	/* sc_unit 0 lets the kernel pick the next free interface unit. */
	memset(&kernctl_addr, 0, sizeof(kernctl_addr));
	kernctl_addr.sc_len = sizeof(kernctl_addr);
	kernctl_addr.sc_family = AF_SYSTEM;
	kernctl_addr.ss_sysaddr = AF_SYS_CONTROL;
	kernctl_addr.sc_id = kernctl_info.ctl_id;
	kernctl_addr.sc_unit = 0;

	T_LOG("%s: enable_netif = %d, enable_flowswitch = %d, channel_count = %d",
	    __func__, enable_netif, enable_flowswitch, channel_count);

	/* Before bind: every enable option must be rejected with EINVAL. */
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
	    &enable_netif, sizeof(enable_netif)), EINVAL, NULL);
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
	    &enable_flowswitch, sizeof(enable_flowswitch)), EINVAL, NULL);
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
	    &channel_count, sizeof(channel_count)), EINVAL, NULL);
	for (int i = 0; i < channel_count; i++) {
		uuid_clear(uuid[i]);
	}
	uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
	T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
	    uuid, &uuidlen), EINVAL, NULL);
	T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
	for (int i = 0; i < channel_count; i++) {
		T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
	}

	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(bind(tunsock, (struct sockaddr *)&kernctl_addr, sizeof(kernctl_addr)), NULL);

	/* After bind: netif and channel enables are accepted; flowswitch
	 * still requires a connected socket and fails EINVAL. */
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
	    &enable_netif, sizeof(enable_netif)), NULL);
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
	    &enable_flowswitch, sizeof(enable_flowswitch)), EINVAL, NULL);
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
	    &channel_count, sizeof(channel_count)), NULL);

	/* UUIDs are not available until the channels exist (post-connect). */
	for (int i = 0; i < channel_count; i++) {
		uuid_clear(uuid[i]);
	}
	uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
	T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
	    uuid, &uuidlen), ENXIO, NULL);
	T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
	for (int i = 0; i < channel_count; i++) {
		T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
	}

	int error = connect(tunsock, (struct sockaddr *)&kernctl_addr, sizeof(kernctl_addr));
	if (error == -1 && errno == EBUSY) {
		/* XXX remove this retry nonsense when this is fixed:
		 * <rdar://problem/37340313> creating an interface without specifying specific interface name should not return EBUSY
		 */
		close(tunsock);
		T_LOG("connect got EBUSY, sleeping 1 second before retry");
		sleep(1);
		goto startover;
	}
	if (g_is_ipsec_test && channel_count && !enable_netif) {
		/* ipsec doesn't support channels without a netif */
		T_QUIET; T_EXPECT_POSIX_FAILURE(error, ENOTSUP, "connect() == -1 && errno == ENOTSUP");
	} else {
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(error, "connect() == 0");
	}

	/* After connect: netif enable is locked in and can't be changed. */
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
	    &enable_netif, sizeof(enable_netif)), EINVAL, NULL);

	if (g_is_ipsec_test && channel_count && !enable_netif) {
		/* Connect failed above, so we get EINVAL */
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
		    &enable_flowswitch, sizeof(enable_flowswitch)), EINVAL, NULL);
	} else {
		/* Flowswitch attach only works with netagent on and a netif. */
		if (is_netagent_enabled()) {
			if (enable_netif) {
				T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
				    &enable_flowswitch, sizeof(enable_flowswitch)), NULL);
			} else {
				T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
				    &enable_flowswitch, sizeof(enable_flowswitch)), ENOENT, NULL);
			}
		} else {
			T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
			    &enable_flowswitch, sizeof(enable_flowswitch)), ENOTSUP, NULL);
		}
	}

	/* Channel enable is likewise immutable once connected. */
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
	    &channel_count, sizeof(channel_count)), EINVAL, NULL);

	for (int i = 0; i < channel_count; i++) {
		uuid_clear(uuid[i]);
	}
	uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
	if (!channel_count || (g_is_ipsec_test && channel_count && !enable_netif)) {
		/* ipsec doesn't support channels without a netif */
		if (g_is_ipsec_test && channel_count && !enable_netif) {
			/* Unfortunately, the connect incorrectly unwinds the bind if it get an error.
			 * until that is fixed, expect EINVAL here
			 */
			T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
			    uuid, &uuidlen), EINVAL, NULL);
		} else {
			T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
			    uuid, &uuidlen), ENXIO, NULL);
		}
		T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
		for (int i = 0; i < channel_count; i++) {
			T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
		}
	} else {
		/* Channels exist now; their UUIDs must all be populated. */
		uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
		    uuid, &uuidlen), NULL);
		T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
		for (int i = 0; i < channel_count; i++) {
			T_QUIET; T_EXPECT_FALSE(uuid_is_null(uuid[i]), NULL);
		}
	}

	check_enables(tunsock, enable_netif, enable_flowswitch, channel_count, uuid);

	//T_LOG("Returning tunsock %d", tunsock);

	return tunsock;
}
544
#if 0
/*
 * Dead code: fetch ifmib statistics for one interface row.
 * NOTE(review): this would not compile if enabled — `len`, `name`, and
 * `interesting_row` are undeclared here, and `IpFDATA_GENERAL` looks like
 * a typo for IFDATA_GENERAL.  Kept only as a reference sketch.
 */
static void
ipsec_stats(void)
{
	struct ifmibdata ifmd;

	len = sizeof(struct ifmibdata);
	name[3] = IFMIB_IFDATA;
	name[4] = interesting_row;
	name[5] = IpFDATA_GENERAL;
	if (sysctl(name, 6, &ifmd, &len, (void *)0, 0) == -1) {
		err(1, "sysctl IFDATA_GENERAL %d", interesting_row);
	}
}
#endif
560
561 static void
permute_enables(void)562 permute_enables(void)
563 {
564 int tunsock;
565 T_EXPECT_GE(tunsock = create_tunsock(false, false, false, NULL), 0, NULL);
566 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
567 T_EXPECT_GE(tunsock = create_tunsock(false, false, true, NULL), 0, NULL);
568 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
569 T_EXPECT_GE(tunsock = create_tunsock(false, true, false, NULL), 0, NULL);
570 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
571 T_EXPECT_GE(tunsock = create_tunsock(false, true, true, NULL), 0, NULL);
572 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
573 T_EXPECT_GE(tunsock = create_tunsock(true, false, false, NULL), 0, NULL);
574 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
575 T_EXPECT_GE(tunsock = create_tunsock(true, false, true, NULL), 0, NULL);
576 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
577 T_EXPECT_GE(tunsock = create_tunsock(true, true, false, NULL), 0, NULL);
578 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
579 T_EXPECT_GE(tunsock = create_tunsock(true, true, true, NULL), 0, NULL);
580 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
581 }
582
/* Entry points: run the 8-way enable permutation for each tunnel flavor. */
T_DECL(ipsec_enables, "This test checks combinations of netif/channel/flowswitch on ipsec", T_META_TAG_VM_PREFERRED)
{
	setup_ipsec_test();
	permute_enables();
}

T_DECL(utun_enables, "This test checks combinations of netif/channel/flowswitch on utun", T_META_TAG_VM_PREFERRED)
{
	setup_utun_test();
	permute_enables();
}
594
/* Tunnel control socket shared by the setup_/cleanup_tunsock pair. */
static int g_tunsock = -1;

/*
 * T_ATEND handler: close the tunnel socket (plus the PF_KEY socket on ipsec
 * runs) and verify a second close reports EBADF, proving nothing else still
 * holds the descriptors.
 */
static void
cleanup_tunsock(void)
{
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(g_tunsock), NULL);
	T_QUIET; T_EXPECT_POSIX_FAILURE(close(g_tunsock), EBADF, NULL);
	if (g_is_ipsec_test) {
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(g_pfkeyso), NULL);
		T_QUIET; T_EXPECT_POSIX_FAILURE(close(g_pfkeyso), EBADF, NULL);
	}
}
607
/*
 * Create the global tunnel socket with a netif and channel_count channels,
 * assign it a /32 point-to-point IPv4 address pair derived from the pid,
 * and for ipsec install an SA in each direction so traffic can flow.
 * uuids (may be NULL) receives the channel UUIDs.
 */
static void
setup_tunsock(int channel_count, uuid_t uuids[])
{
	T_ASSERT_GE(g_tunsock = create_tunsock(true, false, channel_count, uuids), 0, NULL);
	T_ATEND(cleanup_tunsock);

	char ifname[IFXNAMSIZ];
	tunsock_get_ifname(g_tunsock, ifname);

	T_LOG("Created interface %s", ifname);

	/* 10.<pid bits 15-8>.<pid bits 7-0>.160 — embeds the pid to reduce
	 * address collisions between concurrent test runs. */
	uint32_t ifaddr = (10 << 24) | ((unsigned)getpid() & 0xffff) << 8 | 160;
	struct in_addr mask;
	g_addr1.s_addr = htonl(ifaddr);
	g_addr2.s_addr = htonl(ifaddr + 1);	/* peer = local + 1 */
	mask.s_addr = htonl(0xffffffff);

	ifnet_add_addr4(ifname, &g_addr1, &mask, &g_addr2);

	if (g_is_ipsec_test) {
		/* One SA per direction, distinct SPIs. */
		create_sa(ifname, SADB_ADD, 12345, &g_addr1, &g_addr2);
		create_sa(ifname, SADB_ADD, 12346, &g_addr2, &g_addr1);
	}
}
632
/* Entry points: just bring up a single-channel interface of each flavor. */
T_DECL(setup_ipsec, "This test sets up an ipsec interface", T_META_TAG_VM_PREFERRED)
{
	setup_ipsec_test();
	setup_tunsock(1, NULL);
}

T_DECL(setup_utun, "This test sets up a utun interface", T_META_TAG_VM_PREFERRED)
{
	setup_utun_test();
	setup_tunsock(1, NULL);
}
644
/* The ten SO_TRAFFIC_CLASS values under test, one socket per entry,
 * annotated with the WMM access category each maps to. */
static const int SOCKET_TRAFFIC_CLASSES[] = {
	SO_TC_BK_SYS, // BK
	SO_TC_BK, // BK
	SO_TC_BE, // BE
	SO_TC_RD, // BE
	SO_TC_OAM, // BE
	SO_TC_AV, // VI
	SO_TC_RV, // VI
	SO_TC_VI, // VI
	SO_TC_VO, // VO
	SO_TC_CTL, // VO
};

// this should match ipsec_find_tx_ring_by_svc in ipsec driver
/* Expected ring index for each entry of SOCKET_TRAFFIC_CLASSES above. */
static const int SOCKET_TC_TO_RING[] = {
	3,
	3,
	2,
	2,
	2,
	1,
	1,
	1,
	0,
	0,
};

/* How many sockets map to this ring */
static const int RING_TO_TC_COUNT[] = {
	2, 3, 3, 2,
};
676
/*
 * Bring up the tunnel with channel_count channels, open an os_channel for
 * each UUID, capture the requested RX and/or TX rings, and register each
 * channel fd with kqueue kq for EVFILT_READ (udata = ring index).
 * rxrings and/or txrings may be NULL when that direction isn't needed.
 */
static void
setup_channels_and_rings(int kq, int channel_count, channel_t channels[], channel_ring_t rxrings[], channel_ring_t txrings[], uuid_t uuids[], int cfds[])
{
	setup_tunsock(channel_count, uuids);

#if 0
	// give time to enable a tcpdump if desired
	T_LOG("Sleeping 10");
	sleep(10);
	T_LOG("Done");
#endif

	for (int ri = 0; ri < channel_count; ri++) {
		/* Create the channel exactly once per ring index.  The original
		 * code created it in both branches, leaking one channel when
		 * both rxrings and txrings were requested. */
		if (rxrings || txrings) {
			T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(channels[ri] = os_channel_create(uuids[ri], 0), NULL);
		}
		if (rxrings) {
			T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(rxrings[ri] = os_channel_rx_ring(channels[ri],
			    os_channel_ring_id(channels[ri], CHANNEL_FIRST_RX_RING)), NULL);
		}
		if (txrings) {
			/* Fixed: the original stored the ring into rxrings[ri] and
			 * fetched it with os_channel_rx_ring() even though it asked
			 * for the TX ring id. */
			T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(txrings[ri] = os_channel_tx_ring(channels[ri],
			    os_channel_ring_id(channels[ri], CHANNEL_FIRST_TX_RING)), NULL);
		}

		/* Watch the channel fd so packet arrival wakes the kqueue. */
		struct kevent kev;
		T_QUIET; T_EXPECT_POSIX_SUCCESS(cfds[ri] = os_channel_get_fd(channels[ri]), NULL);
		EV_SET(&kev, cfds[ri], EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, (void *)(uintptr_t)ri);
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(kevent(kq, &kev, 1, NULL, 0, NULL), NULL);
	}
}
707
/*
 * Tear down what setup_channels_and_rings() built: drop the ring pointers,
 * destroy each channel, and clear the UUIDs.  rxrings/txrings may be NULL,
 * mirroring the setup call.
 */
static void
cleanup_channels_and_rings(int channel_count, channel_t channels[], channel_ring_t rxrings[], channel_ring_t txrings[], uuid_t uuids[])
{
	for (int ri = 0; ri < channel_count; ri++) {
		if (rxrings) {
			rxrings[ri] = NULL;
		}
		if (txrings) {
			/* Fixed: the original wrote rxrings[ri] here, leaving the
			 * TX ring pointer dangling and dereferencing a NULL rxrings
			 * array when only txrings was supplied. */
			txrings[ri] = NULL;
		}
		os_channel_destroy(channels[ri]);
		channels[ri] = NULL;
		uuid_clear(uuids[ri]);
	}
}
723
/*
 * Open one IPv4 socket of the given type per traffic class, tag it with
 * SO_TRAFFIC_CLASS, bind it to the tunnel's local address (ephemeral port),
 * and make it nonblocking.
 */
static void
setup_sockets(int sockets[SO_TC_MAX], int type)
{
	for (int si = 0; si < SO_TC_MAX; si++) {
		T_QUIET; T_EXPECT_POSIX_SUCCESS(sockets[si] = socket(PF_INET, type, 0), NULL);

		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(sockets[si], SOL_SOCKET,
		    SO_TRAFFIC_CLASS, &SOCKET_TRAFFIC_CLASSES[si], sizeof(SOCKET_TRAFFIC_CLASSES[si])), NULL);

		// XXX setsockopt(IP_BOUND_IF) here?

		/* Bind to the tunnel-local address so traffic egresses the tun. */
		struct sockaddr_in sin;
		memset(&sin, 0, sizeof(sin));
		sin.sin_len = sizeof(sin);
		sin.sin_family = AF_INET;
		sin.sin_addr = g_addr1;

		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(bind(sockets[si], (struct sockaddr *)&sin, sizeof(sin)), NULL);

		/* sbuf is only consumed by the disabled T_LOG below. */
		char sbuf[INET6_ADDRSTRLEN];
		inet_ntop(sin.sin_family, &sin.sin_addr.s_addr, sbuf, sizeof(sbuf));
#if 0
		T_LOG("%s socket %d bound to %s port %d",
		    type == SOCK_DGRAM ? "udp" : type == SOCK_STREAM ? "tcp" : "???",
		    sockets[si], sbuf, ntohs(sin.sin_port));
#endif
		setblocking(sockets[si], false);
	}
}
753
754 static void
cleanup_sockets(int sockets[SO_TC_MAX])755 cleanup_sockets(int sockets[SO_TC_MAX])
756 {
757 for (int si = 0; si < SO_TC_MAX; si++) {
758 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(sockets[si]), NULL);
759 sockets[si] = -1;
760 }
761 }
762
/*
 * Consume every available slot on rxring: walk the slots (hex-dumping each
 * payload when dumping is compiled in), then advance the ring past the last
 * slot so the kernel can reuse them.
 */
static void
drain_ring(channel_ring_t rxring)
{
	uint32_t i, sc = os_channel_available_slot_count(rxring);
	channel_slot_t rxprev = NULL;
	for (i = 0; i < sc; i++) {
		slot_prop_t rxprop;
		channel_slot_t rxslot;

		memset(&rxprop, 0, sizeof(rxprop));
		T_QUIET; T_WITH_ERRNO; T_EXPECT_NOTNULL(rxslot = os_channel_get_next_slot(rxring, rxprev, &rxprop), NULL);
		/* Every occupied slot must carry a non-empty, mapped buffer. */
		T_QUIET; T_ASSERT_NE_UINT(0, rxprop.sp_len, NULL);
		T_QUIET; T_ASSERT_NOTNULL((void *)rxprop.sp_buf_ptr, NULL);

		log_hexdump((void *)rxprop.sp_buf_ptr, rxprop.sp_len);

		rxprev = rxslot;
	}
	/* Only advance when something was consumed. */
	if (sc) {
		T_QUIET; T_EXPECT_POSIX_ZERO(os_channel_advance_slot(rxring, rxprev), NULL);
	}
}
785
786 static void
send_one_packet(int s,int type)787 send_one_packet(int s, int type)
788 {
789 struct sockaddr_in sin;
790 memset(&sin, 0, sizeof(sin));
791 sin.sin_len = sizeof(sin);
792 sin.sin_family = AF_INET;
793 sin.sin_addr = g_addr2;
794 sin.sin_port = ntohs(12345);
795
796 if (type == SOCK_STREAM) {
797 T_QUIET; T_EXPECT_POSIX_FAILURE(connect(s, (struct sockaddr *)&sin, sizeof(sin)), EINPROGRESS, NULL);
798 }
799 if (type == SOCK_DGRAM) {
800 T_QUIET; T_WITH_ERRNO; T_EXPECT_EQ_LONG((long)sizeof(s), sendto(s, &s, sizeof(s), 0,
801 (struct sockaddr *)&sin, sizeof(sin)), NULL);
802 }
803 }
804
805 static void
expect_empty_rings(int channel_count,channel_ring_t rings[])806 expect_empty_rings(int channel_count, channel_ring_t rings[])
807 {
808 /* Check all the rings and make sure there are no packets */
809 for (int ri = 0; ri < channel_count; ri++) {
810 T_QUIET; T_EXPECT_EQ_UINT(0U, os_channel_available_slot_count(rings[ri]), NULL);
811 }
812 }
813
/*
 * For each traffic class in turn: send exactly one packet, wait for the
 * kqueue to report readiness, and verify it arrived on the expected RX ring
 * (SOCKET_TC_TO_RING, or ring 0 in single-ring mode) and on no other ring.
 */
static void
xfer_1_packet_singly(int channel_count, int type)
{
	uuid_t uuids[channel_count];
	channel_t channels[channel_count];
	int sockets[SO_TC_MAX];
	channel_ring_t rxrings[channel_count];
	int cfds[channel_count];
	int kq;

	T_QUIET; T_EXPECT_POSIX_SUCCESS(kq = kqueue(), NULL);

	setup_channels_and_rings(kq, channel_count, channels, rxrings, NULL, uuids, cfds);

	setup_sockets(sockets, type);

	for (int si = 0; si < SO_TC_MAX; si++) {
		/* Start from a quiet state so the next packet is unambiguous. */
		expect_empty_rings(channel_count, rxrings);

		send_one_packet(sockets[si], type);

		int expected_ring = channel_count == 1 ? 0 : SOCKET_TC_TO_RING[si];

		/* Wait for the packet delivery and check that it's only one packet and on the correct ring */
		struct kevent kev[channel_count + 1];
		int nev;
		memset(kev, 0, sizeof(kev));
		struct timespec to = { 0, 100 * NSEC_PER_MSEC }; // 100 ms
		T_QUIET; T_EXPECT_POSIX_SUCCESS(nev = kevent(kq, NULL, 0, kev, channel_count + 1, &to), NULL);
		T_QUIET; T_EXPECT_EQ_INT(nev, 1, NULL);
		T_QUIET; T_EXPECT_EQ_PTR((void *)kev[0].ident, (void *)(uintptr_t)cfds[expected_ring], NULL);
		T_QUIET; T_EXPECT_EQ_PTR(kev[0].udata, (void *)(uintptr_t)expected_ring, NULL);
		T_QUIET; T_EXPECT_EQ_SHORT(kev[0].filter, (short)EVFILT_READ, NULL);
		T_QUIET; T_EXPECT_FALSE(kev[0].flags & EV_ERROR, NULL);

		/* Make sure it comes out the expected interface */
		for (int ri = 0; ri < channel_count; ri++) {
			errno = 0;

			uint32_t sc = os_channel_available_slot_count(rxrings[ri]);

			/* Check that the packet appears only on the expected ring and
			 * is the only packet on the expected ring.
			 */
			T_QUIET; T_EXPECT_EQ_UINT(ri == expected_ring, sc, NULL);

			if ((ri == expected_ring) == sc) {
				T_PASS("tc index %d ring %d expected ring %d slot count %u", si, ri, expected_ring, sc);
			} else {
				T_FAIL("tc index %d ring %d expected ring %d slot count %u", si, ri, expected_ring, sc);
			}

			drain_ring(rxrings[ri]);
		}
	}

	cleanup_sockets(sockets);

	cleanup_channels_and_rings(channel_count, channels, rxrings, NULL, uuids);

	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(kq), NULL);
}
876
877 T_DECL(ipsec35889979u1s, "transfers 1 packet at a time of each sevice class over udp to a single ring", T_META_TAG_VM_PREFERRED)
878 {
879 setup_ipsec_test();
880 xfer_1_packet_singly(1, SOCK_DGRAM);
881 }
882
883 T_DECL(ipsec35889979u4s, "transfers 1 packet at a time of each sevice class over udp to 4 rings", T_META_TAG_VM_PREFERRED)
884 {
885 setup_ipsec_test();
886 xfer_1_packet_singly(4, SOCK_DGRAM);
887 }
888
889 T_DECL(ipsec35889979t1s, "transfers 1 packet at a time of each sevice class over tcp to a single ring", T_META_TAG_VM_PREFERRED)
890 {
891 setup_ipsec_test();
892 xfer_1_packet_singly(1, SOCK_STREAM);
893 }
894
895
896 T_DECL(ipsec35889979t4s, "transfers 1 packet at a time of each sevice class over tcp to 4 rings",
897 /* This test will fail because tcp syn packets get elevated
898 * due to ack prioritization
899 */
900 T_META_ENABLED(false), T_META_TAG_VM_PREFERRED)
901 {
902 setup_ipsec_test();
903 xfer_1_packet_singly(4, SOCK_STREAM);
904 }
905
/*
 * Send one packet of every service class back-to-back, then verify the
 * aggregate delivery: each ring raises exactly one kevent and ends up
 * holding exactly the number of packets its service classes map to.
 *
 * channel_count: number of channels/rings opened (1 or 4 in these tests)
 * type:          SOCK_DGRAM or SOCK_STREAM
 */
static void
xfer_1_packet_together(int channel_count, int type)
{
	uuid_t uuids[channel_count];
	channel_t channels[channel_count];
	int sockets[SO_TC_MAX];
	channel_ring_t rxrings[channel_count];
	int cfds[channel_count];
	int kq;

	T_QUIET; T_EXPECT_POSIX_SUCCESS(kq = kqueue(), NULL);

	setup_channels_and_rings(kq, channel_count, channels, rxrings, NULL, uuids, cfds);

	setup_sockets(sockets, type);

	/* Fire one packet per service class without draining in between.
	 * NOTE(review): the empty-ring check inside the loop assumes delivery
	 * does not complete between sends — confirm this isn't racy.
	 */
	for (int si = 0; si < SO_TC_MAX; si++) {
		expect_empty_rings(channel_count, rxrings);

		send_one_packet(sockets[si], type);
	}

	/* Sleep to make sure all packets get delivered */
	struct timespec to = { 0, 100 * NSEC_PER_MSEC }; // 100 ms
	nanosleep(&to, NULL);

	/* Wait for the packet delivery and check that all rings event */
	struct kevent kev[channel_count + 1];
	int nev;
	memset(kev, 0, sizeof(kev));
	T_QUIET; T_EXPECT_POSIX_SUCCESS(nev = kevent(kq, NULL, 0, kev, channel_count + 1, &to), NULL);
	T_QUIET; T_EXPECT_EQ_INT(nev, channel_count, NULL);

	/* Per-ring tally of packets reported by the kevents; also used to
	 * catch a ring eventing more than once.
	 */
	uint32_t found[channel_count];
	memset(found, 0, sizeof(found));
	for (int e = 0; e < nev; e++) {
		T_LOG("kevent %lu filter 0x%4x flags 0x%04x fflags 0x%08x data %"PRIdPTR" udata %p",
		    kev[e].ident, kev[e].filter, kev[e].flags, kev[e].fflags, kev[e].data, kev[e].udata);

		/* udata was registered as the ring index */
		T_QUIET; T_ASSERT_GE_PTR(kev[e].udata, (void *)0, NULL);
		T_QUIET; T_ASSERT_LT_PTR(kev[e].udata, (void *)(intptr_t)channel_count, NULL);
		/* Narrow through uintptr_t: a direct pointer-to-int cast is a
		 * truncating conversion and draws a compiler diagnostic.
		 */
		int ri = (int)(uintptr_t)kev[e].udata;
		T_QUIET; T_EXPECT_EQ_UINT(found[ri], 0U, NULL);

		T_QUIET; T_EXPECT_EQ_ULONG(kev[e].ident, (uintptr_t)cfds[ri], NULL);
		T_QUIET; T_EXPECT_EQ_SHORT(kev[e].filter, (short)EVFILT_READ, NULL);
		T_QUIET; T_EXPECT_FALSE(kev[e].flags & EV_ERROR, NULL);

		if (channel_count == 1) {
			T_QUIET; T_EXPECT_EQ_LONG(kev[e].data, (long)SO_TC_MAX, NULL);
		} else {
			T_QUIET; T_EXPECT_EQ_LONG(kev[e].data, (long)RING_TO_TC_COUNT[ri], NULL);
		}

		found[ri] += (uint32_t)kev[e].data;
	}
	/* Check that something came out of all rings */
	for (int ri = 0; ri < channel_count; ri++) {
		T_QUIET; T_EXPECT_NE_UINT(found[ri], 0U, NULL);
	}

	/* Make sure it comes out the expected interface */
	for (int ri = 0; ri < channel_count; ri++) {
		uint32_t sc = os_channel_available_slot_count(rxrings[ri]);
		/* Cast once so the %PRIu32 conversion always receives a
		 * uint32_t (SO_TC_MAX is an int enum constant).
		 */
		uint32_t expected_sc = channel_count == 1 ?
		    (uint32_t)SO_TC_MAX : (uint32_t)RING_TO_TC_COUNT[ri];
		if (sc == expected_sc) {
			T_PASS("ring %d got %"PRIu32" slots expecting %"PRIu32"", ri, sc, expected_sc);
		} else {
			T_FAIL("ring %d got %"PRIu32" slots expecting %"PRIu32"", ri, sc, expected_sc);
		}

		drain_ring(rxrings[ri]);
	}

	cleanup_sockets(sockets);

	cleanup_channels_and_rings(channel_count, channels, rxrings, NULL, uuids);

	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(kq), NULL);
}
993
994 T_DECL(ipsec35889979u1m, "transfers 1 packet together of each sevice class over udp to a single ring", T_META_TAG_VM_PREFERRED)
995 {
996 setup_ipsec_test();
997 xfer_1_packet_together(1, SOCK_DGRAM);
998 }
999
1000 T_DECL(ipsec35889979u4m, "transfers 1 packet together of each sevice class over udp to 4 rings", T_META_TAG_VM_PREFERRED)
1001 {
1002 setup_ipsec_test();
1003 xfer_1_packet_together(4, SOCK_DGRAM);
1004 }
1005
1006 T_DECL(ipsec35889979t1m, "transfers 1 packet together of each sevice class over tcp to a single ring", T_META_TAG_VM_PREFERRED)
1007 {
1008 setup_ipsec_test();
1009 xfer_1_packet_together(1, SOCK_STREAM);
1010 }
1011
1012 T_DECL(ipsec35889979t4m, "transfers 1 packet together of each sevice class over tcp to 4 rings",
1013 /* This test will fail because tcp syn packets get elevated
1014 * due to ack prioritization
1015 */
1016 T_META_ENABLED(false), T_META_TAG_VM_PREFERRED)
1017 {
1018 setup_ipsec_test();
1019 xfer_1_packet_together(4, SOCK_STREAM);
1020 }
1021