1 /*
2 * Copyright (c) 2019-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <skywalk/os_skywalk_private.h>
30 #include <skywalk/nexus/netif/nx_netif.h>
31 #include <net/if_vlan_var.h>
32 #include <sys/sdt.h>
33
34 #define NETIF_DEMUX_ALLOC_SLOTS 128
35
36 #define OUTBOUND_CHECK_OFF 0
37 #define OUTBOUND_CHECK_ON 1
38 #define OUTBOUND_CHECK_FORCED 2
39
40 /* Turning this off allows packets to be spoofed for testing purposes */
41 static uint32_t outbound_check = OUTBOUND_CHECK_ON;
42
43 /* This controls the per-NA pool size of custom ether and llw NAs */
44 static uint32_t vp_pool_size = 2048;
45
46 /* This enables zerocopy on llw NAs */
47 static uint32_t vp_zerocopy = 0;
48
49 /* TX Ring size */
50 static uint32_t vp_tx_slots = 0;
51
52 /* RX Ring size */
53 static uint32_t vp_rx_slots = 0;
54
55 /*
56 * Disable all packet validation
57 */
58 uint32_t nx_netif_vp_accept_all = 0;
59
60 #if (DEVELOPMENT || DEBUG)
61 SYSCTL_UINT(_kern_skywalk_netif, OID_AUTO, outbound_check,
62 CTLFLAG_RW | CTLFLAG_LOCKED, &outbound_check, 0,
63 "netif outbound packet validation");
64 SYSCTL_UINT(_kern_skywalk_netif, OID_AUTO, vp_pool_size,
65 CTLFLAG_RW | CTLFLAG_LOCKED, &vp_pool_size, 0,
66 "netif virtual port pool size");
67 SYSCTL_UINT(_kern_skywalk_netif, OID_AUTO, vp_zerocopy,
68 CTLFLAG_RW | CTLFLAG_LOCKED, &vp_zerocopy, 0,
69 "netif virtual port zero copy");
70 SYSCTL_UINT(_kern_skywalk_netif, OID_AUTO, vp_tx_slots,
71 CTLFLAG_RW | CTLFLAG_LOCKED, &vp_tx_slots, 0,
72 "netif virtual port tx slots");
73 SYSCTL_UINT(_kern_skywalk_netif, OID_AUTO, vp_rx_slots,
74 CTLFLAG_RW | CTLFLAG_LOCKED, &vp_rx_slots, 0,
75 "netif virtual port rx slots");
76 SYSCTL_UINT(_kern_skywalk_netif, OID_AUTO, vp_accept_all,
77 CTLFLAG_RW | CTLFLAG_LOCKED, &nx_netif_vp_accept_all, 0,
78 "netif accept all");
79 #endif /* (DEVELOPMENT || DEBUG) */
80
81 static int
82 netif_vp_na_channel_event_notify(struct nexus_adapter *,
83 struct __kern_packet *, struct __kern_channel_event *, uint16_t);
84
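/*
 * Fires a DTrace probe with the packet and a pointer to its first
 * buflet's data (adjusted for headroom); used to inspect packets as
 * they are delivered or transmitted.
 */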
85 static void
netif_vp_dump_packet(struct __kern_packet *pkt)
87 {
88 uint8_t *baddr;
89
90 MD_BUFLET_ADDR_ABS(pkt, baddr);
91 ASSERT(baddr != NULL);
92 baddr += pkt->pkt_headroom;
93
94 DTRACE_SKYWALK2(dump__packet, struct __kern_packet *,
95 pkt, uint8_t *, baddr);
96 }
97
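/*
 * Attaches the packet directly to the slot if it came from the ring's
 * own packet pool; otherwise converts (copies) it into a packet from
 * the destination pool before attaching.
 */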
98 static int
netif_copy_or_attach_pkt(struct __kern_channel_ring *ring,
    kern_channel_slot_t slot, struct __kern_packet *pkt)
101 {
102 kern_packet_t ph;
103 struct __kern_packet *dpkt;
104 errno_t err;
105
106 if (pkt->pkt_qum.qum_pp == ring->ckr_pp) {
107 DTRACE_SKYWALK2(attach__pkt, struct __kern_channel_ring *, ring,
108 struct __kern_packet *, pkt);
109 ph = SK_PKT2PH(pkt);
110 err = kern_packet_finalize(ph);
111 VERIFY(err == 0);
112 } else {
113 DTRACE_SKYWALK2(copy__pkt, struct __kern_channel_ring *, ring,
114 struct __kern_packet *, pkt);
115 dpkt = nx_netif_pkt_to_pkt(NIFNA(ring->ckr_na), pkt,
116 ring->ckr_na->na_type == NA_NETIF_VP ? NETIF_CONVERT_RX :
117 NETIF_CONVERT_TX);
118 if (__improbable(dpkt == NULL)) {
119 return ENOMEM;
120 }
121 ph = SK_PKT2PH(dpkt);
122 }
123 err = kern_channel_slot_attach_packet(ring, slot, ph);
124 VERIFY(err == 0);
125 return 0;
126 }
127
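/*
 * Delivers a packet chain to the adapter's first RX ring. Packets that
 * do not fit in the ring, or that arrive while the ring is in drop
 * mode, are freed and counted as drops.
 */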
128 static errno_t
netif_deliver_pkt(struct nexus_adapter *na, struct __kern_packet *pkt_chain,
    uint32_t flags)
131 {
132 #pragma unused(flags)
133 struct __kern_channel_ring *ring = &na->na_rx_rings[0];
134 struct __kern_packet *pkt = pkt_chain, *next;
135 kern_channel_slot_t last_slot = NULL, slot = NULL;
136 struct nexus_netif_adapter *nifna = NIFNA(na);
137 struct nx_netif *nif = nifna->nifna_netif;
138 struct netif_stats *nifs = &nif->nif_stats;
139 sk_protect_t protect;
140 int cnt = 0, dropcnt = 0, err;
141
142 (void) kr_enter(ring, TRUE);
143 protect = sk_sync_protect();
144
145 if (__improbable(KR_DROP(ring))) {
146 nx_netif_free_packet_chain(pkt, &dropcnt);
147 STATS_ADD(nifs,
148 NETIF_STATS_VP_DROP_USER_RING_DISABLED, dropcnt);
149 STATS_ADD(nifs, NETIF_STATS_DROP, dropcnt);
150 DTRACE_SKYWALK2(ring__drop, struct __kern_channel_ring *, ring,
151 int, dropcnt);
152 sk_sync_unprotect(protect);
153 kr_exit(ring);
154 return ENXIO;
155 }
156 while (pkt != NULL) {
157 slot = kern_channel_get_next_slot(ring, last_slot, NULL);
158 if (slot == NULL) {
159 break;
160 }
161 next = pkt->pkt_nextpkt;
162 pkt->pkt_nextpkt = NULL;
163 netif_vp_dump_packet(pkt);
164 err = netif_copy_or_attach_pkt(ring, slot, pkt);
165 if (__probable(err == 0)) {
166 last_slot = slot;
167 }
168 pkt = next;
169 cnt++;
170 }
171 if (NETIF_IS_LOW_LATENCY(nif)) {
172 STATS_ADD(nifs, NETIF_STATS_VP_LL_DELIVERED, cnt);
173 } else {
174 STATS_ADD(nifs, NETIF_STATS_VP_DELIVERED, cnt);
175 }
176 DTRACE_SKYWALK4(delivered, struct nexus_adapter *, na,
177 struct __kern_channel_ring *, ring, struct __kern_packet *, pkt,
178 int, cnt);
179
180 if (pkt != NULL) {
181 nx_netif_free_packet_chain(pkt, &dropcnt);
182 STATS_ADD(nifs,
183 NETIF_STATS_VP_DROP_USER_RING_NO_SPACE, dropcnt);
184 STATS_ADD(nifs, NETIF_STATS_DROP, dropcnt);
185 DTRACE_SKYWALK2(deliver__drop, struct nexus_adapter *, na,
186 int, dropcnt);
187 }
188 if (last_slot != NULL) {
189 kern_channel_advance_slot(ring, last_slot);
190 }
191 sk_sync_unprotect(protect);
192 kr_exit(ring);
193 if (cnt > 0) {
194 (void) kern_channel_notify(ring, 0);
195 }
196 return 0;
197 }
198
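/* Flow delivery callback; arg is the receiving nexus adapter */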
199 static errno_t
netif_deliver_cb(void *arg, void *chain, uint32_t flags)
201 {
202 return netif_deliver_pkt(arg, chain, flags);
203 }
204
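/*
 * Syncs the device RX ring and detaches all available packets into a
 * single chain for the caller.
 */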
205 static int
netif_hwna_rx_get_pkts(struct __kern_channel_ring *ring, struct proc *p,
    uint32_t flags, struct __kern_packet **chain)
208 {
209 int err, cnt = 0;
210 sk_protect_t protect;
211 slot_idx_t ktail, idx;
212 struct __kern_packet *pkt_chain = NULL, **tailp = &pkt_chain;
213 struct netif_stats *nifs = &NIFNA(KRNA(ring))->nifna_netif->nif_stats;
214
215 err = kr_enter(ring, ((flags & NA_NOTEF_CAN_SLEEP) != 0 ||
216 (ring->ckr_flags & CKRF_HOST) != 0));
217 if (err != 0) {
218 SK_DF(SK_VERB_VP,
219 "hwna \"%s\" (0x%llx) kr \"%s\" (0x%llx) krflags 0x%b "
220 "(%d)", KRNA(ring)->na_name, SK_KVA(KRNA(ring)),
221 ring->ckr_name, SK_KVA(ring), ring->ckr_flags,
222 CKRF_BITS, err);
223 STATS_INC(nifs, NETIF_STATS_VP_KR_ENTER_FAIL);
224 return err;
225 }
226 if (__improbable(KR_DROP(ring))) {
227 kr_exit(ring);
228 STATS_INC(nifs, NETIF_STATS_VP_DEV_RING_DISABLED);
229 return ENODEV;
230 }
231 protect = sk_sync_protect();
232
233 err = ring->ckr_na_sync(ring, p, 0);
234 if (err != 0 && err != EAGAIN) {
235 STATS_INC(nifs, NETIF_STATS_VP_SYNC_UNKNOWN_ERR);
236 goto out;
237 }
238 ktail = ring->ckr_ktail;
239 if (__improbable(ring->ckr_khead == ktail)) {
240 SK_DF(SK_VERB_VP,
241 "spurious wakeup on hwna %s (0x%llx)", KRNA(ring)->na_name,
242 SK_KVA(KRNA(ring)));
243 STATS_INC(nifs, NETIF_STATS_VP_SPURIOUS_NOTIFY);
244 err = ENOENT;
245 goto out;
246 }
247 /* get all packets from the ring */
248 idx = ring->ckr_rhead;
249 while (idx != ktail) {
250 struct __kern_slot_desc *ksd = KR_KSD(ring, idx);
251 struct __kern_packet *pkt = ksd->sd_pkt;
252
253 ASSERT(pkt->pkt_nextpkt == NULL);
254 KR_SLOT_DETACH_METADATA(ring, ksd);
255 cnt++;
256 *tailp = pkt;
257 tailp = &pkt->pkt_nextpkt;
258 idx = SLOT_NEXT(idx, ring->ckr_lim);
259 }
260 ring->ckr_rhead = ktail;
261 ring->ckr_rtail = ring->ckr_ktail;
262
263 DTRACE_SKYWALK2(rx__notify, struct __kern_channel_ring *, ring,
264 int, cnt);
265 *chain = pkt_chain;
266 out:
267 sk_sync_unprotect(protect);
268 kr_exit(ring);
269 return err;
270 }
271
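/*
 * RX notify for low-latency hardware rings: retrieves packet chains
 * via nx_rx_sync_packets() and demuxes each chain, bypassing the
 * regular ring sync path.
 */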
272 int
netif_llw_rx_notify_fast(struct __kern_channel_ring *ring, struct proc *p,
    uint32_t flags)
275 {
276 #pragma unused (p, flags)
277 struct nexus_adapter *hwna;
278 uint32_t count;
279 int i, err;
280
281 hwna = KRNA(ring);
282 count = na_get_nslots(hwna, NR_RX);
283 err = nx_rx_sync_packets(ring, ring->ckr_scratch, &count);
284 if (__improbable(err != 0)) {
285 SK_ERR("nx_rx_sync_packets failed: %d", err);
286 DTRACE_SKYWALK2(rx__sync__packets__failed,
287 struct __kern_channel_ring *, ring, int, err);
288 return err;
289 }
290 DTRACE_SKYWALK1(chain__count, uint32_t, count);
291 for (i = 0; i < count; i++) {
292 struct __kern_packet *pkt_chain;
293
294 pkt_chain = SK_PTR_ADDR_KPKT(ring->ckr_scratch[i]);
295 ASSERT(pkt_chain != NULL);
296 (void) nx_netif_demux(NIFNA(KRNA(ring)), pkt_chain, NULL,
297 NETIF_FLOW_SOURCE);
298 }
299 return 0;
300 }
301
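/*
 * Default RX notify: pulls packets off the device ring and passes the
 * chain to the demuxer.
 */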
302 int
netif_llw_rx_notify_default(struct __kern_channel_ring *ring, struct proc *p,
    uint32_t flags)
305 {
306 int err;
307 struct __kern_packet *pkt_chain = NULL;
308
309 err = netif_hwna_rx_get_pkts(ring, p, flags, &pkt_chain);
310 if (err != 0) {
311 return err;
312 }
313 return nx_netif_demux(NIFNA(KRNA(ring)), pkt_chain, NULL,
314 NETIF_FLOW_SOURCE);
315 }
316
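/*
 * Opens (or takes an additional reference on) the special channel to
 * the device port, puts the hardware adapter into LLW mode and starts
 * it.
 */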
317 static errno_t
netif_hwna_setup(struct nx_netif *nif)
319 {
320 struct kern_channel *ch;
321 struct kern_nexus *nx = nif->nif_nx;
322 struct chreq chr;
323 int err;
324
325 SK_LOCK_ASSERT_HELD();
326 ASSERT(NETIF_IS_LOW_LATENCY(nif));
327 if (nif->nif_hw_ch != NULL) {
328 nif->nif_hw_ch_refcnt++;
329 SK_DF(SK_VERB_VP, "%s: hw channel already open, refcnt %d",
330 if_name(nif->nif_ifp), nif->nif_hw_ch_refcnt);
331 return 0;
332 }
333 ASSERT(nif->nif_hw_ch_refcnt == 0);
334 bzero(&chr, sizeof(chr));
335 uuid_copy(chr.cr_spec_uuid, nx->nx_uuid);
336 chr.cr_ring_id = 0;
337 chr.cr_port = NEXUS_PORT_NET_IF_DEV;
338 chr.cr_mode |= CHMODE_CONFIG;
339
340 err = 0;
341 ch = ch_open_special(nx, &chr, FALSE, &err);
342 if (ch == NULL) {
343 SK_ERR("%s: failed to open nx 0x%llx (err %d)",
344 if_name(nif->nif_ifp), SK_KVA(nx), err);
345 return err;
346 }
347 netif_hwna_set_mode(ch->ch_na, NETIF_MODE_LLW, NULL);
348 na_start_spec(nx, ch);
349 nif->nif_hw_ch_refcnt = 1;
350 nif->nif_hw_ch = ch;
351 SK_DF(SK_VERB_VP, "%s: hw channel opened 0x%llx, %s:%s",
352 if_name(nif->nif_ifp), SK_KVA(ch), NX_DOM(nx)->nxdom_name,
353 NX_DOM_PROV(nx)->nxdom_prov_name);
354 return 0;
355 }
356
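/*
 * Drops a reference on the hardware channel; the last reference stops
 * the adapter, clears LLW mode and closes the channel.
 */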
357 static void
netif_hwna_teardown(struct nx_netif *nif)
359 {
360 struct kern_nexus *nx = nif->nif_nx;
361 struct kern_channel *ch = nif->nif_hw_ch;
362
363 SK_LOCK_ASSERT_HELD();
364 ASSERT(NETIF_IS_LOW_LATENCY(nif));
365 ASSERT(ch != NULL);
366 if (--nif->nif_hw_ch_refcnt > 0) {
367 SK_DF(SK_VERB_VP, "%s: hw channel still open, refcnt %d",
368 if_name(nif->nif_ifp), nif->nif_hw_ch_refcnt);
369 return;
370 }
371 SK_DF(SK_VERB_VP, "%s: hw channel closing 0x%llx, %s:%s",
372 if_name(nif->nif_ifp), SK_KVA(ch), NX_DOM(nx)->nxdom_name,
373 NX_DOM_PROV(nx)->nxdom_prov_name);
374
375 na_stop_spec(nx, ch);
376 netif_hwna_clear_mode(ch->ch_na);
377 ch_close_special(ch);
378 (void) ch_release_locked(ch);
379 nif->nif_hw_ch = NULL;
380 SK_DF(SK_VERB_VP, "%s: hw channel closed, %s:%s",
381 if_name(nif->nif_ifp), NX_DOM(nx)->nxdom_name,
382 NX_DOM_PROV(nx)->nxdom_prov_name);
383 }
384
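/*
 * Activates a virtual port adapter: looks up the port's flow
 * descriptor, opens the low-latency hardware channel if needed and
 * registers the flow with the demuxer.
 */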
385 static int
netif_vp_na_activate_on(struct nexus_adapter *na)
387 {
388 errno_t err;
389 struct netif_flow *nf;
390 struct netif_port_info npi;
391 struct nexus_netif_adapter *nifna;
392 struct nx_netif *nif;
393 boolean_t hwna_setup = FALSE;
394
395 nifna = NIFNA(na);
396 nif = nifna->nifna_netif;
397
398 /* lock needed to protect against nxdom_unbind_port */
399 NETIF_WLOCK(nif);
400 err = nx_port_get_info(nif->nif_nx, na->na_nx_port,
401 NX_PORT_INFO_TYPE_NETIF, &npi, sizeof(npi));
402 NETIF_WUNLOCK(nif);
403 if (err != 0) {
404 SK_ERR("port info not found: %d", err);
405 return err;
406 }
407 if (NETIF_IS_LOW_LATENCY(nif)) {
408 err = netif_hwna_setup(nif);
409 if (err != 0) {
410 return err;
411 }
412 hwna_setup = TRUE;
413 }
414 err = nx_netif_flow_add(nif, na->na_nx_port, &npi.npi_fd, na,
415 netif_deliver_cb, &nf);
416 if (err != 0) {
417 if (hwna_setup) {
418 netif_hwna_teardown(nif);
419 }
420 return err;
421 }
422 nifna->nifna_flow = nf;
423 atomic_bitset_32(&na->na_flags, NAF_ACTIVE);
424 return 0;
425 }
426
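/*
 * Deactivates a virtual port adapter: removes its flow and tears down
 * the low-latency hardware channel if one was set up.
 */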
427 static int
netif_vp_na_activate_off(struct nexus_adapter *na)
429 {
430 errno_t err;
431 struct nexus_netif_adapter *nifna;
432 struct nx_netif *nif;
433
434 nifna = NIFNA(na);
435 nif = nifna->nifna_netif;
436 err = nx_netif_flow_remove(nif, nifna->nifna_flow);
437 VERIFY(err == 0);
438
439 nifna->nifna_flow = NULL;
440 if (NETIF_IS_LOW_LATENCY(nif)) {
441 netif_hwna_teardown(nif);
442 }
443 atomic_bitclear_32(&na->na_flags, NAF_ACTIVE);
444 return 0;
445 }
446
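/* na_activate callback for virtual port adapters */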
447 static int
netif_vp_na_activate(struct nexus_adapter *na, na_activate_mode_t mode)
449 {
450 errno_t err;
451
452 ASSERT(na->na_type == NA_NETIF_VP);
453 if (mode == NA_ACTIVATE_MODE_ON) {
454 err = netif_vp_na_activate_on(na);
455 } else {
456 err = netif_vp_na_activate_off(na);
457 }
458 SK_DF(SK_VERB_VP, "na \"%s\" (0x%llx) %s err %d", na->na_name,
459 SK_KVA(na), na_activate_mode2str(mode), err);
460 return err;
461 }
462
463 /*
464 * XXX
465 * The native path sends to the dev ring directly, bypassing aqm.
466 * This is ok since this is only used by llw now. This will need to
467 * change when we add native support for filters.
468 */
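/*
 * Transmits a packet chain by placing it directly on the device TX
 * ring and issuing a sync; packets that could not be enqueued are
 * freed.
 */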
469 static int
netif_vp_send_pkt_chain_low_latency(struct nexus_netif_adapter *dev_nifna,
    struct __kern_packet *pkt_chain, struct proc *p)
472 {
473 struct __kern_packet *pkt = pkt_chain, *next;
474 struct nexus_adapter *na = &dev_nifna->nifna_up;
475 struct __kern_channel_ring *ring = &na->na_tx_rings[0];
476 struct netif_stats *nifs = &dev_nifna->nifna_netif->nif_stats;
477 sk_protect_t protect;
478 slot_idx_t ktail, idx;
479 uint32_t cnt;
480 int err_stat = -1;
481 errno_t err;
482
483 (void) kr_enter(ring, TRUE);
484 protect = sk_sync_protect();
485 if (__improbable(KR_DROP(ring))) {
486 SK_ERR("ring is not ready");
487 DTRACE_SKYWALK1(ring__drop, struct __kern_channel_ring *, ring);
488 err_stat = NETIF_STATS_VP_DROP_DEV_RING_DISABLED;
489 err = ENXIO;
490 goto done;
491 }
492 idx = ring->ckr_rhead;
493 ktail = ring->ckr_ktail;
494 if (idx == ktail) {
495 SK_ERR("no space to send");
496 DTRACE_SKYWALK1(no__space, struct __kern_channel_ring *, ring);
497 err_stat = NETIF_STATS_VP_DROP_DEV_RING_NO_SPACE;
498 goto sync;
499 }
500 cnt = 0;
501 while (pkt != NULL && idx != ktail) {
502 struct __slot_desc *slot = &ring->ckr_ksds[idx];
503
504 next = pkt->pkt_nextpkt;
505 pkt->pkt_nextpkt = NULL;
506 netif_vp_dump_packet(pkt);
507 err = netif_copy_or_attach_pkt(ring, slot, pkt);
508 if (__probable(err == 0)) {
509 cnt++;
510 idx = SLOT_NEXT(idx, ring->ckr_lim);
511 }
512 pkt = next;
513 }
514 ring->ckr_rhead = idx;
515 STATS_ADD(nifs, NETIF_STATS_VP_LL_ENQUEUED, cnt);
516 DTRACE_SKYWALK2(ll__enqueued, struct __kern_channel_ring *, ring,
517 uint32_t, cnt);
518 sync:
519 ring->ckr_khead_pre = ring->ckr_khead;
520 err = ring->ckr_na_sync(ring, p, NA_SYNCF_SYNC_ONLY);
521 if (err != 0 && err != EAGAIN) {
522 SK_ERR("unexpected sync err %d", err);
523 DTRACE_SKYWALK1(sync__failed, struct __kern_channel_ring *,
524 ring);
525 err_stat = NETIF_STATS_VP_DROP_UNEXPECTED_ERR;
526 goto done;
527 }
528 /*
529 * Verify that the driver has detached packets from the consumed slots.
530 */
531 idx = ring->ckr_khead_pre;
532 cnt = 0;
533 while (idx != ring->ckr_khead) {
534 struct __kern_slot_desc *ksd = KR_KSD(ring, idx);
535
536 cnt++;
537 VERIFY(!KSD_VALID_METADATA(ksd));
538 idx = SLOT_NEXT(idx, ring->ckr_lim);
539 }
540 ring->ckr_khead_pre = ring->ckr_khead;
541 STATS_ADD(nifs, NETIF_STATS_VP_LL_SENT, cnt);
542 DTRACE_SKYWALK2(ll__sent, struct __kern_channel_ring *, ring,
543 uint32_t, cnt);
544 err = 0;
545
546 done:
547 sk_sync_unprotect(protect);
548 kr_exit(ring);
549
550 /*
551 * Free all unsent packets.
552 */
553 if (pkt != NULL) {
554 int dropcnt;
555
556 nx_netif_free_packet_chain(pkt, &dropcnt);
557 if (err_stat != -1) {
558 STATS_ADD(nifs, err_stat, dropcnt);
559 }
560 STATS_ADD(nifs, NETIF_STATS_DROP, dropcnt);
561 }
562 return err;
563 }
564
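/*
 * Transmits a packet chain through the regular interface output path:
 * each packet is converted to an mbuf (compat) or a device packet
 * (native), enqueued on the interface, and the interface is started.
 */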
565 static int
netif_vp_send_pkt_chain_common(struct nexus_netif_adapter *dev_nifna,
    struct __kern_packet *pkt_chain, boolean_t compat)
568 {
569 struct __kern_packet *pkt = pkt_chain, *next, *p;
570 struct nx_netif *nif = dev_nifna->nifna_netif;
571 struct netif_stats *nifs = &nif->nif_stats;
572 ifnet_t ifp = nif->nif_ifp;
573 struct mbuf *m;
574 boolean_t drop;
575 int cnt = 0;
576 errno_t err;
577
578 while (pkt != NULL) {
579 next = pkt->pkt_nextpkt;
580 pkt->pkt_nextpkt = NULL;
581 drop = FALSE;
582
583 if (compat) {
584 m = nx_netif_pkt_to_mbuf(dev_nifna, pkt, NETIF_CONVERT_TX);
585 if (m == NULL) {
586 pkt = next;
587 continue;
588 }
589 err = ifnet_enqueue_mbuf(ifp, m, FALSE, &drop);
590 } else {
591 p = nx_netif_pkt_to_pkt(dev_nifna, pkt, NETIF_CONVERT_TX);
592 if (p == NULL) {
593 pkt = next;
594 continue;
595 }
596 err = ifnet_enqueue_pkt(ifp, p, FALSE, &drop);
597 }
598 if (err != 0) {
599 SK_ERR("enqueue failed: %d", err);
600 STATS_INC(nifs, NETIF_STATS_VP_ENQUEUE_FAILED);
601 if (drop) {
602 STATS_INC(nifs, NETIF_STATS_DROP);
603 }
604 DTRACE_SKYWALK2(enqueue__failed,
605 struct nexus_netif_adapter *, dev_nifna,
606 boolean_t, drop);
607 } else {
608 STATS_INC(nifs, NETIF_STATS_VP_ENQUEUED);
609 cnt++;
610 }
611 pkt = next;
612 }
613 if (cnt > 0) {
614 ifnet_start(ifp);
615 }
616 return 0;
617 }
618
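/*
 * Picks the transmit path based on the device adapter: direct ring
 * access for low-latency ports, otherwise the interface output queue.
 */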
619 static int
netif_vp_send_pkt_chain(struct nexus_netif_adapter *dev_nifna,
    struct __kern_packet *pkt_chain, struct proc *p)
622 {
623 struct nexus_adapter *na = &dev_nifna->nifna_up;
624
625 if (NETIF_IS_LOW_LATENCY(dev_nifna->nifna_netif)) {
626 return netif_vp_send_pkt_chain_low_latency(dev_nifna,
627 pkt_chain, p);
628 }
629 if (na->na_type == NA_NETIF_DEV) {
630 return netif_vp_send_pkt_chain_common(dev_nifna, pkt_chain, FALSE);
631 }
632 ASSERT(na->na_type == NA_NETIF_COMPAT_DEV);
633 return netif_vp_send_pkt_chain_common(dev_nifna, pkt_chain, TRUE);
634 }
635
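/*
 * Validates an outbound packet's source MAC address and flow match,
 * unless validation is disabled via sysctl or this is a low-latency
 * port (where the check is applied only when forced).
 */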
636 SK_NO_INLINE_ATTRIBUTE
637 static boolean_t
validate_packet(struct nexus_netif_adapter *nifna, struct __kern_packet *pkt)
639 {
640 struct nx_netif *nif = nifna->nifna_netif;
641
642 VERIFY(pkt->pkt_nextpkt == NULL);
643
644 if (nx_netif_vp_accept_all != 0) {
645 return TRUE;
646 }
647 if (outbound_check == 0 ||
648 (NETIF_IS_LOW_LATENCY(nif) &&
649 outbound_check != OUTBOUND_CHECK_FORCED)) {
650 return TRUE;
651 }
652 if (!nx_netif_validate_macaddr(nif, pkt, NETIF_FLOW_OUTBOUND)) {
653 return FALSE;
654 }
655 if (!nx_netif_flow_match(nif, pkt, nifna->nifna_flow,
656 NETIF_FLOW_OUTBOUND)) {
657 return FALSE;
658 }
659 return TRUE;
660 }
661
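/*
 * TX sync for virtual port rings: detaches, finalizes and validates
 * packets from the ring, then hands the resulting chain to the device
 * adapter for transmission.
 */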
662 static int
netif_vp_na_txsync(struct __kern_channel_ring *kring, struct proc *p,
    uint32_t flags)
665 {
666 #pragma unused(flags)
667 kern_channel_slot_t last_slot = NULL, slot = NULL;
668 struct __kern_packet *head = NULL, **tailp = &head, *pkt;
669 struct nexus_netif_adapter *nifna, *dev_nifna;
670 struct nx_netif *nif;
671 struct netif_stats *nifs;
672 kern_packet_t ph;
673 errno_t err;
674 int cnt = 0;
675
676 nifna = NIFNA(KRNA(kring));
677 nif = nifna->nifna_netif;
678 nifs = &nif->nif_stats;
679 for (;;) {
680 slot = kern_channel_get_next_slot(kring, slot, NULL);
681 if (slot == NULL) {
682 break;
683 }
684 ph = kern_channel_slot_get_packet(kring, slot);
685 if (__improbable(ph == 0)) {
686 SK_ERR("packet got dropped by internalize");
687 STATS_INC(nifs, NETIF_STATS_VP_DROP_INTERNALIZE_FAIL);
688 DTRACE_SKYWALK2(bad__slot, struct __kern_channel_ring *,
689 kring, kern_channel_slot_t, slot);
690 last_slot = slot;
691 continue;
692 }
693 pkt = SK_PTR_ADDR_KPKT(ph);
694 if (__improbable(pkt->pkt_length == 0)) {
695 SK_ERR("dropped zero length packet");
696 STATS_INC(nifs, NETIF_STATS_VP_BAD_PKT_LEN);
697 DTRACE_SKYWALK2(bad__slot, struct __kern_channel_ring *,
698 kring, kern_channel_slot_t, slot);
699 last_slot = slot;
700 continue;
701 }
702 err = kern_channel_slot_detach_packet(kring, slot, ph);
703 VERIFY(err == 0);
704
705 /* packet needs to be finalized after detach */
706 err = kern_packet_finalize(ph);
707 VERIFY(err == 0);
708 last_slot = slot;
709
710 if (validate_packet(nifna, pkt)) {
711 nx_netif_snoop(nif, pkt, FALSE);
712 cnt++;
713 *tailp = pkt;
714 tailp = &pkt->pkt_nextpkt;
715 } else {
716 nx_netif_free_packet(pkt);
717 }
718 }
719 if (cnt == 0) {
720 STATS_INC(nifs, NETIF_STATS_VP_SYNC_NO_PKTS);
721 DTRACE_SKYWALK2(no__data, struct nexus_netif_adapter *, nifna,
722 struct __kern_channel_ring *, kring);
723 return 0;
724 }
725 DTRACE_SKYWALK4(injected, struct nexus_netif_adapter *, nifna,
726 struct __kern_channel_ring *, kring, struct __kern_packet *, head,
727 int, cnt);
728 if (last_slot != NULL) {
729 kern_channel_advance_slot(kring, last_slot);
730 }
731
732 dev_nifna = NIFNA(nx_port_get_na(KRNA(kring)->na_nx,
733 NEXUS_PORT_NET_IF_DEV));
734
735 err = netif_vp_send_pkt_chain(dev_nifna, head, p);
736 if (err != 0) {
737 SK_ERR("send failed: %d\n", err);
738 }
739 return 0;
740 }
741
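/* RX sync for virtual port rings: only reclaims consumed slots */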
742 static int
netif_vp_na_rxsync(struct __kern_channel_ring *kring, struct proc *p,
    uint32_t flags)
745 {
746 #pragma unused(p, flags)
747 (void) kr_reclaim(kring);
748 return 0;
749 }
750
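/* na_krings_create callback for virtual port adapters */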
751 static int
netif_vp_na_krings_create(struct nexus_adapter *na, struct kern_channel *ch)
753 {
754 ASSERT(na->na_type == NA_NETIF_VP);
755 return na_rings_mem_setup(na, 0, FALSE, ch);
756 }
757
758
759 /* na_krings_delete callback for flow switch ports. */
760 static void
netif_vp_na_krings_delete(struct nexus_adapter *na, struct kern_channel *ch,
    boolean_t defunct)
763 {
764 ASSERT(na->na_type == NA_NETIF_VP);
765 na_rings_mem_teardown(na, ch, defunct);
766 }
767
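/*
 * Computes the arena region parameters for a virtual port adapter.
 * If zero copy is enabled on a low-latency port, the device's packet
 * pool is shared; otherwise a dedicated pool is sized from the
 * driver's buflet size (native) or the interface MTU (compat).
 */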
768 static int
netif_vp_region_params_setup(struct nexus_adapter *na,
    struct skmem_region_params *srp, struct kern_pbufpool **tx_pp)
771 {
772 #pragma unused (tx_pp)
773 uint32_t max_mtu;
774 uint32_t buf_sz, buf_cnt, nslots, afslots, evslots, totalrings;
775 struct nexus_adapter *devna;
776 struct kern_nexus *nx;
777 struct nx_netif *nif;
778 int err, i;
779
780 for (i = 0; i < SKMEM_REGIONS; i++) {
781 srp[i] = *skmem_get_default(i);
782 }
783 totalrings = na_get_nrings(na, NR_TX) + na_get_nrings(na, NR_RX) +
784 na_get_nrings(na, NR_A) + na_get_nrings(na, NR_F) +
785 na_get_nrings(na, NR_EV);
786
787 srp[SKMEM_REGION_SCHEMA].srp_r_obj_size =
788 (uint32_t)CHANNEL_SCHEMA_SIZE(totalrings);
789 srp[SKMEM_REGION_SCHEMA].srp_r_obj_cnt = totalrings;
790 skmem_region_params_config(&srp[SKMEM_REGION_SCHEMA]);
791
792 srp[SKMEM_REGION_RING].srp_r_obj_size =
793 sizeof(struct __user_channel_ring);
794 srp[SKMEM_REGION_RING].srp_r_obj_cnt = totalrings;
795 skmem_region_params_config(&srp[SKMEM_REGION_RING]);
796
797 /* USD regions need to be writable to support user packet pool */
798 srp[SKMEM_REGION_TXAUSD].srp_cflags &= ~SKMEM_REGION_CR_UREADONLY;
799 srp[SKMEM_REGION_RXFUSD].srp_cflags &= ~SKMEM_REGION_CR_UREADONLY;
800
801 /* Enable per-CPU caching for UMD and KMD regions */
802 srp[SKMEM_REGION_UMD].srp_cflags &= ~SKMEM_REGION_CR_NOMAGAZINES;
803 srp[SKMEM_REGION_KMD].srp_cflags &= ~SKMEM_REGION_CR_NOMAGAZINES;
804
805 nslots = na_get_nslots(na, NR_TX);
806 afslots = na_get_nslots(na, NR_A);
807 evslots = na_get_nslots(na, NR_EV);
808 srp[SKMEM_REGION_TXAKSD].srp_r_obj_size =
809 MAX(MAX(nslots, afslots), evslots) * SLOT_DESC_SZ;
810 srp[SKMEM_REGION_TXAKSD].srp_r_obj_cnt =
811 na_get_nrings(na, NR_TX) + na_get_nrings(na, NR_A) +
812 na_get_nrings(na, NR_EV);
813 skmem_region_params_config(&srp[SKMEM_REGION_TXAKSD]);
814
815 /* USD and KSD objects share the same size and count */
816 srp[SKMEM_REGION_TXAUSD].srp_r_obj_size =
817 srp[SKMEM_REGION_TXAKSD].srp_r_obj_size;
818 srp[SKMEM_REGION_TXAUSD].srp_r_obj_cnt =
819 srp[SKMEM_REGION_TXAKSD].srp_r_obj_cnt;
820 skmem_region_params_config(&srp[SKMEM_REGION_TXAUSD]);
821
822 /*
823 * Since the rx/free slots share the same region and cache,
824 * we will use the same object size for both types of slots.
825 */
826 nslots = na_get_nslots(na, NR_RX);
827 afslots = na_get_nslots(na, NR_F);
828 srp[SKMEM_REGION_RXFKSD].srp_r_obj_size =
829 MAX(nslots, afslots) * SLOT_DESC_SZ;
830 srp[SKMEM_REGION_RXFKSD].srp_r_obj_cnt =
831 na_get_nrings(na, NR_RX) + na_get_nrings(na, NR_F);
832 skmem_region_params_config(&srp[SKMEM_REGION_RXFKSD]);
833
834 /* USD and KSD objects share the same size and count */
835 srp[SKMEM_REGION_RXFUSD].srp_r_obj_size =
836 srp[SKMEM_REGION_RXFKSD].srp_r_obj_size;
837 srp[SKMEM_REGION_RXFUSD].srp_r_obj_cnt =
838 srp[SKMEM_REGION_RXFKSD].srp_r_obj_cnt;
839 skmem_region_params_config(&srp[SKMEM_REGION_RXFUSD]);
840
841 /*
842 * No need to create our own buffer pool if we can share the device's
843 * pool. We don't support sharing split pools to user space.
844 */
845 nx = na->na_nx;
846 nif = nx->nx_arg;
847 if (vp_zerocopy != 0 && NETIF_IS_LOW_LATENCY(nif) &&
848 nx->nx_tx_pp != NULL && (nx->nx_rx_pp == NULL ||
849 nx->nx_tx_pp == nx->nx_rx_pp) && !PP_KERNEL_ONLY(nx->nx_tx_pp)) {
850 struct kern_pbufpool *pp = nx->nx_tx_pp;
851
852 if (nif->nif_hw_ch_refcnt != 0) {
853 SK_ERR("only one channel is supported for zero copy");
854 return ENOTSUP;
855 }
856 SK_DF(SK_VERB_VP, "sharing %s's pool", if_name(na->na_ifp));
857
858 /*
		 * These types need to be initialized, otherwise some assertions
		 * in skmem_arena_create_for_nexus() will fail.
861 */
862 srp[SKMEM_REGION_UMD].srp_md_type = pp->pp_md_type;
863 srp[SKMEM_REGION_UMD].srp_md_subtype = pp->pp_md_subtype;
864 srp[SKMEM_REGION_KMD].srp_md_type = pp->pp_md_type;
865 srp[SKMEM_REGION_KMD].srp_md_subtype = pp->pp_md_subtype;
866 *tx_pp = nx->nx_tx_pp;
867 return 0;
868 }
869
870 devna = nx_port_get_na(nx, NEXUS_PORT_NET_IF_DEV);
871 ASSERT(devna != NULL);
872 if (devna->na_type == NA_NETIF_DEV) {
873 /*
874 * For native devices, use the driver's buffer size
875 */
876 ASSERT(nx->nx_rx_pp != NULL);
877 ASSERT(nx->nx_tx_pp != NULL);
878 buf_sz = nx->nx_tx_pp->pp_buflet_size;
879 } else {
880 if ((err = nx_netif_get_max_mtu(na->na_ifp, &max_mtu)) != 0) {
881 /*
882 * If the driver doesn't support SIOCGIFDEVMTU, use the
883 * default MTU size.
884 */
885 max_mtu = ifnet_mtu(na->na_ifp);
886 err = 0;
887 }
888 /* max_mtu does not include the L2 header */
889 buf_sz = MAX(max_mtu + sizeof(struct ether_vlan_header), 2048);
890 }
891 buf_cnt = vp_pool_size;
892 pp_regions_params_adjust(&srp[SKMEM_REGION_BUF], &srp[SKMEM_REGION_KMD],
893 &srp[SKMEM_REGION_UMD], NULL, NULL, NEXUS_META_TYPE_PACKET,
894 NEXUS_META_SUBTYPE_RAW, buf_cnt, 1, buf_sz, buf_cnt);
895
896 nx_netif_vp_region_params_adjust(na, srp);
897 return 0;
898 }
899
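/*
 * Creates the memory arena backing a virtual port adapter from the
 * computed region parameters.
 */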
900 static int
netif_vp_na_mem_new(struct kern_nexus *nx, struct nexus_adapter *na)
902 {
903 #pragma unused(nx)
904 struct skmem_region_params srp[SKMEM_REGIONS];
905 struct kern_pbufpool *tx_pp = NULL;
906 int err;
907
908 err = netif_vp_region_params_setup(na, srp, &tx_pp);
909 if (err != 0) {
910 return err;
911 }
912 na->na_arena = skmem_arena_create_for_nexus(na, srp,
913 tx_pp != NULL ? &tx_pp : NULL, NULL,
914 FALSE, FALSE, &nx->nx_adv, &err);
915 ASSERT(na->na_arena != NULL || err != 0);
916 return err;
917 }
918
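/*
 * Destructor for virtual port adapters: unbinds and frees the nexus
 * port and drops the ifnet and netif references.
 */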
919 static void
netif_vp_na_dtor(struct nexus_adapter *na)
921 {
922 struct kern_nexus *nx = na->na_nx;
923 struct nx_netif *nif = NX_NETIF_PRIVATE(nx);
924 struct nexus_netif_adapter *nifna = NIFNA(na);
925
926 NETIF_WLOCK(nif);
927 (void) nx_port_unbind(nx, na->na_nx_port);
928 nx_port_free(nx, na->na_nx_port);
929 nif->nif_vp_cnt--;
930 if (na->na_ifp != NULL) {
931 ifnet_decr_iorefcnt(na->na_ifp);
932 na->na_ifp = NULL;
933 }
934 if (nifna->nifna_netif != NULL) {
935 nx_netif_release(nifna->nifna_netif);
936 nifna->nifna_netif = NULL;
937 }
938 NETIF_WUNLOCK(nif);
939 SK_DF(SK_VERB_VP, "na \"%s\" (0x%llx)", na->na_name, SK_KVA(na));
940 }
941
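/*
 * Creates and initializes a virtual port adapter for the given channel
 * request; the caller must hold the netif write lock.
 */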
942 int
netif_vp_na_create(struct kern_nexus *nx, struct chreq *chr,
    struct nexus_adapter **nap)
945 {
946 struct nx_netif *nif = NX_NETIF_PRIVATE(nx);
947 struct nxprov_params *nxp = NX_PROV(nx)->nxprov_params;
948 struct nexus_adapter *na = NULL;
949 struct nexus_netif_adapter *nifna;
950 uint32_t slots;
951 int err;
952
953 NETIF_WLOCK_ASSERT_HELD(nif);
954 if (nif->nif_ifp == NULL) {
955 SK_ERR("ifnet not yet attached");
956 return ENXIO;
957 }
958 ASSERT((chr->cr_mode & CHMODE_KERNEL) == 0);
959 if ((chr->cr_mode & CHMODE_USER_PACKET_POOL) == 0) {
960 SK_ERR("user packet pool required");
961 return EINVAL;
962 }
963 /*
964 * No locking needed while checking for the initialized bit because
965 * if this were not set, no other codepaths would modify the flags.
966 */
967 if ((nif->nif_flow_flags & NETIF_FLOW_FLAG_INITIALIZED) == 0) {
968 SK_ERR("demux vp not supported");
969 return ENOTSUP;
970 }
971 na = (struct nexus_adapter *)na_netif_alloc(Z_WAITOK);
972 nifna = NIFNA(na);
973 nifna->nifna_netif = nif;
974 nx_netif_retain(nif);
975 nifna->nifna_flow = NULL;
976
977 (void) snprintf(na->na_name, sizeof(na->na_name),
978 "netif_vp:%d", chr->cr_port);
979 uuid_generate_random(na->na_uuid);
980
981 na_set_nrings(na, NR_TX, nxp->nxp_tx_rings);
982 na_set_nrings(na, NR_RX, nxp->nxp_rx_rings);
983 /*
984 * If the packet pool is configured to be multi-buflet, then we
	 * need 2 pairs of alloc/free rings (for packet and buflet).
986 */
987 na_set_nrings(na, NR_A, ((nxp->nxp_max_frags > 1) &&
988 (sk_channel_buflet_alloc != 0)) ? 2 : 1);
989
990 slots = vp_tx_slots != 0 ? vp_tx_slots :
991 NX_DOM(nx)->nxdom_tx_slots.nb_def;
992 na_set_nslots(na, NR_TX, slots);
993
994 slots = vp_rx_slots != 0 ? vp_rx_slots :
995 NX_DOM(nx)->nxdom_rx_slots.nb_def;
996 na_set_nslots(na, NR_RX, slots);
997
998 na_set_nslots(na, NR_A, NETIF_DEMUX_ALLOC_SLOTS);
999 ASSERT(na_get_nrings(na, NR_TX) <= NX_DOM(nx)->nxdom_tx_rings.nb_max);
1000 ASSERT(na_get_nrings(na, NR_RX) <= NX_DOM(nx)->nxdom_rx_rings.nb_max);
1001 ASSERT(na_get_nslots(na, NR_TX) <= NX_DOM(nx)->nxdom_tx_slots.nb_max);
1002 ASSERT(na_get_nslots(na, NR_RX) <= NX_DOM(nx)->nxdom_rx_slots.nb_max);
1003
1004 atomic_bitset_32(&na->na_flags, NAF_USER_PKT_POOL);
1005
1006 if (chr->cr_mode & CHMODE_EVENT_RING) {
1007 na_set_nrings(na, NR_EV, NX_NETIF_EVENT_RING_NUM);
1008 na_set_nslots(na, NR_EV, NX_NETIF_EVENT_RING_SIZE);
1009 atomic_bitset_32(&na->na_flags, NAF_EVENT_RING);
1010 na->na_channel_event_notify = netif_vp_na_channel_event_notify;
1011 }
1012
1013 na->na_nx_port = chr->cr_port;
1014 na->na_type = NA_NETIF_VP;
1015 na->na_free = na_netif_free;
1016 na->na_dtor = netif_vp_na_dtor;
1017 na->na_activate = netif_vp_na_activate;
1018 na->na_txsync = netif_vp_na_txsync;
1019 na->na_rxsync = netif_vp_na_rxsync;
1020 na->na_krings_create = netif_vp_na_krings_create;
1021 na->na_krings_delete = netif_vp_na_krings_delete;
1022 na->na_special = NULL;
1023 na->na_ifp = nif->nif_ifp;
1024 ifnet_incr_iorefcnt(na->na_ifp);
1025
1026 *(nexus_stats_type_t *)(uintptr_t)&na->na_stats_type =
1027 NEXUS_STATS_TYPE_INVALID;
1028
1029 /* other fields are set in the common routine */
1030 na_attach_common(na, nx, &nx_netif_prov_s);
1031
1032 err = netif_vp_na_mem_new(nx, na);
1033 if (err != 0) {
1034 ASSERT(na->na_arena == NULL);
1035 goto err;
1036 }
1037
1038 *(uint32_t *)(uintptr_t)&na->na_flowadv_max = nxp->nxp_flowadv_max;
1039 ASSERT(na->na_flowadv_max == 0 ||
1040 skmem_arena_nexus(na->na_arena)->arn_flowadv_obj != NULL);
1041
1042 nif->nif_vp_cnt++;
1043 *nap = na;
1044 return 0;
1045
1046 err:
1047 if (na != NULL) {
1048 if (na->na_ifp != NULL) {
1049 ifnet_decr_iorefcnt(na->na_ifp);
1050 na->na_ifp = NULL;
1051 }
1052 if (na->na_arena != NULL) {
1053 skmem_arena_release(na->na_arena);
1054 na->na_arena = NULL;
1055 }
1056 if (nifna->nifna_netif != NULL) {
1057 nx_netif_release(nifna->nifna_netif);
1058 nifna->nifna_netif = NULL;
1059 }
1060 NA_FREE(na);
1061 }
1062 SK_ERR("VP NA creation failed, err(%d)", err);
1063 return err;
1064 }
1065
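/*
 * Delivers a channel event (packet transmit status) to the adapter's
 * event ring by copying it into a newly allocated packet.
 */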
1066 static int
netif_vp_na_channel_event_notify(struct nexus_adapter *vpna,
    struct __kern_packet *dev_pkt, struct __kern_channel_event *ev,
    uint16_t ev_len)
1070 {
1071 #pragma unused (dev_pkt)
1072 int err;
1073 char *baddr;
1074 kern_packet_t ph;
1075 kern_buflet_t buf;
1076 sk_protect_t protect;
1077 kern_channel_slot_t slot;
1078 struct __kern_packet *vpna_pkt = NULL;
1079 struct __kern_channel_event_metadata *emd;
1080 struct __kern_channel_ring *ring = &vpna->na_event_rings[0];
1081 struct netif_stats *nifs = &NIFNA(vpna)->nifna_netif->nif_stats;
1082
1083 if (__improbable(!NA_IS_ACTIVE(vpna))) {
1084 STATS_INC(nifs, NETIF_STATS_EV_DROP_NA_INACTIVE);
1085 err = ENXIO;
1086 goto error;
1087 }
1088 if (__improbable(NA_IS_DEFUNCT(vpna))) {
1089 STATS_INC(nifs, NETIF_STATS_EV_DROP_NA_DEFUNCT);
1090 err = ENXIO;
1091 goto error;
1092 }
1093 if (!NA_CHANNEL_EVENT_ATTACHED(vpna)) {
1094 STATS_INC(nifs, NETIF_STATS_EV_DROP_KEVENT_INACTIVE);
1095 err = ENXIO;
1096 goto error;
1097 }
1098 if (__improbable(KR_DROP(ring))) {
1099 STATS_INC(nifs, NETIF_STATS_EV_DROP_KRDROP_MODE);
1100 err = ENXIO;
1101 goto error;
1102 }
1103 vpna_pkt = nx_netif_alloc_packet(ring->ckr_pp, ev_len, &ph);
1104 if (__improbable(vpna_pkt == NULL)) {
1105 STATS_INC(nifs, NETIF_STATS_EV_DROP_NOMEM_PKT);
1106 err = ENOMEM;
1107 goto error;
1108 }
1109 buf = __packet_get_next_buflet(ph, NULL);
1110 baddr = __buflet_get_data_address(buf);
1111 emd = (struct __kern_channel_event_metadata *)(void *)baddr;
1112 emd->emd_etype = CHANNEL_EVENT_PACKET_TRANSMIT_STATUS;
1113 emd->emd_nevents = 1;
1114 bcopy(ev, (baddr + __KERN_CHANNEL_EVENT_OFFSET), ev_len);
1115 err = __buflet_set_data_length(buf,
1116 (ev_len + __KERN_CHANNEL_EVENT_OFFSET));
1117 VERIFY(err == 0);
1118 err = __packet_finalize(ph);
1119 VERIFY(err == 0);
1120 kr_enter(ring, TRUE);
1121 protect = sk_sync_protect();
1122 slot = kern_channel_get_next_slot(ring, NULL, NULL);
1123 if (slot == NULL) {
1124 sk_sync_unprotect(protect);
1125 kr_exit(ring);
1126 STATS_INC(nifs, NETIF_STATS_EV_DROP_KRSPACE);
1127 err = ENOSPC;
1128 goto error;
1129 }
1130 err = kern_channel_slot_attach_packet(ring, slot, ph);
1131 VERIFY(err == 0);
1132 vpna_pkt = NULL;
1133 kern_channel_advance_slot(ring, slot);
1134 sk_sync_unprotect(protect);
1135 kr_exit(ring);
1136 kern_channel_event_notify(&vpna->na_tx_rings[0]);
1137 STATS_INC(nifs, NETIF_STATS_EV_SENT);
1138 return 0;
1139
1140 error:
1141 ASSERT(err != 0);
1142 if (vpna_pkt != NULL) {
1143 nx_netif_free_packet(vpna_pkt);
1144 }
1145 STATS_INC(nifs, NETIF_STATS_EV_DROP);
1146 return err;
1147 }
1148