/*
 * Copyright (c) 2015-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/flowswitch/nx_flowswitch.h>
#include <skywalk/nexus/flowswitch/fsw_var.h>
#include <sys/sdt.h>

static void fsw_vp_na_dtor(struct nexus_adapter *);
static int fsw_vp_na_special(struct nexus_adapter *,
    struct kern_channel *, struct chreq *, nxspec_cmd_t);
static struct nexus_vp_adapter *fsw_vp_na_alloc(zalloc_flags_t);
static void fsw_vp_na_free(struct nexus_adapter *);
static int fsw_vp_na_channel_event_notify(struct nexus_adapter *vpna,
    struct __kern_channel_event *__sized_by(ev_len)ev, uint16_t ev_len);

static SKMEM_TYPE_DEFINE(na_vp_zone, struct nexus_vp_adapter);

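/*
 * Global generation counter for VP adapters; bumped on every activation so
 * that a stale (port, generation) pair encoded in an nx_port_id can be
 * detected later in fsw_find_port_vpna().
 */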
static uint16_t fsw_vpna_gencnt = 0;

/* na_activate() callback for flow switch ports */
int
fsw_vp_na_activate(struct nexus_adapter *na, na_activate_mode_t mode)
{
    int ret = 0;
    struct nexus_vp_adapter *vpna = (struct nexus_vp_adapter *)(void *)na;
    struct nx_flowswitch *fsw = vpna->vpna_fsw;

    ASSERT(na->na_type == NA_FLOWSWITCH_VP);

    SK_DF(SK_VERB_FSW, "na \"%s\" (%p) %s", na->na_name,
        SK_KVA(na), na_activate_mode2str(mode));

    /*
     * Persistent ports may be put in Skywalk mode
     * before being attached to a FlowSwitch.
     */
    FSW_WLOCK(fsw);

    os_atomic_inc(&fsw_vpna_gencnt, relaxed);
    vpna->vpna_gencnt = fsw_vpna_gencnt;

    if (mode == NA_ACTIVATE_MODE_ON) {
        os_atomic_or(&na->na_flags, NAF_ACTIVE, relaxed);
    }

    ret = fsw_port_na_activate(fsw, vpna, mode);
    if (ret != 0) {
        SK_DF(SK_VERB_FSW, "na \"%s\" (%p) %s err(%d)",
            na->na_name, SK_KVA(na), na_activate_mode2str(mode), ret);
        if (mode == NA_ACTIVATE_MODE_ON) {
            os_atomic_andnot(&na->na_flags, NAF_ACTIVE, relaxed);
        }
        goto done;
    }

    if (mode == NA_ACTIVATE_MODE_DEFUNCT ||
        mode == NA_ACTIVATE_MODE_OFF) {
        struct skmem_arena_nexus *arn = skmem_arena_nexus(na->na_arena);

        if (mode == NA_ACTIVATE_MODE_OFF) {
            os_atomic_andnot(&na->na_flags, NAF_ACTIVE, relaxed);
        }

        AR_LOCK(na->na_arena);
        if (na->na_type == NA_FLOWSWITCH_VP &&
            arn->arn_stats_obj != NULL) {
            fsw_fold_stats(fsw,
                arn->arn_stats_obj, na->na_stats_type);
        }
        AR_UNLOCK(na->na_arena);

        enum txrx t;
        uint32_t i;
        struct __nx_stats_channel_errors stats;
        for_all_rings(t) {
            for (i = 0; i < na_get_nrings(na, t); i++) {
                stats.nxs_cres =
                    &NAKR(na, t)[i].ckr_err_stats;
                fsw_fold_stats(fsw, &stats,
                    NEXUS_STATS_TYPE_CHAN_ERRORS);
            }
        }
    }

done:
    FSW_WUNLOCK(fsw);
    return ret;
}

/* na_dtor callback for ephemeral flow switch ports */
static void
fsw_vp_na_dtor(struct nexus_adapter *na)
{
    struct nexus_vp_adapter *vpna = (struct nexus_vp_adapter *)(void *)na;
    struct nx_flowswitch *fsw = vpna->vpna_fsw;

    SK_LOCK_ASSERT_HELD();
    ASSERT(na->na_type == NA_FLOWSWITCH_VP);

    SK_DF(SK_VERB_FSW, "na \"%s\" (%p)", na->na_name, SK_KVA(na));

    if (fsw != NULL) {
        FSW_WLOCK(fsw);
        fsw_port_free(fsw, vpna, vpna->vpna_nx_port, FALSE);
        FSW_WUNLOCK(fsw);
    }
}

/*
 * na_krings_create callback for flow switch ports.
 * Sets up the kernel rings and slot memory via the standard
 * na_rings_mem_setup() path.
 */
int
fsw_vp_na_krings_create(struct nexus_adapter *na, struct kern_channel *ch)
{
    ASSERT(na->na_type == NA_FLOWSWITCH_VP);

    return na_rings_mem_setup(na, FALSE, ch);
}


/* na_krings_delete callback for flow switch ports. */
void
fsw_vp_na_krings_delete(struct nexus_adapter *na, struct kern_channel *ch,
    boolean_t defunct)
{
    ASSERT(na->na_type == NA_FLOWSWITCH_VP);

    na_rings_mem_teardown(na, ch, defunct);
}

/* na_txsync callback for flow switch ports */
int
fsw_vp_na_txsync(struct __kern_channel_ring *kring, struct proc *p,
    uint32_t flags)
{
#pragma unused(flags)
    struct nexus_vp_adapter *vpna = VPNA(KRNA(kring));
    struct nx_flowswitch *fsw = vpna->vpna_fsw;
    int error = 0;

    /*
     * Flush packets if and only if the ring isn't in drop mode,
     * and if the adapter is currently attached to a nexus port;
     * otherwise we drop them.
     */
    if (__probable(!KR_DROP(kring) && fsw != NULL)) {
        fsw_ring_flush(fsw, kring, p);
    } else {
        int dropped_pkts;
        /* packets from khead to rhead have been dropped */
        dropped_pkts = kring->ckr_rhead - kring->ckr_khead;
        if (dropped_pkts < 0) {
            dropped_pkts += kring->ckr_num_slots;
        }
        if (fsw != NULL) {
            STATS_INC(&fsw->fsw_stats, FSW_STATS_DST_RING_DROPMODE);
            STATS_ADD(&fsw->fsw_stats, FSW_STATS_DROP,
                dropped_pkts);
        }
        /* we're dropping; claim all */
        slot_idx_t sidx = kring->ckr_khead;
        while (sidx != kring->ckr_rhead) {
            struct __kern_slot_desc *ksd = KR_KSD(kring, sidx);
            if (KSD_VALID_METADATA(ksd)) {
                struct __kern_packet *pkt = ksd->sd_pkt;
                (void) KR_SLOT_DETACH_METADATA(kring, ksd);
                pp_free_packet_single(pkt);
            }
            sidx = SLOT_NEXT(sidx, kring->ckr_lim);
        }
        kring->ckr_khead = kring->ckr_rhead;
        kring->ckr_ktail = SLOT_PREV(kring->ckr_rhead, kring->ckr_lim);
        error = ENODEV;
        SK_ERR("kr \"%s\" (%p) krflags 0x%x in drop mode (err %d)",
            kring->ckr_name, SK_KVA(kring), kring->ckr_flags, error);
    }

    SK_DF(SK_VERB_FSW | SK_VERB_SYNC | SK_VERB_TX,
        "%s(%d) kr \"%s\" (%p) krflags 0x%x ring %u flags 0x%x",
        sk_proc_name(p), sk_proc_pid(p), kring->ckr_name,
        SK_KVA(kring), kring->ckr_flags, kring->ckr_ring_id, flags);

    return error;
}

/*
 * na_rxsync callback for flow switch ports. We're already protected
 * against concurrent calls from userspace.
 */
int
fsw_vp_na_rxsync(struct __kern_channel_ring *kring, struct proc *p,
    uint32_t flags)
{
#pragma unused(p, flags)
    slot_idx_t head, khead_prev;

    head = kring->ckr_rhead;
    ASSERT(head <= kring->ckr_lim);

    /* First part, import newly received packets. */
    /* actually nothing to do here, they are already in the kring */

    /* Second part, skip past packets that userspace has released. */
    khead_prev = kring->ckr_khead;
    kring->ckr_khead = head;

    /* ensure global visibility */
    os_atomic_thread_fence(seq_cst);

    SK_DF(SK_VERB_FSW | SK_VERB_SYNC | SK_VERB_RX,
        "%s(%d) kr \"%s\" (%p) krflags 0x%x ring %u "
        "kh %u (was %u) rh %u flags 0x%x", sk_proc_name(p),
        sk_proc_pid(p), kring->ckr_name, SK_KVA(kring), kring->ckr_flags,
        kring->ckr_ring_id, kring->ckr_khead, khead_prev, kring->ckr_rhead,
        flags);

    return 0;
}

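/*
 * na_special() callback for flow switch ports.  Only kernel-owned VP
 * adapters reach this path; it binds/unbinds the channel for the special
 * connect/disconnect commands and toggles ring drop mode on start/stop.
 */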
static int
fsw_vp_na_special(struct nexus_adapter *na, struct kern_channel *ch,
    struct chreq *chr, nxspec_cmd_t spec_cmd)
{
    int error = 0;

    SK_LOCK_ASSERT_HELD();
    ASSERT(na->na_type == NA_FLOWSWITCH_VP);

    /*
     * fsw_vp_na_attach() must have created this adapter
     * exclusively for kernel (NAF_KERNEL); leave this alone.
     */
    ASSERT(NA_KERNEL_ONLY(na));

    switch (spec_cmd) {
    case NXSPEC_CMD_CONNECT:
        ASSERT(!(na->na_flags & NAF_SPEC_INIT));
        ASSERT(na->na_channels == 0);

        error = na_bind_channel(na, ch, chr);
        if (error != 0) {
            goto done;
        }

        os_atomic_or(&na->na_flags, NAF_SPEC_INIT, relaxed);
        break;

    case NXSPEC_CMD_DISCONNECT:
        ASSERT(na->na_channels == 1);
        ASSERT(na->na_flags & NAF_SPEC_INIT);
        os_atomic_andnot(&na->na_flags, NAF_SPEC_INIT, relaxed);

        na_unbind_channel(ch);
        break;

    case NXSPEC_CMD_START:
        na_kr_drop(na, FALSE);
        break;

    case NXSPEC_CMD_STOP:
        na_kr_drop(na, TRUE);
        break;

    default:
        error = EINVAL;
        break;
    }

done:
    SK_DF(error ? SK_VERB_ERROR : SK_VERB_FSW,
        "ch %p na \"%s\" (%p) nx %p spec_cmd %u (err %d)",
        SK_KVA(ch), na->na_name, SK_KVA(na), SK_KVA(ch->ch_nexus),
        spec_cmd, error);

    return error;
}

/*
 * Create a nexus_vp_adapter that describes a flow switch port.
 */
int
fsw_vp_na_create(struct kern_nexus *nx, struct chreq *chr, struct proc *p,
    struct nexus_vp_adapter **ret)
{
    struct nxprov_params *nxp = NX_PROV(nx)->nxprov_params;
    struct nx_flowswitch *fsw = NX_FSW_PRIVATE(nx);
    struct nexus_vp_adapter *vpna;
    struct nexus_adapter *na;
    int error;

    SK_LOCK_ASSERT_HELD();

    if ((chr->cr_mode & CHMODE_KERNEL) != 0) {
        SK_ERR("VP adapter can't be used by kernel");
        return ENOTSUP;
    }
    if ((chr->cr_mode & CHMODE_USER_PACKET_POOL) == 0) {
        SK_ERR("user packet pool required");
        return EINVAL;
    }

    vpna = fsw_vp_na_alloc(Z_WAITOK);

    ASSERT(vpna->vpna_up.na_type == NA_FLOWSWITCH_VP);
    ASSERT(vpna->vpna_up.na_free == fsw_vp_na_free);

    na = &vpna->vpna_up;
    (void) snprintf(na->na_name, sizeof(na->na_name), "fsw_%s[%u]_%s.%d",
        fsw->fsw_ifp ? if_name(fsw->fsw_ifp) : "??", chr->cr_port,
        proc_best_name(p), proc_pid(p));
    na->na_name[sizeof(na->na_name) - 1] = '\0';
    uuid_generate_random(na->na_uuid);

    /*
     * Verify upper bounds; for all cases including user pipe nexus,
     * as well as flow switch-based ones, the parameters must have
     * already been validated by corresponding nxdom_prov_params()
     * function defined by each domain. The user pipe nexus would
     * be checking against the flow switch's parameters there.
     */
    na_set_nrings(na, NR_TX, nxp->nxp_tx_rings);
    na_set_nrings(na, NR_RX, nxp->nxp_rx_rings);
    /*
     * If the packet pool is configured to be multi-buflet, then we
     * need 2 pairs of alloc/free rings (for packet and buflet).
     */
    na_set_nrings(na, NR_A, ((nxp->nxp_max_frags > 1) &&
        (sk_channel_buflet_alloc != 0)) ? 2 : 1);
    na_set_nslots(na, NR_TX, nxp->nxp_tx_slots);
    na_set_nslots(na, NR_RX, nxp->nxp_rx_slots);
    na_set_nslots(na, NR_A, NX_FSW_AFRINGSIZE);
    ASSERT(na_get_nrings(na, NR_TX) <= NX_DOM(nx)->nxdom_tx_rings.nb_max);
    ASSERT(na_get_nrings(na, NR_RX) <= NX_DOM(nx)->nxdom_rx_rings.nb_max);
    ASSERT(na_get_nslots(na, NR_TX) <= NX_DOM(nx)->nxdom_tx_slots.nb_max);
    ASSERT(na_get_nslots(na, NR_RX) <= NX_DOM(nx)->nxdom_rx_slots.nb_max);

    os_atomic_or(&na->na_flags, NAF_USER_PKT_POOL, relaxed);

    if (chr->cr_mode & CHMODE_LOW_LATENCY) {
        os_atomic_or(&na->na_flags, NAF_LOW_LATENCY, relaxed);
    }

    if (chr->cr_mode & CHMODE_EVENT_RING) {
        na_set_nrings(na, NR_EV, NX_FSW_EVENT_RING_NUM);
        na_set_nslots(na, NR_EV, NX_FSW_EVENT_RING_SIZE);
        os_atomic_or(&na->na_flags, NAF_EVENT_RING, relaxed);
        na->na_channel_event_notify = fsw_vp_na_channel_event_notify;
    }
    if (nxp->nxp_max_frags > 1 && fsw->fsw_tso_mode != FSW_TSO_MODE_NONE) {
        na_set_nrings(na, NR_LBA, 1);
        na_set_nslots(na, NR_LBA, NX_FSW_AFRINGSIZE);
    }
    vpna->vpna_nx_port = chr->cr_port;
    na->na_dtor = fsw_vp_na_dtor;
    na->na_activate = fsw_vp_na_activate;
    na->na_txsync = fsw_vp_na_txsync;
    na->na_rxsync = fsw_vp_na_rxsync;
    na->na_krings_create = fsw_vp_na_krings_create;
    na->na_krings_delete = fsw_vp_na_krings_delete;
    na->na_special = fsw_vp_na_special;

    *(nexus_stats_type_t *)(uintptr_t)&na->na_stats_type =
        NEXUS_STATS_TYPE_FSW;

    /* other fields are set in the common routine */
    na_attach_common(na, nx, &nx_fsw_prov_s);

    if ((error = NX_DOM_PROV(nx)->nxdom_prov_mem_new(NX_DOM_PROV(nx),
        nx, na)) != 0) {
        ASSERT(na->na_arena == NULL);
        goto err;
    }
    ASSERT(na->na_arena != NULL);

    *(uint32_t *)(uintptr_t)&na->na_flowadv_max = nxp->nxp_flowadv_max;
    ASSERT(na->na_flowadv_max == 0 ||
        skmem_arena_nexus(na->na_arena)->arn_flowadv_obj != NULL);

#if SK_LOG
    uuid_string_t uuidstr;
    SK_DF(SK_VERB_FSW, "na_name: \"%s\"", na->na_name);
    SK_DF(SK_VERB_FSW, " UUID: %s", sk_uuid_unparse(na->na_uuid,
        uuidstr));
    SK_DF(SK_VERB_FSW, " nx: %p (\"%s\":\"%s\")",
        SK_KVA(na->na_nx), NX_DOM(na->na_nx)->nxdom_name,
        NX_DOM_PROV(na->na_nx)->nxdom_prov_name);
    SK_DF(SK_VERB_FSW, " flags: 0x%x", na->na_flags);
    SK_DF(SK_VERB_FSW, " stats_type: %u", na->na_stats_type);
    SK_DF(SK_VERB_FSW, " flowadv_max: %u", na->na_flowadv_max);
    SK_DF(SK_VERB_FSW, " rings: tx %u rx %u af %u",
        na_get_nrings(na, NR_TX), na_get_nrings(na, NR_RX),
        na_get_nrings(na, NR_A));
    SK_DF(SK_VERB_FSW, " slots: tx %u rx %u af %u",
        na_get_nslots(na, NR_TX), na_get_nslots(na, NR_RX),
        na_get_nslots(na, NR_A));
#if CONFIG_NEXUS_USER_PIPE
    SK_DF(SK_VERB_FSW, " next_pipe: %u", na->na_next_pipe);
    SK_DF(SK_VERB_FSW, " max_pipes: %u", na->na_max_pipes);
#endif /* CONFIG_NEXUS_USER_PIPE */
    SK_DF(SK_VERB_FSW, " nx_port: %d", (int)vpna->vpna_nx_port);
#endif /* SK_LOG */

    *ret = vpna;
    na_retain_locked(&vpna->vpna_up);

    return 0;

err:
    if (na->na_arena != NULL) {
        skmem_arena_release(na->na_arena);
        na->na_arena = NULL;
    }
    NA_FREE(&vpna->vpna_up);
    return error;
}

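/* Allocate a zeroed VP adapter from its zone; type and free callback are preset. */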
static struct nexus_vp_adapter *
fsw_vp_na_alloc(zalloc_flags_t how)
{
    struct nexus_vp_adapter *vpna;

    static_assert(offsetof(struct nexus_vp_adapter, vpna_up) == 0);

    vpna = zalloc_flags(na_vp_zone, how | Z_ZERO);
    if (vpna) {
        vpna->vpna_up.na_type = NA_FLOWSWITCH_VP;
        vpna->vpna_up.na_free = fsw_vp_na_free;
    }
    return vpna;
}

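/* na_free() callback; returns a fully released VP adapter to its zone. */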
static void
fsw_vp_na_free(struct nexus_adapter *na)
{
    struct nexus_vp_adapter *__single vpna = (struct nexus_vp_adapter *)(void *)na;

    ASSERT(vpna->vpna_up.na_refcount == 0);
    SK_DF(SK_VERB_MEM, "vpna %p FREE", SK_KVA(vpna));
    bzero(vpna, sizeof(*vpna));
    zfree(na_vp_zone, vpna);
}

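/* Fold a ring's packet allocation failure count into the flow switch stats. */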
void
fsw_vp_channel_error_stats_fold(struct fsw_stats *fs,
    struct __nx_stats_channel_errors *es)
{
    STATS_ADD(fs, FSW_STATS_CHAN_ERR_UPP_ALLOC,
        es->nxs_cres->cres_pkt_alloc_failures);
}

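/*
 * Non-sleeping packet allocation helper used for the channel event ring;
 * returns NULL (and fires a DTrace probe) if the pool cannot supply a
 * packet of the requested size.
 */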
SK_NO_INLINE_ATTRIBUTE
static struct __kern_packet *
nx_fsw_alloc_packet(struct kern_pbufpool *pp, uint32_t sz, kern_packet_t *php)
{
    kern_packet_t ph;
    ph = pp_alloc_packet_by_size(pp, sz, SKMEM_NOSLEEP);
    if (__improbable(ph == 0)) {
        DTRACE_SKYWALK2(alloc__fail, struct kern_pbufpool *,
            pp, size_t, sz);
        return NULL;
    }
    if (php != NULL) {
        *php = ph;
    }
    return SK_PTR_ADDR_KPKT(ph);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_fsw_free_packet(struct __kern_packet *pkt)
{
    pp_free_packet_single(pkt);
}

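/*
 * Deliver a channel event to this VP adapter's event ring: allocate a
 * packet from the ring's pool, fill in the event metadata header, copy in
 * the event, attach the packet to the next free slot and notify the channel.
 */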
static int
fsw_vp_na_channel_event_notify(struct nexus_adapter *vpna,
    struct __kern_channel_event *__sized_by(ev_len)ev, uint16_t ev_len)
{
    int err;
    char *baddr;
    kern_packet_t ph;
    kern_buflet_t __single buf;
    sk_protect_t protect;
    kern_channel_slot_t slot;
    struct __kern_packet *vpna_pkt = NULL;
    struct __kern_channel_event_metadata *emd;
    struct __kern_channel_ring *ring = &vpna->na_event_rings[0];
    struct fsw_stats *fs = &((struct nexus_vp_adapter *)(vpna))->vpna_fsw->fsw_stats;

    if (__probable(ev->ev_type == CHANNEL_EVENT_PACKET_TRANSMIT_STATUS)) {
        STATS_INC(fs, FSW_STATS_EV_RECV_TX_STATUS);
    }
    if (__improbable(ev->ev_type == CHANNEL_EVENT_PACKET_TRANSMIT_EXPIRED)) {
        STATS_INC(fs, FSW_STATS_EV_RECV_TX_EXPIRED);
    }
    STATS_INC(fs, FSW_STATS_EV_RECV);

    if (__improbable(!NA_IS_ACTIVE(vpna))) {
        STATS_INC(fs, FSW_STATS_EV_DROP_NA_INACTIVE);
        err = ENXIO;
        goto error;
    }
    if (__improbable(NA_IS_DEFUNCT(vpna))) {
        STATS_INC(fs, FSW_STATS_EV_DROP_NA_DEFUNCT);
        err = ENXIO;
        goto error;
    }
    if (!NA_CHANNEL_EVENT_ATTACHED(vpna)) {
        STATS_INC(fs, FSW_STATS_EV_DROP_KEVENT_INACTIVE);
        err = ENXIO;
        goto error;
    }
    if (__improbable(KR_DROP(ring))) {
        STATS_INC(fs, FSW_STATS_EV_DROP_KRDROP_MODE);
        err = ENXIO;
        goto error;
    }

    vpna_pkt = nx_fsw_alloc_packet(ring->ckr_pp, ev_len, &ph);
    if (__improbable(vpna_pkt == NULL)) {
        STATS_INC(fs, FSW_STATS_EV_DROP_NOMEM_PKT);
        err = ENOMEM;
        goto error;
    }
    buf = __packet_get_next_buflet(ph, NULL);
    baddr = __buflet_get_data_address(buf);
    emd = (struct __kern_channel_event_metadata *)(void *)baddr;
    emd->emd_etype = ev->ev_type;
    emd->emd_nevents = 1;
    bcopy(ev, (baddr + __KERN_CHANNEL_EVENT_OFFSET), ev_len);
    err = __buflet_set_data_length(buf,
        (ev_len + __KERN_CHANNEL_EVENT_OFFSET));
    VERIFY(err == 0);
    err = __packet_finalize(ph);
    VERIFY(err == 0);
    kr_enter(ring, TRUE);
    protect = sk_sync_protect();
    slot = kern_channel_get_next_slot(ring, NULL, NULL);
    if (slot == NULL) {
        sk_sync_unprotect(protect);
        kr_exit(ring);
        STATS_INC(fs, FSW_STATS_EV_DROP_KRSPACE);
        err = ENOBUFS;
        goto error;
    }
    err = kern_channel_slot_attach_packet(ring, slot, ph);
    VERIFY(err == 0);
    vpna_pkt = NULL;
    kern_channel_advance_slot(ring, slot);
    sk_sync_unprotect(protect);
    kr_exit(ring);
    kern_channel_event_notify(&vpna->na_tx_rings[0]);
    STATS_INC(fs, NETIF_STATS_EV_SENT);
    return 0;

error:
    ASSERT(err != 0);
    if (vpna_pkt != NULL) {
        nx_fsw_free_packet(vpna_pkt);
    }
    STATS_INC(fs, FSW_STATS_EV_DROP);
    return err;
}

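/*
 * Map a compound nx_port_id (port, generation count) to the VP adapter
 * currently bound at that flow switch port; returns NULL if the port is
 * not a user VPNA port, is no longer valid, or the generation is stale.
 */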
static inline struct nexus_adapter *
fsw_find_port_vpna(struct nx_flowswitch *fsw, uint32_t nx_port_id)
{
    struct kern_nexus *nx = fsw->fsw_nx;
    struct nexus_adapter *na = NULL;
    nexus_port_t port;
    uint16_t gencnt;

    PKT_DECOMPOSE_NX_PORT_ID(nx_port_id, port, gencnt);

    if (port < FSW_VP_USER_MIN) {
        SK_ERR("non VPNA port");
        return NULL;
    }

    if (__improbable(!nx_port_is_valid(nx, port))) {
        SK_ERR("%s[%d] port no longer valid",
            if_name(fsw->fsw_ifp), port);
        return NULL;
    }

    na = nx_port_get_na(nx, port);
    if (na != NULL && VPNA(na)->vpna_gencnt != gencnt) {
        return NULL;
    }
    return na;
}

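/*
 * Deliver a channel event to the VP adapter identified by nx_port_id,
 * forwarding it to the adapter's na_channel_event_notify() callback
 * under the flow switch read lock.
 */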
errno_t
fsw_vp_na_channel_event(struct nx_flowswitch *fsw, uint32_t nx_port_id,
    struct __kern_channel_event *__sized_by(event_len)event, uint16_t event_len)
{
    int err = 0;
    struct nexus_adapter *fsw_vpna;

    SK_DF(SK_VERB_EVENTS, "%s[%d] ev: %p ev_len: %hu "
        "ev_type: %u ev_flags: %u _reserved: %hu ev_dlen: %hu",
        if_name(fsw->fsw_ifp), nx_port_id, event, event_len,
        event->ev_type, event->ev_flags, event->_reserved, event->ev_dlen);

    FSW_RLOCK(fsw);
    struct fsw_stats *fs = &fsw->fsw_stats;

    fsw_vpna = fsw_find_port_vpna(fsw, nx_port_id);
    if (__improbable(fsw_vpna == NULL)) {
        err = ENXIO;
        STATS_INC(fs, FSW_STATS_EV_DROP_DEMUX_ERR);
        goto error;
    }
    if (__improbable(fsw_vpna->na_channel_event_notify == NULL)) {
        err = ENOTSUP;
        STATS_INC(fs, FSW_STATS_EV_DROP_EV_VPNA_NOTSUP);
        goto error;
    }
    err = fsw_vpna->na_channel_event_notify(fsw_vpna, event, event_len);
    FSW_RUNLOCK(fsw);
    return err;

error:
    STATS_INC(fs, FSW_STATS_EV_DROP);
    FSW_RUNLOCK(fsw);
    return err;
}
