/*
 * Copyright (c) 2015-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/flowswitch/nx_flowswitch.h>
#include <skywalk/nexus/flowswitch/fsw_var.h>
#include <sys/sdt.h>

static void fsw_vp_na_dtor(struct nexus_adapter *);
static int fsw_vp_na_special(struct nexus_adapter *,
    struct kern_channel *, struct chreq *, nxspec_cmd_t);
static struct nexus_vp_adapter *fsw_vp_na_alloc(zalloc_flags_t);
static void fsw_vp_na_free(struct nexus_adapter *);
static int fsw_vp_na_channel_event_notify(struct nexus_adapter *vpna,
    struct __kern_channel_event *ev, uint16_t ev_len);

static SKMEM_TYPE_DEFINE(na_vp_zone, struct nexus_vp_adapter);

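/*
 * Generation count bumped on every VP adapter activation; it is
 * recorded in vpna_gencnt and carried inside nx_port_ids so that
 * stale port references can be detected (see fsw_find_port_vpna()).
 */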
static uint16_t fsw_vpna_gencnt = 0;

/* na_activate() callback for flow switch ports */
int
fsw_vp_na_activate(struct nexus_adapter *na, na_activate_mode_t mode)
{
    int ret = 0;
    struct nexus_vp_adapter *vpna = (struct nexus_vp_adapter *)(void *)na;
    struct nx_flowswitch *fsw = vpna->vpna_fsw;

    ASSERT(na->na_type == NA_FLOWSWITCH_VP);

    SK_DF(SK_VERB_FSW, "na \"%s\" (0x%llx) %s", na->na_name,
        SK_KVA(na), na_activate_mode2str(mode));

    /*
     * Persistent ports may be put in Skywalk mode
     * before being attached to a FlowSwitch.
     */
    FSW_WLOCK(fsw);

    os_atomic_inc(&fsw_vpna_gencnt, relaxed);
    vpna->vpna_gencnt = fsw_vpna_gencnt;

    if (mode == NA_ACTIVATE_MODE_ON) {
        os_atomic_or(&na->na_flags, NAF_ACTIVE, relaxed);
    }

    ret = fsw_port_na_activate(fsw, vpna, mode);
    if (ret != 0) {
        SK_DF(SK_VERB_FSW, "na \"%s\" (0x%llx) %s err(%d)",
            na->na_name, SK_KVA(na), na_activate_mode2str(mode), ret);
        if (mode == NA_ACTIVATE_MODE_ON) {
            os_atomic_andnot(&na->na_flags, NAF_ACTIVE, relaxed);
        }
        goto done;
    }

    if (mode == NA_ACTIVATE_MODE_DEFUNCT ||
        mode == NA_ACTIVATE_MODE_OFF) {
        struct skmem_arena_nexus *arn = skmem_arena_nexus(na->na_arena);

        if (mode == NA_ACTIVATE_MODE_OFF) {
            os_atomic_andnot(&na->na_flags, NAF_ACTIVE, relaxed);
        }

        AR_LOCK(na->na_arena);
        if (na->na_type == NA_FLOWSWITCH_VP &&
            arn->arn_stats_obj != NULL) {
            fsw_fold_stats(fsw,
                arn->arn_stats_obj, na->na_stats_type);
        }
        AR_UNLOCK(na->na_arena);

        enum txrx t;
        uint32_t i;
        struct __nx_stats_channel_errors stats;
        for_all_rings(t) {
            for (i = 0; i < na_get_nrings(na, t); i++) {
                stats.nxs_cres =
                    &NAKR(na, t)[i].ckr_err_stats;
                fsw_fold_stats(fsw, &stats,
                    NEXUS_STATS_TYPE_CHAN_ERRORS);
            }
        }
    }

done:
    FSW_WUNLOCK(fsw);
    return ret;
}

/* na_dtor callback for ephemeral flow switch ports */
static void
fsw_vp_na_dtor(struct nexus_adapter *na)
{
    struct nexus_vp_adapter *vpna = (struct nexus_vp_adapter *)(void *)na;
    struct nx_flowswitch *fsw = vpna->vpna_fsw;

    SK_LOCK_ASSERT_HELD();
    ASSERT(na->na_type == NA_FLOWSWITCH_VP);

    SK_DF(SK_VERB_FSW, "na \"%s\" (0x%llx)", na->na_name, SK_KVA(na));

    if (fsw != NULL) {
        FSW_WLOCK(fsw);
        fsw_port_free(fsw, vpna, vpna->vpna_nx_port, FALSE);
        FSW_WUNLOCK(fsw);
    }
}

/*
 * na_krings_create callback for flow switch ports.
 * Sets up the rings memory via the standard na_rings_mem_setup().
 */
int
fsw_vp_na_krings_create(struct nexus_adapter *na, struct kern_channel *ch)
{
    ASSERT(na->na_type == NA_FLOWSWITCH_VP);

    return na_rings_mem_setup(na, FALSE, ch);
}


/* na_krings_delete callback for flow switch ports. */
void
fsw_vp_na_krings_delete(struct nexus_adapter *na, struct kern_channel *ch,
    boolean_t defunct)
{
    ASSERT(na->na_type == NA_FLOWSWITCH_VP);

    na_rings_mem_teardown(na, ch, defunct);
}

/* na_txsync callback for flow switch ports */
int
fsw_vp_na_txsync(struct __kern_channel_ring *kring, struct proc *p,
    uint32_t flags)
{
#pragma unused(flags)
    struct nexus_vp_adapter *vpna = VPNA(KRNA(kring));
    struct nx_flowswitch *fsw = vpna->vpna_fsw;
    int error = 0;

    /*
     * Flush packets if and only if the ring isn't in drop mode,
     * and if the adapter is currently attached to a nexus port;
     * otherwise we drop them.
     */
    if (__probable(!KR_DROP(kring) && fsw != NULL)) {
        fsw_ring_flush(fsw, kring, p);
    } else {
        int dropped_pkts;
        /* packets between khead and rhead are dropped */
        dropped_pkts = kring->ckr_rhead - kring->ckr_khead;
        if (dropped_pkts < 0) {
            dropped_pkts += kring->ckr_num_slots;
        }
        if (fsw != NULL) {
            STATS_INC(&fsw->fsw_stats, FSW_STATS_DST_RING_DROPMODE);
            STATS_ADD(&fsw->fsw_stats, FSW_STATS_DROP,
                dropped_pkts);
        }
        /* we're dropping; claim all */
        slot_idx_t sidx = kring->ckr_khead;
        while (sidx != kring->ckr_rhead) {
            struct __kern_slot_desc *ksd = KR_KSD(kring, sidx);
            if (KSD_VALID_METADATA(ksd)) {
                struct __kern_packet *pkt = ksd->sd_pkt;
                (void) KR_SLOT_DETACH_METADATA(kring, ksd);
                pp_free_packet_single(pkt);
            }
            sidx = SLOT_NEXT(sidx, kring->ckr_lim);
        }
        kring->ckr_khead = kring->ckr_rhead;
        kring->ckr_ktail = SLOT_PREV(kring->ckr_rhead, kring->ckr_lim);
        error = ENODEV;
        SK_ERR("kr \"%s\" (0x%llx) krflags 0x%b in drop mode (err %d)",
            kring->ckr_name, SK_KVA(kring), kring->ckr_flags,
            CKRF_BITS, error);
    }

    SK_DF(SK_VERB_FSW | SK_VERB_SYNC | SK_VERB_TX,
        "%s(%d) kr \"%s\" (0x%llx) krflags 0x%b ring %u flags 0x%x",
        sk_proc_name_address(p), sk_proc_pid(p), kring->ckr_name,
        SK_KVA(kring), kring->ckr_flags, CKRF_BITS, kring->ckr_ring_id,
        flags);

    return error;
}

/*
 * na_rxsync callback for flow switch ports. We're already protected
 * against concurrent calls from userspace.
 */
int
fsw_vp_na_rxsync(struct __kern_channel_ring *kring, struct proc *p,
    uint32_t flags)
{
#pragma unused(p, flags)
    slot_idx_t head, khead_prev;

    head = kring->ckr_rhead;
    ASSERT(head <= kring->ckr_lim);

    /* First part, import newly received packets. */
    /* actually nothing to do here, they are already in the kring */

    /* Second part, skip past packets that userspace has released. */
    khead_prev = kring->ckr_khead;
    kring->ckr_khead = head;

    /* ensure global visibility */
    os_atomic_thread_fence(seq_cst);

    SK_DF(SK_VERB_FSW | SK_VERB_SYNC | SK_VERB_RX,
        "%s(%d) kr \"%s\" (0x%llx) krflags 0x%b ring %u "
        "kh %u (was %u) rh %u flags 0x%x", sk_proc_name_address(p),
        sk_proc_pid(p), kring->ckr_name, SK_KVA(kring), kring->ckr_flags,
        CKRF_BITS, kring->ckr_ring_id, kring->ckr_khead, khead_prev,
        kring->ckr_rhead, flags);

    return 0;
}

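/*
 * na_attach callback for flow switch ports: record the channel
 * request name on the adapter.  The flow switch binding itself
 * happens later (vpna_fsw is still NULL at this point).
 */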
int
fsw_vp_na_attach(struct kern_nexus *nx, const char *cr_name,
    struct nexus_adapter *na)
{
#pragma unused(nx)
    SK_LOCK_ASSERT_HELD();
    ASSERT(nx->nx_prov->nxprov_params->nxp_type == NEXUS_TYPE_FLOW_SWITCH);
    ASSERT(VPNA(na)->vpna_fsw == NULL);

    (void) strncpy(na->na_name, cr_name, sizeof(na->na_name) - 1);
    na->na_name[sizeof(na->na_name) - 1] = '\0';

    return 0;
}

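/*
 * na_special callback for kernel-only flow switch ports: handles
 * special channel connect/disconnect and start/stop requests by
 * (un)binding the channel and toggling drop mode on the rings.
 */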
static int
fsw_vp_na_special(struct nexus_adapter *na, struct kern_channel *ch,
    struct chreq *chr, nxspec_cmd_t spec_cmd)
{
    int error = 0;

    SK_LOCK_ASSERT_HELD();
    ASSERT(na->na_type == NA_FLOWSWITCH_VP);

    /*
     * fsw_vp_na_attach() must have created this adapter
     * exclusively for kernel (NAF_KERNEL); leave this alone.
     */
    ASSERT(NA_KERNEL_ONLY(na));

    switch (spec_cmd) {
    case NXSPEC_CMD_CONNECT:
        ASSERT(!(na->na_flags & NAF_SPEC_INIT));
        ASSERT(na->na_channels == 0);

        error = na_bind_channel(na, ch, chr);
        if (error != 0) {
            goto done;
        }

        os_atomic_or(&na->na_flags, NAF_SPEC_INIT, relaxed);
        break;

    case NXSPEC_CMD_DISCONNECT:
        ASSERT(na->na_channels > 0);
        ASSERT(na->na_flags & NAF_SPEC_INIT);
        os_atomic_andnot(&na->na_flags, NAF_SPEC_INIT, relaxed);

        na_unbind_channel(ch);
        break;

    case NXSPEC_CMD_START:
        na_kr_drop(na, FALSE);
        break;

    case NXSPEC_CMD_STOP:
        na_kr_drop(na, TRUE);
        break;

    default:
        error = EINVAL;
        break;
    }

done:
    SK_DF(error ? SK_VERB_ERROR : SK_VERB_FSW,
        "ch 0x%llx na \"%s\" (0x%llx) nx 0x%llx spec_cmd %u (err %d)",
        SK_KVA(ch), na->na_name, SK_KVA(na), SK_KVA(ch->ch_nexus),
        spec_cmd, error);

    return error;
}

/*
 * Create a nexus_vp_adapter that describes a flow switch port.
 */
int
fsw_vp_na_create(struct kern_nexus *nx, struct chreq *chr,
    struct nexus_vp_adapter **ret)
{
    struct nxprov_params *nxp = NX_PROV(nx)->nxprov_params;
    struct nx_flowswitch *fsw = NX_FSW_PRIVATE(nx);
    struct nexus_vp_adapter *vpna;
    struct nexus_adapter *na;
    int error;

    SK_LOCK_ASSERT_HELD();

    if ((chr->cr_mode & CHMODE_KERNEL) != 0) {
        SK_ERR("VP adapter can't be used by kernel");
        return ENOTSUP;
    }
    if ((chr->cr_mode & CHMODE_USER_PACKET_POOL) == 0) {
        SK_ERR("user packet pool required");
        return EINVAL;
    }

    vpna = fsw_vp_na_alloc(Z_WAITOK);

    ASSERT(vpna->vpna_up.na_type == NA_FLOWSWITCH_VP);
    ASSERT(vpna->vpna_up.na_free == fsw_vp_na_free);

    na = &vpna->vpna_up;
    (void) strncpy(na->na_name, chr->cr_name, sizeof(na->na_name) - 1);
    na->na_name[sizeof(na->na_name) - 1] = '\0';
    uuid_generate_random(na->na_uuid);

    /*
     * Verify upper bounds; in all cases, including the user pipe nexus
     * as well as flow switch-based ones, the parameters must already
     * have been validated by the corresponding nxdom_prov_params()
     * function defined by each domain.  The user pipe nexus checks
     * against the flow switch's parameters there.
     */
    na_set_nrings(na, NR_TX, nxp->nxp_tx_rings);
    na_set_nrings(na, NR_RX, nxp->nxp_rx_rings);
    /*
     * If the packet pool is configured to be multi-buflet, then we
     * need 2 pairs of alloc/free rings (one for packets, one for buflets).
     */
    na_set_nrings(na, NR_A, ((nxp->nxp_max_frags > 1) &&
        (sk_channel_buflet_alloc != 0)) ? 2 : 1);
    na_set_nslots(na, NR_TX, nxp->nxp_tx_slots);
    na_set_nslots(na, NR_RX, nxp->nxp_rx_slots);
    na_set_nslots(na, NR_A, NX_FSW_AFRINGSIZE);
    ASSERT(na_get_nrings(na, NR_TX) <= NX_DOM(nx)->nxdom_tx_rings.nb_max);
    ASSERT(na_get_nrings(na, NR_RX) <= NX_DOM(nx)->nxdom_rx_rings.nb_max);
    ASSERT(na_get_nslots(na, NR_TX) <= NX_DOM(nx)->nxdom_tx_slots.nb_max);
    ASSERT(na_get_nslots(na, NR_RX) <= NX_DOM(nx)->nxdom_rx_slots.nb_max);

    os_atomic_or(&na->na_flags, NAF_USER_PKT_POOL, relaxed);

    if (chr->cr_mode & CHMODE_LOW_LATENCY) {
        os_atomic_or(&na->na_flags, NAF_LOW_LATENCY, relaxed);
    }

    if (chr->cr_mode & CHMODE_EVENT_RING) {
        na_set_nrings(na, NR_EV, NX_FSW_EVENT_RING_NUM);
        na_set_nslots(na, NR_EV, NX_FSW_EVENT_RING_SIZE);
        os_atomic_or(&na->na_flags, NAF_EVENT_RING, relaxed);
        na->na_channel_event_notify = fsw_vp_na_channel_event_notify;
    }
    if (nxp->nxp_max_frags > 1 && fsw->fsw_tso_mode != FSW_TSO_MODE_NONE) {
        na_set_nrings(na, NR_LBA, 1);
        na_set_nslots(na, NR_LBA, NX_FSW_AFRINGSIZE);
    }
    vpna->vpna_nx_port = chr->cr_port;
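    /* wire up the flow switch VP adapter callbacks */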
    na->na_dtor = fsw_vp_na_dtor;
    na->na_activate = fsw_vp_na_activate;
    na->na_txsync = fsw_vp_na_txsync;
    na->na_rxsync = fsw_vp_na_rxsync;
    na->na_krings_create = fsw_vp_na_krings_create;
    na->na_krings_delete = fsw_vp_na_krings_delete;
    na->na_special = fsw_vp_na_special;

    *(nexus_stats_type_t *)(uintptr_t)&na->na_stats_type =
        NEXUS_STATS_TYPE_FSW;

    /* other fields are set in the common routine */
    na_attach_common(na, nx, &nx_fsw_prov_s);

    if ((error = NX_DOM_PROV(nx)->nxdom_prov_mem_new(NX_DOM_PROV(nx),
        nx, na)) != 0) {
        ASSERT(na->na_arena == NULL);
        goto err;
    }
    ASSERT(na->na_arena != NULL);

    *(uint32_t *)(uintptr_t)&na->na_flowadv_max = nxp->nxp_flowadv_max;
    ASSERT(na->na_flowadv_max == 0 ||
        skmem_arena_nexus(na->na_arena)->arn_flowadv_obj != NULL);

#if SK_LOG
    uuid_string_t uuidstr;
    SK_DF(SK_VERB_FSW, "na_name: \"%s\"", na->na_name);
    SK_DF(SK_VERB_FSW, " UUID: %s", sk_uuid_unparse(na->na_uuid,
        uuidstr));
    SK_DF(SK_VERB_FSW, " nx: 0x%llx (\"%s\":\"%s\")",
        SK_KVA(na->na_nx), NX_DOM(na->na_nx)->nxdom_name,
        NX_DOM_PROV(na->na_nx)->nxdom_prov_name);
    SK_DF(SK_VERB_FSW, " flags: 0x%b", na->na_flags, NAF_BITS);
    SK_DF(SK_VERB_FSW, " stats_type: %u", na->na_stats_type);
    SK_DF(SK_VERB_FSW, " flowadv_max: %u", na->na_flowadv_max);
    SK_DF(SK_VERB_FSW, " rings: tx %u rx %u af %u",
        na_get_nrings(na, NR_TX), na_get_nrings(na, NR_RX),
        na_get_nrings(na, NR_A));
    SK_DF(SK_VERB_FSW, " slots: tx %u rx %u af %u",
        na_get_nslots(na, NR_TX), na_get_nslots(na, NR_RX),
        na_get_nslots(na, NR_A));
#if CONFIG_NEXUS_USER_PIPE
    SK_DF(SK_VERB_FSW, " next_pipe: %u", na->na_next_pipe);
    SK_DF(SK_VERB_FSW, " max_pipes: %u", na->na_max_pipes);
#endif /* CONFIG_NEXUS_USER_PIPE */
    SK_DF(SK_VERB_FSW, " nx_port: %d", (int)vpna->vpna_nx_port);
#endif /* SK_LOG */

    *ret = vpna;
    na_retain_locked(&vpna->vpna_up);

    return 0;

err:
    if (na->na_arena != NULL) {
        skmem_arena_release(na->na_arena);
        na->na_arena = NULL;
    }
    NA_FREE(&vpna->vpna_up);
    return error;
}

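/*
 * Allocate/free a nexus_vp_adapter from na_vp_zone.  The _CASSERT in
 * fsw_vp_na_alloc() guarantees that the embedded nexus_adapter
 * (vpna_up) is the first member, so casts between the two types are safe.
 */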
static struct nexus_vp_adapter *
fsw_vp_na_alloc(zalloc_flags_t how)
{
    struct nexus_vp_adapter *vpna;

    _CASSERT(offsetof(struct nexus_vp_adapter, vpna_up) == 0);

    vpna = zalloc_flags(na_vp_zone, how | Z_ZERO);
    if (vpna) {
        vpna->vpna_up.na_type = NA_FLOWSWITCH_VP;
        vpna->vpna_up.na_free = fsw_vp_na_free;
    }
    return vpna;
}

static void
fsw_vp_na_free(struct nexus_adapter *na)
{
    struct nexus_vp_adapter *vpna = (struct nexus_vp_adapter *)(void *)na;

    ASSERT(vpna->vpna_up.na_refcount == 0);
    SK_DF(SK_VERB_MEM, "vpna 0x%llx FREE", SK_KVA(vpna));
    bzero(vpna, sizeof(*vpna));
    zfree(na_vp_zone, vpna);
}

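/* fold per-ring channel error counters into the flow switch stats */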
void
fsw_vp_channel_error_stats_fold(struct fsw_stats *fs,
    struct __nx_stats_channel_errors *es)
{
    STATS_ADD(fs, FSW_STATS_CHAN_ERR_UPP_ALLOC,
        es->nxs_cres->cres_pkt_alloc_failures);
}

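/*
 * Allocate a packet of at least sz bytes from the given packet pool
 * without sleeping; returns NULL (and fires a DTrace probe) on failure.
 * nx_fsw_free_packet() is its counterpart.
 */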
SK_NO_INLINE_ATTRIBUTE
static struct __kern_packet *
nx_fsw_alloc_packet(struct kern_pbufpool *pp, uint32_t sz, kern_packet_t *php)
{
    kern_packet_t ph;
    ph = pp_alloc_packet_by_size(pp, sz, SKMEM_NOSLEEP);
    if (__improbable(ph == 0)) {
        DTRACE_SKYWALK2(alloc__fail, struct kern_pbufpool *,
            pp, size_t, sz);
        return NULL;
    }
    if (php != NULL) {
        *php = ph;
    }
    return SK_PTR_ADDR_KPKT(ph);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_fsw_free_packet(struct __kern_packet *pkt)
{
    pp_free_packet_single(pkt);
}

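/*
 * na_channel_event_notify callback: deliver a channel event to this
 * VP adapter's event ring.  The event is copied into a freshly
 * allocated packet (metadata header followed by the event payload),
 * attached to the next free slot on event ring 0, and the event ring
 * is then notified.  Drops are counted against the flow switch stats.
 */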
static int
fsw_vp_na_channel_event_notify(struct nexus_adapter *vpna,
    struct __kern_channel_event *ev, uint16_t ev_len)
{
    int err;
    char *baddr;
    kern_packet_t ph;
    kern_buflet_t buf;
    sk_protect_t protect;
    kern_channel_slot_t slot;
    struct __kern_packet *vpna_pkt = NULL;
    struct __kern_channel_event_metadata *emd;
    struct __kern_channel_ring *ring = &vpna->na_event_rings[0];
    struct fsw_stats *fs = &((struct nexus_vp_adapter *)(vpna))->vpna_fsw->fsw_stats;

    if (__probable(ev->ev_type == CHANNEL_EVENT_PACKET_TRANSMIT_STATUS)) {
        STATS_INC(fs, FSW_STATS_EV_RECV_TX_STATUS);
    }
    if (__improbable(ev->ev_type == CHANNEL_EVENT_PACKET_TRANSMIT_EXPIRED)) {
        STATS_INC(fs, FSW_STATS_EV_RECV_TX_EXPIRED);
    }
    STATS_INC(fs, FSW_STATS_EV_RECV);

    if (__improbable(!NA_IS_ACTIVE(vpna))) {
        STATS_INC(fs, FSW_STATS_EV_DROP_NA_INACTIVE);
        err = ENXIO;
        goto error;
    }
    if (__improbable(NA_IS_DEFUNCT(vpna))) {
        STATS_INC(fs, FSW_STATS_EV_DROP_NA_DEFUNCT);
        err = ENXIO;
        goto error;
    }
    if (!NA_CHANNEL_EVENT_ATTACHED(vpna)) {
        STATS_INC(fs, FSW_STATS_EV_DROP_KEVENT_INACTIVE);
        err = ENXIO;
        goto error;
    }
    if (__improbable(KR_DROP(ring))) {
        STATS_INC(fs, FSW_STATS_EV_DROP_KRDROP_MODE);
        err = ENXIO;
        goto error;
    }

    vpna_pkt = nx_fsw_alloc_packet(ring->ckr_pp, ev_len, &ph);
    if (__improbable(vpna_pkt == NULL)) {
        STATS_INC(fs, FSW_STATS_EV_DROP_NOMEM_PKT);
        err = ENOMEM;
        goto error;
    }
    buf = __packet_get_next_buflet(ph, NULL);
    baddr = __buflet_get_data_address(buf);
    emd = (struct __kern_channel_event_metadata *)(void *)baddr;
    emd->emd_etype = ev->ev_type;
    emd->emd_nevents = 1;
    bcopy(ev, (baddr + __KERN_CHANNEL_EVENT_OFFSET), ev_len);
    err = __buflet_set_data_length(buf,
        (ev_len + __KERN_CHANNEL_EVENT_OFFSET));
    VERIFY(err == 0);
    err = __packet_finalize(ph);
    VERIFY(err == 0);
    kr_enter(ring, TRUE);
    protect = sk_sync_protect();
    slot = kern_channel_get_next_slot(ring, NULL, NULL);
    if (slot == NULL) {
        sk_sync_unprotect(protect);
        kr_exit(ring);
        STATS_INC(fs, FSW_STATS_EV_DROP_KRSPACE);
        err = ENOSPC;
        goto error;
    }
    err = kern_channel_slot_attach_packet(ring, slot, ph);
    VERIFY(err == 0);
    vpna_pkt = NULL;
    kern_channel_advance_slot(ring, slot);
    sk_sync_unprotect(protect);
    kr_exit(ring);
    kern_channel_event_notify(&vpna->na_tx_rings[0]);
    STATS_INC(fs, NETIF_STATS_EV_SENT);
    return 0;

error:
    ASSERT(err != 0);
    if (vpna_pkt != NULL) {
        nx_fsw_free_packet(vpna_pkt);
    }
    STATS_INC(fs, FSW_STATS_EV_DROP);
    return err;
}

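/*
 * Map an nx_port_id back to the VP adapter that owns it.  The port id
 * encodes both the nexus port and a generation count; NULL is returned
 * if the port is not a user VP port, is no longer valid, or its
 * generation no longer matches the adapter's vpna_gencnt.
 */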
static inline struct nexus_adapter *
fsw_find_port_vpna(struct nx_flowswitch *fsw, uint32_t nx_port_id)
{
    struct kern_nexus *nx = fsw->fsw_nx;
    struct nexus_adapter *na = NULL;
    nexus_port_t port;
    uint16_t gencnt;

    PKT_DECOMPOSE_NX_PORT_ID(nx_port_id, port, gencnt);

    if (port < FSW_VP_USER_MIN) {
        SK_ERR("non VPNA port");
        return NULL;
    }

    if (__improbable(!nx_port_is_valid(nx, port))) {
        SK_ERR("%s[%d] port no longer valid",
            if_name(fsw->fsw_ifp), port);
        return NULL;
    }

    na = nx_port_get_na(nx, port);
    if (na != NULL && VPNA(na)->vpna_gencnt != gencnt) {
        return NULL;
    }
    return na;
}

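/*
 * Entry point for delivering a channel event to the VP adapter that
 * owns nx_port_id.  Looks up the adapter under the flow switch read
 * lock and hands the event to its na_channel_event_notify callback.
 */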
errno_t
fsw_vp_na_channel_event(struct nx_flowswitch *fsw, uint32_t nx_port_id,
    struct __kern_channel_event *event, uint16_t event_len)
{
    int err = 0;
    struct nexus_adapter *fsw_vpna;

    SK_DF(SK_VERB_EVENTS, "%s[%d] ev: %p ev_len: %hu "
        "ev_type: %u ev_flags: %u _reserved: %hu ev_dlen: %hu",
        if_name(fsw->fsw_ifp), nx_port_id, event, event_len,
        event->ev_type, event->ev_flags, event->_reserved, event->ev_dlen);

    FSW_RLOCK(fsw);
    struct fsw_stats *fs = &fsw->fsw_stats;

    fsw_vpna = fsw_find_port_vpna(fsw, nx_port_id);
    if (__improbable(fsw_vpna == NULL)) {
        err = ENXIO;
        STATS_INC(fs, FSW_STATS_EV_DROP_DEMUX_ERR);
        goto error;
    }
    if (__improbable(fsw_vpna->na_channel_event_notify == NULL)) {
        err = ENOTSUP;
        STATS_INC(fs, FSW_STATS_EV_DROP_EV_VPNA_NOTSUP);
        goto error;
    }
    err = fsw_vpna->na_channel_event_notify(fsw_vpna, event, event_len);
    FSW_RUNLOCK(fsw);
    return err;

error:
    STATS_INC(fs, FSW_STATS_EV_DROP);
    FSW_RUNLOCK(fsw);
    return err;
}