1 /*
2 * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 */
53
54 #include <skywalk/os_skywalk_private.h>
55 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
56 #include <skywalk/nexus/flowswitch/fsw_var.h>
57 #include <sys/sdt.h>
58
59 static void fsw_vp_na_dtor(struct nexus_adapter *);
60 static int fsw_vp_na_special(struct nexus_adapter *,
61 struct kern_channel *, struct chreq *, nxspec_cmd_t);
62 static struct nexus_vp_adapter *fsw_vp_na_alloc(zalloc_flags_t);
63 static void fsw_vp_na_free(struct nexus_adapter *);
64 static int fsw_vp_na_channel_event_notify(struct nexus_adapter *vpna,
65 struct __kern_channel_event *ev, uint16_t ev_len);
66
67 static ZONE_DEFINE(na_vp_zone, SKMEM_ZONE_PREFIX ".na.fsw.vp",
68 sizeof(struct nexus_vp_adapter), ZC_ZFREE_CLEARMEM);
69
70 static uint16_t fsw_vpna_gencnt = 0;
71
/*
 * na_activate() callback for flow switch ports.
 *
 * Called with the mode transition (ON/OFF/DEFUNCT) for this VP adapter.
 * Bumps the adapter generation count, toggles NAF_ACTIVE, and delegates
 * port activation to fsw_port_na_activate(); on OFF/DEFUNCT it also folds
 * the per-adapter nexus and channel-error statistics into the switch-wide
 * counters before the backing memory goes away.  Returns 0 on success or
 * the error from fsw_port_na_activate().
 */
int
fsw_vp_na_activate(struct nexus_adapter *na, na_activate_mode_t mode)
{
	int ret = 0;
	struct nexus_vp_adapter *vpna = (struct nexus_vp_adapter *)(void *)na;
	struct nx_flowswitch *fsw = vpna->vpna_fsw;

	ASSERT(na->na_type == NA_FLOWSWITCH_VP);

	SK_DF(SK_VERB_FSW, "na \"%s\" (0x%llx) %s", na->na_name,
	    SK_KVA(na), na_activate_mode2str(mode));

	/*
	 * Persistent ports may be put in Skywalk mode
	 * before being attached to a FlowSwitch.
	 */
	FSW_WLOCK(fsw);

	/*
	 * Advance the global generation counter and stamp this adapter
	 * with it; fsw_find_port_vpna() compares the stamp embedded in a
	 * port id against vpna_gencnt to reject stale references.
	 */
	atomic_add_16(&fsw_vpna_gencnt, 1);
	vpna->vpna_gencnt = fsw_vpna_gencnt;

	/* mark active up-front; rolled back below if activation fails */
	if (mode == NA_ACTIVATE_MODE_ON) {
		atomic_bitset_32(&na->na_flags, NAF_ACTIVE);
	}

	ret = fsw_port_na_activate(fsw, vpna, mode);
	if (ret != 0) {
		SK_DF(SK_VERB_FSW, "na \"%s\" (0x%llx) %s err(%d)",
		    na->na_name, SK_KVA(na), na_activate_mode2str(mode), ret);
		/* undo the eager NAF_ACTIVE set from above */
		if (mode == NA_ACTIVATE_MODE_ON) {
			atomic_bitclear_32(&na->na_flags, NAF_ACTIVE);
		}
		goto done;
	}

	if (mode == NA_ACTIVATE_MODE_DEFUNCT ||
	    mode == NA_ACTIVATE_MODE_OFF) {
		struct skmem_arena_nexus *arn = skmem_arena_nexus(na->na_arena);

		if (mode == NA_ACTIVATE_MODE_OFF) {
			atomic_bitclear_32(&na->na_flags, NAF_ACTIVE);
		}

		/*
		 * Fold this adapter's nexus statistics object into the
		 * flowswitch aggregate while holding the arena lock, so
		 * the numbers survive the teardown of the arena memory.
		 */
		AR_LOCK(na->na_arena);
		if (na->na_type == NA_FLOWSWITCH_VP &&
		    arn->arn_stats_obj != NULL) {
			fsw_fold_stats(fsw,
			    arn->arn_stats_obj, na->na_stats_type);
		}
		AR_UNLOCK(na->na_arena);

		/* likewise fold per-ring channel error counters */
		enum txrx t;
		uint32_t i;
		struct __nx_stats_channel_errors stats;
		for_all_rings(t) {
			for (i = 0; i < na_get_nrings(na, t); i++) {
				stats.nxs_cres =
				    &NAKR(na, t)[i].ckr_err_stats;
				fsw_fold_stats(fsw, &stats,
				    NEXUS_STATS_TYPE_CHAN_ERRORS);
			}
		}
	}

done:
	FSW_WUNLOCK(fsw);
	return ret;
}
141
/*
 * na_dtor callback for ephemeral flow switch ports.
 *
 * Runs with the global SK lock held when the adapter's last reference is
 * dropped.  If the adapter is still attached to a flowswitch, release its
 * nexus port under the switch write lock; an adapter never attached (or
 * already detached) has vpna_fsw == NULL and there is nothing to undo.
 */
static void
fsw_vp_na_dtor(struct nexus_adapter *na)
{
	struct nexus_vp_adapter *vpna = (struct nexus_vp_adapter *)(void *)na;
	struct nx_flowswitch *fsw = vpna->vpna_fsw;

	SK_LOCK_ASSERT_HELD();
	ASSERT(na->na_type == NA_FLOWSWITCH_VP);

	SK_DF(SK_VERB_FSW, "na \"%s\" (0x%llx)", na->na_name, SK_KVA(na));

	if (fsw != NULL) {
		FSW_WLOCK(fsw);
		fsw_port_free(fsw, vpna, vpna->vpna_nx_port, FALSE);
		FSW_WUNLOCK(fsw);
	}
}
160
161 /*
162 * na_krings_create callback for flow switch ports.
163 * Calls the standard na_kr_create(), then adds leases on rx
164 * rings and bdgfwd on tx rings.
165 */
166 int
fsw_vp_na_krings_create(struct nexus_adapter * na,struct kern_channel * ch)167 fsw_vp_na_krings_create(struct nexus_adapter *na, struct kern_channel *ch)
168 {
169 ASSERT(na->na_type == NA_FLOWSWITCH_VP);
170
171 return na_rings_mem_setup(na, 0, FALSE, ch);
172 }
173
174
175 /* na_krings_delete callback for flow switch ports. */
176 void
fsw_vp_na_krings_delete(struct nexus_adapter * na,struct kern_channel * ch,boolean_t defunct)177 fsw_vp_na_krings_delete(struct nexus_adapter *na, struct kern_channel *ch,
178 boolean_t defunct)
179 {
180 ASSERT(na->na_type == NA_FLOWSWITCH_VP);
181
182 na_rings_mem_teardown(na, ch, defunct);
183 }
184
/*
 * na_txsync callback for flow switch ports.
 *
 * Normal path: hand the slots between khead and rhead to the switch via
 * fsw_ring_flush().  If the ring is in drop mode or the adapter has been
 * detached from its flowswitch (fsw == NULL), reclaim and free every
 * pending packet instead and return ENODEV so the caller knows the sync
 * was refused.
 */
int
fsw_vp_na_txsync(struct __kern_channel_ring *kring, struct proc *p,
    uint32_t flags)
{
#pragma unused(flags)
	struct nexus_vp_adapter *vpna = VPNA(KRNA(kring));
	struct nx_flowswitch *fsw = vpna->vpna_fsw;
	int error = 0;

	/*
	 * Flush packets if and only if the ring isn't in drop mode,
	 * and if the adapter is currently attached to a nexus port;
	 * otherwise we drop them.
	 */
	if (__probable(!KR_DROP(kring) && fsw != NULL)) {
		fsw_ring_flush(fsw, kring, p);
	} else {
		int dropped_pkts;
		/* packets between khead to rhead have been dropped */
		dropped_pkts = kring->ckr_rhead - kring->ckr_khead;
		if (dropped_pkts < 0) {
			/* indices wrapped around the ring; unwrap the count */
			dropped_pkts += kring->ckr_num_slots;
		}
		if (fsw != NULL) {
			STATS_INC(&fsw->fsw_stats, FSW_STATS_DST_RING_DROPMODE);
			STATS_ADD(&fsw->fsw_stats, FSW_STATS_DROP,
			    dropped_pkts);
		}
		/* we're dropping; claim all */
		slot_idx_t sidx = kring->ckr_khead;
		while (sidx != kring->ckr_rhead) {
			struct __kern_slot_desc *ksd = KR_KSD(kring, sidx);
			/* detach and free any packet still on the slot */
			if (KSD_VALID_METADATA(ksd)) {
				struct __kern_packet *pkt = ksd->sd_pkt;
				(void) KR_SLOT_DETACH_METADATA(kring, ksd);
				pp_free_packet_single(pkt);
			}
			sidx = SLOT_NEXT(sidx, kring->ckr_lim);
		}
		/*
		 * Claim everything up to rhead and pull ktail to one slot
		 * behind it, leaving the ring empty from the kernel side.
		 */
		kring->ckr_khead = kring->ckr_rhead;
		kring->ckr_ktail = SLOT_PREV(kring->ckr_rhead, kring->ckr_lim);
		error = ENODEV;
		SK_ERR("kr \"%s\" (0x%llx) krflags 0x%b in drop mode (err %d)",
		    kring->ckr_name, SK_KVA(kring), kring->ckr_flags,
		    CKRF_BITS, error);
	}

	SK_DF(SK_VERB_FSW | SK_VERB_SYNC | SK_VERB_TX,
	    "%s(%d) kr \"%s\" (0x%llx) krflags 0x%b ring %u flags 0x%x",
	    sk_proc_name_address(p), sk_proc_pid(p), kring->ckr_name,
	    SK_KVA(kring), kring->ckr_flags, CKRF_BITS, kring->ckr_ring_id,
	    flags);

	return error;
}
241
/*
 * na_rxsync callback for flow switch ports.  We're already protected
 * against concurrent calls from userspace.
 *
 * Receive-side packets are placed directly into the kring by the switch,
 * so the only work here is to advance khead past the slots userspace has
 * released (up to rhead) and publish that advance.  Always returns 0.
 */
int
fsw_vp_na_rxsync(struct __kern_channel_ring *kring, struct proc *p,
    uint32_t flags)
{
#pragma unused(p, flags)
	slot_idx_t head, khead_prev;

	head = kring->ckr_rhead;
	ASSERT(head <= kring->ckr_lim);

	/* First part, import newly received packets. */
	/* actually nothing to do here, they are already in the kring */

	/* Second part, skip past packets that userspace has released. */
	khead_prev = kring->ckr_khead;
	kring->ckr_khead = head;

	/* ensure global visibility */
	membar_sync();

	SK_DF(SK_VERB_FSW | SK_VERB_SYNC | SK_VERB_RX,
	    "%s(%d) kr \"%s\" (0x%llx) krflags 0x%b ring %u "
	    "kh %u (was %u) rh %u flags 0x%x", sk_proc_name_address(p),
	    sk_proc_pid(p), kring->ckr_name, SK_KVA(kring), kring->ckr_flags,
	    CKRF_BITS, kring->ckr_ring_id, kring->ckr_khead, khead_prev,
	    kring->ckr_rhead, flags);

	return 0;
}
275
276 int
fsw_vp_na_attach(struct kern_nexus * nx,const char * cr_name,struct nexus_adapter * na)277 fsw_vp_na_attach(struct kern_nexus *nx, const char *cr_name,
278 struct nexus_adapter *na)
279 {
280 #pragma unused(nx)
281 SK_LOCK_ASSERT_HELD();
282 ASSERT(nx->nx_prov->nxprov_params->nxp_type == NEXUS_TYPE_FLOW_SWITCH);
283 ASSERT(VPNA(na)->vpna_fsw == NULL);
284
285 (void) strncpy(na->na_name, cr_name, sizeof(na->na_name) - 1);
286 na->na_name[sizeof(na->na_name) - 1] = '\0';
287
288 return 0;
289 }
290
/*
 * na_special() callback: special-command handler for kernel-only VP
 * adapters.  Dispatches the NXSPEC state machine:
 *   CONNECT    - bind the channel to the adapter and mark NAF_SPEC_INIT
 *   DISCONNECT - clear NAF_SPEC_INIT and unbind the channel
 *   START/STOP - leave/enter ring drop mode
 * Returns 0 on success, EINVAL for unknown commands, or the error from
 * na_bind_channel().
 */
static int
fsw_vp_na_special(struct nexus_adapter *na, struct kern_channel *ch,
    struct chreq *chr, nxspec_cmd_t spec_cmd)
{
	int error = 0;

	SK_LOCK_ASSERT_HELD();
	ASSERT(na->na_type == NA_FLOWSWITCH_VP);

	/*
	 * fsw_vp_na_attach() must have created this adapter
	 * exclusively for kernel (NAF_KERNEL); leave this alone.
	 */
	ASSERT(NA_KERNEL_ONLY(na));

	switch (spec_cmd) {
	case NXSPEC_CMD_CONNECT:
		/* a special connect must be the first and only binding */
		ASSERT(!(na->na_flags & NAF_SPEC_INIT));
		ASSERT(na->na_channels == 0);

		error = na_bind_channel(na, ch, chr);
		if (error != 0) {
			goto done;
		}

		atomic_bitset_32(&na->na_flags, NAF_SPEC_INIT);
		break;

	case NXSPEC_CMD_DISCONNECT:
		ASSERT(na->na_channels > 0);
		ASSERT(na->na_flags & NAF_SPEC_INIT);
		/* clear the flag before unbinding, mirroring CONNECT */
		atomic_bitclear_32(&na->na_flags, NAF_SPEC_INIT);

		na_unbind_channel(ch);
		break;

	case NXSPEC_CMD_START:
		na_kr_drop(na, FALSE);
		break;

	case NXSPEC_CMD_STOP:
		na_kr_drop(na, TRUE);
		break;

	default:
		error = EINVAL;
		break;
	}

done:
	SK_DF(error ? SK_VERB_ERROR : SK_VERB_FSW,
	    "ch 0x%llx na \"%s\" (0x%llx) nx 0x%llx spec_cmd %u (err %d)",
	    SK_KVA(ch), na->na_name, SK_KVA(na), SK_KVA(ch->ch_nexus),
	    spec_cmd, error);

	return error;
}
348
/*
 * Create a nexus_vp_adapter that describes a flow switch port.
 *
 * On success *ret holds the new adapter with one reference taken via
 * na_retain_locked().  Returns ENOTSUP for kernel-mode channel requests,
 * EINVAL when the channel does not opt into the user packet pool, or the
 * error from the domain provider's memory setup (in which case the
 * adapter is freed before returning).
 */
int
fsw_vp_na_create(struct kern_nexus *nx, struct chreq *chr,
    struct nexus_vp_adapter **ret)
{
	struct nxprov_params *nxp = NX_PROV(nx)->nxprov_params;
	struct nexus_vp_adapter *vpna;
	struct nexus_adapter *na;
	int error;

	SK_LOCK_ASSERT_HELD();

	/* VP ports are user-facing only; reject kernel channels */
	if ((chr->cr_mode & CHMODE_KERNEL) != 0) {
		SK_ERR("VP adapter can't be used by kernel");
		return ENOTSUP;
	}
	/* flowswitch VP ports require the user packet pool model */
	if ((chr->cr_mode & CHMODE_USER_PACKET_POOL) == 0) {
		SK_ERR("user packet pool required");
		return EINVAL;
	}

	/* Z_WAITOK: allocation may block but does not fail */
	vpna = fsw_vp_na_alloc(Z_WAITOK);

	ASSERT(vpna->vpna_up.na_type == NA_FLOWSWITCH_VP);
	ASSERT(vpna->vpna_up.na_free == fsw_vp_na_free);

	na = &vpna->vpna_up;
	(void) strncpy(na->na_name, chr->cr_name, sizeof(na->na_name) - 1);
	na->na_name[sizeof(na->na_name) - 1] = '\0';
	uuid_generate_random(na->na_uuid);

	/*
	 * Verify upper bounds; for all cases including user pipe nexus,
	 * as well as flow switch-based ones, the parameters must have
	 * already been validated by corresponding nxdom_prov_params()
	 * function defined by each domain. The user pipe nexus would
	 * be checking against the flow switch's parameters there.
	 */
	na_set_nrings(na, NR_TX, nxp->nxp_tx_rings);
	na_set_nrings(na, NR_RX, nxp->nxp_rx_rings);
	/*
	 * If the packet pool is configured to be multi-buflet, then we
	 * need 2 pairs of alloc/free rings(for packet and buflet).
	 */
	na_set_nrings(na, NR_A, ((nxp->nxp_max_frags > 1) &&
	    (sk_channel_buflet_alloc != 0)) ? 2 : 1);
	na_set_nslots(na, NR_TX, nxp->nxp_tx_slots);
	na_set_nslots(na, NR_RX, nxp->nxp_rx_slots);
	na_set_nslots(na, NR_A, NX_FSW_AFRINGSIZE);
	ASSERT(na_get_nrings(na, NR_TX) <= NX_DOM(nx)->nxdom_tx_rings.nb_max);
	ASSERT(na_get_nrings(na, NR_RX) <= NX_DOM(nx)->nxdom_rx_rings.nb_max);
	ASSERT(na_get_nslots(na, NR_TX) <= NX_DOM(nx)->nxdom_tx_slots.nb_max);
	ASSERT(na_get_nslots(na, NR_RX) <= NX_DOM(nx)->nxdom_rx_slots.nb_max);

	atomic_bitset_32(&na->na_flags, NAF_USER_PKT_POOL);

	if (chr->cr_mode & CHMODE_LOW_LATENCY) {
		atomic_bitset_32(&na->na_flags, NAF_LOW_LATENCY);
	}

	/* optional event ring, delivering channel events to userspace */
	if (chr->cr_mode & CHMODE_EVENT_RING) {
		na_set_nrings(na, NR_EV, NX_FSW_EVENT_RING_NUM);
		na_set_nslots(na, NR_EV, NX_FSW_EVENT_RING_SIZE);
		atomic_bitset_32(&na->na_flags, NAF_EVENT_RING);
		na->na_channel_event_notify = fsw_vp_na_channel_event_notify;
	}

	vpna->vpna_nx_port = chr->cr_port;
	na->na_dtor = fsw_vp_na_dtor;
	na->na_activate = fsw_vp_na_activate;
	na->na_txsync = fsw_vp_na_txsync;
	na->na_rxsync = fsw_vp_na_rxsync;
	na->na_krings_create = fsw_vp_na_krings_create;
	na->na_krings_delete = fsw_vp_na_krings_delete;
	na->na_special = fsw_vp_na_special;

	/* na_stats_type is const-qualified; initialize it through a cast */
	*(nexus_stats_type_t *)(uintptr_t)&na->na_stats_type =
	    NEXUS_STATS_TYPE_FSW;

	/* other fields are set in the common routine */
	na_attach_common(na, nx, &nx_fsw_prov_s);

	if ((error = NX_DOM_PROV(nx)->nxdom_prov_mem_new(NX_DOM_PROV(nx),
	    nx, na)) != 0) {
		ASSERT(na->na_arena == NULL);
		goto err;
	}
	ASSERT(na->na_arena != NULL);

	/* na_flowadv_max is likewise const; one-time init through a cast */
	*(uint32_t *)(uintptr_t)&na->na_flowadv_max = nxp->nxp_flowadv_max;
	ASSERT(na->na_flowadv_max == 0 ||
	    skmem_arena_nexus(na->na_arena)->arn_flowadv_obj != NULL);

#if SK_LOG
	uuid_string_t uuidstr;
	SK_DF(SK_VERB_FSW, "na_name: \"%s\"", na->na_name);
	SK_DF(SK_VERB_FSW, " UUID: %s", sk_uuid_unparse(na->na_uuid,
	    uuidstr));
	SK_DF(SK_VERB_FSW, " nx: 0x%llx (\"%s\":\"%s\")",
	    SK_KVA(na->na_nx), NX_DOM(na->na_nx)->nxdom_name,
	    NX_DOM_PROV(na->na_nx)->nxdom_prov_name);
	SK_DF(SK_VERB_FSW, " flags: 0x%b", na->na_flags, NAF_BITS);
	SK_DF(SK_VERB_FSW, " stats_type: %u", na->na_stats_type);
	SK_DF(SK_VERB_FSW, " flowadv_max: %u", na->na_flowadv_max);
	SK_DF(SK_VERB_FSW, " rings: tx %u rx %u af %u",
	    na_get_nrings(na, NR_TX), na_get_nrings(na, NR_RX),
	    na_get_nrings(na, NR_A));
	SK_DF(SK_VERB_FSW, " slots: tx %u rx %u af %u",
	    na_get_nslots(na, NR_TX), na_get_nslots(na, NR_RX),
	    na_get_nslots(na, NR_A));
#if CONFIG_NEXUS_USER_PIPE
	SK_DF(SK_VERB_FSW, " next_pipe: %u", na->na_next_pipe);
	SK_DF(SK_VERB_FSW, " max_pipes: %u", na->na_max_pipes);
#endif /* CONFIG_NEXUS_USER_PIPE */
	SK_DF(SK_VERB_FSW, " nx_port: %d", (int)vpna->vpna_nx_port);
#endif /* SK_LOG */

	*ret = vpna;
	na_retain_locked(&vpna->vpna_up);

	return 0;

err:
	/* unwind: release the arena (if created) and free the adapter */
	if (na->na_arena != NULL) {
		skmem_arena_release(na->na_arena);
		na->na_arena = NULL;
	}
	NA_FREE(&vpna->vpna_up);
	return error;
}
481
482 static struct nexus_vp_adapter *
fsw_vp_na_alloc(zalloc_flags_t how)483 fsw_vp_na_alloc(zalloc_flags_t how)
484 {
485 struct nexus_vp_adapter *vpna;
486
487 _CASSERT(offsetof(struct nexus_vp_adapter, vpna_up) == 0);
488
489 vpna = zalloc_flags(na_vp_zone, how | Z_ZERO);
490 if (vpna) {
491 vpna->vpna_up.na_type = NA_FLOWSWITCH_VP;
492 vpna->vpna_up.na_free = fsw_vp_na_free;
493 }
494 return vpna;
495 }
496
497 static void
fsw_vp_na_free(struct nexus_adapter * na)498 fsw_vp_na_free(struct nexus_adapter *na)
499 {
500 struct nexus_vp_adapter *vpna = (struct nexus_vp_adapter *)(void *)na;
501
502 ASSERT(vpna->vpna_up.na_refcount == 0);
503 SK_DF(SK_VERB_MEM, "vpna 0x%llx FREE", SK_KVA(vpna));
504 bzero(vpna, sizeof(*vpna));
505 zfree(na_vp_zone, vpna);
506 }
507
508 void
fsw_vp_channel_error_stats_fold(struct fsw_stats * fs,struct __nx_stats_channel_errors * es)509 fsw_vp_channel_error_stats_fold(struct fsw_stats *fs,
510 struct __nx_stats_channel_errors *es)
511 {
512 STATS_ADD(fs, FSW_STATS_CHAN_ERR_UPP_ALLOC,
513 es->nxs_cres->cres_pkt_alloc_failures);
514 }
515
516 SK_NO_INLINE_ATTRIBUTE
517 static struct __kern_packet *
nx_fsw_alloc_packet(struct kern_pbufpool * pp,uint32_t sz,kern_packet_t * php)518 nx_fsw_alloc_packet(struct kern_pbufpool *pp, uint32_t sz, kern_packet_t *php)
519 {
520 kern_packet_t ph;
521 ph = pp_alloc_packet_by_size(pp, sz, SKMEM_NOSLEEP);
522 if (__improbable(ph == 0)) {
523 DTRACE_SKYWALK2(alloc__fail, struct kern_pbufpool *,
524 pp, size_t, sz);
525 return NULL;
526 }
527 if (php != NULL) {
528 *php = ph;
529 }
530 return SK_PTR_ADDR_KPKT(ph);
531 }
532
533 SK_NO_INLINE_ATTRIBUTE
534 static void
nx_fsw_free_packet(struct __kern_packet * pkt)535 nx_fsw_free_packet(struct __kern_packet *pkt)
536 {
537 pp_free_packet_single(pkt);
538 }
539
540 static int
fsw_vp_na_channel_event_notify(struct nexus_adapter * vpna,struct __kern_channel_event * ev,uint16_t ev_len)541 fsw_vp_na_channel_event_notify(struct nexus_adapter *vpna,
542 struct __kern_channel_event *ev, uint16_t ev_len)
543 {
544 int err;
545 char *baddr;
546 kern_packet_t ph;
547 kern_buflet_t buf;
548 sk_protect_t protect;
549 kern_channel_slot_t slot;
550 struct __kern_packet *vpna_pkt = NULL;
551 struct __kern_channel_event_metadata *emd;
552 struct __kern_channel_ring *ring = &vpna->na_event_rings[0];
553 struct fsw_stats *fs = &((struct nexus_vp_adapter *)(vpna))->vpna_fsw->fsw_stats;
554
555 if (__improbable(!NA_IS_ACTIVE(vpna))) {
556 STATS_INC(fs, FSW_STATS_EV_DROP_NA_INACTIVE);
557 err = ENXIO;
558 goto error;
559 }
560 if (__improbable(NA_IS_DEFUNCT(vpna))) {
561 STATS_INC(fs, FSW_STATS_EV_DROP_NA_DEFUNCT);
562 err = ENXIO;
563 goto error;
564 }
565 if (!NA_CHANNEL_EVENT_ATTACHED(vpna)) {
566 STATS_INC(fs, FSW_STATS_EV_DROP_KEVENT_INACTIVE);
567 err = ENXIO;
568 goto error;
569 }
570 if (__improbable(KR_DROP(ring))) {
571 STATS_INC(fs, FSW_STATS_EV_DROP_KRDROP_MODE);
572 err = ENXIO;
573 goto error;
574 }
575
576 vpna_pkt = nx_fsw_alloc_packet(ring->ckr_pp, ev_len, &ph);
577 if (__improbable(vpna_pkt == NULL)) {
578 STATS_INC(fs, FSW_STATS_EV_DROP_NOMEM_PKT);
579 err = ENOMEM;
580 goto error;
581 }
582 buf = __packet_get_next_buflet(ph, NULL);
583 baddr = __buflet_get_data_address(buf);
584 emd = (struct __kern_channel_event_metadata *)(void *)baddr;
585 emd->emd_etype = CHANNEL_EVENT_PACKET_TRANSMIT_STATUS;
586 emd->emd_nevents = 1;
587 bcopy(ev, (baddr + __KERN_CHANNEL_EVENT_OFFSET), ev_len);
588 err = __buflet_set_data_length(buf,
589 (ev_len + __KERN_CHANNEL_EVENT_OFFSET));
590 VERIFY(err == 0);
591 err = __packet_finalize(ph);
592 VERIFY(err == 0);
593 kr_enter(ring, TRUE);
594 protect = sk_sync_protect();
595 slot = kern_channel_get_next_slot(ring, NULL, NULL);
596 if (slot == NULL) {
597 sk_sync_unprotect(protect);
598 kr_exit(ring);
599 STATS_INC(fs, FSW_STATS_EV_DROP_KRSPACE);
600 err = ENOSPC;
601 goto error;
602 }
603 err = kern_channel_slot_attach_packet(ring, slot, ph);
604 VERIFY(err == 0);
605 vpna_pkt = NULL;
606 kern_channel_advance_slot(ring, slot);
607 sk_sync_unprotect(protect);
608 kr_exit(ring);
609 kern_channel_event_notify(&vpna->na_tx_rings[0]);
610 STATS_INC(fs, NETIF_STATS_EV_SENT);
611 return 0;
612
613 error:
614 ASSERT(err != 0);
615 if (vpna_pkt != NULL) {
616 nx_fsw_free_packet(vpna_pkt);
617 }
618 STATS_INC(fs, FSW_STATS_EV_DROP);
619 return err;
620 }
621
622 static inline struct nexus_adapter *
fsw_find_port_vpna(struct nx_flowswitch * fsw,uint32_t nx_port_id)623 fsw_find_port_vpna(struct nx_flowswitch *fsw, uint32_t nx_port_id)
624 {
625 struct kern_nexus *nx = fsw->fsw_nx;
626 struct nexus_adapter *na = NULL;
627 nexus_port_t port;
628 uint16_t gencnt;
629
630 PKT_DECOMPOSE_NX_PORT_ID(nx_port_id, port, gencnt);
631
632 if (port < FSW_VP_USER_MIN) {
633 SK_ERR("non VPNA port");
634 return NULL;
635 }
636
637 if (__improbable(!nx_port_is_valid(nx, port))) {
638 SK_ERR("%s[%d] port no longer valid",
639 if_name(fsw->fsw_ifp), port);
640 return NULL;
641 }
642
643 na = nx_port_get_na(nx, port);
644 if (na != NULL && VPNA(na)->vpna_gencnt != gencnt) {
645 return NULL;
646 }
647 return na;
648 }
649
650 errno_t
fsw_vp_na_channel_event(struct nx_flowswitch * fsw,uint32_t nx_port_id,struct __kern_channel_event * event,uint16_t event_len)651 fsw_vp_na_channel_event(struct nx_flowswitch *fsw, uint32_t nx_port_id,
652 struct __kern_channel_event *event, uint16_t event_len)
653 {
654 int err = 0;
655 struct nexus_adapter *fsw_vpna;
656
657 FSW_RLOCK(fsw);
658 struct fsw_stats *fs = &fsw->fsw_stats;
659
660 fsw_vpna = fsw_find_port_vpna(fsw, nx_port_id);
661 if (__improbable(fsw_vpna == NULL)) {
662 err = ENXIO;
663 STATS_INC(fs, FSW_STATS_EV_DROP_DEMUX_ERR);
664 goto error;
665 }
666 if (__improbable(fsw_vpna->na_channel_event_notify == NULL)) {
667 err = ENOTSUP;
668 STATS_INC(fs, FSW_STATS_EV_DROP_EV_VPNA_NOTSUP);
669 goto error;
670 }
671 err = fsw_vpna->na_channel_event_notify(fsw_vpna, event, event_len);
672 FSW_RUNLOCK(fsw);
673 return err;
674
675 error:
676 STATS_INC(fs, FSW_STATS_EV_DROP);
677 FSW_RUNLOCK(fsw);
678 return err;
679 }
680