/*
 * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/flowswitch/nx_flowswitch.h>
#include <skywalk/nexus/flowswitch/fsw_var.h>

static void fsw_vp_na_dtor(struct nexus_adapter *);
static int fsw_vp_na_special(struct nexus_adapter *,
    struct kern_channel *, struct chreq *, nxspec_cmd_t);
static struct nexus_vp_adapter *fsw_vp_na_alloc(zalloc_flags_t);
static void fsw_vp_na_free(struct nexus_adapter *);

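/*
 * Zone for flow switch VP adapter allocations; ZC_ZFREE_CLEARMEM has
 * zalloc zero each element when it is freed back to the zone.
 */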
static ZONE_DEFINE(na_vp_zone, SKMEM_ZONE_PREFIX ".na.fsw.vp",
    sizeof(struct nexus_vp_adapter), ZC_ZFREE_CLEARMEM);

/* na_activate() callback for flow switch ports */
int
fsw_vp_na_activate(struct nexus_adapter *na, na_activate_mode_t mode)
{
	int ret = 0;
	struct nexus_vp_adapter *vpna = (struct nexus_vp_adapter *)(void *)na;
	struct nx_flowswitch *fsw = vpna->vpna_fsw;

	ASSERT(na->na_type == NA_FLOWSWITCH_VP);

	SK_DF(SK_VERB_FSW, "na \"%s\" (0x%llx) %s", na->na_name,
	    SK_KVA(na), na_activate_mode2str(mode));

	/*
	 * Persistent ports may be put in Skywalk mode
	 * before being attached to a FlowSwitch.
	 */
	FSW_WLOCK(fsw);
	if (mode == NA_ACTIVATE_MODE_ON) {
		atomic_bitset_32(&na->na_flags, NAF_ACTIVE);
	}

	ret = fsw_port_na_activate(fsw, vpna, mode);
	if (ret != 0) {
		SK_DF(SK_VERB_FSW, "na \"%s\" (0x%llx) %s err(%d)",
		    na->na_name, SK_KVA(na), na_activate_mode2str(mode), ret);
		if (mode == NA_ACTIVATE_MODE_ON) {
			atomic_bitclear_32(&na->na_flags, NAF_ACTIVE);
		}
		goto done;
	}

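	/*
	 * On deactivate or defunct, fold this port's statistics and
	 * per-ring channel error counters into the flow switch so the
	 * counts survive the port's teardown.
	 */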
	if (mode == NA_ACTIVATE_MODE_DEFUNCT ||
	    mode == NA_ACTIVATE_MODE_OFF) {
		struct skmem_arena_nexus *arn = skmem_arena_nexus(na->na_arena);

		if (mode == NA_ACTIVATE_MODE_OFF) {
			atomic_bitclear_32(&na->na_flags, NAF_ACTIVE);
		}

		AR_LOCK(na->na_arena);
		if (na->na_type == NA_FLOWSWITCH_VP &&
		    arn->arn_stats_obj != NULL) {
			fsw_fold_stats(fsw,
			    arn->arn_stats_obj, na->na_stats_type);
		}
		AR_UNLOCK(na->na_arena);

		enum txrx t;
		uint32_t i;
		struct __nx_stats_channel_errors stats;
		for_all_rings(t) {
			for (i = 0; i < na_get_nrings(na, t); i++) {
				stats.nxs_cres =
				    &NAKR(na, t)[i].ckr_err_stats;
				fsw_fold_stats(fsw, &stats,
				    NEXUS_STATS_TYPE_CHAN_ERRORS);
			}
		}
	}

done:
	FSW_WUNLOCK(fsw);
	return ret;
}

/* na_dtor callback for ephemeral flow switch ports */
static void
fsw_vp_na_dtor(struct nexus_adapter *na)
{
	struct nexus_vp_adapter *vpna = (struct nexus_vp_adapter *)(void *)na;
	struct nx_flowswitch *fsw = vpna->vpna_fsw;

	SK_LOCK_ASSERT_HELD();
	ASSERT(na->na_type == NA_FLOWSWITCH_VP);

	SK_DF(SK_VERB_FSW, "na \"%s\" (0x%llx)", na->na_name, SK_KVA(na));

	if (fsw != NULL) {
		FSW_WLOCK(fsw);
		fsw_port_free(fsw, vpna, vpna->vpna_nx_port, FALSE);
		FSW_WUNLOCK(fsw);
	}
}
/*
 * na_krings_create callback for flow switch ports.
 * Defers to the standard na_rings_mem_setup() to allocate
 * and initialize the rings.
 */
int
fsw_vp_na_krings_create(struct nexus_adapter *na, struct kern_channel *ch)
{
	ASSERT(na->na_type == NA_FLOWSWITCH_VP);

	return na_rings_mem_setup(na, 0, FALSE, ch);
}


/* na_krings_delete callback for flow switch ports. */
void
fsw_vp_na_krings_delete(struct nexus_adapter *na, struct kern_channel *ch,
    boolean_t defunct)
{
	ASSERT(na->na_type == NA_FLOWSWITCH_VP);

	na_rings_mem_teardown(na, ch, defunct);
}

/* na_txsync callback for flow switch ports */
int
fsw_vp_na_txsync(struct __kern_channel_ring *kring, struct proc *p,
    uint32_t flags)
{
#pragma unused(flags)
	struct nexus_vp_adapter *vpna = VPNA(KRNA(kring));
	struct nx_flowswitch *fsw = vpna->vpna_fsw;
	int error = 0;

	/*
	 * Flush packets if and only if the ring isn't in drop mode,
	 * and if the adapter is currently attached to a nexus port;
	 * otherwise we drop them.
	 */
	if (__probable(!KR_DROP(kring) && fsw != NULL)) {
		fsw_ring_flush(fsw, kring, p);
	} else {
		int dropped_pkts;
		/*
		 * Slots between khead and rhead are being dropped; the
		 * difference may wrap around the ring, so correct for it.
		 */
		dropped_pkts = kring->ckr_rhead - kring->ckr_khead;
		if (dropped_pkts < 0) {
			dropped_pkts += kring->ckr_num_slots;
		}
		if (fsw != NULL) {
			STATS_INC(&fsw->fsw_stats, FSW_STATS_DST_RING_DROPMODE);
			STATS_ADD(&fsw->fsw_stats, FSW_STATS_DROP,
			    dropped_pkts);
		}
		/* we're dropping; claim all */
		slot_idx_t sidx = kring->ckr_khead;
		while (sidx != kring->ckr_rhead) {
			struct __kern_slot_desc *ksd = KR_KSD(kring, sidx);
			if (KSD_VALID_METADATA(ksd)) {
				struct __kern_packet *pkt = ksd->sd_pkt;
				(void) KR_SLOT_DETACH_METADATA(kring, ksd);
				pp_free_packet_single(pkt);
			}
			sidx = SLOT_NEXT(sidx, kring->ckr_lim);
		}
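		/*
		 * All dropped slots have been reclaimed: advance khead to
		 * rhead and pull ktail to the slot just before it.
		 */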
		kring->ckr_khead = kring->ckr_rhead;
		kring->ckr_ktail = SLOT_PREV(kring->ckr_rhead, kring->ckr_lim);
		error = ENODEV;
		SK_ERR("kr \"%s\" (0x%llx) krflags 0x%b in drop mode (err %d)",
		    kring->ckr_name, SK_KVA(kring), kring->ckr_flags,
		    CKRF_BITS, error);
	}

	SK_DF(SK_VERB_FSW | SK_VERB_SYNC | SK_VERB_TX,
	    "%s(%d) kr \"%s\" (0x%llx) krflags 0x%b ring %u flags 0x%x",
	    sk_proc_name_address(p), sk_proc_pid(p), kring->ckr_name,
	    SK_KVA(kring), kring->ckr_flags, CKRF_BITS, kring->ckr_ring_id,
	    flags);

	return error;
}

/*
 * na_rxsync callback for flow switch ports.  We're already protected
 * against concurrent calls from userspace.
 */
int
fsw_vp_na_rxsync(struct __kern_channel_ring *kring, struct proc *p,
    uint32_t flags)
{
#pragma unused(p, flags)
	slot_idx_t head, khead_prev;

	head = kring->ckr_rhead;
	ASSERT(head <= kring->ckr_lim);

	/* First part: import newly received packets. */
	/* Nothing to do here; they are already in the kring. */

	/* Second part: skip past packets that userspace has released. */
	khead_prev = kring->ckr_khead;
	kring->ckr_khead = head;

	/* ensure global visibility */
	membar_sync();

	SK_DF(SK_VERB_FSW | SK_VERB_SYNC | SK_VERB_RX,
	    "%s(%d) kr \"%s\" (0x%llx) krflags 0x%b ring %u "
	    "kh %u (was %u) rh %u flags 0x%x", sk_proc_name_address(p),
	    sk_proc_pid(p), kring->ckr_name, SK_KVA(kring), kring->ckr_flags,
	    CKRF_BITS, kring->ckr_ring_id, kring->ckr_khead, khead_prev,
	    kring->ckr_rhead, flags);

	return 0;
}

int
fsw_vp_na_attach(struct kern_nexus *nx, const char *cr_name,
    struct nexus_adapter *na)
{
#pragma unused(nx)
	SK_LOCK_ASSERT_HELD();
	ASSERT(nx->nx_prov->nxprov_params->nxp_type == NEXUS_TYPE_FLOW_SWITCH);
	ASSERT(VPNA(na)->vpna_fsw == NULL);

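	/* copy the name; strncpy() does not NUL-terminate on truncation */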
	(void) strncpy(na->na_name, cr_name, sizeof(na->na_name) - 1);
	na->na_name[sizeof(na->na_name) - 1] = '\0';

	return 0;
}

static int
fsw_vp_na_special(struct nexus_adapter *na, struct kern_channel *ch,
    struct chreq *chr, nxspec_cmd_t spec_cmd)
{
	int error = 0;

	SK_LOCK_ASSERT_HELD();
	ASSERT(na->na_type == NA_FLOWSWITCH_VP);

	/*
	 * fsw_vp_na_attach() must have created this adapter
	 * exclusively for kernel (NAF_KERNEL); leave this alone.
	 */
	ASSERT(NA_KERNEL_ONLY(na));

	switch (spec_cmd) {
	case NXSPEC_CMD_CONNECT:
		ASSERT(!(na->na_flags & NAF_SPEC_INIT));
		ASSERT(na->na_channels == 0);

		error = na_bind_channel(na, ch, chr);
		if (error != 0) {
			goto done;
		}

		atomic_bitset_32(&na->na_flags, NAF_SPEC_INIT);
		break;

	case NXSPEC_CMD_DISCONNECT:
		ASSERT(na->na_channels > 0);
		ASSERT(na->na_flags & NAF_SPEC_INIT);
		atomic_bitclear_32(&na->na_flags, NAF_SPEC_INIT);

		na_unbind_channel(ch);
		break;

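	/*
	 * START and STOP toggle the rings' drop mode; while stopped,
	 * fsw_vp_na_txsync() above drops instead of flushing.
	 */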
	case NXSPEC_CMD_START:
		na_kr_drop(na, FALSE);
		break;

	case NXSPEC_CMD_STOP:
		na_kr_drop(na, TRUE);
		break;

	default:
		error = EINVAL;
		break;
	}

done:
	SK_DF(error ? SK_VERB_ERROR : SK_VERB_FSW,
	    "ch 0x%llx na \"%s\" (0x%llx) nx 0x%llx spec_cmd %u (err %d)",
	    SK_KVA(ch), na->na_name, SK_KVA(na), SK_KVA(ch->ch_nexus),
	    spec_cmd, error);

	return error;
}

/*
 * Create a nexus_vp_adapter that describes a flow switch port.
 */
int
fsw_vp_na_create(struct kern_nexus *nx, struct chreq *chr,
    struct nexus_vp_adapter **ret)
{
	struct nxprov_params *nxp = NX_PROV(nx)->nxprov_params;
	struct nexus_vp_adapter *vpna;
	struct nexus_adapter *na;
	int error;

	SK_LOCK_ASSERT_HELD();

	if ((chr->cr_mode & CHMODE_KERNEL) != 0) {
		SK_ERR("VP adapter can't be used by kernel");
		return ENOTSUP;
	}
	if ((chr->cr_mode & CHMODE_USER_PACKET_POOL) == 0) {
		SK_ERR("user packet pool required");
		return EINVAL;
	}

	vpna = fsw_vp_na_alloc(Z_WAITOK);

	ASSERT(vpna->vpna_up.na_type == NA_FLOWSWITCH_VP);
	ASSERT(vpna->vpna_up.na_free == fsw_vp_na_free);

	na = &vpna->vpna_up;
	(void) strncpy(na->na_name, chr->cr_name, sizeof(na->na_name) - 1);
	na->na_name[sizeof(na->na_name) - 1] = '\0';
	uuid_generate_random(na->na_uuid);

	/*
	 * Verify upper bounds; for all cases, including user pipe nexus
	 * as well as flow switch-based ones, the parameters must have
	 * already been validated by the corresponding nxdom_prov_params()
	 * function defined by each domain.  The user pipe nexus would
	 * be checking against the flow switch's parameters there.
	 */
	na_set_nrings(na, NR_TX, nxp->nxp_tx_rings);
	na_set_nrings(na, NR_RX, nxp->nxp_rx_rings);
	/*
	 * If the packet pool is configured to be multi-buflet, then we
	 * need two pairs of alloc/free rings (one for packets, one for
	 * buflets).
	 */
	na_set_nrings(na, NR_A, ((nxp->nxp_max_frags > 1) &&
	    (sk_channel_buflet_alloc != 0)) ? 2 : 1);
	na_set_nslots(na, NR_TX, nxp->nxp_tx_slots);
	na_set_nslots(na, NR_RX, nxp->nxp_rx_slots);
	na_set_nslots(na, NR_A, NX_FSW_AFRINGSIZE);
	ASSERT(na_get_nrings(na, NR_TX) <= NX_DOM(nx)->nxdom_tx_rings.nb_max);
	ASSERT(na_get_nrings(na, NR_RX) <= NX_DOM(nx)->nxdom_rx_rings.nb_max);
	ASSERT(na_get_nslots(na, NR_TX) <= NX_DOM(nx)->nxdom_tx_slots.nb_max);
	ASSERT(na_get_nslots(na, NR_RX) <= NX_DOM(nx)->nxdom_rx_slots.nb_max);

	atomic_bitset_32(&na->na_flags, NAF_USER_PKT_POOL);

	if (chr->cr_mode & CHMODE_LOW_LATENCY) {
		atomic_bitset_32(&na->na_flags, NAF_LOW_LATENCY);
	}

	vpna->vpna_nx_port = chr->cr_port;
	na->na_dtor = fsw_vp_na_dtor;
	na->na_activate = fsw_vp_na_activate;
	na->na_txsync = fsw_vp_na_txsync;
	na->na_rxsync = fsw_vp_na_rxsync;
	na->na_krings_create = fsw_vp_na_krings_create;
	na->na_krings_delete = fsw_vp_na_krings_delete;
	na->na_special = fsw_vp_na_special;

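	/*
	 * na_stats_type (and na_flowadv_max below) are meant to be
	 * write-once; the casts through uintptr_t drop their const
	 * qualifiers for this one-time initialization.
	 */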
	*(nexus_stats_type_t *)(uintptr_t)&na->na_stats_type =
	    NEXUS_STATS_TYPE_FSW;

	/* other fields are set in the common routine */
	na_attach_common(na, nx, &nx_fsw_prov_s);

	if ((error = NX_DOM_PROV(nx)->nxdom_prov_mem_new(NX_DOM_PROV(nx),
	    nx, na)) != 0) {
		ASSERT(na->na_arena == NULL);
		goto err;
	}
	ASSERT(na->na_arena != NULL);

	*(uint32_t *)(uintptr_t)&na->na_flowadv_max = nxp->nxp_flowadv_max;
	ASSERT(na->na_flowadv_max == 0 ||
	    skmem_arena_nexus(na->na_arena)->arn_flowadv_obj != NULL);

#if SK_LOG
	uuid_string_t uuidstr;
	SK_DF(SK_VERB_FSW, "na_name: \"%s\"", na->na_name);
	SK_DF(SK_VERB_FSW, "  UUID:        %s", sk_uuid_unparse(na->na_uuid,
	    uuidstr));
	SK_DF(SK_VERB_FSW, "  nx:          0x%llx (\"%s\":\"%s\")",
	    SK_KVA(na->na_nx), NX_DOM(na->na_nx)->nxdom_name,
	    NX_DOM_PROV(na->na_nx)->nxdom_prov_name);
	SK_DF(SK_VERB_FSW, "  flags:       0x%b", na->na_flags, NAF_BITS);
	SK_DF(SK_VERB_FSW, "  stats_type:  %u", na->na_stats_type);
	SK_DF(SK_VERB_FSW, "  flowadv_max: %u", na->na_flowadv_max);
	SK_DF(SK_VERB_FSW, "  rings:       tx %u rx %u af %u",
	    na_get_nrings(na, NR_TX), na_get_nrings(na, NR_RX),
	    na_get_nrings(na, NR_A));
	SK_DF(SK_VERB_FSW, "  slots:       tx %u rx %u af %u",
	    na_get_nslots(na, NR_TX), na_get_nslots(na, NR_RX),
	    na_get_nslots(na, NR_A));
#if CONFIG_NEXUS_USER_PIPE
	SK_DF(SK_VERB_FSW, "  next_pipe:   %u", na->na_next_pipe);
	SK_DF(SK_VERB_FSW, "  max_pipes:   %u", na->na_max_pipes);
#endif /* CONFIG_NEXUS_USER_PIPE */
	SK_DF(SK_VERB_FSW, "  nx_port:     %d", (int)vpna->vpna_nx_port);
#endif /* SK_LOG */

	*ret = vpna;
	na_retain_locked(&vpna->vpna_up);

	return 0;

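/* unwind: release the arena if it was created, then free the adapter */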
err:
	if (na->na_arena != NULL) {
		skmem_arena_release(na->na_arena);
		na->na_arena = NULL;
	}
	NA_FREE(&vpna->vpna_up);
	return error;
}

static struct nexus_vp_adapter *
fsw_vp_na_alloc(zalloc_flags_t how)
{
	struct nexus_vp_adapter *vpna;

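	/*
	 * vpna_up must be the first member so that a nexus_adapter
	 * pointer and a nexus_vp_adapter pointer refer to the same
	 * address, as the casts throughout this file assume.
	 */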
	_CASSERT(offsetof(struct nexus_vp_adapter, vpna_up) == 0);

	vpna = zalloc_flags(na_vp_zone, how | Z_ZERO);
	if (vpna) {
		vpna->vpna_up.na_type = NA_FLOWSWITCH_VP;
		vpna->vpna_up.na_free = fsw_vp_na_free;
	}
	return vpna;
}

static void
fsw_vp_na_free(struct nexus_adapter *na)
{
	struct nexus_vp_adapter *vpna = (struct nexus_vp_adapter *)(void *)na;

	ASSERT(vpna->vpna_up.na_refcount == 0);
	SK_DF(SK_VERB_MEM, "vpna 0x%llx FREE", SK_KVA(vpna));
	bzero(vpna, sizeof(*vpna));
	zfree(na_vp_zone, vpna);
}

void
fsw_vp_channel_error_stats_fold(struct fsw_stats *fs,
    struct __nx_stats_channel_errors *es)
{
	STATS_ADD(fs, FSW_STATS_CHAN_ERR_UPP_ALLOC,
	    es->nxs_cres->cres_pkt_alloc_failures);
}
499