/*
 * Copyright (c) 2016-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <skywalk/os_skywalk_private.h>

#include <dev/random/randomdev.h>
#include <net/flowhash.h>
#include <netkey/key.h>

#include <skywalk/nexus/flowswitch/fsw_var.h>
#include <skywalk/nexus/flowswitch/flow/flow_var.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <skywalk/namespace/flowidns.h>

struct flow_entry *fe_alloc(boolean_t);
static void fe_free(struct flow_entry *);
static int fe_id_cmp(const struct flow_entry *, const struct flow_entry *);
static void fe_stats_init(struct flow_entry *);
static void fe_stats_update(struct flow_entry *);

RB_GENERATE_PREV(flow_entry_id_tree, flow_entry, fe_id_link, fe_id_cmp);

os_refgrp_decl(static, flow_entry_refgrp, "flow_entry", NULL);

extern struct zone *sk_fed_zone;

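/*
 * Flow key masks used when matching flows at different tuple widths.
 * Each mask selects which flow_key fields participate in a compare: the
 * 2-tuple form covers protocol and source port only, and the wider forms
 * progressively add IP version, source address, destination port and
 * destination address.  The masks are 16-byte aligned, presumably so
 * that masked key compares can use 16-byte vector operations (compare
 * the sk_memcmp_mask_* helpers used for demux patterns below).
 */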
const struct flow_key fk_mask_2tuple
    __sk_aligned(16) =
{
	.fk_mask = FKMASK_2TUPLE,
	.fk_ipver = 0,
	.fk_proto = 0xff,
	.fk_sport = 0xffff,
	.fk_dport = 0,
	.fk_src._addr64[0] = 0,
	.fk_src._addr64[1] = 0,
	.fk_dst._addr64[0] = 0,
	.fk_dst._addr64[1] = 0,
	.fk_pad[0] = 0,
};

const struct flow_key fk_mask_3tuple
    __sk_aligned(16) =
{
	.fk_mask = FKMASK_3TUPLE,
	.fk_ipver = 0xff,
	.fk_proto = 0xff,
	.fk_sport = 0xffff,
	.fk_dport = 0,
	.fk_src._addr64[0] = 0xffffffffffffffffULL,
	.fk_src._addr64[1] = 0xffffffffffffffffULL,
	.fk_dst._addr64[0] = 0,
	.fk_dst._addr64[1] = 0,
	.fk_pad[0] = 0,
};

const struct flow_key fk_mask_4tuple
    __sk_aligned(16) =
{
	.fk_mask = FKMASK_4TUPLE,
	.fk_ipver = 0xff,
	.fk_proto = 0xff,
	.fk_sport = 0xffff,
	.fk_dport = 0xffff,
	.fk_src._addr64[0] = 0xffffffffffffffffULL,
	.fk_src._addr64[1] = 0xffffffffffffffffULL,
	.fk_dst._addr64[0] = 0,
	.fk_dst._addr64[1] = 0,
	.fk_pad[0] = 0,
};

const struct flow_key fk_mask_5tuple
    __sk_aligned(16) =
{
	.fk_mask = FKMASK_5TUPLE,
	.fk_ipver = 0xff,
	.fk_proto = 0xff,
	.fk_sport = 0xffff,
	.fk_dport = 0xffff,
	.fk_src._addr64[0] = 0xffffffffffffffffULL,
	.fk_src._addr64[1] = 0xffffffffffffffffULL,
	.fk_dst._addr64[0] = 0xffffffffffffffffULL,
	.fk_dst._addr64[1] = 0xffffffffffffffffULL,
	.fk_pad[0] = 0,
};

const struct flow_key fk_mask_ipflow1
    __sk_aligned(16) =
{
	.fk_mask = FKMASK_IPFLOW1,
	.fk_ipver = 0,
	.fk_proto = 0xff,
	.fk_sport = 0,
	.fk_dport = 0,
	.fk_src._addr64[0] = 0,
	.fk_src._addr64[1] = 0,
	.fk_dst._addr64[0] = 0,
	.fk_dst._addr64[1] = 0,
	.fk_pad[0] = 0,
};

const struct flow_key fk_mask_ipflow2
    __sk_aligned(16) =
{
	.fk_mask = FKMASK_IPFLOW2,
	.fk_ipver = 0xff,
	.fk_proto = 0xff,
	.fk_sport = 0,
	.fk_dport = 0,
	.fk_src._addr64[0] = 0xffffffffffffffffULL,
	.fk_src._addr64[1] = 0xffffffffffffffffULL,
	.fk_dst._addr64[0] = 0,
	.fk_dst._addr64[1] = 0,
	.fk_pad[0] = 0,
};

const struct flow_key fk_mask_ipflow3
    __sk_aligned(16) =
{
	.fk_mask = FKMASK_IPFLOW3,
	.fk_ipver = 0xff,
	.fk_proto = 0xff,
	.fk_sport = 0,
	.fk_dport = 0,
	.fk_src._addr64[0] = 0xffffffffffffffffULL,
	.fk_src._addr64[1] = 0xffffffffffffffffULL,
	.fk_dst._addr64[0] = 0xffffffffffffffffULL,
	.fk_dst._addr64[1] = 0xffffffffffffffffULL,
	.fk_pad[0] = 0,
};

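/*
 * Look up a flow owner in the given bucket by (context, pid, low_latency).
 * A stack-local flow_owner populated with those fields serves as the
 * search key for the bucket's RB tree.  The caller must hold the bucket
 * lock; no reference is taken on the returned owner.
 */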
struct flow_owner *
flow_owner_find_by_pid(struct flow_owner_bucket *fob, pid_t pid, void *context,
    bool low_latency)
{
	struct flow_owner find = { .fo_context = context, .fo_pid = pid,
	    .fo_low_latency = low_latency };

	ASSERT(low_latency == true || low_latency == false);
	FOB_LOCK_ASSERT_HELD(fob);
	return RB_FIND(flow_owner_tree, &fob->fob_owner_head, &find);
}

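/*
 * Look up a flow entry by UUID in the owner's id tree.  On success the
 * entry is returned with an extra reference that the caller must drop
 * via flow_entry_release().
 */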
struct flow_entry *
flow_entry_find_by_uuid(struct flow_owner *fo, uuid_t uuid)
{
	struct flow_entry find, *fe = NULL;

	FOB_LOCK_ASSERT_HELD(FO_BUCKET(fo));

	uuid_copy(find.fe_uuid, uuid);
	fe = RB_FIND(flow_entry_id_tree, &fo->fo_flow_entry_id_head, &find);
	if (fe != NULL) {
		flow_entry_retain(fe);
	}

	return fe;
}

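/*
 * Derive a flow ID for the entry by registering its addresses, ports,
 * protocol and address family with the flow ID namespace under
 * FLOWIDNS_DOMAIN_FLOWSWITCH.  The ID is released in fe_free() unless
 * it was externally supplied (FLOWENTF_EXTRL_FLOWID).
 */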
static uint32_t
flow_entry_calc_flowid(struct flow_entry *fe)
{
	uint32_t flowid;
	struct flowidns_flow_key fk;

	bzero(&fk, sizeof(fk));
	_CASSERT(sizeof(fe->fe_key.fk_src) == sizeof(fk.ffk_laddr));
	_CASSERT(sizeof(fe->fe_key.fk_dst) == sizeof(fk.ffk_raddr));
	bcopy(&fe->fe_key.fk_src, &fk.ffk_laddr, sizeof(fk.ffk_laddr));
	bcopy(&fe->fe_key.fk_dst, &fk.ffk_raddr, sizeof(fk.ffk_raddr));

	fk.ffk_lport = fe->fe_key.fk_sport;
	fk.ffk_rport = fe->fe_key.fk_dport;
	fk.ffk_af = (fe->fe_key.fk_ipver == 4) ? AF_INET : AF_INET6;
	fk.ffk_proto = fe->fe_key.fk_proto;

	flowidns_allocate_flowid(FLOWIDNS_DOMAIN_FLOWSWITCH, &fk, &flowid);
	return flowid;
}

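/*
 * Link a child flow entry onto its parent's child list.  Fails if the
 * parent has gone nonviable or if a child with the same UUID is already
 * present.  While walking the list, any nonviable children found are
 * opportunistically unlinked and released.  On success the child is
 * retained on behalf of the list.
 */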
static bool
flow_entry_add_child(struct flow_entry *parent_fe, struct flow_entry *child_fe)
{
	SK_LOG_VAR(char dbgbuf[FLOWENTRY_DBGBUF_SIZE]);

	ASSERT(parent_fe->fe_flags & FLOWENTF_PARENT);

	lck_rw_lock_exclusive(&parent_fe->fe_child_list_lock);

	if (parent_fe->fe_flags & FLOWENTF_NONVIABLE) {
		SK_ERR("child entry add failed, parent fe \"%s\" nonviable 0x%llx "
		    "flags 0x%b %s(%d)", fe_as_string(parent_fe,
		    dbgbuf, sizeof(dbgbuf)), SK_KVA(parent_fe), parent_fe->fe_flags,
		    FLOWENTF_BITS, parent_fe->fe_proc_name,
		    parent_fe->fe_pid);
		lck_rw_unlock_exclusive(&parent_fe->fe_child_list_lock);
		return false;
	}

	struct flow_entry *fe, *tfe;
	TAILQ_FOREACH_SAFE(fe, &parent_fe->fe_child_list, fe_child_link, tfe) {
		if (!fe_id_cmp(fe, child_fe)) {
			lck_rw_unlock_exclusive(&parent_fe->fe_child_list_lock);
			SK_ERR("child entry \"%s\" already exists at fe 0x%llx "
			    "flags 0x%b %s(%d)", fe_as_string(fe,
			    dbgbuf, sizeof(dbgbuf)), SK_KVA(fe), fe->fe_flags,
			    FLOWENTF_BITS, fe->fe_proc_name,
			    fe->fe_pid);
			return false;
		}

		if (fe->fe_flags & FLOWENTF_NONVIABLE) {
			TAILQ_REMOVE(&parent_fe->fe_child_list, fe, fe_child_link);
			ASSERT(--parent_fe->fe_child_count >= 0);
			flow_entry_release(&fe);
		}
	}

	flow_entry_retain(child_fe);
	TAILQ_INSERT_TAIL(&parent_fe->fe_child_list, child_fe, fe_child_link);
	ASSERT(++parent_fe->fe_child_count > 0);

	lck_rw_unlock_exclusive(&parent_fe->fe_child_list_lock);

	return true;
}

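/*
 * Unlink and release every child of a parent flow entry.  Children that
 * are not already nonviable are flagged as wanting nonviable so the
 * reaper can finish tearing them down; the reaper thread is scheduled
 * once at the end if any child was newly flagged.
 */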
static void
flow_entry_remove_all_children(struct flow_entry *parent_fe,
    struct nx_flowswitch *fsw)
{
	bool sched_reaper_thread = false;

	ASSERT(parent_fe->fe_flags & FLOWENTF_PARENT);

	lck_rw_lock_exclusive(&parent_fe->fe_child_list_lock);

	struct flow_entry *fe, *tfe;
	TAILQ_FOREACH_SAFE(fe, &parent_fe->fe_child_list, fe_child_link, tfe) {
		if (!(fe->fe_flags & FLOWENTF_NONVIABLE)) {
			/*
			 * fsw_pending_nonviable is a hint for the reaper
			 * thread; since setting fe_want_nonviable and
			 * incrementing the fsw_pending_nonviable counter
			 * is not atomic, let the increment happen first,
			 * and let the thread losing the CAS do the
			 * decrement.
			 */
			atomic_add_32(&fsw->fsw_pending_nonviable, 1);
			if (atomic_test_set_32(&fe->fe_want_nonviable, 0, 1)) {
				sched_reaper_thread = true;
			} else {
				atomic_add_32(&fsw->fsw_pending_nonviable, -1);
			}
		}

		TAILQ_REMOVE(&parent_fe->fe_child_list, fe, fe_child_link);
		ASSERT(--parent_fe->fe_child_count >= 0);
		flow_entry_release(&fe);
	}

	lck_rw_unlock_exclusive(&parent_fe->fe_child_list_lock);

	if (sched_reaper_thread) {
		fsw_reap_sched(fsw);
	}
}

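/*
 * Copy the demux patterns from the flow request into a child flow
 * entry.  For 16- and 32-byte patterns an optimized masked-memcmp
 * routine is selected; patterns longer than 32 bytes are not supported.
 */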
static void
flow_entry_set_demux_patterns(struct flow_entry *fe, struct nx_flow_req *req)
{
	ASSERT(fe->fe_flags & FLOWENTF_CHILD);
	ASSERT(req->nfr_flow_demux_count > 0);

	fe->fe_demux_patterns = sk_alloc_type_array(struct kern_flow_demux_pattern,
	    req->nfr_flow_demux_count, Z_WAITOK | Z_NOFAIL, skmem_tag_flow_demux);

	for (int i = 0; i < req->nfr_flow_demux_count; i++) {
		bcopy(&req->nfr_flow_demux_patterns[i],
		    &fe->fe_demux_patterns[i].fdp_demux_pattern,
		    sizeof(struct flow_demux_pattern));

		fe->fe_demux_patterns[i].fdp_memcmp_mask = NULL;
		if (req->nfr_flow_demux_patterns[i].fdp_len == 16) {
			fe->fe_demux_patterns[i].fdp_memcmp_mask = sk_memcmp_mask_16B;
		} else if (req->nfr_flow_demux_patterns[i].fdp_len == 32) {
			fe->fe_demux_patterns[i].fdp_memcmp_mask = sk_memcmp_mask_32B;
		} else if (req->nfr_flow_demux_patterns[i].fdp_len > 32) {
			VERIFY(0);
		}
	}

	fe->fe_demux_pattern_count = req->nfr_flow_demux_count;
}

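/*
 * Translate a flow_key into an ifnet inet traffic descriptor, copying
 * only the fields present in the key's mask.  The descriptor is marked
 * for both inbound and outbound traffic and is matched against the
 * interface's traffic rules when selecting a queue set below.
 */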
static int
convert_flowkey_to_inet_td(struct flow_key *key,
    struct ifnet_traffic_descriptor_inet *td)
{
	/* the caller passes an uninitialized descriptor; clear it first */
	bzero(td, sizeof(*td));

	if ((key->fk_mask & FKMASK_IPVER) != 0) {
		td->inet_ipver = key->fk_ipver;
		td->inet_mask |= IFNET_TRAFFIC_DESCRIPTOR_INET_IPVER;
	}
	if ((key->fk_mask & FKMASK_PROTO) != 0) {
		td->inet_proto = key->fk_proto;
		td->inet_mask |= IFNET_TRAFFIC_DESCRIPTOR_INET_PROTO;
	}
	if ((key->fk_mask & FKMASK_SRC) != 0) {
		if (td->inet_ipver == IPVERSION) {
			bcopy(&key->fk_src4, &td->inet_laddr.iia_v4addr,
			    sizeof(key->fk_src4));
		} else {
			bcopy(&key->fk_src6, &td->inet_laddr,
			    sizeof(key->fk_src6));
		}
		td->inet_mask |= IFNET_TRAFFIC_DESCRIPTOR_INET_LADDR;
	}
	if ((key->fk_mask & FKMASK_DST) != 0) {
		if (td->inet_ipver == IPVERSION) {
			bcopy(&key->fk_dst4, &td->inet_raddr.iia_v4addr,
			    sizeof(key->fk_dst4));
		} else {
			bcopy(&key->fk_dst6, &td->inet_raddr,
			    sizeof(key->fk_dst6));
		}
		td->inet_mask |= IFNET_TRAFFIC_DESCRIPTOR_INET_RADDR;
	}
	if ((key->fk_mask & FKMASK_SPORT) != 0) {
		td->inet_lport = key->fk_sport;
		td->inet_mask |= IFNET_TRAFFIC_DESCRIPTOR_INET_LPORT;
	}
	if ((key->fk_mask & FKMASK_DPORT) != 0) {
		td->inet_rport = key->fk_dport;
		td->inet_mask |= IFNET_TRAFFIC_DESCRIPTOR_INET_RPORT;
	}
	td->inet_common.itd_type = IFNET_TRAFFIC_DESCRIPTOR_TYPE_INET;
	td->inet_common.itd_len = sizeof(*td);
	td->inet_common.itd_flags = IFNET_TRAFFIC_DESCRIPTOR_FLAG_INBOUND |
	    IFNET_TRAFFIC_DESCRIPTOR_FLAG_OUTBOUND;
	return 0;
}

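/*
 * (Re)select the queue set (qset) for a flow based on the interface's
 * traffic rules.  If the rule generation count is unchanged the lookup
 * may be skipped; otherwise any previously held qset is released and,
 * when a rule matches the flow's traffic descriptor, the corresponding
 * qset is looked up and cached on the entry.
 */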
void
flow_qset_select_dynamic(struct nx_flowswitch *fsw, struct flow_entry *fe,
    boolean_t skip_if_no_change)
{
	struct ifnet_traffic_descriptor_inet td;
	struct ifnet *ifp;
	uint64_t qset_id;
	struct nx_netif *nif;
	boolean_t changed;
	int err;

	ifp = fsw->fsw_ifp;
	changed = ifnet_sync_traffic_rule_genid(ifp, &fe->fe_tr_genid);
	if (!changed && skip_if_no_change) {
		return;
	}
	if (fe->fe_qset != NULL) {
		nx_netif_qset_release(&fe->fe_qset);
		ASSERT(fe->fe_qset == NULL);
	}
	if (ifp->if_traffic_rule_count == 0) {
		DTRACE_SKYWALK2(no__rules, struct nx_flowswitch *, fsw,
		    struct flow_entry *, fe);
		return;
	}
	err = convert_flowkey_to_inet_td(&fe->fe_key, &td);
	ASSERT(err == 0);
	err = nxctl_inet_traffic_rule_find_qset_id(ifp->if_xname, &td,
	    &qset_id);
	if (err != 0) {
		DTRACE_SKYWALK3(qset__id__not__found,
		    struct nx_flowswitch *, fsw,
		    struct flow_entry *, fe,
		    struct ifnet_traffic_descriptor_inet *, &td);
		return;
	}
	DTRACE_SKYWALK4(qset__id__found, struct nx_flowswitch *, fsw,
	    struct flow_entry *, fe, struct ifnet_traffic_descriptor_inet *,
	    &td, uint64_t, qset_id);
	nif = NX_NETIF_PRIVATE(fsw->fsw_dev_ch->ch_na->na_nx);
	ASSERT(fe->fe_qset == NULL);
	fe->fe_qset = nx_netif_find_qset(nif, qset_id);
}

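/*
 * flow_entry_alloc() constructs a fully initialized flow entry from a
 * flow request: it validates the request key, checks for conflicting
 * or parent entries, allocates a flow advisory index if requested,
 * transfers the port/protocol/ipsec reservations from the request,
 * chooses Tx/Rx processing callbacks and a queue set, assigns a flow
 * ID, and finally publishes the entry in the flow table (regular
 * flows) or on its parent's child list (child flows), as well as in
 * the owner's id tree.
 */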
/* writer-lock must be owned for memory management functions */
struct flow_entry *
flow_entry_alloc(struct flow_owner *fo, struct nx_flow_req *req, int *perr)
{
	SK_LOG_VAR(char dbgbuf[FLOWENTRY_DBGBUF_SIZE]);
	nexus_port_t nx_port = req->nfr_nx_port;
	struct flow_entry *fe = NULL;
	struct flow_entry *parent_fe = NULL;
	flowadv_idx_t fadv_idx = FLOWADV_IDX_NONE;
	struct nexus_adapter *dev_na;
	struct nx_netif *nif;
	int err;

	FOB_LOCK_ASSERT_HELD(FO_BUCKET(fo));
	ASSERT(nx_port != NEXUS_PORT_ANY);
	ASSERT(!fo->fo_nx_port_destroyed);

	*perr = 0;

	struct flow_key key __sk_aligned(16);
	err = flow_req2key(req, &key);
	if (__improbable(err != 0)) {
		SK_ERR("invalid request (err %d)", err);
		goto done;
	}

	struct flow_mgr *fm = fo->fo_fsw->fsw_flow_mgr;
	fe = flow_mgr_find_conflicting_fe(fm, &key);
	if (fe != NULL) {
		if ((fe->fe_flags & FLOWENTF_PARENT) &&
		    uuid_compare(fe->fe_uuid, req->nfr_parent_flow_uuid) == 0) {
			parent_fe = fe;
			fe = NULL;
		} else {
			SK_ERR("entry \"%s\" already exists at fe 0x%llx "
			    "flags 0x%b %s(%d)", fe_as_string(fe,
			    dbgbuf, sizeof(dbgbuf)), SK_KVA(fe), fe->fe_flags,
			    FLOWENTF_BITS, fe->fe_proc_name,
			    fe->fe_pid);
			/* don't return it */
			flow_entry_release(&fe);
			err = EEXIST;
			goto done;
		}
	} else if (!uuid_is_null(req->nfr_parent_flow_uuid)) {
		uuid_string_t uuid_str;
		sk_uuid_unparse(req->nfr_parent_flow_uuid, uuid_str);
		SK_ERR("parent entry \"%s\" does not exist", uuid_str);
		err = ENOENT;
		goto done;
	}

	if ((req->nfr_flags & NXFLOWREQF_FLOWADV) &&
	    (flow_owner_flowadv_index_alloc(fo, &fadv_idx) != 0)) {
		SK_ERR("failed to alloc flowadv index for flow %s",
		    sk_uuid_unparse(req->nfr_flow_uuid, dbgbuf));
		/* XXX: what is the most appropriate error code ? */
		err = ENOSPC;
		goto done;
	}

	fe = fe_alloc(TRUE);
	if (__improbable(fe == NULL)) {
		err = ENOMEM;
		goto done;
	}

	fe->fe_key = key;
	if (req->nfr_route != NULL) {
		fe->fe_laddr_gencnt = req->nfr_route->fr_laddr_gencnt;
	} else {
		fe->fe_laddr_gencnt = req->nfr_saddr_gencnt;
	}

	if (__improbable(req->nfr_flags & NXFLOWREQF_LISTENER)) {
		/* mark this as listener mode */
		atomic_bitset_32(&fe->fe_flags, FLOWENTF_LISTENER);
	} else {
		ASSERT((fe->fe_key.fk_ipver == IPVERSION &&
		    fe->fe_key.fk_src4.s_addr != INADDR_ANY) ||
		    (fe->fe_key.fk_ipver == IPV6_VERSION &&
		    !IN6_IS_ADDR_UNSPECIFIED(&fe->fe_key.fk_src6)));

		/* mark this as connected mode */
		atomic_bitset_32(&fe->fe_flags, FLOWENTF_CONNECTED);
	}

	fe->fe_port_reservation = req->nfr_port_reservation;
	req->nfr_port_reservation = NULL;
	if (req->nfr_flags & NXFLOWREQF_EXT_PORT_RSV) {
		fe->fe_flags |= FLOWENTF_EXTRL_PORT;
	}
	fe->fe_proto_reservation = req->nfr_proto_reservation;
	req->nfr_proto_reservation = NULL;
	if (req->nfr_flags & NXFLOWREQF_EXT_PROTO_RSV) {
		fe->fe_flags |= FLOWENTF_EXTRL_PROTO;
	}
	fe->fe_ipsec_reservation = req->nfr_ipsec_reservation;
	req->nfr_ipsec_reservation = NULL;

	fe->fe_tx_process = dp_flow_tx_process;
	fe->fe_rx_process = dp_flow_rx_process;

	dev_na = fo->fo_fsw->fsw_dev_ch->ch_na;
	nif = NX_NETIF_PRIVATE(dev_na->na_nx);
	if (NX_LLINK_PROV(nif->nif_nx) &&
	    (fe->fe_key.fk_mask & (FKMASK_IPVER | FKMASK_PROTO | FKMASK_DST)) ==
	    (FKMASK_IPVER | FKMASK_PROTO | FKMASK_DST)) {
		if (req->nfr_qset_id != 0) {
			fe->fe_qset_select = FE_QSET_SELECT_FIXED;
			fe->fe_qset_id = req->nfr_qset_id;
			fe->fe_qset = nx_netif_find_qset(nif, req->nfr_qset_id);
		} else {
			fe->fe_qset_select = FE_QSET_SELECT_DYNAMIC;
			fe->fe_qset_id = 0;
			flow_qset_select_dynamic(fo->fo_fsw, fe, FALSE);
		}
	} else {
		fe->fe_qset_select = FE_QSET_SELECT_NONE;
	}
	if (req->nfr_flags & NXFLOWREQF_LOW_LATENCY) {
		atomic_bitset_32(&fe->fe_flags, FLOWENTF_LOW_LATENCY);
	}

	fe->fe_transport_protocol = req->nfr_transport_protocol;
	if (NX_FSW_TCP_RX_AGG_ENABLED() &&
	    (fo->fo_fsw->fsw_nx->nx_prov->nxprov_params->nxp_max_frags > 1) &&
	    (fe->fe_key.fk_proto == IPPROTO_TCP) &&
	    (fe->fe_key.fk_mask == FKMASK_5TUPLE)) {
		fe->fe_rx_process = flow_rx_agg_tcp;
	}
	uuid_copy(fe->fe_uuid, req->nfr_flow_uuid);
	if ((req->nfr_flags & NXFLOWREQF_LISTENER) == 0 &&
	    (req->nfr_flags & NXFLOWREQF_TRACK) != 0) {
		switch (req->nfr_ip_protocol) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			atomic_bitset_32(&fe->fe_flags, FLOWENTF_TRACK);
			break;
		default:
			break;
		}
	}

	if (req->nfr_flags & NXFLOWREQF_QOS_MARKING) {
		atomic_bitset_32(&fe->fe_flags, FLOWENTF_QOS_MARKING);
	}

	if (req->nfr_flags & NXFLOWREQF_PARENT) {
		atomic_bitset_32(&fe->fe_flags, FLOWENTF_PARENT);
		TAILQ_INIT(&fe->fe_child_list);
		lck_rw_init(&fe->fe_child_list_lock, &nexus_lock_group,
		    &nexus_lock_attr);
	}

	if (req->nfr_route != NULL) {
		fe->fe_route = req->nfr_route;
		req->nfr_route = NULL;
	}

	fe->fe_nx_port = nx_port;
	fe->fe_adv_idx = fadv_idx;

	if (req->nfr_inp_flowhash != 0) {
		/*
		 * BSD flow; use the inpcb flow hash value.
		 */
		fe->fe_flowid = req->nfr_inp_flowhash;
		fe->fe_flags |= FLOWENTF_EXTRL_FLOWID;
	} else {
		fe->fe_flowid = flow_entry_calc_flowid(fe);
	}

	if (fe->fe_adv_idx != FLOWADV_IDX_NONE && fo->fo_nx_port_na != NULL) {
		na_flowadv_entry_alloc(fo->fo_nx_port_na, fe->fe_uuid,
		    fe->fe_adv_idx, fe->fe_flowid);
	}

	if (KPKT_VALID_SVC(req->nfr_svc_class)) {
		fe->fe_svc_class = (kern_packet_svc_class_t)req->nfr_svc_class;
	} else {
		fe->fe_svc_class = KPKT_SC_BE;
	}

	uuid_copy(fe->fe_eproc_uuid, req->nfr_euuid);
	fe->fe_policy_id = req->nfr_policy_id;

	err = flow_mgr_flow_hash_mask_add(fm, fe->fe_key.fk_mask);
	ASSERT(err == 0);

	if (parent_fe != NULL) {
		atomic_bitset_32(&fe->fe_flags, FLOWENTF_CHILD);
		flow_entry_set_demux_patterns(fe, req);
		fe->fe_demux_pkt_data = sk_alloc_data(FLOW_DEMUX_MAX_LEN,
		    Z_WAITOK | Z_NOFAIL, skmem_tag_flow_demux);
		if (!flow_entry_add_child(parent_fe, fe)) {
			/* make sure the caller sees the failure */
			err = ENOENT;
			goto done;
		}
	} else {
		fe->fe_key_hash = flow_key_hash(&fe->fe_key);
		err = cuckoo_hashtable_add_with_hash(fm->fm_flow_table,
		    &fe->fe_cnode, fe->fe_key_hash);
		if (err != 0) {
			SK_ERR("flow table add failed (err %d)", err);
			flow_mgr_flow_hash_mask_del(fm, fe->fe_key.fk_mask);
			goto done;
		}
	}

	RB_INSERT(flow_entry_id_tree, &fo->fo_flow_entry_id_head, fe);
	flow_entry_retain(fe);          /* one refcnt in id_tree */

	*(struct nx_flowswitch **)(uintptr_t)&fe->fe_fsw = fo->fo_fsw;
	fe->fe_pid = fo->fo_pid;
	if (req->nfr_epid != -1 && req->nfr_epid != fo->fo_pid) {
		fe->fe_epid = req->nfr_epid;
		proc_name(fe->fe_epid, fe->fe_eproc_name,
		    sizeof(fe->fe_eproc_name));
	} else {
		fe->fe_epid = -1;
	}

	(void) snprintf(fe->fe_proc_name, sizeof(fe->fe_proc_name), "%s",
	    fo->fo_name);

	fe_stats_init(fe);
	flow_stats_retain(fe->fe_stats);
	req->nfr_flow_stats = fe->fe_stats;

#if SK_LOG
	SK_DF(SK_VERB_FLOW, "allocated entry \"%s\" fe 0x%llx flags 0x%b "
	    "[fo 0x%llx]", fe_as_string(fe, dbgbuf,
	    sizeof(dbgbuf)), SK_KVA(fe), fe->fe_flags, FLOWENTF_BITS,
	    SK_KVA(fo));
#endif /* SK_LOG */

done:
	if (parent_fe != NULL) {
		flow_entry_release(&parent_fe);
	}
	if (err != 0) {
		if (fadv_idx != FLOWADV_IDX_NONE) {
			flow_owner_flowadv_index_free(fo, fadv_idx);
		}
		if (fe != NULL) {
			flow_entry_release(&fe);
		}
	}
	*perr = err;
	return fe;
}

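/*
 * Quiesce a flow entry prior to destruction: clear any pending
 * nonviable request (marking the entry nonviable), withdraw the local
 * port namespace reservation, free the flow advisory entry and index,
 * and detach all children if this is a parent flow.  Safe to call more
 * than once; the FLOWENTF_TORN_DOWN flag guards the one-time work.
 */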
void
flow_entry_teardown(struct flow_owner *fo, struct flow_entry *fe)
{
#if SK_LOG
	char dbgbuf[FLOWENTRY_DBGBUF_SIZE];
	SK_DF(SK_VERB_FLOW, "entry \"%s\" fe 0x%llx flags 0x%b [fo 0x%llx] "
	    "non_via %d withdrawn %d", fe_as_string(fe, dbgbuf, sizeof(dbgbuf)),
	    SK_KVA(fe), fe->fe_flags, FLOWENTF_BITS, SK_KVA(fo),
	    fe->fe_want_nonviable, fe->fe_want_withdraw);
#endif /* SK_LOG */
	struct nx_flowswitch *fsw = fo->fo_fsw;

	FOB_LOCK_ASSERT_HELD(FO_BUCKET(fo));

	ASSERT(!(fe->fe_flags & FLOWENTF_DESTROYED));
	ASSERT(!(fe->fe_flags & FLOWENTF_LINGERING));
	ASSERT(fsw != NULL);

	if (atomic_test_set_32(&fe->fe_want_nonviable, 1, 0)) {
		ASSERT(fsw->fsw_pending_nonviable != 0);
		atomic_add_32(&fsw->fsw_pending_nonviable, -1);
		atomic_bitset_32(&fe->fe_flags, FLOWENTF_NONVIABLE);
	}

	/* always withdraw namespace during tear down */
	if (!(fe->fe_flags & FLOWENTF_EXTRL_PORT) &&
	    !(fe->fe_flags & FLOWENTF_WITHDRAWN)) {
		atomic_bitset_32(&fe->fe_flags, FLOWENTF_WITHDRAWN);
		atomic_set_32(&fe->fe_want_withdraw, 0);
		/* local port is now inactive; not eligible for offload */
		flow_namespace_withdraw(&fe->fe_port_reservation);
	}

	/* we may get here multiple times, so check */
	if (!(fe->fe_flags & FLOWENTF_TORN_DOWN)) {
		atomic_bitset_32(&fe->fe_flags, FLOWENTF_TORN_DOWN);
		if (fe->fe_adv_idx != FLOWADV_IDX_NONE) {
			if (fo->fo_nx_port_na != NULL) {
				na_flowadv_entry_free(fo->fo_nx_port_na,
				    fe->fe_uuid, fe->fe_adv_idx, fe->fe_flowid);
			}
			flow_owner_flowadv_index_free(fo, fe->fe_adv_idx);
			fe->fe_adv_idx = FLOWADV_IDX_NONE;
		}
	}
	ASSERT(fe->fe_adv_idx == FLOWADV_IDX_NONE);
	ASSERT(fe->fe_flags & FLOWENTF_TORN_DOWN);

	/* mark child flows as nonviable */
	if (fe->fe_flags & FLOWENTF_PARENT) {
		flow_entry_remove_all_children(fe, fsw);
	}
}

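/*
 * Destroy a flow entry: tear it down, unpublish it from the flow table
 * (regular and parent flows) and from the owner's id tree, and drop the
 * references those structures held.  The final reference is dropped
 * here as well, except that a flow marked FLOWENTF_WAIT_CLOSE is handed
 * to the linger list instead; a QUIC flow may first signal an abort via
 * flow_track_abort_quic() based on close_params.
 */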
void
flow_entry_destroy(struct flow_owner *fo, struct flow_entry *fe, bool nolinger,
    void *close_params)
{
	struct flow_mgr *fm = fo->fo_fsw->fsw_flow_mgr;
	int err;

	FOB_LOCK_ASSERT_HELD(FO_BUCKET(fo));

	/*
	 * regular flow: one in flow_table, one in id_tree, one here
	 * child flow: one in id_tree, one here
	 */
	ASSERT(flow_entry_refcnt(fe) > 2 ||
	    ((fe->fe_flags & FLOWENTF_CHILD) && flow_entry_refcnt(fe) > 1));

	flow_entry_teardown(fo, fe);

	err = flow_mgr_flow_hash_mask_del(fm, fe->fe_key.fk_mask);
	ASSERT(err == 0);

	/* only regular or parent flows have entries in flow_table */
	if (__probable(!(fe->fe_flags & FLOWENTF_CHILD))) {
		uint32_t hash;

		hash = flow_key_hash(&fe->fe_key);
		cuckoo_hashtable_del(fm->fm_flow_table, &fe->fe_cnode, hash);
	}

	RB_REMOVE(flow_entry_id_tree, &fo->fo_flow_entry_id_head, fe);
	struct flow_entry *tfe = fe;
	flow_entry_release(&tfe);

	ASSERT(!(fe->fe_flags & FLOWENTF_DESTROYED));
	atomic_bitset_32(&fe->fe_flags, FLOWENTF_DESTROYED);

	if (fe->fe_transport_protocol == IPPROTO_QUIC) {
		if (!nolinger && close_params != NULL) {
			flow_track_abort_quic(fe, close_params);
		}
		flow_entry_release(&fe);
	} else if (nolinger || !(fe->fe_flags & FLOWENTF_WAIT_CLOSE)) {
		flow_entry_release(&fe);
	} else {
		fsw_linger_insert(fe);
	}
}

uint32_t
flow_entry_refcnt(struct flow_entry *fe)
{
	return os_ref_get_count(&fe->fe_refcnt);
}

void
flow_entry_retain(struct flow_entry *fe)
{
	os_ref_retain(&fe->fe_refcnt);
}

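/*
 * Drop a reference on a flow entry and clear the caller's pointer.
 * When the last reference is released, any cached route, queue set and
 * demux state are dropped, and the entry is freed back to the flow
 * entry cache via fe_free().
 */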
void
flow_entry_release(struct flow_entry **pfe)
{
	struct flow_entry *fe = *pfe;

	ASSERT(fe != NULL);
	*pfe = NULL;            /* caller loses its reference */
#if SK_LOG
	if (__improbable(sk_verbose != 0)) {
		char dbgbuf[FLOWENTRY_DBGBUF_SIZE];
		SK_DF(SK_VERB_FLOW, "entry \"%s\" fe 0x%llx flags 0x%b",
		    fe_as_string(fe, dbgbuf, sizeof(dbgbuf)), SK_KVA(fe),
		    fe->fe_flags, FLOWENTF_BITS);
	}
#endif /* SK_LOG */

	if (__improbable(os_ref_release(&fe->fe_refcnt) == 0)) {
		fe->fe_nx_port = NEXUS_PORT_ANY;
		if (fe->fe_route != NULL) {
			flow_route_release(fe->fe_route);
			fe->fe_route = NULL;
		}
		if (fe->fe_qset != NULL) {
			nx_netif_qset_release(&fe->fe_qset);
			ASSERT(fe->fe_qset == NULL);
		}
		if (fe->fe_demux_patterns != NULL) {
			sk_free_type_array(struct kern_flow_demux_pattern,
			    fe->fe_demux_pattern_count, fe->fe_demux_patterns);
			fe->fe_demux_patterns = NULL;
			fe->fe_demux_pattern_count = 0;
		}
		if (fe->fe_demux_pkt_data != NULL) {
			sk_free_data(fe->fe_demux_pkt_data, FLOW_DEMUX_MAX_LEN);
			fe->fe_demux_pkt_data = NULL;
		}
		fe_free(fe);
	}
}

struct flow_entry_dead *
flow_entry_dead_alloc(zalloc_flags_t how)
{
	struct flow_entry_dead *fed;

	fed = zalloc_flags(sk_fed_zone, how | Z_ZERO);
	if (fed != NULL) {
		SK_DF(SK_VERB_MEM, "fed 0x%llx ALLOC", SK_KVA(fed));
	}
	return fed;
}

void
flow_entry_dead_free(struct flow_entry_dead *fed)
{
	SK_DF(SK_VERB_MEM, "fed 0x%llx FREE", SK_KVA(fed));
	zfree(sk_fed_zone, fed);
}

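/*
 * fe_stats_init() seeds the externally visible sk_stats_flow record
 * with the flow's identity (nexus UUID, interface, pids, process
 * names, key, service class, flow advisory index); fe_stats_update()
 * refreshes the flag bits and the flow tracker state, and is invoked
 * both here and on every flow_entry_stats_get().
 */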
static void
fe_stats_init(struct flow_entry *fe)
{
	struct nx_flowswitch *fsw = fe->fe_fsw;
	struct sk_stats_flow *sf = &fe->fe_stats->fs_stats;

	ASSERT(fe->fe_stats != NULL);
	ASSERT(os_ref_get_count(&fe->fe_stats->fs_refcnt) >= 1);

	bzero(sf, sizeof(*sf));
	uuid_copy(sf->sf_nx_uuid, fsw->fsw_nx->nx_uuid);
	(void) strlcpy(sf->sf_if_name, fsw->fsw_flow_mgr->fm_name, IFNAMSIZ);
	sf->sf_if_index = fsw->fsw_ifp->if_index;
	sf->sf_pid = fe->fe_pid;
	sf->sf_epid = fe->fe_epid;
	(void) snprintf(sf->sf_proc_name, sizeof(sf->sf_proc_name), "%s",
	    fe->fe_proc_name);
	(void) snprintf(sf->sf_eproc_name, sizeof(sf->sf_eproc_name), "%s",
	    fe->fe_eproc_name);

	sf->sf_nx_port = fe->fe_nx_port;
	sf->sf_key = fe->fe_key;
	sf->sf_protocol = fe->fe_transport_protocol;
	sf->sf_svc_class = fe->fe_svc_class;
	sf->sf_adv_idx = fe->fe_adv_idx;

	if (fe->fe_flags & FLOWENTF_TRACK) {
		sf->sf_flags |= SFLOWF_TRACK;
	}
	if (fe->fe_flags & FLOWENTF_LISTENER) {
		sf->sf_flags |= SFLOWF_LISTENER;
	}
	if (fe->fe_route != NULL && (fe->fe_route->fr_flags & FLOWRTF_ONLINK)) {
		sf->sf_flags |= SFLOWF_ONLINK;
	}

	fe_stats_update(fe);
}

static void
fe_stats_update(struct flow_entry *fe)
{
	struct sk_stats_flow *sf = &fe->fe_stats->fs_stats;

	ASSERT(fe->fe_stats != NULL);
	ASSERT(os_ref_get_count(&fe->fe_stats->fs_refcnt) >= 1);

	if (fe->fe_flags & FLOWENTF_CONNECTED) {
		sf->sf_flags |= SFLOWF_CONNECTED;
	}
	if (fe->fe_flags & FLOWENTF_QOS_MARKING) {
		sf->sf_flags |= SFLOWF_QOS_MARKING;
	}
	if (fe->fe_flags & FLOWENTF_WAIT_CLOSE) {
		sf->sf_flags |= SFLOWF_WAIT_CLOSE;
	}
	if (fe->fe_flags & FLOWENTF_CLOSE_NOTIFY) {
		sf->sf_flags |= SFLOWF_CLOSE_NOTIFY;
	}
	if (fe->fe_flags & FLOWENTF_ABORTED) {
		sf->sf_flags |= SFLOWF_ABORTED;
	}
	if (fe->fe_flags & FLOWENTF_NONVIABLE) {
		sf->sf_flags |= SFLOWF_NONVIABLE;
	}
	if (fe->fe_flags & FLOWENTF_WITHDRAWN) {
		sf->sf_flags |= SFLOWF_WITHDRAWN;
	}
	if (fe->fe_flags & FLOWENTF_TORN_DOWN) {
		sf->sf_flags |= SFLOWF_TORN_DOWN;
	}
	if (fe->fe_flags & FLOWENTF_DESTROYED) {
		sf->sf_flags |= SFLOWF_DESTROYED;
	}
	if (fe->fe_flags & FLOWENTF_LINGERING) {
		sf->sf_flags |= SFLOWF_LINGERING;
	}
	if (fe->fe_flags & FLOWENTF_LOW_LATENCY) {
		sf->sf_flags |= SFLOWF_LOW_LATENCY;
	}
	if (fe->fe_flags & FLOWENTF_PARENT) {
		sf->sf_flags |= SFLOWF_PARENT;
	}
	if (fe->fe_flags & FLOWENTF_CHILD) {
		sf->sf_flags |= SFLOWF_CHILD;
	}

	sf->sf_bucket_idx = SFLOW_BUCKET_NONE;

	sf->sf_ltrack.sft_state = fe->fe_ltrack.fse_state;
	sf->sf_ltrack.sft_seq = fe->fe_ltrack.fse_seqlo;
	sf->sf_ltrack.sft_max_win = fe->fe_ltrack.fse_max_win;
	sf->sf_ltrack.sft_wscale = fe->fe_ltrack.fse_wscale;
	sf->sf_rtrack.sft_state = fe->fe_rtrack.fse_state;
	sf->sf_rtrack.sft_seq = fe->fe_rtrack.fse_seqlo;
	sf->sf_rtrack.sft_max_win = fe->fe_rtrack.fse_max_win;
	sf->sf_rtrack.sft_wscale = fe->fe_rtrack.fse_wscale;
}

void
flow_entry_stats_get(struct flow_entry *fe, struct sk_stats_flow *sf)
{
	_CASSERT(sizeof(fe->fe_stats->fs_stats) == sizeof(*sf));

	fe_stats_update(fe);
	bcopy(&fe->fe_stats->fs_stats, sf, sizeof(*sf));
}

struct flow_entry *
fe_alloc(boolean_t can_block)
{
	struct flow_entry *fe;

	_CASSERT((offsetof(struct flow_entry, fe_key) % 16) == 0);

	fe = skmem_cache_alloc(sk_fe_cache,
	    can_block ? SKMEM_SLEEP : SKMEM_NOSLEEP);
	if (fe == NULL) {
		return NULL;
	}

	/*
	 * fe_key is 16-byte aligned, which requires fe itself to begin
	 * on a 16-byte boundary as well.  This alignment is specified
	 * at sk_fe_cache creation time, and we assert it here.
	 */
	ASSERT(IS_P2ALIGNED(fe, 16));
	bzero(fe, sk_fe_size);

	fe->fe_stats = flow_stats_alloc(can_block);
	if (fe->fe_stats == NULL) {
		skmem_cache_free(sk_fe_cache, fe);
		return NULL;
	}

	SK_DF(SK_VERB_MEM, "fe 0x%llx ALLOC", SK_KVA(fe));

	os_ref_init(&fe->fe_refcnt, &flow_entry_refgrp);

	KPKTQ_INIT(&fe->fe_rx_pktq);
	KPKTQ_INIT(&fe->fe_tx_pktq);

	return fe;
}

static void
fe_free(struct flow_entry *fe)
{
	ASSERT(fe->fe_flags & FLOWENTF_TORN_DOWN);
	ASSERT(fe->fe_flags & FLOWENTF_DESTROYED);
	ASSERT(!(fe->fe_flags & FLOWENTF_LINGERING));
	ASSERT(fe->fe_route == NULL);

	ASSERT(fe->fe_stats != NULL);
	flow_stats_release(fe->fe_stats);
	fe->fe_stats = NULL;

	/* only the very last reference releases the namespace reservation */
	if (!(fe->fe_flags & FLOWENTF_EXTRL_PORT) &&
	    NETNS_TOKEN_VALID(&fe->fe_port_reservation)) {
		flow_namespace_destroy(&fe->fe_port_reservation);
		ASSERT(!NETNS_TOKEN_VALID(&fe->fe_port_reservation));
	}
	fe->fe_port_reservation = NULL;

	if (!(fe->fe_flags & FLOWENTF_EXTRL_PROTO) &&
	    protons_token_is_valid(fe->fe_proto_reservation)) {
		protons_release(&fe->fe_proto_reservation);
	}
	fe->fe_proto_reservation = NULL;

	if (key_custom_ipsec_token_is_valid(fe->fe_ipsec_reservation)) {
		key_release_custom_ipsec(&fe->fe_ipsec_reservation);
	}
	fe->fe_ipsec_reservation = NULL;

	if (!(fe->fe_flags & FLOWENTF_EXTRL_FLOWID) && (fe->fe_flowid != 0)) {
		flowidns_release_flowid(fe->fe_flowid);
		fe->fe_flowid = 0;
	}

	skmem_cache_free(sk_fe_cache, fe);
}

static __inline__ int
fe_id_cmp(const struct flow_entry *a, const struct flow_entry *b)
{
	return uuid_compare(a->fe_uuid, b->fe_uuid);
}

#if SK_LOG
SK_NO_INLINE_ATTRIBUTE
char *
fk_as_string(const struct flow_key *fk, char *dst, size_t dsz)
{
	int af;
	char src_s[MAX_IPv6_STR_LEN];
	char dst_s[MAX_IPv6_STR_LEN];

	af = (fk->fk_ipver == 4) ? AF_INET : AF_INET6;

	(void) inet_ntop(af, &fk->fk_src, src_s, sizeof(src_s));
	(void) inet_ntop(af, &fk->fk_dst, dst_s, sizeof(dst_s));
	(void) snprintf(dst, dsz,
	    "ipver=%u,src=%s,dst=%s,proto=0x%02x,sport=%u,dport=%u "
	    "mask=%08x,hash=%08x",
	    fk->fk_ipver, src_s, dst_s, fk->fk_proto, ntohs(fk->fk_sport),
	    ntohs(fk->fk_dport), fk->fk_mask, flow_key_hash(fk));

	return dst;
}

SK_NO_INLINE_ATTRIBUTE
char *
fe_as_string(const struct flow_entry *fe, char *dst, size_t dsz)
{
	char keybuf[FLOWKEY_DBGBUF_SIZE];       /* just for debug message */
	uuid_string_t uuidstr;

	fk_as_string(&fe->fe_key, keybuf, sizeof(keybuf));

	(void) snprintf(dst, dsz,
	    "fe 0x%llx proc %s nx_port %d flow_uuid %s %s tp_proto=0x%02x",
	    SK_KVA(fe), fe->fe_proc_name, (int)fe->fe_nx_port,
	    sk_uuid_unparse(fe->fe_uuid, uuidstr),
	    keybuf, fe->fe_transport_protocol);

	return dst;
}
#endif /* SK_LOG */
