/*
 * Copyright (c) 2016-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Once a packet is classified, it goes through checks to see if there
 * is a matching flow entry in the flow table. The key used to search
 * for the entry is composed of the fields contained in struct flow_ptrs.
 *
 * Flow entry insertion into and deletion from the flow table, on behalf
 * of the owning client process, require the use of the rule ID (UUID)
 * as the search key.
 *
 * Because of the above, each flow entry simultaneously exists in two
 * respective trees: flow_entry_tree and flow_entry_id_tree.
 *
 * Using a single RW lock to protect the two trees is simple, but the
 * data path performance is impacted during flow insertion and deletion,
 * especially as the number of client processes and flows grows.
 *
 * To solve that, we deploy the following scheme:
 *
 * Given that the flow_entry_tree is searched on a per-packet basis,
 * we break it down into a series of trees, each one contained within
 * a flow_bucket structure. The hash from flow_ptrs determines the
 * index of the flow_bucket to search the flow_entry_tree from.
 *
 * The flow_entry_id_tree is searched on each flow insertion and
 * deletion, and similarly we break it down into a series of trees,
 * each contained within a flow_owner_bucket structure. We use the
 * client process ID (pid_t) to determine the bucket index.
 *
 * Each flow_bucket and flow_owner_bucket structure is dynamically
 * created and aligned on a CPU cache line boundary; this avoids false
 * sharing, especially given that each bucket has its own RW lock.
 * The number of these buckets is determined by the client module at
 * the time the flow manager context is initialized.
 */
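
/*
 * Illustrative sketch (an annotation, not part of the implementation):
 * with the scheme above, a data-path lookup and a control-path lookup
 * touch different buckets and therefore different locks.  The helpers
 * named below are declared later in this header; the surrounding code
 * is an assumption for illustration only.
 *
 *      // control path: owning process -> flow_owner_bucket (by pid)
 *      struct flow_owner_bucket *fob =
 *          flow_mgr_get_fob_by_pid(fm, proc_pid(p));
 *
 *      // data path: packet -> flow key -> flow table lookup
 *      struct flow_key key;
 *      flow_pkt2key(pkt, TRUE, &key);
 *      struct flow_entry *fe = flow_mgr_find_fe_by_key(fm, &key);
 *
 * Since each bucket is cache-line aligned and carries its own lock,
 * contention is confined to flows or owners that hash to the same bucket.
 */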

#ifndef _SKYWALK_NEXUS_FLOWSWITCH_FLOW_FLOWVAR_H_
#define _SKYWALK_NEXUS_FLOWSWITCH_FLOW_FLOWVAR_H_
66
67 #ifdef BSD_KERNEL_PRIVATE
68 #include <skywalk/core/skywalk_var.h>
69 #include <skywalk/lib/cuckoo_hashtable.h>
70 #include <skywalk/namespace/netns.h>
71 #include <skywalk/namespace/protons.h>
72 #include <skywalk/packet/packet_var.h>
73 #include <net/flowhash.h>
74 #include <netinet/ip.h>
75 #include <netinet/in_stat.h>
76 #include <netinet/ip6.h>
77 #include <sys/eventhandler.h>
78
79 RB_HEAD(flow_owner_tree, flow_owner);
80
81 struct flow_owner_bucket {
82 decl_lck_mtx_data(, fob_lock);
83 struct flow_owner_tree fob_owner_head;
84 uint16_t fob_busy_flags;
85 uint16_t fob_open_waiters;
86 uint16_t fob_close_waiters;
87 uint16_t fob_dtor_waiters;
88 const size_t fob_idx;
89 };
90
91 #define FOBF_OPEN_BUSY 0x1 /* flow open monitor */
92 #define FOBF_CLOSE_BUSY 0x2 /* flow close monitor */
93 #define FOBF_DEAD 0x4 /* no longer usable */
94
95 #define FOB_LOCK(_fob) \
96 lck_mtx_lock(&(_fob)->fob_lock)
97 #define FOB_LOCK_SPIN(_fob) \
98 lck_mtx_lock_spin(&(_fob)->fob_lock)
99 #define FOB_LOCK_CONVERT(_fob) \
100 lck_mtx_convert_spin(&(_fob)->fob_lock)
101 #define FOB_TRY_LOCK(_fob) \
102 lck_mtx_try_lock(&(_fob)->fob_lock)
103 #define FOB_LOCK_ASSERT_HELD(_fob) \
104 LCK_MTX_ASSERT(&(_fob)->fob_lock, LCK_MTX_ASSERT_OWNED)
105 #define FOB_LOCK_ASSERT_NOTHELD(_fob) \
106 LCK_MTX_ASSERT(&(_fob)->fob_lock, LCK_MTX_ASSERT_NOTOWNED)
107 #define FOB_UNLOCK(_fob) \
108 lck_mtx_unlock(&(_fob)->fob_lock)
109
110 RB_HEAD(flow_entry_id_tree, flow_entry);
111
112 #define FLOW_PROCESS_NAME_LENGTH 24
113
114 struct flow_owner {
115 RB_ENTRY(flow_owner) fo_link;
116 struct flow_entry_id_tree fo_flow_entry_id_head;
117 const struct flow_owner_bucket *fo_bucket;
118 void *fo_context;
119 pid_t fo_pid;
120 bool fo_nx_port_pid_bound;
121 bool fo_nx_port_destroyed;
122 bool fo_low_latency;
123 nexus_port_t fo_nx_port;
124 uuid_t fo_key;
125
126 struct nexus_adapter * const fo_nx_port_na;
127 struct nx_flowswitch * const fo_fsw;
128
129 /*
130 * Array of bitmaps to manage the flow advisory table indices.
131 * Currently we are restricting a flow owner to a single nexus
132 * port, so this structure is effectively managing the flow advisory
133 * indices for a port.
134 */
135 bitmap_t *fo_flowadv_bmap;
136 uint32_t fo_flowadv_max;
137 uint32_t fo_num_flowadv;
138
139 /* for debugging */
140 char fo_name[FLOW_PROCESS_NAME_LENGTH];
141 };
142
143 #define FO_BUCKET(_fo) \
144 __DECONST(struct flow_owner_bucket *, (_fo)->fo_bucket)
145
146 RB_PROTOTYPE_SC_PREV(__private_extern__, flow_owner_tree, flow_owner,
147 fo_link, fo_cmp);
148 RB_PROTOTYPE_SC_PREV(__private_extern__, flow_entry_id_tree, flow_entry,
149 fe_id_link, fe_id_cmp);
150
151 typedef enum {
152 /*
153 * TCP states.
154 */
155 FT_STATE_CLOSED = 0, /* closed */
156 FT_STATE_LISTEN, /* listening for connection */
157 FT_STATE_SYN_SENT, /* active, have sent SYN */
158 FT_STATE_SYN_RECEIVED, /* have sent and rcvd SYN */
159 FT_STATE_ESTABLISHED, /* established */
160 FT_STATE_CLOSE_WAIT, /* rcvd FIN, waiting close */
161 FT_STATE_FIN_WAIT_1, /* have sent FIN */
162 FT_STATE_CLOSING, /* exchanged FINs, waiting FIN|ACK */
163 FT_STATE_LAST_ACK, /* rcvd FIN, closed, waiting FIN|ACK */
164 FT_STATE_FIN_WAIT_2, /* closed, FIN is ACK'd */
165 FT_STATE_TIME_WAIT, /* quiet wait after close */
166
167 /*
168 * UDP states.
169 */
170 FT_STATE_NO_TRAFFIC = 20, /* no packet observed */
171 FT_STATE_SINGLE, /* single packet */
172 FT_STATE_MULTIPLE, /* multiple packets */
173
174 FT_STATE_MAX = 255
175 } flow_track_state_t;
176
177 struct flow_track_rtt {
178 uint64_t frtt_timestamp; /* tracked segment timestamp */
179 uint64_t frtt_last; /* previous net_uptime(rate limiting) */
180 uint32_t frtt_seg_begin; /* tracked segment begin SEQ */
181 uint32_t frtt_seg_end; /* tracked segment end SEQ */
182 uint32_t frtt_usec; /* avg RTT in usec */
183 };
184
185 #define FLOWTRACK_RTT_SAMPLE_INTERVAL 2 /* sample ACK RTT every 2 sec */
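
/*
 * A minimal sketch (assumption, for illustration only) of how the
 * sample interval above could rate-limit ACK RTT measurements; it
 * assumes net_uptime() reports seconds, as suggested by the frtt_last
 * comment:
 *
 *      uint64_t now = net_uptime();
 *      if (now - frtt->frtt_last >= FLOWTRACK_RTT_SAMPLE_INTERVAL) {
 *              frtt->frtt_last = now;
 *              // arm a new tracked segment; the ACK that covers
 *              // [frtt_seg_begin, frtt_seg_end] yields one sample
 *              // that is folded into frtt_usec
 *      }
 */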

struct flow_track {
        /*
         * TCP specific tracking info.
         */
        uint32_t fse_seqlo; /* max sequence number sent */
        uint32_t fse_seqhi; /* max the other end ACKd + win */
        uint32_t fse_seqlast; /* last sequence number (FIN) */
        uint16_t fse_max_win; /* largest window (pre scaling) */
        uint16_t fse_mss; /* maximum segment size option */
        uint8_t fse_state; /* active state level (FT_STATE_*) */
        uint8_t fse_wscale; /* window scaling factor */
        uint16_t fse_flags; /* FLOWSTATEF_* */
        uint32_t fse_syn_ts; /* SYN timestamp */
        uint32_t fse_syn_cnt; /* # of SYNs per second */

        struct flow_track_rtt fse_rtt; /* ACK RTT tracking */
#define fse_rtt_usec fse_rtt.frtt_usec
} __sk_aligned(8);

/* valid values for fse_flags */
#define FLOWSTATEF_WSCALE 0x1 /* fse_wscale is valid */

struct flow_llhdr {
        uint32_t flh_gencnt; /* link-layer address gencnt */

        const uint8_t flh_off;
        const uint8_t flh_len;
        uint16_t flh_pad; /* for future */

        union _flh_u {
                uint64_t _buf[2];
                struct {
                        uint16_t _eth_pad;
                        struct ether_header _eth;
                } _eth_padded;
        } __sk_aligned(8) _flh;
#define flh_eth_padded _flh._eth_padded
#define flh_eth _flh._eth_padded._eth
};

typedef enum {
        FE_QSET_SELECT_NONE,
        FE_QSET_SELECT_FIXED,
        FE_QSET_SELECT_DYNAMIC
} flow_qset_select_t;

extern kern_allocation_name_t skmem_tag_flow_demux;
typedef int (*flow_demux_memcmp_mask_t)(const uint8_t *src1, const uint8_t *src2,
    const uint8_t *byte_mask);

struct kern_flow_demux_pattern {
        struct flow_demux_pattern fdp_demux_pattern;
        flow_demux_memcmp_mask_t fdp_memcmp_mask;
};
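
/*
 * Hypothetical usage sketch: fdp_memcmp_mask is expected to behave like
 * a masked memcmp, comparing only the bytes selected by the mask.  The
 * packet buffer and the names pattern_value/pattern_mask below are
 * placeholders for the pattern's value and mask bytes; they are
 * assumptions for illustration, not the actual field names.
 *
 *      struct kern_flow_demux_pattern *kdp = &fe->fe_demux_patterns[i];
 *      if (kdp->fdp_memcmp_mask(pkt_bytes, pattern_value,
 *          pattern_mask) == 0) {
 *              // packet matches this demux pattern
 *      }
 */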

#define MAX_PKT_DEMUX_LIMIT 1000

TAILQ_HEAD(flow_entry_list, flow_entry);

typedef void (*flow_action_t)(struct nx_flowswitch *fsw, struct flow_entry *fe);

struct flow_entry {
        /**** Common Group ****/
        os_refcnt_t fe_refcnt;
        struct flow_key fe_key;
        uint32_t fe_flags;
        uint32_t fe_key_hash;
        struct cuckoo_node fe_cnode;

        uuid_t fe_uuid __sk_aligned(8);
        nexus_port_t fe_nx_port;
        uint32_t fe_laddr_gencnt;
        uint32_t fe_want_nonviable;
        uint32_t fe_want_withdraw;
        uint8_t fe_transport_protocol;

        /**** Rx Group ****/
        uint16_t fe_rx_frag_count;
        uint32_t fe_rx_pktq_bytes;
        struct pktq fe_rx_pktq;
        TAILQ_ENTRY(flow_entry) fe_rx_link;
        flow_action_t fe_rx_process;

        /*
         * Largest allocated packet size.
         * Used by:
         * - mbuf batch allocation logic during RX aggregation and netif copy.
         * - packet allocation logic during RX aggregation.
         */
        uint32_t fe_rx_largest_size;

        /**** Tx Group ****/
        bool fe_tx_is_cont_frag;
        uint32_t fe_tx_frag_id;
        struct pktq fe_tx_pktq;
        TAILQ_ENTRY(flow_entry) fe_tx_link;
        flow_action_t fe_tx_process;

        uuid_t fe_eproc_uuid __sk_aligned(8);
        flowadv_idx_t fe_adv_idx;
        kern_packet_svc_class_t fe_svc_class;
        uint32_t fe_policy_id; /* policy id matched to flow */

        /**** Misc Group ****/
        struct nx_flowswitch * const fe_fsw;
        struct ns_token *fe_port_reservation;
        struct protons_token *fe_proto_reservation;
        void *fe_ipsec_reservation;

        struct flow_track fe_ltrack; /* local endpoint state */
        struct flow_track fe_rtrack; /* remote endpoint state */

        /*
         * Flow stats are kept externally stand-alone, refcnt'ed by various
         * users (e.g. flow_entry, necp_client_flow, etc.)
         */
        struct flow_stats *fe_stats;
        struct flow_route *fe_route;

        RB_ENTRY(flow_entry) fe_id_link;

        TAILQ_ENTRY(flow_entry) fe_linger_link;
        uint64_t fe_linger_expire; /* expiration deadline */
        uint32_t fe_linger_wait; /* linger time (seconds) */

        pid_t fe_pid;
        pid_t fe_epid;
        char fe_proc_name[FLOW_PROCESS_NAME_LENGTH];
        char fe_eproc_name[FLOW_PROCESS_NAME_LENGTH];

        uint32_t fe_flowid; /* globally unique flow ID */

        /* Logical link related information */
        struct netif_qset *fe_qset;
        uint64_t fe_qset_id;
        flow_qset_select_t fe_qset_select;
        uint32_t fe_tr_genid;

        /* Parent child information */
        decl_lck_rw_data(, fe_child_list_lock);
        struct flow_entry_list fe_child_list;
        TAILQ_ENTRY(flow_entry) fe_child_link;
#if DEVELOPMENT || DEBUG
        int16_t fe_child_count;
#endif // DEVELOPMENT || DEBUG
        uint8_t fe_demux_pattern_count;
        struct kern_flow_demux_pattern *fe_demux_patterns;
        uint8_t *fe_demux_pkt_data;
};

/* valid values for fe_flags */
#define FLOWENTF_INITED 0x00000001 /* {src,dst} states initialized */
#define FLOWENTF_TRACK 0x00000010 /* enable state tracking */
#define FLOWENTF_CONNECTED 0x00000020 /* connected mode */
#define FLOWENTF_LISTENER 0x00000040 /* listener mode */
#define FLOWENTF_QOS_MARKING 0x00000100 /* flow can have qos marking */
#define FLOWENTF_LOW_LATENCY 0x00000200 /* low latency flow */
#define FLOWENTF_WAIT_CLOSE 0x00001000 /* defer free after close */
#define FLOWENTF_CLOSE_NOTIFY 0x00002000 /* notify NECP upon tear down */
#define FLOWENTF_EXTRL_PORT 0x00004000 /* port reservation is held externally */
#define FLOWENTF_EXTRL_PROTO 0x00008000 /* proto reservation is held externally */
#define FLOWENTF_EXTRL_FLOWID 0x00010000 /* flowid reservation is held externally */
#define FLOWENT_CHILD 0x00020000 /* child flow */
#define FLOWENT_PARENT 0x00040000 /* parent flow */
#define FLOWENTF_ABORTED 0x01000000 /* has sent RST to peer */
#define FLOWENTF_NONVIABLE 0x02000000 /* disabled; awaiting tear down */
#define FLOWENTF_WITHDRAWN 0x04000000 /* flow has been withdrawn */
#define FLOWENTF_TORN_DOWN 0x08000000 /* torn down and awaiting destroy */
#define FLOWENTF_HALF_CLOSED 0x10000000 /* flow is half closed */
#define FLOWENTF_DESTROYED 0x40000000 /* not in RB trees anymore */
#define FLOWENTF_LINGERING 0x80000000 /* destroyed and in linger list */

#define FLOWENTF_BITS \
        "\020\01INITED\05TRACK\06CONNECTED\07LISTENER\011QOS_MARKING" \
        "\012LOW_LATENCY\015WAIT_CLOSE\016CLOSE_NOTIFY\017EXT_PORT" \
        "\020EXT_PROTO\021EXT_FLOWID\031ABORTED\032NONVIABLE\033WITHDRAWN" \
        "\034TORN_DOWN\035HALF_CLOSED\037DESTROYED\40LINGERING"

TAILQ_HEAD(flow_entry_linger_head, flow_entry);

struct flow_entry_dead {
        LIST_ENTRY(flow_entry_dead) fed_link;

        boolean_t fed_want_nonviable;
        boolean_t fed_want_clonotify;

        /* rule (flow) UUID */
        union {
                uint64_t fed_uuid_64[2];
                uint32_t fed_uuid_32[4];
                uuid_t fed_uuid;
        } __sk_aligned(8);
};

/*
 * Minimum refcnt for a flow route entry to be considered as idle.
 */
#define FLOW_ROUTE_MINREF 2 /* for the 2 RB trees */

struct flow_route {
        RB_ENTRY(flow_route) fr_link;
        RB_ENTRY(flow_route) fr_id_link;

        /*
         * fr_laddr represents the local address that the system chooses
         * for the foreign destination in fr_faddr. The flow entry that
         * is referring to this flow route object may choose a different
         * local address if it wishes.
         *
         * fr_gaddr represents the gateway address to reach the final
         * foreign destination fr_faddr, valid only if the destination is
         * not directly attached (FLOWRTF_GATEWAY is set).
         *
         * The use of sockaddr for storage is for convenience; the port
         * value is not applicable for this object, as this is shared
         * among flow entries.
         */
        union sockaddr_in_4_6 fr_laddr; /* local IP address */
        union sockaddr_in_4_6 fr_faddr; /* remote IP address */
#define fr_af fr_faddr.sa.sa_family
        union sockaddr_in_4_6 fr_gaddr; /* gateway IP address */

        struct flow_llhdr fr_llhdr;
#define fr_eth_padded fr_llhdr.flh_eth_padded
#define fr_eth fr_llhdr.flh_eth

        /*
         * In flow_route_tree, we use the destination address as key.
         * To speed up searches, we initialize fr_addr_key to the address
         * portion of fr_faddr depending on the address family.
         */
        void *fr_addr_key;

        /* flow route UUID */
        uuid_t fr_uuid __sk_aligned(8);

        /*
         * fr_usecnt is updated atomically; incremented when a flow entry
         * refers to this object and decremented otherwise. Periodically,
         * the flowswitch instance garbage collects flow_route objects
         * that aren't being referred to by any flow entries.
         *
         * fr_expire is set when fr_usecnt reaches its minimum count, and
         * is cleared when it goes above the minimum count.
         *
         * The spin lock fr_reflock is used to serialize both.
         */
        decl_lck_spin_data(, fr_reflock);
        uint64_t fr_expire;
        volatile uint32_t fr_usecnt;

        uint32_t fr_flags;
        uint32_t fr_laddr_gencnt; /* local IP gencnt */
        uint32_t fr_addr_len; /* sizeof {in,in6}_addr */

        volatile uint32_t fr_want_configure;
        volatile uint32_t fr_want_probe;

        /* lock to serialize resolver */
        decl_lck_mtx_data(, fr_lock);

        /*
         * fr_rt_dst is the route to the final destination; together with
         * fr_rt_evhdlr_tag, it is used in route event registration.
         *
         * fr_rt_gw is valid only if FLOWRTF_GATEWAY is set.
         */
        eventhandler_tag fr_rt_evhdlr_tag;
        struct rtentry *fr_rt_dst;
        struct rtentry *fr_rt_gw;

        /* nexus UUID */
        uuid_t fr_nx_uuid __sk_aligned(8);

        const struct flow_mgr *fr_mgr;
        const struct flow_route_bucket *fr_frb;
        const struct flow_route_id_bucket *fr_frib;
};

/* valid values for fr_flags */
#define FLOWRTF_ATTACHED 0x00000001 /* attached to RB trees */
#define FLOWRTF_ONLINK 0x00000010 /* dst directly on the link */
#define FLOWRTF_GATEWAY 0x00000020 /* gw IP address is valid */
#define FLOWRTF_RESOLVED 0x00000040 /* flow route is resolved */
#define FLOWRTF_HAS_LLINFO 0x00000080 /* has dst link-layer address */
#define FLOWRTF_DELETED 0x00000100 /* route has been deleted */
#define FLOWRTF_DST_LL_MCAST 0x00000200 /* dst is link layer multicast */
#define FLOWRTF_DST_LL_BCAST 0x00000400 /* dst is link layer broadcast */
#define FLOWRTF_STABLE_ADDR 0x00000800 /* local address prefers stable */
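
/*
 * A hedged sketch of the fr_usecnt life cycle described above, using the
 * accessors declared later in this header (the GC behavior is paraphrased
 * from the comments, not from the implementation):
 *
 *      flow_route_retain(fr);      // a flow entry starts using the route
 *      ...
 *      flow_route_release(fr);     // drops fr_usecnt; once it falls back
 *                                  // to FLOW_ROUTE_MINREF (the two RB
 *                                  // trees), fr_expire can be armed and
 *                                  // the flowswitch GC may reclaim the
 *                                  // route via flow_route_prune()
 */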

#define FR_LOCK(_fr) \
        lck_mtx_lock(&(_fr)->fr_lock)
#define FR_TRY_LOCK(_fr) \
        lck_mtx_try_lock(&(_fr)->fr_lock)
#define FR_LOCK_ASSERT_HELD(_fr) \
        LCK_MTX_ASSERT(&(_fr)->fr_lock, LCK_MTX_ASSERT_OWNED)
#define FR_LOCK_ASSERT_NOTHELD(_fr) \
        LCK_MTX_ASSERT(&(_fr)->fr_lock, LCK_MTX_ASSERT_NOTOWNED)
#define FR_UNLOCK(_fr) \
        lck_mtx_unlock(&(_fr)->fr_lock)

#define FLOWRT_UPD_ETH_DST(_fr, _addr) do { \
        bcopy((_addr), (_fr)->fr_eth.ether_dhost, ETHER_ADDR_LEN); \
        (_fr)->fr_flags &= ~(FLOWRTF_DST_LL_MCAST|FLOWRTF_DST_LL_BCAST); \
        if (ETHER_IS_MULTICAST(_addr)) { \
                if (_ether_cmp(etherbroadcastaddr, (_addr)) == 0) \
                        (_fr)->fr_flags |= FLOWRTF_DST_LL_BCAST; \
                else \
                        (_fr)->fr_flags |= FLOWRTF_DST_LL_MCAST; \
        } \
} while (0)
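
/*
 * Hypothetical usage sketch for FLOWRT_UPD_ETH_DST(): once the resolver
 * learns the neighbor's link-layer address, it copies that address into
 * the cached Ethernet header; the multicast/broadcast flags are refreshed
 * as a side effect.  The surrounding resolver logic is an assumption for
 * illustration only.
 *
 *      uint8_t lladdr[ETHER_ADDR_LEN];
 *      ...                             // filled in from the ARP/ND entry
 *      FLOWRT_UPD_ETH_DST(fr, lladdr);
 *      fr->fr_flags |= (FLOWRTF_RESOLVED | FLOWRTF_HAS_LLINFO);
 */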

RB_HEAD(flow_route_tree, flow_route);
RB_PROTOTYPE_SC_PREV(__private_extern__, flow_route_tree, flow_route,
    fr_link, fr_cmp);

struct flow_route_bucket {
        decl_lck_rw_data(, frb_lock);
        struct flow_route_tree frb_head;
        const uint32_t frb_idx;
};

#define FRB_WLOCK(_frb) \
        lck_rw_lock_exclusive(&(_frb)->frb_lock)
#define FRB_WLOCKTORLOCK(_frb) \
        lck_rw_lock_exclusive_to_shared(&(_frb)->frb_lock)
#define FRB_WTRYLOCK(_frb) \
        lck_rw_try_lock_exclusive(&(_frb)->frb_lock)
#define FRB_WUNLOCK(_frb) \
        lck_rw_unlock_exclusive(&(_frb)->frb_lock)
#define FRB_RLOCK(_frb) \
        lck_rw_lock_shared(&(_frb)->frb_lock)
#define FRB_RLOCKTOWLOCK(_frb) \
        lck_rw_lock_shared_to_exclusive(&(_frb)->frb_lock)
#define FRB_RTRYLOCK(_frb) \
        lck_rw_try_lock_shared(&(_frb)->frb_lock)
#define FRB_RUNLOCK(_frb) \
        lck_rw_unlock_shared(&(_frb)->frb_lock)
#define FRB_UNLOCK(_frb) \
        lck_rw_done(&(_frb)->frb_lock)
#define FRB_WLOCK_ASSERT_HELD(_frb) \
        LCK_RW_ASSERT(&(_frb)->frb_lock, LCK_RW_ASSERT_EXCLUSIVE)
#define FRB_RLOCK_ASSERT_HELD(_frb) \
        LCK_RW_ASSERT(&(_frb)->frb_lock, LCK_RW_ASSERT_SHARED)
#define FRB_LOCK_ASSERT_HELD(_frb) \
        LCK_RW_ASSERT(&(_frb)->frb_lock, LCK_RW_ASSERT_HELD)

RB_HEAD(flow_route_id_tree, flow_route);
RB_PROTOTYPE_SC_PREV(__private_extern__, flow_route_id_tree, flow_route,
    fr_id_link, fr_id_cmp);

struct flow_route_id_bucket {
        decl_lck_rw_data(, frib_lock);
        struct flow_route_id_tree frib_head;
        const uint32_t frib_idx;
};

#define FRIB_WLOCK(_frib) \
        lck_rw_lock_exclusive(&(_frib)->frib_lock)
#define FRIB_WLOCKTORLOCK(_frib) \
        lck_rw_lock_exclusive_to_shared(&(_frib)->frib_lock)
#define FRIB_WTRYLOCK(_frib) \
        lck_rw_try_lock_exclusive(&(_frib)->frib_lock)
#define FRIB_WUNLOCK(_frib) \
        lck_rw_unlock_exclusive(&(_frib)->frib_lock)
#define FRIB_RLOCK(_frib) \
        lck_rw_lock_shared(&(_frib)->frib_lock)
#define FRIB_RLOCKTOWLOCK(_frib) \
        lck_rw_lock_shared_to_exclusive(&(_frib)->frib_lock)
#define FRIB_RTRYLOCK(_frib) \
        lck_rw_try_lock_shared(&(_frib)->frib_lock)
#define FRIB_RUNLOCK(_frib) \
        lck_rw_unlock_shared(&(_frib)->frib_lock)
#define FRIB_UNLOCK(_frib) \
        lck_rw_done(&(_frib)->frib_lock)
#define FRIB_WLOCK_ASSERT_HELD(_frib) \
        LCK_RW_ASSERT(&(_frib)->frib_lock, LCK_RW_ASSERT_EXCLUSIVE)
#define FRIB_RLOCK_ASSERT_HELD(_frib) \
        LCK_RW_ASSERT(&(_frib)->frib_lock, LCK_RW_ASSERT_SHARED)
#define FRIB_LOCK_ASSERT_HELD(_frib) \
        LCK_RW_ASSERT(&(_frib)->frib_lock, LCK_RW_ASSERT_HELD)

struct flow_mgr {
        char fm_name[IFNAMSIZ];
        uuid_t fm_uuid;
        RB_ENTRY(flow_mgr) fm_link;

        struct cuckoo_hashtable *fm_flow_table;
        size_t fm_flow_hash_count[FKMASK_IDX_MAX]; /* # of flows with mask */
        uint16_t fm_flow_hash_masks[FKMASK_IDX_MAX];

        void *fm_owner_buckets; /* cache-aligned fob */
        const size_t fm_owner_buckets_cnt; /* total # of fobs */
        const size_t fm_owner_bucket_sz; /* size of each fob */
        const size_t fm_owner_bucket_tot_sz; /* allocated size of each fob */

        void *fm_route_buckets; /* cache-aligned frb */
        const size_t fm_route_buckets_cnt; /* total # of frb */
        const size_t fm_route_bucket_sz; /* size of each frb */
        const size_t fm_route_bucket_tot_sz; /* allocated size of each frb */

        void *fm_route_id_buckets; /* cache-aligned frib */
        const size_t fm_route_id_buckets_cnt; /* total # of frib */
        const size_t fm_route_id_bucket_sz; /* size of each frib */
        const size_t fm_route_id_bucket_tot_sz; /* allocated size of each frib */
};

/*
 * This function compares @match against @key.
 * Return values:
 *    0         @key (exact) matches everything that @match (wildcard)
 *              wants to match on
 *    non-zero  otherwise (the result of the first mismatching comparison)
 */
static inline int
flow_key_cmp(const struct flow_key *match, const struct flow_key *key)
{
#define FK_CMP(field, mask) \
        if ((match->fk_mask & mask) != 0) { \
                if ((key->fk_mask & mask) == 0) { \
                        return 1; \
                } \
                int d = memcmp(&match->field, &key->field, sizeof(match->field)); \
                if (d != 0) { \
                        return d; \
                } \
        }

        FK_CMP(fk_ipver, FKMASK_IPVER);
        FK_CMP(fk_proto, FKMASK_PROTO);
        FK_CMP(fk_src, FKMASK_SRC);
        FK_CMP(fk_dst, FKMASK_DST);
        FK_CMP(fk_sport, FKMASK_SPORT);
        FK_CMP(fk_dport, FKMASK_DPORT);

        return 0;
}
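
/*
 * Example of the wildcard semantics above (a sketch for illustration):
 * a partially-specified key matches any fully-specified key that agrees
 * on the fields it carries.  The exact mask composition is left out here.
 *
 *      struct flow_key listener, conn;
 *      // listener: fk_mask covers e.g. proto + local port only
 *      // conn:     fk_mask is the full 5-tuple
 *      if (flow_key_cmp(&listener, &conn) == 0) {
 *              // conn is covered by the listener's wildcard key
 *      }
 */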

/*
 * Similar to flow_key_cmp() except using memory compare with mask,
 * done with SIMD instructions, if available for the platform.
 */
static inline int
flow_key_cmp_mask(const struct flow_key *match,
    const struct flow_key *key, const struct flow_key *mask)
{
        _CASSERT(FLOW_KEY_LEN == 48);
        _CASSERT(FLOW_KEY_LEN == sizeof(struct flow_key));
        _CASSERT((sizeof(struct flow_entry) % 16) == 0);
        _CASSERT((offsetof(struct flow_entry, fe_key) % 16) == 0);

        return sk_memcmp_mask_48B((const uint8_t *)match,
            (const uint8_t *)key, (const uint8_t *)mask);
}

static inline uint32_t
flow_key_hash(const struct flow_key *key)
{
        uint32_t hash = FK_HASH_SEED;
#define FK_HASH(field, mask) \
        if ((key->fk_mask & mask) != 0) { \
                hash = net_flowhash(&key->field, sizeof(key->field), hash); \
        }

        FK_HASH(fk_ipver, FKMASK_IPVER);
        FK_HASH(fk_proto, FKMASK_PROTO);
        FK_HASH(fk_src, FKMASK_SRC);
        FK_HASH(fk_dst, FKMASK_DST);
        FK_HASH(fk_sport, FKMASK_SPORT);
        FK_HASH(fk_dport, FKMASK_DPORT);

        return hash;
}

__attribute__((always_inline))
static inline void
flow_key_unpack(const struct flow_key *key, union sockaddr_in_4_6 *laddr,
    union sockaddr_in_4_6 *faddr, uint8_t *protocol)
{
        *protocol = key->fk_proto;
        if (key->fk_ipver == IPVERSION) {
                laddr->sa.sa_family = AF_INET;
                laddr->sin.sin_addr = key->fk_src4;
                laddr->sin.sin_port = key->fk_sport;
                faddr->sa.sa_family = AF_INET;
                faddr->sin.sin_addr = key->fk_dst4;
                faddr->sin.sin_port = key->fk_dport;
        } else if (key->fk_ipver == IPV6_VERSION) {
                laddr->sa.sa_family = AF_INET6;
                laddr->sin6.sin6_addr = key->fk_src6;
                laddr->sin6.sin6_port = key->fk_sport;
                faddr->sa.sa_family = AF_INET6;
                faddr->sin6.sin6_addr = key->fk_dst6;
                faddr->sin6.sin6_port = key->fk_dport;
        }
}

__attribute__((always_inline))
static inline int
flow_req2key(struct nx_flow_req *req, struct flow_key *key)
{
        FLOW_KEY_CLEAR(key);

        if (req->nfr_saddr.sa.sa_family == AF_INET) {
                key->fk_ipver = IPVERSION;
                key->fk_proto = req->nfr_ip_protocol;
                key->fk_mask |= FKMASK_PROTO;
                if (sk_sa_has_addr(SA(&req->nfr_saddr))) {
                        key->fk_src4 = req->nfr_saddr.sin.sin_addr;
                        key->fk_mask |= (FKMASK_IPVER | FKMASK_SRC);
                }
                if (sk_sa_has_addr(SA(&req->nfr_daddr))) {
                        key->fk_dst4 = req->nfr_daddr.sin.sin_addr;
                        key->fk_mask |= (FKMASK_IPVER | FKMASK_DST);
                }
                if (sk_sa_has_port(SA(&req->nfr_saddr))) {
                        key->fk_sport = req->nfr_saddr.sin.sin_port;
                        key->fk_mask |= FKMASK_SPORT;
                }
                if (sk_sa_has_port(SA(&req->nfr_daddr))) {
                        key->fk_dport = req->nfr_daddr.sin.sin_port;
                        key->fk_mask |= FKMASK_DPORT;
                }
        } else if (req->nfr_saddr.sa.sa_family == AF_INET6) {
                key->fk_ipver = IPV6_VERSION;
                key->fk_proto = req->nfr_ip_protocol;
                key->fk_mask |= FKMASK_PROTO;
                if (sk_sa_has_addr(SA(&req->nfr_saddr))) {
                        key->fk_src6 = req->nfr_saddr.sin6.sin6_addr;
                        key->fk_mask |= (FKMASK_IPVER | FKMASK_SRC);
                }
                if (sk_sa_has_addr(SA(&req->nfr_daddr))) {
                        key->fk_dst6 = req->nfr_daddr.sin6.sin6_addr;
                        key->fk_mask |= (FKMASK_IPVER | FKMASK_DST);
                }
                if (sk_sa_has_port(SA(&req->nfr_saddr))) {
                        key->fk_sport = req->nfr_saddr.sin6.sin6_port;
                        key->fk_mask |= FKMASK_SPORT;
                }
                if (sk_sa_has_port(SA(&req->nfr_daddr))) {
                        key->fk_dport = req->nfr_daddr.sin6.sin6_port;
                        key->fk_mask |= FKMASK_DPORT;
                }
        } else {
                SK_ERR("unknown AF %d", req->nfr_saddr.sa.sa_family);
                return ENOTSUP;
        }

        switch (key->fk_mask) {
        case FKMASK_5TUPLE:
        case FKMASK_4TUPLE:
        case FKMASK_3TUPLE:
        case FKMASK_2TUPLE:
        case FKMASK_IPFLOW3:
        case FKMASK_IPFLOW2:
        case FKMASK_IPFLOW1:
                break;
        default:
                SK_ERR("unknown flow key mask 0x%04x", key->fk_mask);
                return ENOTSUP;
        }

        return 0;
}

__attribute__((always_inline))
static inline void
flow_pkt2key(struct __kern_packet *pkt, boolean_t input,
    struct flow_key *key)
{
        struct __flow *flow = pkt->pkt_flow;

        FLOW_KEY_CLEAR(key);

        if (__improbable((pkt->pkt_qum_qflags & QUM_F_FLOW_CLASSIFIED) == 0)) {
                return;
        }

        ASSERT(flow->flow_l3._l3_ip_ver != 0);

        key->fk_ipver = flow->flow_l3._l3_ip_ver;
        key->fk_proto = flow->flow_ip_proto;
        if (input) {
                if (flow->flow_ip_ver == IPVERSION) {
                        key->fk_src4 = flow->flow_ipv4_dst;
                        key->fk_sport = flow->flow_tcp_dst;
                        key->fk_dst4 = flow->flow_ipv4_src;
                        key->fk_dport = flow->flow_tcp_src;
                } else {
                        key->fk_src6 = flow->flow_ipv6_dst;
                        key->fk_sport = flow->flow_tcp_dst;
                        key->fk_dst6 = flow->flow_ipv6_src;
                        key->fk_dport = flow->flow_tcp_src;
                }
        } else {
                if (flow->flow_ip_ver == IPVERSION) {
                        key->fk_src4 = flow->flow_ipv4_src;
                        key->fk_sport = flow->flow_tcp_src;
                        key->fk_dst4 = flow->flow_ipv4_dst;
                        key->fk_dport = flow->flow_tcp_dst;
                } else {
                        key->fk_src6 = flow->flow_ipv6_src;
                        key->fk_sport = flow->flow_tcp_src;
                        key->fk_dst6 = flow->flow_ipv6_dst;
                        key->fk_dport = flow->flow_tcp_dst;
                }
        }
}

__attribute__((always_inline))
static inline int
flow_ip_cmp(const void *a0, const void *b0, size_t alen)
{
        struct flow_ip_addr *a = __DECONST(struct flow_ip_addr *, a0),
            *b = __DECONST(struct flow_ip_addr *, b0);

        switch (alen) {
        case sizeof(struct in_addr):
                if (a->_addr32[0] > b->_addr32[0]) {
                        return 1;
                }
                if (a->_addr32[0] < b->_addr32[0]) {
                        return -1;
                }
                break;

        case sizeof(struct in6_addr):
                if (a->_addr64[1] > b->_addr64[1]) {
                        return 1;
                }
                if (a->_addr64[1] < b->_addr64[1]) {
                        return -1;
                }
                if (a->_addr64[0] > b->_addr64[0]) {
                        return 1;
                }
                if (a->_addr64[0] < b->_addr64[0]) {
                        return -1;
                }
                break;

        default:
                VERIFY(0);
                /* NOTREACHED */
                __builtin_unreachable();
        }
        return 0;
}

__attribute__((always_inline))
static inline struct flow_owner_bucket *
flow_mgr_get_fob_at_idx(struct flow_mgr *fm, uint32_t idx)
{
        return (struct flow_owner_bucket *)(void *)
            ((intptr_t)fm->fm_owner_buckets +
            (idx * fm->fm_owner_bucket_sz));
}

__attribute__((always_inline))
static inline struct flow_route_bucket *
flow_mgr_get_frb_at_idx(struct flow_mgr *fm, uint32_t idx)
{
        return (struct flow_route_bucket *)(void *)
            ((intptr_t)fm->fm_route_buckets +
            (idx * fm->fm_route_bucket_sz));
}

__attribute__((always_inline))
static inline struct flow_route_id_bucket *
flow_mgr_get_frib_at_idx(struct flow_mgr *fm, uint32_t idx)
{
        return (struct flow_route_id_bucket *)(void *)
            ((intptr_t)fm->fm_route_id_buckets +
            (idx * fm->fm_route_id_bucket_sz));
}

__attribute__((always_inline))
static inline uint32_t
flow_mgr_get_fob_idx(struct flow_mgr *fm,
    struct flow_owner_bucket *bkt)
{
        ASSERT(((intptr_t)bkt - (intptr_t)fm->fm_owner_buckets) %
            fm->fm_owner_bucket_sz == 0);
        return (uint32_t)(((intptr_t)bkt - (intptr_t)fm->fm_owner_buckets) /
            fm->fm_owner_bucket_sz);
}

__attribute__((always_inline))
static inline size_t
flow_mgr_get_num_flows(struct flow_mgr *mgr)
{
        ASSERT(mgr->fm_flow_table != NULL);
        return cuckoo_hashtable_entries(mgr->fm_flow_table);
}

extern unsigned int sk_fo_size;
extern struct skmem_cache *sk_fo_cache;

extern unsigned int sk_fe_size;
extern struct skmem_cache *sk_fe_cache;

extern unsigned int sk_fab_size;
extern struct skmem_cache *sk_fab_cache;

extern uint32_t flow_seed;

extern struct skmem_cache *flow_route_cache;
extern struct skmem_cache *flow_stats_cache;

__BEGIN_DECLS

typedef void (*flow_route_ctor_fn_t)(void *arg, struct flow_route *);
typedef int (*flow_route_resolve_fn_t)(void *arg, struct flow_route *,
    struct __kern_packet *);

extern int flow_init(void);
extern void flow_fini(void);

extern void flow_mgr_init(void);
extern void flow_mgr_fini(void);
extern struct flow_mgr *flow_mgr_find_lock(uuid_t);
extern void flow_mgr_unlock(void);
extern struct flow_mgr *flow_mgr_create(size_t, size_t, size_t, size_t);
extern void flow_mgr_destroy(struct flow_mgr *);
extern void flow_mgr_terminate(struct flow_mgr *);
extern int flow_mgr_flow_add(struct kern_nexus *nx, struct flow_mgr *fm,
    struct flow_owner *fo, struct ifnet *ifp, struct nx_flow_req *req,
    flow_route_ctor_fn_t fr_ctor, flow_route_resolve_fn_t fr_resolve, void *fr_arg);
extern struct flow_owner_bucket *flow_mgr_get_fob_by_pid(
    struct flow_mgr *, pid_t);
extern struct flow_entry *flow_mgr_get_fe_by_uuid_rlock(
    struct flow_mgr *, uuid_t);
extern struct flow_route_bucket *flow_mgr_get_frb_by_addr(
    struct flow_mgr *, union sockaddr_in_4_6 *);
extern struct flow_route_id_bucket *flow_mgr_get_frib_by_uuid(
    struct flow_mgr *, uuid_t);
extern int flow_mgr_flow_hash_mask_add(struct flow_mgr *fm, uint32_t mask);
extern int flow_mgr_flow_hash_mask_del(struct flow_mgr *fm, uint32_t mask);

extern struct flow_entry *fe_alloc(boolean_t can_block);

extern int flow_namespace_create(union sockaddr_in_4_6 *, uint8_t protocol,
    netns_token *, uint16_t, struct ns_flow_info *);
extern void flow_namespace_half_close(netns_token *token);
extern void flow_namespace_withdraw(netns_token *);
extern void flow_namespace_destroy(netns_token *);

extern struct flow_owner_bucket *flow_owner_buckets_alloc(size_t, size_t *, size_t *);
extern void flow_owner_buckets_free(struct flow_owner_bucket *, size_t);
extern void flow_owner_bucket_init(struct flow_owner_bucket *);
extern void flow_owner_bucket_destroy(struct flow_owner_bucket *);
extern void flow_owner_bucket_purge_all(struct flow_owner_bucket *);
extern void flow_owner_attach_nexus_port(struct flow_mgr *, boolean_t,
    pid_t, nexus_port_t);
extern uint32_t flow_owner_detach_nexus_port(struct flow_mgr *,
    boolean_t, pid_t, nexus_port_t, boolean_t);
extern struct flow_owner *flow_owner_alloc(struct flow_owner_bucket *,
    struct proc *, nexus_port_t, bool, bool, struct nx_flowswitch *,
    struct nexus_adapter *, void *, bool);
extern void flow_owner_free(struct flow_owner_bucket *, struct flow_owner *);
extern struct flow_entry *flow_owner_create_entry(struct flow_owner *,
    struct nx_flow_req *, boolean_t, uint32_t, boolean_t,
    struct flow_route *, int *);
extern int flow_owner_destroy_entry(struct flow_owner *, uuid_t, bool, void *);
extern struct flow_owner *flow_owner_find_by_pid(struct flow_owner_bucket *,
    pid_t, void *, bool);
extern int flow_owner_flowadv_index_alloc(struct flow_owner *, flowadv_idx_t *);
extern void flow_owner_flowadv_index_free(struct flow_owner *, flowadv_idx_t);
extern uint32_t flow_owner_activate_nexus_port(struct flow_mgr *,
    boolean_t, pid_t, nexus_port_t, struct nexus_adapter *,
    na_activate_mode_t);

extern struct flow_entry *flow_mgr_find_fe_by_key(struct flow_mgr *,
    struct flow_key *);
extern struct flow_entry *flow_mgr_find_conflicting_fe(struct flow_mgr *fm,
    struct flow_key *fe_key);
extern void flow_mgr_foreach_flow(struct flow_mgr *fm,
    void (^flow_handler)(struct flow_entry *fe));
extern struct flow_entry *flow_entry_find_by_uuid(struct flow_owner *,
    uuid_t);
extern struct flow_entry *flow_entry_alloc(struct flow_owner *fo,
    struct nx_flow_req *req, int *perr);
extern void flow_entry_teardown(struct flow_owner *, struct flow_entry *);
extern void flow_entry_destroy(struct flow_owner *, struct flow_entry *, bool,
    void *);
extern void flow_entry_retain(struct flow_entry *fe);
extern void flow_entry_release(struct flow_entry **pfe);
extern uint32_t flow_entry_refcnt(struct flow_entry *fe);
extern bool rx_flow_demux_match(struct nx_flowswitch *, struct flow_entry *,
    struct __kern_packet *);
extern struct flow_entry *rx_lookup_child_flow(struct nx_flowswitch *fsw,
    struct flow_entry *, struct __kern_packet *);
extern struct flow_entry *tx_lookup_child_flow(struct flow_entry *, uuid_t);

extern struct flow_entry_dead *flow_entry_dead_alloc(zalloc_flags_t);
extern void flow_entry_dead_free(struct flow_entry_dead *);

extern void flow_entry_stats_get(struct flow_entry *, struct sk_stats_flow *);

extern int flow_pkt_classify(struct __kern_packet *pkt, struct ifnet *ifp,
    sa_family_t af, bool input);

extern void flow_track_stats(struct flow_entry *, uint64_t, uint64_t,
    bool, bool);
extern int flow_pkt_track(struct flow_entry *, struct __kern_packet *, bool);
extern boolean_t flow_track_tcp_want_abort(struct flow_entry *);
extern void flow_track_abort_tcp(struct flow_entry *fe,
    struct __kern_packet *in_pkt, struct __kern_packet *rst_pkt);
extern void flow_track_abort_quic(struct flow_entry *fe, uint8_t *token);

extern void fsw_host_rx(struct nx_flowswitch *, struct pktq *);
extern void fsw_host_sendup(struct ifnet *, struct mbuf *, struct mbuf *,
    uint32_t, uint32_t);

extern void flow_rx_agg_tcp(struct nx_flowswitch *fsw, struct flow_entry *fe);

extern void flow_route_init(void);
extern void flow_route_fini(void);
extern struct flow_route_bucket *flow_route_buckets_alloc(size_t, size_t *, size_t *);
extern void flow_route_buckets_free(struct flow_route_bucket *, size_t);
extern void flow_route_bucket_init(struct flow_route_bucket *);
extern void flow_route_bucket_destroy(struct flow_route_bucket *);
extern void flow_route_bucket_purge_all(struct flow_route_bucket *);
extern struct flow_route_id_bucket *flow_route_id_buckets_alloc(size_t,
    size_t *, size_t *);
extern void flow_route_id_buckets_free(struct flow_route_id_bucket *, size_t);
extern void flow_route_id_bucket_init(struct flow_route_id_bucket *);
extern void flow_route_id_bucket_destroy(struct flow_route_id_bucket *);

extern int flow_route_select_laddr(union sockaddr_in_4_6 *,
    union sockaddr_in_4_6 *, struct ifnet *, struct rtentry *, uint32_t *, int);
extern int flow_route_find(struct kern_nexus *, struct flow_mgr *,
    struct ifnet *, struct nx_flow_req *, flow_route_ctor_fn_t,
    flow_route_resolve_fn_t, void *, struct flow_route **);
extern int flow_route_configure(struct flow_route *, struct ifnet *,
    struct nx_flow_req *);
extern void flow_route_retain(struct flow_route *);
extern void flow_route_release(struct flow_route *);
extern uint32_t flow_route_prune(struct flow_mgr *, struct ifnet *,
    uint32_t *);
extern void flow_route_cleanup(struct flow_route *);
extern boolean_t flow_route_laddr_validate(union sockaddr_in_4_6 *,
    struct ifnet *, uint32_t *);
extern boolean_t flow_route_key_validate(struct flow_key *, struct ifnet *,
    uint32_t *);
extern void flow_qset_select_dynamic(struct nx_flowswitch *,
    struct flow_entry *, boolean_t);
extern void flow_stats_init(void);
extern void flow_stats_fini(void);
extern struct flow_stats *flow_stats_alloc(boolean_t cansleep);

#if SK_LOG
#define FLOWKEY_DBGBUF_SIZE 256
#define FLOWENTRY_DBGBUF_SIZE 512
extern char *fk_as_string(const struct flow_key *fk, char *, size_t);
extern char *fe_as_string(const struct flow_entry *fe, char *, size_t);
#endif /* SK_LOG */
__END_DECLS
#endif /* BSD_KERNEL_PRIVATE */
#endif /* !_SKYWALK_NEXUS_FLOWSWITCH_FLOW_FLOWVAR_H_ */