/*
 * Copyright (c) 2016-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Once a packet is classified, it goes through checks to see if there
 * is a matching flow entry in the flow table. The key used to search
 * for the entry is composed of the fields contained in struct flow_ptrs.
 *
 * Flow entry insertion into and deletion from the flow table, on behalf
 * of the owning client process, requires the use of the rule ID (UUID)
 * as the search key.
 *
 * Because of the above, each flow entry simultaneously exists in two
 * respective trees: flow_entry_tree and flow_entry_id_tree.
 *
 * Using a single RW lock to protect the two trees is simple, but the
 * data path performance is impacted during flow insertion and deletion,
 * especially as the number of client processes and flows grows.
 *
 * To solve that, we deploy the following scheme:
 *
 * Given that the flow_entry_tree is searched on a per-packet basis,
 * we break it down into a series of trees, each one contained within
 * a flow_bucket structure. The hash from flow_ptrs determines the
 * index of the flow_bucket to search the flow_entry_tree from.
 *
 * The flow_entry_id_tree is searched on each flow insertion and
 * deletion, and similarly we break it down into a series of trees,
 * each contained within a flow_owner_bucket structure. We use the
 * client process ID (pid_t) to determine the bucket index.
 *
 * Each flow_bucket and flow_owner_bucket structure is dynamically
 * created, and is aligned on the CPU cache boundary. The number of
 * buckets is determined by the client module at the time the flow
 * manager context is initialized. This is done to avoid false
 * sharing, especially given that each bucket has its own RW lock.
 */
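
/*
 * Illustrative sketch (not part of this header): how the per-packet key
 * hash and the client pid described above might be folded into bucket
 * indices.  This assumes a simple modulo scheme and hypothetical helper
 * names; the actual derivation lives in the flow manager code.
 *
 *	static inline uint32_t
 *	example_flow_bucket_idx(uint32_t key_hash, uint32_t bucket_cnt)
 *	{
 *		return (key_hash % bucket_cnt);
 *	}
 *
 *	static inline uint32_t
 *	example_owner_bucket_idx(pid_t pid, uint32_t bucket_cnt)
 *	{
 *		return ((uint32_t)pid % bucket_cnt);
 *	}
 */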

#ifndef _SKYWALK_NEXUS_FLOWSWITCH_FLOW_FLOWVAR_H_
#define _SKYWALK_NEXUS_FLOWSWITCH_FLOW_FLOWVAR_H_
66
67 #ifdef BSD_KERNEL_PRIVATE
68 #include <skywalk/core/skywalk_var.h>
69 #include <skywalk/lib/cuckoo_hashtable.h>
70 #include <skywalk/namespace/netns.h>
71 #include <skywalk/namespace/protons.h>
72 #include <skywalk/packet/packet_var.h>
73 #include <net/flowhash.h>
74 #include <netinet/ip.h>
75 #include <netinet/in_stat.h>
76 #include <netinet/ip6.h>
77 #include <sys/eventhandler.h>
78
79 RB_HEAD(flow_owner_tree, flow_owner);
80
81 struct flow_owner_bucket {
82 decl_lck_mtx_data(, fob_lock);
83 struct flow_owner_tree fob_owner_head;
84 uint16_t fob_busy_flags;
85 uint16_t fob_open_waiters;
86 uint16_t fob_close_waiters;
87 uint16_t fob_dtor_waiters;
88 const size_t fob_idx;
89 };
90
91 #define FOBF_OPEN_BUSY 0x1 /* flow open monitor */
92 #define FOBF_CLOSE_BUSY 0x2 /* flow close monitor */
93 #define FOBF_DEAD 0x4 /* no longer usable */
94
95 #define FOB_LOCK(_fob) \
96 lck_mtx_lock(&(_fob)->fob_lock)
97 #define FOB_LOCK_SPIN(_fob) \
98 lck_mtx_lock_spin(&(_fob)->fob_lock)
99 #define FOB_LOCK_CONVERT(_fob) \
100 lck_mtx_convert_spin(&(_fob)->fob_lock)
101 #define FOB_TRY_LOCK(_fob) \
102 lck_mtx_try_lock(&(_fob)->fob_lock)
103 #define FOB_LOCK_ASSERT_HELD(_fob) \
104 LCK_MTX_ASSERT(&(_fob)->fob_lock, LCK_MTX_ASSERT_OWNED)
105 #define FOB_LOCK_ASSERT_NOTHELD(_fob) \
106 LCK_MTX_ASSERT(&(_fob)->fob_lock, LCK_MTX_ASSERT_NOTOWNED)
107 #define FOB_UNLOCK(_fob) \
108 lck_mtx_unlock(&(_fob)->fob_lock)
109
110 RB_HEAD(flow_entry_id_tree, flow_entry);
111
112 #define FLOW_PROCESS_NAME_LENGTH 24
113
114 struct flow_owner {
115 RB_ENTRY(flow_owner) fo_link;
116 struct flow_entry_id_tree fo_flow_entry_id_head;
117 const struct flow_owner_bucket *fo_bucket;
118 void *fo_context;
119 pid_t fo_pid;
120 bool fo_nx_port_pid_bound;
121 bool fo_nx_port_destroyed;
122 bool fo_low_latency;
123 nexus_port_t fo_nx_port;
124 uuid_t fo_key;
125
126 struct nexus_adapter * const fo_nx_port_na;
127 struct nx_flowswitch * const fo_fsw;

	/*
	 * Array of bitmaps to manage the flow advisory table indices.
	 * Currently we are restricting a flow owner to a single nexus
	 * port, so this structure is effectively managing the flow
	 * advisory indices for a port (see the illustrative sketch
	 * following this structure).
	 */
	bitmap_t *fo_flowadv_bmap;
	uint32_t fo_flowadv_max;
	uint32_t fo_num_flowadv;

	/* for debugging */
	char fo_name[FLOW_PROCESS_NAME_LENGTH];
};
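
/*
 * Illustrative sketch (not part of this header) of the kind of bitmap
 * bookkeeping fo_flowadv_bmap implies, assuming a flat array of 64-bit
 * words and hypothetical helper names; the real logic lives behind
 * flow_owner_flowadv_index_alloc() and flow_owner_flowadv_index_free(),
 * declared later in this file.
 *
 *	static inline int
 *	example_flowadv_alloc(uint64_t *bmap, uint32_t max, uint32_t *idx)
 *	{
 *		for (uint32_t i = 0; i < max; i++) {
 *			uint64_t bit = 1ULL << (i % 64);
 *			if ((bmap[i / 64] & bit) == 0) {
 *				bmap[i / 64] |= bit;	// claim a free index
 *				*idx = i;
 *				return 0;
 *			}
 *		}
 *		return ENOSPC;				// table is full
 *	}
 *
 *	static inline void
 *	example_flowadv_free(uint64_t *bmap, uint32_t idx)
 *	{
 *		bmap[idx / 64] &= ~(1ULL << (idx % 64));
 *	}
 */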

#define FO_BUCKET(_fo) \
	__DECONST(struct flow_owner_bucket *, (_fo)->fo_bucket)

RB_PROTOTYPE_SC_PREV(__private_extern__, flow_owner_tree, flow_owner,
    fo_link, fo_cmp);
RB_PROTOTYPE_SC_PREV(__private_extern__, flow_entry_id_tree, flow_entry,
    fe_id_link, fe_id_cmp);

typedef enum {
	/*
	 * TCP states.
	 */
	FT_STATE_CLOSED = 0,	/* closed */
	FT_STATE_LISTEN,	/* listening for connection */
	FT_STATE_SYN_SENT,	/* active, have sent SYN */
	FT_STATE_SYN_RECEIVED,	/* have sent and rcvd SYN */
	FT_STATE_ESTABLISHED,	/* established */
	FT_STATE_CLOSE_WAIT,	/* rcvd FIN, waiting close */
	FT_STATE_FIN_WAIT_1,	/* have sent FIN */
	FT_STATE_CLOSING,	/* exchanged FINs, waiting FIN|ACK */
	FT_STATE_LAST_ACK,	/* rcvd FIN, closed, waiting FIN|ACK */
	FT_STATE_FIN_WAIT_2,	/* closed, FIN is ACK'd */
	FT_STATE_TIME_WAIT,	/* quiet wait after close */

	/*
	 * UDP states.
	 */
	FT_STATE_NO_TRAFFIC = 20,	/* no packet observed */
	FT_STATE_SINGLE,		/* single packet */
	FT_STATE_MULTIPLE,		/* multiple packets */

	FT_STATE_MAX = 255
} flow_track_state_t;

struct flow_track_rtt {
	uint64_t frtt_timestamp;	/* tracked segment timestamp */
	uint64_t frtt_last;		/* previous net_uptime (rate limiting) */
	uint32_t frtt_seg_begin;	/* tracked segment begin SEQ */
	uint32_t frtt_seg_end;		/* tracked segment end SEQ */
	uint32_t frtt_usec;		/* avg RTT in usec */
};

#define FLOWTRACK_RTT_SAMPLE_INTERVAL 2 /* sample ACK RTT every 2 sec */
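
/*
 * Illustrative sketch (not part of this header) of how the fields above
 * support rate-limited ACK RTT sampling: at most one tracked segment is
 * armed per FLOWTRACK_RTT_SAMPLE_INTERVAL, and the measurement completes
 * once an ACK covers the tracked segment.  Helper names, the smoothing
 * factor and the exact time sources are assumptions; the real logic
 * lives in the flow tracking code.
 *
 *	static inline void
 *	example_rtt_arm(struct flow_track_rtt *frtt, uint32_t seg_begin,
 *	    uint32_t seg_end, uint64_t uptime_sec, uint64_t now_ns)
 *	{
 *		if ((uptime_sec - frtt->frtt_last) <
 *		    FLOWTRACK_RTT_SAMPLE_INTERVAL)
 *			return;			// rate-limited, skip sample
 *		frtt->frtt_seg_begin = seg_begin;
 *		frtt->frtt_seg_end = seg_end;
 *		frtt->frtt_timestamp = now_ns;
 *		frtt->frtt_last = uptime_sec;
 *	}
 *
 *	static inline void
 *	example_rtt_ack(struct flow_track_rtt *frtt, uint32_t ack,
 *	    uint64_t now_ns)
 *	{
 *		if ((int32_t)(ack - frtt->frtt_seg_end) < 0)
 *			return;			// segment not yet ACK'd
 *		uint32_t sample = (uint32_t)((now_ns - frtt->frtt_timestamp) / NSEC_PER_USEC);
 *		// EWMA: 7/8 of the old average plus 1/8 of the new sample
 *		frtt->frtt_usec = (frtt->frtt_usec != 0) ?
 *		    (frtt->frtt_usec - (frtt->frtt_usec >> 3) + (sample >> 3)) :
 *		    sample;
 *	}
 */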

struct flow_track {
	/*
	 * TCP specific tracking info.
	 */
	uint32_t fse_seqlo;	/* max sequence number sent */
	uint32_t fse_seqhi;	/* max the other end ACKd + win */
	uint32_t fse_seqlast;	/* last sequence number (FIN) */
	uint16_t fse_max_win;	/* largest window (pre scaling) */
	uint16_t fse_mss;	/* maximum segment size option */
	uint8_t fse_state;	/* active state level (FT_STATE_*) */
	uint8_t fse_wscale;	/* window scaling factor */
	uint16_t fse_flags;	/* FLOWSTATEF_* */
	uint32_t fse_syn_ts;	/* SYN timestamp */
	uint32_t fse_syn_cnt;	/* # of SYNs per second */

	struct flow_track_rtt fse_rtt;	/* ACK RTT tracking */
#define fse_rtt_usec fse_rtt.frtt_usec
} __sk_aligned(8);

/* valid values for fse_flags */
#define FLOWSTATEF_WSCALE 0x1 /* fse_wscale is valid */

struct flow_llhdr {
	uint32_t flh_gencnt;	/* link-layer address gencnt */

	const uint8_t flh_off;
	const uint8_t flh_len;
	uint16_t flh_pad;	/* for future */

	union _flh_u {
		uint64_t _buf[2];
		struct {
			uint16_t _eth_pad;
			struct ether_header _eth;
		} _eth_padded;
	} __sk_aligned(8) _flh;
#define flh_eth_padded _flh._eth_padded
#define flh_eth _flh._eth_padded._eth
};

typedef enum {
	FE_QSET_SELECT_NONE,
	FE_QSET_SELECT_FIXED,
	FE_QSET_SELECT_DYNAMIC
} flow_qset_select_t;

extern kern_allocation_name_t skmem_tag_flow_demux;
typedef int (*flow_demux_memcmp_mask_t)(const uint8_t *src1, const uint8_t *src2,
    const uint8_t *byte_mask);

struct kern_flow_demux_pattern {
	struct flow_demux_pattern fdp_demux_pattern;
	flow_demux_memcmp_mask_t fdp_memcmp_mask;
};
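
/*
 * Illustrative sketch (not part of this header): a portable byte-wise
 * reference for the masked compare that flow_demux_memcmp_mask_t
 * abstracts.  The typedef above carries no length because each routine
 * is matched to its pattern (SIMD variants can be plugged in); the
 * sketch takes an explicit length for clarity.
 *
 *	static inline int
 *	example_memcmp_mask(const uint8_t *src1, const uint8_t *src2,
 *	    const uint8_t *byte_mask, size_t len)
 *	{
 *		uint8_t diff = 0;
 *
 *		for (size_t i = 0; i < len; i++) {
 *			// only bits enabled in the mask participate
 *			diff |= (src1[i] ^ src2[i]) & byte_mask[i];
 *		}
 *		return (diff != 0);	// 0 on match, non-zero otherwise
 *	}
 */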

#define MAX_PKT_DEMUX_LIMIT 1000

TAILQ_HEAD(flow_entry_list, flow_entry);

#define FLOW_PROC_FLAG_GSO 0x0001
typedef void (*flow_action_t)(struct nx_flowswitch *fsw, struct flow_entry *fe,
    uint32_t flags);

struct flow_entry {
	/**** Common Group ****/
	os_refcnt_t fe_refcnt;
	struct flow_key fe_key;
	uint32_t fe_flags;
	uint32_t fe_key_hash;
	struct cuckoo_node fe_cnode;

	uuid_t fe_uuid __sk_aligned(8);
	nexus_port_t fe_nx_port;
	uint32_t fe_laddr_gencnt;
	uint32_t fe_want_nonviable;
	uint32_t fe_want_withdraw;
	uint8_t fe_transport_protocol;

	/**** Rx Group ****/
	uint16_t fe_rx_frag_count;
	uint32_t fe_rx_pktq_bytes;
	struct pktq fe_rx_pktq;
	TAILQ_ENTRY(flow_entry) fe_rx_link;
	flow_action_t fe_rx_process;

	/*
	 * Largest allocated packet size.
	 * Used by:
	 * - mbuf batch allocation logic during RX aggregation and netif copy.
	 * - packet allocation logic during RX aggregation.
	 */
	uint32_t fe_rx_largest_size;

	/**** Tx Group ****/
	bool fe_tx_is_cont_frag;
	uint32_t fe_tx_frag_id;
	struct pktq fe_tx_pktq;
	TAILQ_ENTRY(flow_entry) fe_tx_link;
	flow_action_t fe_tx_process;

	uuid_t fe_eproc_uuid __sk_aligned(8);
	flowadv_idx_t fe_adv_idx;
	kern_packet_svc_class_t fe_svc_class;
	uint32_t fe_policy_id;	/* policy id matched to flow */

	/**** Misc Group ****/
	struct nx_flowswitch * const fe_fsw;
	struct ns_token *fe_port_reservation;
	struct protons_token *fe_proto_reservation;
	void *fe_ipsec_reservation;

	struct flow_track fe_ltrack;	/* local endpoint state */
	struct flow_track fe_rtrack;	/* remote endpoint state */

	/*
	 * Flow stats are kept externally stand-alone, refcnt'ed by various
	 * users (e.g. flow_entry, necp_client_flow, etc.)
	 */
	struct flow_stats *fe_stats;
	struct flow_route *fe_route;

	RB_ENTRY(flow_entry) fe_id_link;

	TAILQ_ENTRY(flow_entry) fe_linger_link;
	uint64_t fe_linger_expire;	/* expiration deadline */
	uint32_t fe_linger_wait;	/* linger time (seconds) */

	pid_t fe_pid;
	pid_t fe_epid;
	char fe_proc_name[FLOW_PROCESS_NAME_LENGTH];
	char fe_eproc_name[FLOW_PROCESS_NAME_LENGTH];

	uint32_t fe_flowid;	/* globally unique flow ID */

	/* Logical link related information */
	struct netif_qset *fe_qset;
	uint64_t fe_qset_id;
	flow_qset_select_t fe_qset_select;
	uint32_t fe_tr_genid;

	/* Parent child information */
	decl_lck_rw_data(, fe_child_list_lock);
	struct flow_entry_list fe_child_list;
	TAILQ_ENTRY(flow_entry) fe_child_link;
#if DEVELOPMENT || DEBUG
	int16_t fe_child_count;
#endif // DEVELOPMENT || DEBUG
	uint8_t fe_demux_pattern_count;
	struct kern_flow_demux_pattern *fe_demux_patterns;
	uint8_t *fe_demux_pkt_data;
};

/* valid values for fe_flags */
#define FLOWENTF_INITED 0x00000001 /* {src,dst} states initialized */
#define FLOWENTF_TRACK 0x00000010 /* enable state tracking */
#define FLOWENTF_CONNECTED 0x00000020 /* connected mode */
#define FLOWENTF_LISTENER 0x00000040 /* listener mode */
#define FLOWENTF_QOS_MARKING 0x00000100 /* flow can have qos marking */
#define FLOWENTF_LOW_LATENCY 0x00000200 /* low latency flow */
#define FLOWENTF_WAIT_CLOSE 0x00001000 /* defer free after close */
#define FLOWENTF_CLOSE_NOTIFY 0x00002000 /* notify NECP upon tear down */
#define FLOWENTF_EXTRL_PORT 0x00004000 /* port reservation is held externally */
#define FLOWENTF_EXTRL_PROTO 0x00008000 /* proto reservation is held externally */
#define FLOWENTF_EXTRL_FLOWID 0x00010000 /* flowid reservation is held externally */
#define FLOWENTF_CHILD 0x00020000 /* child flow */
#define FLOWENTF_PARENT 0x00040000 /* parent flow */
#define FLOWENTF_NOWAKEFROMSLEEP 0x00080000 /* don't wake for this flow */
#define FLOWENTF_ABORTED 0x01000000 /* has sent RST to peer */
#define FLOWENTF_NONVIABLE 0x02000000 /* disabled; awaiting tear down */
#define FLOWENTF_WITHDRAWN 0x04000000 /* flow has been withdrawn */
#define FLOWENTF_TORN_DOWN 0x08000000 /* torn down and awaiting destroy */
#define FLOWENTF_HALF_CLOSED 0x10000000 /* flow is half closed */
#define FLOWENTF_DESTROYED 0x40000000 /* not in RB trees anymore */
#define FLOWENTF_LINGERING 0x80000000 /* destroyed and in linger list */
#define FLOWENTF_BITS \
	"\020\01INITED\05TRACK\06CONNECTED\07LISTENER\011QOS_MARKING" \
	"\012LOW_LATENCY\015WAIT_CLOSE\016CLOSE_NOTIFY\017EXT_PORT" \
	"\020EXT_PROTO\021EXT_FLOWID\031ABORTED\032NONVIABLE\033WITHDRAWN" \
	"\034TORN_DOWN\035HALF_CLOSED\037DESTROYED\40LINGERING"

TAILQ_HEAD(flow_entry_linger_head, flow_entry);

struct flow_entry_dead {
	LIST_ENTRY(flow_entry_dead) fed_link;

	boolean_t fed_want_nonviable;
	boolean_t fed_want_clonotify;

	/* rule (flow) UUID */
	union {
		uint64_t fed_uuid_64[2];
		uint32_t fed_uuid_32[4];
		uuid_t fed_uuid;
	} __sk_aligned(8);
};

/*
 * Minimum refcnt for a flow route entry to be considered idle.
 */
#define FLOW_ROUTE_MINREF 2 /* for the 2 RB trees */

struct flow_route {
	RB_ENTRY(flow_route) fr_link;
	RB_ENTRY(flow_route) fr_id_link;

	/*
	 * fr_laddr represents the local address that the system chooses
	 * for the foreign destination in fr_faddr. The flow entry that
	 * is referring to this flow route object may choose a different
	 * local address if it wishes.
	 *
	 * fr_gaddr represents the gateway address to reach the final
	 * foreign destination fr_faddr, valid only if the destination is
	 * not directly attached (FLOWRTF_GATEWAY is set).
	 *
	 * The use of sockaddr for storage is for convenience; the port
	 * value is not applicable for this object, as this is shared
	 * among flow entries.
	 */
	union sockaddr_in_4_6 fr_laddr;	/* local IP address */
	union sockaddr_in_4_6 fr_faddr;	/* remote IP address */
#define fr_af fr_faddr.sa.sa_family
	union sockaddr_in_4_6 fr_gaddr;	/* gateway IP address */

	struct flow_llhdr fr_llhdr;
#define fr_eth_padded fr_llhdr.flh_eth_padded
#define fr_eth fr_llhdr.flh_eth

	/*
	 * In flow_route_tree, we use the destination address as key.
	 * To speed up searches, we initialize fr_addr_key to the address
	 * portion of fr_faddr depending on the address family.
	 */
	void *fr_addr_key;

	/* flow route UUID */
	uuid_t fr_uuid __sk_aligned(8);

	/*
	 * fr_usecnt is updated atomically; it is incremented when a flow
	 * entry takes a reference on this object and decremented when
	 * that reference is dropped. Periodically, the flowswitch
	 * instance garbage collects flow_route objects that aren't being
	 * referred to by any flow entries.
	 *
	 * fr_expire is set when fr_usecnt reaches its minimum count, and
	 * is cleared when it goes above the minimum count.
	 *
	 * The spin lock fr_reflock is used to serialize both.
	 */
	decl_lck_spin_data(, fr_reflock);
	uint64_t fr_expire;
	volatile uint32_t fr_usecnt;

	uint32_t fr_flags;
	uint32_t fr_laddr_gencnt;	/* local IP gencnt */
	uint32_t fr_addr_len;		/* sizeof {in,in6}_addr */

	volatile uint32_t fr_want_configure;
	volatile uint32_t fr_want_probe;

	/* lock to serialize resolver */
	decl_lck_mtx_data(, fr_lock);

	/*
	 * fr_rt_dst is the route to final destination, and along with
	 * fr_rt_evhdlr_tag, they are used in route event registration.
	 *
	 * fr_rt_gw is valid only if FLOWRTF_GATEWAY is set.
	 */
	eventhandler_tag fr_rt_evhdlr_tag;
	struct rtentry *fr_rt_dst;
	struct rtentry *fr_rt_gw;

	/* nexus UUID */
	uuid_t fr_nx_uuid __sk_aligned(8);

	const struct flow_mgr *fr_mgr;
	const struct flow_route_bucket *fr_frb;
	const struct flow_route_id_bucket *fr_frib;
};

/* valid values for fr_flags */
#define FLOWRTF_ATTACHED 0x00000001 /* attached to RB trees */
#define FLOWRTF_ONLINK 0x00000010 /* dst directly on the link */
#define FLOWRTF_GATEWAY 0x00000020 /* gw IP address is valid */
#define FLOWRTF_RESOLVED 0x00000040 /* flow route is resolved */
#define FLOWRTF_HAS_LLINFO 0x00000080 /* has dst link-layer address */
#define FLOWRTF_DELETED 0x00000100 /* route has been deleted */
#define FLOWRTF_DST_LL_MCAST 0x00000200 /* dst is link layer multicast */
#define FLOWRTF_DST_LL_BCAST 0x00000400 /* dst is link layer broadcast */
#define FLOWRTF_STABLE_ADDR 0x00000800 /* local address prefers stable */

#define FR_LOCK(_fr) \
	lck_mtx_lock(&(_fr)->fr_lock)
#define FR_TRY_LOCK(_fr) \
	lck_mtx_try_lock(&(_fr)->fr_lock)
#define FR_LOCK_ASSERT_HELD(_fr) \
	LCK_MTX_ASSERT(&(_fr)->fr_lock, LCK_MTX_ASSERT_OWNED)
#define FR_LOCK_ASSERT_NOTHELD(_fr) \
	LCK_MTX_ASSERT(&(_fr)->fr_lock, LCK_MTX_ASSERT_NOTOWNED)
#define FR_UNLOCK(_fr) \
	lck_mtx_unlock(&(_fr)->fr_lock)

#define FLOWRT_UPD_ETH_DST(_fr, _addr) do { \
	bcopy((_addr), (_fr)->fr_eth.ether_dhost, ETHER_ADDR_LEN); \
	(_fr)->fr_flags &= ~(FLOWRTF_DST_LL_MCAST|FLOWRTF_DST_LL_BCAST);\
	if (ETHER_IS_MULTICAST(_addr)) { \
		if (_ether_cmp(etherbroadcastaddr, (_addr)) == 0) \
			(_fr)->fr_flags |= FLOWRTF_DST_LL_BCAST; \
		else \
			(_fr)->fr_flags |= FLOWRTF_DST_LL_MCAST; \
	} \
} while (0)

RB_HEAD(flow_route_tree, flow_route);
RB_PROTOTYPE_SC_PREV(__private_extern__, flow_route_tree, flow_route,
    fr_link, fr_cmp);

struct flow_route_bucket {
	decl_lck_rw_data(, frb_lock);
	struct flow_route_tree frb_head;
	const uint32_t frb_idx;
};

#define FRB_WLOCK(_frb) \
	lck_rw_lock_exclusive(&(_frb)->frb_lock)
#define FRB_WLOCKTORLOCK(_frb) \
	lck_rw_lock_exclusive_to_shared(&(_frb)->frb_lock)
#define FRB_WTRYLOCK(_frb) \
	lck_rw_try_lock_exclusive(&(_frb)->frb_lock)
#define FRB_WUNLOCK(_frb) \
	lck_rw_unlock_exclusive(&(_frb)->frb_lock)
#define FRB_RLOCK(_frb) \
	lck_rw_lock_shared(&(_frb)->frb_lock)
#define FRB_RLOCKTOWLOCK(_frb) \
	lck_rw_lock_shared_to_exclusive(&(_frb)->frb_lock)
#define FRB_RTRYLOCK(_frb) \
	lck_rw_try_lock_shared(&(_frb)->frb_lock)
#define FRB_RUNLOCK(_frb) \
	lck_rw_unlock_shared(&(_frb)->frb_lock)
#define FRB_UNLOCK(_frb) \
	lck_rw_done(&(_frb)->frb_lock)
#define FRB_WLOCK_ASSERT_HELD(_frb) \
	LCK_RW_ASSERT(&(_frb)->frb_lock, LCK_RW_ASSERT_EXCLUSIVE)
#define FRB_RLOCK_ASSERT_HELD(_frb) \
	LCK_RW_ASSERT(&(_frb)->frb_lock, LCK_RW_ASSERT_SHARED)
#define FRB_LOCK_ASSERT_HELD(_frb) \
	LCK_RW_ASSERT(&(_frb)->frb_lock, LCK_RW_ASSERT_HELD)

RB_HEAD(flow_route_id_tree, flow_route);
RB_PROTOTYPE_SC_PREV(__private_extern__, flow_route_id_tree, flow_route,
    fr_id_link, fr_id_cmp);

struct flow_route_id_bucket {
	decl_lck_rw_data(, frib_lock);
	struct flow_route_id_tree frib_head;
	const uint32_t frib_idx;
};

#define FRIB_WLOCK(_frib) \
	lck_rw_lock_exclusive(&(_frib)->frib_lock)
#define FRIB_WLOCKTORLOCK(_frib) \
	lck_rw_lock_exclusive_to_shared(&(_frib)->frib_lock)
#define FRIB_WTRYLOCK(_frib) \
	lck_rw_try_lock_exclusive(&(_frib)->frib_lock)
#define FRIB_WUNLOCK(_frib) \
	lck_rw_unlock_exclusive(&(_frib)->frib_lock)
#define FRIB_RLOCK(_frib) \
	lck_rw_lock_shared(&(_frib)->frib_lock)
#define FRIB_RLOCKTOWLOCK(_frib) \
	lck_rw_lock_shared_to_exclusive(&(_frib)->frib_lock)
#define FRIB_RTRYLOCK(_frib) \
	lck_rw_try_lock_shared(&(_frib)->frib_lock)
#define FRIB_RUNLOCK(_frib) \
	lck_rw_unlock_shared(&(_frib)->frib_lock)
#define FRIB_UNLOCK(_frib) \
	lck_rw_done(&(_frib)->frib_lock)
#define FRIB_WLOCK_ASSERT_HELD(_frib) \
	LCK_RW_ASSERT(&(_frib)->frib_lock, LCK_RW_ASSERT_EXCLUSIVE)
#define FRIB_RLOCK_ASSERT_HELD(_frib) \
	LCK_RW_ASSERT(&(_frib)->frib_lock, LCK_RW_ASSERT_SHARED)
#define FRIB_LOCK_ASSERT_HELD(_frib) \
	LCK_RW_ASSERT(&(_frib)->frib_lock, LCK_RW_ASSERT_HELD)

struct flow_mgr {
	char fm_name[IFNAMSIZ];
	uuid_t fm_uuid;
	RB_ENTRY(flow_mgr) fm_link;

	struct cuckoo_hashtable *fm_flow_table;
	size_t fm_flow_hash_count[FKMASK_IDX_MAX];	/* # of flows with mask */
	uint16_t fm_flow_hash_masks[FKMASK_IDX_MAX];

	void *fm_owner_buckets;			/* cache-aligned fob */
	const size_t fm_owner_buckets_cnt;	/* total # of fobs */
	const size_t fm_owner_bucket_sz;	/* size of each fob */
	const size_t fm_owner_bucket_tot_sz;	/* allocated size of each fob */

	void *fm_route_buckets;			/* cache-aligned frb */
	const size_t fm_route_buckets_cnt;	/* total # of frb */
	const size_t fm_route_bucket_sz;	/* size of each frb */
	const size_t fm_route_bucket_tot_sz;	/* allocated size of each frb */

	void *fm_route_id_buckets;		/* cache-aligned frib */
	const size_t fm_route_id_buckets_cnt;	/* total # of frib */
	const size_t fm_route_id_bucket_sz;	/* size of each frib */
	const size_t fm_route_id_bucket_tot_sz;	/* allocated size of each frib */
};

/*
 * This function compares @match against @key.
 * Return values:
 * 0 as long as @key (exact) matches everything that @match (wildcard)
 *   wants to match on.
 * non-zero when it doesn't match.
 */
static inline int
flow_key_cmp(const struct flow_key *match, const struct flow_key *key)
{
#define FK_CMP(field, mask) \
	if ((match->fk_mask & mask) != 0) { \
		if ((key->fk_mask & mask) == 0) { \
			return 1; \
		} \
		int d = memcmp(&match->field, &key->field, sizeof(match->field)); \
		if (d != 0) { \
			return d; \
		} \
	}

	FK_CMP(fk_ipver, FKMASK_IPVER);
	FK_CMP(fk_proto, FKMASK_PROTO);
	FK_CMP(fk_src, FKMASK_SRC);
	FK_CMP(fk_dst, FKMASK_DST);
	FK_CMP(fk_sport, FKMASK_SPORT);
	FK_CMP(fk_dport, FKMASK_DPORT);

	return 0;
}
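
/*
 * Example (illustrative): a listener's match key might carry only
 * FKMASK_IPVER | FKMASK_PROTO | FKMASK_SPORT (local port, remote side
 * wildcarded).  A fully-specified 5-tuple key built from an inbound
 * packet then compares equal as long as the IP version, protocol and
 * local port agree, because the remote address/port fields are simply
 * never consulted by FK_CMP.  The exact FKMASK_* combinations accepted
 * for flow entries are the ones listed in flow_req2key() below.
 */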

/*
 * Similar to flow_key_cmp(), except it uses a masked memory compare,
 * done with SIMD instructions where available on the platform.
 */
static inline int
flow_key_cmp_mask(const struct flow_key *match,
    const struct flow_key *key, const struct flow_key *mask)
{
	_CASSERT(FLOW_KEY_LEN == 48);
	_CASSERT(FLOW_KEY_LEN == sizeof(struct flow_key));
	_CASSERT((sizeof(struct flow_entry) % 16) == 0);
	_CASSERT((offsetof(struct flow_entry, fe_key) % 16) == 0);

	return sk_memcmp_mask_48B((const uint8_t *)match,
	    (const uint8_t *)key, (const uint8_t *)mask);
}
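
/*
 * Illustrative sketch (not part of this header): the @mask argument is
 * itself a struct flow_key whose bytes are all-ones over the fields
 * that should participate in the compare and zero elsewhere, so that a
 * single masked 48-byte compare can stand in for the field-by-field
 * walk in flow_key_cmp().  A mask covering, say, the IP version,
 * protocol and local port could be built roughly like this (assumed
 * helper, not a real API):
 *
 *	static inline void
 *	example_build_mask(struct flow_key *mask)
 *	{
 *		FLOW_KEY_CLEAR(mask);
 *		memset(&mask->fk_ipver, 0xff, sizeof(mask->fk_ipver));
 *		memset(&mask->fk_proto, 0xff, sizeof(mask->fk_proto));
 *		memset(&mask->fk_sport, 0xff, sizeof(mask->fk_sport));
 *	}
 */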

static inline uint32_t
flow_key_hash(const struct flow_key *key)
{
	uint32_t hash = FK_HASH_SEED;
#define FK_HASH(field, mask) \
	if ((key->fk_mask & mask) != 0) { \
		hash = net_flowhash(&key->field, sizeof(key->field), hash); \
	}

	FK_HASH(fk_ipver, FKMASK_IPVER);
	FK_HASH(fk_proto, FKMASK_PROTO);
	FK_HASH(fk_src, FKMASK_SRC);
	FK_HASH(fk_dst, FKMASK_DST);
	FK_HASH(fk_sport, FKMASK_SPORT);
	FK_HASH(fk_dport, FKMASK_DPORT);

	return hash;
}

__attribute__((always_inline))
static inline void
flow_key_unpack(const struct flow_key *key, union sockaddr_in_4_6 *laddr,
    union sockaddr_in_4_6 *faddr, uint8_t *protocol)
{
	*protocol = key->fk_proto;
	if (key->fk_ipver == IPVERSION) {
		laddr->sa.sa_family = AF_INET;
		laddr->sin.sin_addr = key->fk_src4;
		laddr->sin.sin_port = key->fk_sport;
		faddr->sa.sa_family = AF_INET;
		faddr->sin.sin_addr = key->fk_dst4;
		faddr->sin.sin_port = key->fk_dport;
	} else if (key->fk_ipver == IPV6_VERSION) {
		laddr->sa.sa_family = AF_INET6;
		laddr->sin6.sin6_addr = key->fk_src6;
		laddr->sin6.sin6_port = key->fk_sport;
		faddr->sa.sa_family = AF_INET6;
		faddr->sin6.sin6_addr = key->fk_dst6;
		faddr->sin6.sin6_port = key->fk_dport;
	}
}

__attribute__((always_inline))
static inline int
flow_req2key(struct nx_flow_req *req, struct flow_key *key)
{
	FLOW_KEY_CLEAR(key);

	if (req->nfr_saddr.sa.sa_family == AF_INET) {
		key->fk_ipver = IPVERSION;
		key->fk_proto = req->nfr_ip_protocol;
		key->fk_mask |= FKMASK_PROTO;
		if (sk_sa_has_addr(SA(&req->nfr_saddr))) {
			key->fk_src4 = req->nfr_saddr.sin.sin_addr;
			key->fk_mask |= (FKMASK_IPVER | FKMASK_SRC);
		}
		if (sk_sa_has_addr(SA(&req->nfr_daddr))) {
			key->fk_dst4 = req->nfr_daddr.sin.sin_addr;
			key->fk_mask |= (FKMASK_IPVER | FKMASK_DST);
		}
		if (sk_sa_has_port(SA(&req->nfr_saddr))) {
			key->fk_sport = req->nfr_saddr.sin.sin_port;
			key->fk_mask |= FKMASK_SPORT;
		}
		if (sk_sa_has_port(SA(&req->nfr_daddr))) {
			key->fk_dport = req->nfr_daddr.sin.sin_port;
			key->fk_mask |= FKMASK_DPORT;
		}
	} else if (req->nfr_saddr.sa.sa_family == AF_INET6) {
		key->fk_ipver = IPV6_VERSION;
		key->fk_proto = req->nfr_ip_protocol;
		key->fk_mask |= FKMASK_PROTO;
		if (sk_sa_has_addr(SA(&req->nfr_saddr))) {
			key->fk_src6 = req->nfr_saddr.sin6.sin6_addr;
			key->fk_mask |= (FKMASK_IPVER | FKMASK_SRC);
		}
		if (sk_sa_has_addr(SA(&req->nfr_daddr))) {
			key->fk_dst6 = req->nfr_daddr.sin6.sin6_addr;
			key->fk_mask |= (FKMASK_IPVER | FKMASK_DST);
		}
		if (sk_sa_has_port(SA(&req->nfr_saddr))) {
			key->fk_sport = req->nfr_saddr.sin6.sin6_port;
			key->fk_mask |= FKMASK_SPORT;
		}
		if (sk_sa_has_port(SA(&req->nfr_daddr))) {
			key->fk_dport = req->nfr_daddr.sin6.sin6_port;
			key->fk_mask |= FKMASK_DPORT;
		}
	} else {
		SK_ERR("unknown AF %d", req->nfr_saddr.sa.sa_family);
		return ENOTSUP;
	}

	switch (key->fk_mask) {
	case FKMASK_5TUPLE:
	case FKMASK_4TUPLE:
	case FKMASK_3TUPLE:
	case FKMASK_2TUPLE:
	case FKMASK_IPFLOW3:
	case FKMASK_IPFLOW2:
	case FKMASK_IPFLOW1:
		break;
	default:
		SK_ERR("unknown flow key mask 0x%04x", key->fk_mask);
		return ENOTSUP;
	}

	return 0;
}
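
/*
 * Example: a connected flow request carrying protocol plus both address
 * and port pairs accumulates all six FKMASK_* field bits above
 * (presumably the FKMASK_5TUPLE case); a request that leaves fields out
 * must still land on one of the other masks accepted by the switch
 * statement, otherwise flow_req2key() rejects it with ENOTSUP.
 */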

__attribute__((always_inline))
static inline void
flow_pkt2key(struct __kern_packet *pkt, boolean_t input,
    struct flow_key *key)
{
	struct __flow *flow = pkt->pkt_flow;

	FLOW_KEY_CLEAR(key);

	if (__improbable((pkt->pkt_qum_qflags & QUM_F_FLOW_CLASSIFIED) == 0)) {
		return;
	}

	ASSERT(flow->flow_l3._l3_ip_ver != 0);

	key->fk_ipver = flow->flow_l3._l3_ip_ver;
	key->fk_proto = flow->flow_ip_proto;
	if (input) {
		if (flow->flow_ip_ver == IPVERSION) {
			key->fk_src4 = flow->flow_ipv4_dst;
			key->fk_sport = flow->flow_tcp_dst;
			key->fk_dst4 = flow->flow_ipv4_src;
			key->fk_dport = flow->flow_tcp_src;
		} else {
			key->fk_src6 = flow->flow_ipv6_dst;
			key->fk_sport = flow->flow_tcp_dst;
			key->fk_dst6 = flow->flow_ipv6_src;
			key->fk_dport = flow->flow_tcp_src;
		}
	} else {
		if (flow->flow_ip_ver == IPVERSION) {
			key->fk_src4 = flow->flow_ipv4_src;
			key->fk_sport = flow->flow_tcp_src;
			key->fk_dst4 = flow->flow_ipv4_dst;
			key->fk_dport = flow->flow_tcp_dst;
		} else {
			key->fk_src6 = flow->flow_ipv6_src;
			key->fk_sport = flow->flow_tcp_src;
			key->fk_dst6 = flow->flow_ipv6_dst;
			key->fk_dport = flow->flow_tcp_dst;
		}
	}
}

__attribute__((always_inline))
static inline int
flow_ip_cmp(const void *a0, const void *b0, size_t alen)
{
	struct flow_ip_addr *a = __DECONST(struct flow_ip_addr *, a0),
	    *b = __DECONST(struct flow_ip_addr *, b0);

	switch (alen) {
	case sizeof(struct in_addr):
		if (a->_addr32[0] > b->_addr32[0]) {
			return 1;
		}
		if (a->_addr32[0] < b->_addr32[0]) {
			return -1;
		}
		break;

	case sizeof(struct in6_addr):
		if (a->_addr64[1] > b->_addr64[1]) {
			return 1;
		}
		if (a->_addr64[1] < b->_addr64[1]) {
			return -1;
		}
		if (a->_addr64[0] > b->_addr64[0]) {
			return 1;
		}
		if (a->_addr64[0] < b->_addr64[0]) {
			return -1;
		}
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}
	return 0;
}

__attribute__((always_inline))
static inline struct flow_owner_bucket *
flow_mgr_get_fob_at_idx(struct flow_mgr *fm, uint32_t idx)
{
	return (struct flow_owner_bucket *)(void *)
	       ((intptr_t)fm->fm_owner_buckets +
	       (idx * fm->fm_owner_bucket_sz));
}

__attribute__((always_inline))
static inline struct flow_route_bucket *
flow_mgr_get_frb_at_idx(struct flow_mgr *fm, uint32_t idx)
{
	return (struct flow_route_bucket *)(void *)
	       ((intptr_t)fm->fm_route_buckets +
	       (idx * fm->fm_route_bucket_sz));
}

__attribute__((always_inline))
static inline struct flow_route_id_bucket *
flow_mgr_get_frib_at_idx(struct flow_mgr *fm, uint32_t idx)
{
	return (struct flow_route_id_bucket *)(void *)
	       ((intptr_t)fm->fm_route_id_buckets +
	       (idx * fm->fm_route_id_bucket_sz));
}

__attribute__((always_inline))
static inline uint32_t
flow_mgr_get_fob_idx(struct flow_mgr *fm,
    struct flow_owner_bucket *bkt)
{
	ASSERT(((intptr_t)bkt - (intptr_t)fm->fm_owner_buckets) %
	    fm->fm_owner_bucket_sz == 0);
	return (uint32_t)(((intptr_t)bkt - (intptr_t)fm->fm_owner_buckets) /
	       fm->fm_owner_bucket_sz);
}
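
/*
 * Illustrative sketch (not part of this header): the accessors above
 * treat fm_*_buckets as a flat byte array and step through it in
 * fm_*_bucket_sz strides; flow_mgr_get_fob_idx() is simply the inverse
 * division.  The stride is kept separately from the structure size
 * because each bucket is padded out to a CPU cache-line multiple (see
 * the comment at the top of this file), e.g. roughly:
 *
 *	// assumed: cache_line_sz holds the CPU cache-line size
 *	size_t stride = P2ROUNDUP(sizeof(struct flow_owner_bucket),
 *	    cache_line_sz);
 *	struct flow_owner_bucket *fob = flow_mgr_get_fob_at_idx(fm, 2);
 *	ASSERT(flow_mgr_get_fob_idx(fm, fob) == 2);
 */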

__attribute__((always_inline))
static inline size_t
flow_mgr_get_num_flows(struct flow_mgr *mgr)
{
	ASSERT(mgr->fm_flow_table != NULL);
	return cuckoo_hashtable_entries(mgr->fm_flow_table);
}

extern unsigned int sk_fo_size;
extern struct skmem_cache *sk_fo_cache;

extern unsigned int sk_fe_size;
extern struct skmem_cache *sk_fe_cache;

extern unsigned int sk_fab_size;
extern struct skmem_cache *sk_fab_cache;

extern uint32_t flow_seed;

extern struct skmem_cache *flow_route_cache;
extern struct skmem_cache *flow_stats_cache;

__BEGIN_DECLS

typedef void (*flow_route_ctor_fn_t)(void *arg, struct flow_route *);
typedef int (*flow_route_resolve_fn_t)(void *arg, struct flow_route *,
    struct __kern_packet *);

extern int flow_init(void);
extern void flow_fini(void);

extern void flow_mgr_init(void);
extern void flow_mgr_fini(void);
extern struct flow_mgr *flow_mgr_find_lock(uuid_t);
extern void flow_mgr_unlock(void);
extern struct flow_mgr * flow_mgr_create(size_t, size_t, size_t, size_t);
extern void flow_mgr_destroy(struct flow_mgr *);
extern void flow_mgr_terminate(struct flow_mgr *);
extern int flow_mgr_flow_add(struct kern_nexus *nx, struct flow_mgr *fm,
    struct flow_owner *fo, struct ifnet *ifp, struct nx_flow_req *req,
    flow_route_ctor_fn_t fr_ctor, flow_route_resolve_fn_t fr_resolve, void *fr_arg);
extern struct flow_owner_bucket *flow_mgr_get_fob_by_pid(
    struct flow_mgr *, pid_t);
extern struct flow_entry *flow_mgr_get_fe_by_uuid_rlock(
    struct flow_mgr *, uuid_t);
extern struct flow_route_bucket *flow_mgr_get_frb_by_addr(
    struct flow_mgr *, union sockaddr_in_4_6 *);
extern struct flow_route_id_bucket *flow_mgr_get_frib_by_uuid(
    struct flow_mgr *, uuid_t);
extern int flow_mgr_flow_hash_mask_add(struct flow_mgr *fm, uint32_t mask);
extern int flow_mgr_flow_hash_mask_del(struct flow_mgr *fm, uint32_t mask);

extern struct flow_entry * fe_alloc(boolean_t can_block);

extern int flow_namespace_create(union sockaddr_in_4_6 *, uint8_t protocol,
    netns_token *, uint16_t, struct ns_flow_info *);
extern void flow_namespace_half_close(netns_token *token);
extern void flow_namespace_withdraw(netns_token *);
extern void flow_namespace_destroy(netns_token *);

extern struct flow_owner_bucket *flow_owner_buckets_alloc(size_t, size_t *, size_t *);
extern void flow_owner_buckets_free(struct flow_owner_bucket *, size_t);
extern void flow_owner_bucket_init(struct flow_owner_bucket *);
extern void flow_owner_bucket_destroy(struct flow_owner_bucket *);
extern void flow_owner_bucket_purge_all(struct flow_owner_bucket *);
extern void flow_owner_attach_nexus_port(struct flow_mgr *, boolean_t,
    pid_t, nexus_port_t);
extern uint32_t flow_owner_detach_nexus_port(struct flow_mgr *,
    boolean_t, pid_t, nexus_port_t, boolean_t);
extern struct flow_owner *flow_owner_alloc(struct flow_owner_bucket *,
    struct proc *, nexus_port_t, bool, bool, struct nx_flowswitch*,
    struct nexus_adapter *, void *, bool);
extern void flow_owner_free(struct flow_owner_bucket *, struct flow_owner *);
extern struct flow_entry *flow_owner_create_entry(struct flow_owner *,
    struct nx_flow_req *, boolean_t, uint32_t, boolean_t,
    struct flow_route *, int *);
extern int flow_owner_destroy_entry(struct flow_owner *, uuid_t, bool, void *);
extern struct flow_owner *flow_owner_find_by_pid(struct flow_owner_bucket *,
    pid_t, void *, bool);
extern int flow_owner_flowadv_index_alloc(struct flow_owner *, flowadv_idx_t *);
extern void flow_owner_flowadv_index_free(struct flow_owner *, flowadv_idx_t);
extern uint32_t flow_owner_activate_nexus_port(struct flow_mgr *,
    boolean_t, pid_t, nexus_port_t, struct nexus_adapter *,
    na_activate_mode_t);

extern struct flow_entry *flow_mgr_find_fe_by_key(struct flow_mgr *,
    struct flow_key *);
extern struct flow_entry * flow_mgr_find_conflicting_fe(struct flow_mgr *fm,
    struct flow_key *fe_key);
extern void flow_mgr_foreach_flow(struct flow_mgr *fm,
    void (^flow_handler)(struct flow_entry *fe));
extern struct flow_entry *flow_entry_find_by_uuid(struct flow_owner *,
    uuid_t);
extern struct flow_entry * flow_entry_alloc(struct flow_owner *fo,
    struct nx_flow_req *req, int *perr);
extern void flow_entry_teardown(struct flow_owner *, struct flow_entry *);
extern void flow_entry_destroy(struct flow_owner *, struct flow_entry *, bool,
    void *);
extern void flow_entry_retain(struct flow_entry *fe);
extern void flow_entry_release(struct flow_entry **pfe);
extern uint32_t flow_entry_refcnt(struct flow_entry *fe);
extern bool rx_flow_demux_match(struct nx_flowswitch *, struct flow_entry *, struct __kern_packet *);
extern struct flow_entry *rx_lookup_child_flow(struct nx_flowswitch *fsw,
    struct flow_entry *, struct __kern_packet *);
extern struct flow_entry *tx_lookup_child_flow(struct flow_entry *, uuid_t);

extern struct flow_entry_dead *flow_entry_dead_alloc(zalloc_flags_t);
extern void flow_entry_dead_free(struct flow_entry_dead *);

extern void flow_entry_stats_get(struct flow_entry *, struct sk_stats_flow *);

extern int flow_pkt_classify(struct __kern_packet *pkt, struct ifnet *ifp,
    sa_family_t af, bool input);

extern void flow_track_stats(struct flow_entry *, uint64_t, uint64_t,
    bool, bool);
extern int flow_pkt_track(struct flow_entry *, struct __kern_packet *, bool);
extern boolean_t flow_track_tcp_want_abort(struct flow_entry *);
extern void flow_track_abort_tcp(struct flow_entry *fe,
    struct __kern_packet *in_pkt, struct __kern_packet *rst_pkt);
extern void flow_track_abort_quic(struct flow_entry *fe, uint8_t *token);

extern void fsw_host_rx(struct nx_flowswitch *, struct pktq *);
extern void fsw_host_sendup(struct ifnet *, struct mbuf *, struct mbuf *,
    uint32_t, uint32_t);

extern void flow_rx_agg_tcp(struct nx_flowswitch *fsw, struct flow_entry *fe,
    uint32_t flags);

extern void flow_route_init(void);
extern void flow_route_fini(void);
extern struct flow_route_bucket *flow_route_buckets_alloc(size_t, size_t *, size_t *);
extern void flow_route_buckets_free(struct flow_route_bucket *, size_t);
extern void flow_route_bucket_init(struct flow_route_bucket *);
extern void flow_route_bucket_destroy(struct flow_route_bucket *);
extern void flow_route_bucket_purge_all(struct flow_route_bucket *);
extern struct flow_route_id_bucket *flow_route_id_buckets_alloc(size_t,
    size_t *, size_t *);
extern void flow_route_id_buckets_free(struct flow_route_id_bucket *, size_t);
extern void flow_route_id_bucket_init(struct flow_route_id_bucket *);
extern void flow_route_id_bucket_destroy(struct flow_route_id_bucket *);

extern int flow_route_select_laddr(union sockaddr_in_4_6 *,
    union sockaddr_in_4_6 *, struct ifnet *, struct rtentry *, uint32_t *, int);
extern int flow_route_find(struct kern_nexus *, struct flow_mgr *,
    struct ifnet *, struct nx_flow_req *, flow_route_ctor_fn_t,
    flow_route_resolve_fn_t, void *, struct flow_route **);
extern int flow_route_configure(struct flow_route *, struct ifnet *, struct nx_flow_req *);
extern void flow_route_retain(struct flow_route *);
extern void flow_route_release(struct flow_route *);
extern uint32_t flow_route_prune(struct flow_mgr *, struct ifnet *,
    uint32_t *);
extern void flow_route_cleanup(struct flow_route *);
extern boolean_t flow_route_laddr_validate(union sockaddr_in_4_6 *,
    struct ifnet *, uint32_t *);
extern boolean_t flow_route_key_validate(struct flow_key *, struct ifnet *,
    uint32_t *);
extern void flow_qset_select_dynamic(struct nx_flowswitch *,
    struct flow_entry *, boolean_t);
extern void flow_stats_init(void);
extern void flow_stats_fini(void);
extern struct flow_stats *flow_stats_alloc(boolean_t cansleep);

#if SK_LOG
#define FLOWKEY_DBGBUF_SIZE 256
#define FLOWENTRY_DBGBUF_SIZE 512
extern char *fk_as_string(const struct flow_key *fk, char *, size_t);
extern char *fe_as_string(const struct flow_entry *fe, char *, size_t);
#endif /* SK_LOG */
__END_DECLS
#endif /* BSD_KERNEL_PRIVATE */
#endif /* !_SKYWALK_NEXUS_FLOWSWITCH_FLOW_FLOWVAR_H_ */