/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Once a packet is classified, it goes through checks to see if there
 * is a matching flow entry in the flow table. The key used to search
 * for the entry is composed of the fields contained in struct flow_key.
 *
 * Flow entry insertion into and deletion from the flow table, on
 * behalf of the owning client process, requires the use of the rule
 * ID (UUID) as the search key.
 *
 * Because of the above, each flow entry simultaneously exists in two
 * trees: flow_entry_tree and flow_entry_id_tree.
 *
 * Using a single RW lock to protect the two trees is simple, but the
 * data path performance is impacted during flow insertion and deletion,
 * especially as the number of client processes and flows grows.
 *
 * To solve that, we deploy the following scheme:
 *
 * Given that the flow_entry_tree is searched on a per-packet basis,
 * we break it down into a series of trees, each one contained within
 * a flow_bucket structure. The hash of the flow_key determines the
 * index of the flow_bucket to search the flow_entry_tree from.
 *
 * The flow_entry_id_tree is searched on each flow insertion and
 * deletion, and similarly we break it down into a series of trees,
 * each contained within a flow_owner_bucket structure. We use the
 * client process ID (pid_t) to determine the bucket index.
 *
 * Each flow_bucket and flow_owner_bucket structure is dynamically
 * allocated and aligned on a CPU cache line boundary. The number of
 * buckets is determined by the client module at the time the flow
 * manager context is initialized. This is done to avoid false
 * sharing, especially given that each bucket has its own RW lock.
 */
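
/*
 * Illustrative sketch (not part of this header; it uses only the
 * accessors declared below): confining a search to one bucket amounts
 * to reducing the key to a bucket index first, e.g. for flow owners
 * keyed by client pid:
 *
 *    struct flow_owner_bucket *fob;
 *    struct flow_owner *fo;
 *
 *    fob = flow_mgr_get_fob_by_pid(fm, pid);
 *    FOB_LOCK(fob);
 *    fo = flow_owner_find_by_pid(fob, pid, context, low_latency);
 *    FOB_UNLOCK(fob);
 *
 * Only fob's lock is taken, so lookups against other buckets proceed
 * in parallel.
 */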

#ifndef _SKYWALK_NEXUS_FLOWSWITCH_FLOW_FLOWVAR_H_
#define _SKYWALK_NEXUS_FLOWSWITCH_FLOW_FLOWVAR_H_

#ifdef BSD_KERNEL_PRIVATE
#include <skywalk/core/skywalk_var.h>
#include <skywalk/lib/cuckoo_hashtable.h>
#include <skywalk/namespace/netns.h>
#include <skywalk/namespace/protons.h>
#include <skywalk/packet/packet_var.h>
#include <net/flowhash.h>
#include <netinet/ip.h>
#include <netinet/in_stat.h>
#include <netinet/ip6.h>
#include <sys/eventhandler.h>

RB_HEAD(flow_owner_tree, flow_owner);

struct flow_owner_bucket {
        decl_lck_mtx_data(, fob_lock);
        struct flow_owner_tree fob_owner_head;
        uint16_t fob_busy_flags;
        uint16_t fob_open_waiters;
        uint16_t fob_close_waiters;
        uint16_t fob_dtor_waiters;
        const size_t fob_idx;
};

#define FOBF_OPEN_BUSY  0x1     /* flow open monitor */
#define FOBF_CLOSE_BUSY 0x2     /* flow close monitor */
#define FOBF_DEAD       0x4     /* no longer usable */

#define FOB_LOCK(_fob)                  \
        lck_mtx_lock(&(_fob)->fob_lock)
#define FOB_LOCK_SPIN(_fob)             \
        lck_mtx_lock_spin(&(_fob)->fob_lock)
#define FOB_LOCK_CONVERT(_fob)          \
        lck_mtx_convert_spin(&(_fob)->fob_lock)
#define FOB_TRY_LOCK(_fob)              \
        lck_mtx_try_lock(&(_fob)->fob_lock)
#define FOB_LOCK_ASSERT_HELD(_fob)      \
        LCK_MTX_ASSERT(&(_fob)->fob_lock, LCK_MTX_ASSERT_OWNED)
#define FOB_LOCK_ASSERT_NOTHELD(_fob)   \
        LCK_MTX_ASSERT(&(_fob)->fob_lock, LCK_MTX_ASSERT_NOTOWNED)
#define FOB_UNLOCK(_fob)                \
        lck_mtx_unlock(&(_fob)->fob_lock)

RB_HEAD(flow_entry_id_tree, flow_entry);

#define FLOW_PROCESS_NAME_LENGTH        24

struct flow_owner {
        RB_ENTRY(flow_owner) fo_link;
        struct flow_entry_id_tree fo_flow_entry_id_head;
        const struct flow_owner_bucket *fo_bucket;
        void *fo_context;
        pid_t fo_pid;
        bool fo_nx_port_pid_bound;
        bool fo_nx_port_destroyed;
        bool fo_low_latency;
        nexus_port_t fo_nx_port;
        uuid_t fo_key;

        struct nexus_adapter * const fo_nx_port_na;
        struct nx_flowswitch * const fo_fsw;

        /*
         * Array of bitmaps to manage the flow advisory table indices.
         * Currently we are restricting a flow owner to a single nexus
         * port, so this structure is effectively managing the flow
         * advisory indices for a port; see the illustrative sketch
         * after this struct.
         */
        bitmap_t *fo_flowadv_bmap;
        uint32_t fo_flowadv_max;
        uint32_t fo_num_flowadv;

        /* for debugging */
        char fo_name[FLOW_PROCESS_NAME_LENGTH];
};
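
/*
 * Illustrative sketch (an assumption about the typical caller, using
 * the routines declared later in this header, and assuming a zero
 * return means success): a flow advisory index is taken out of
 * fo_flowadv_bmap when a flow is set up, and returned when the flow
 * is torn down:
 *
 *    flowadv_idx_t fadv_idx;
 *
 *    if (flow_owner_flowadv_index_alloc(fo, &fadv_idx) == 0) {
 *            ... publish fadv_idx to the flow entry ...
 *            flow_owner_flowadv_index_free(fo, fadv_idx);
 *    }
 */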

#define FO_BUCKET(_fo)  \
        __DECONST(struct flow_owner_bucket *, (_fo)->fo_bucket)

RB_PROTOTYPE_SC_PREV(__private_extern__, flow_owner_tree, flow_owner,
    fo_link, fo_cmp);
RB_PROTOTYPE_SC_PREV(__private_extern__, flow_entry_id_tree, flow_entry,
    fe_id_link, fe_id_cmp);

typedef enum {
        /*
         * TCP states.
         */
        FT_STATE_CLOSED = 0,            /* closed */
        FT_STATE_LISTEN,                /* listening for connection */
        FT_STATE_SYN_SENT,              /* active, have sent SYN */
        FT_STATE_SYN_RECEIVED,          /* have sent and rcvd SYN */
        FT_STATE_ESTABLISHED,           /* established */
        FT_STATE_CLOSE_WAIT,            /* rcvd FIN, waiting close */
        FT_STATE_FIN_WAIT_1,            /* have sent FIN */
        FT_STATE_CLOSING,               /* exchanged FINs, waiting FIN|ACK */
        FT_STATE_LAST_ACK,              /* rcvd FIN, closed, waiting FIN|ACK */
        FT_STATE_FIN_WAIT_2,            /* closed, FIN is ACK'd */
        FT_STATE_TIME_WAIT,             /* quiet wait after close */

        /*
         * UDP states.
         */
        FT_STATE_NO_TRAFFIC = 20,       /* no packet observed */
        FT_STATE_SINGLE,                /* single packet */
        FT_STATE_MULTIPLE,              /* multiple packets */

        FT_STATE_MAX = 255
} flow_track_state_t;

struct flow_track_rtt {
        uint64_t frtt_timestamp;        /* tracked segment timestamp */
        uint64_t frtt_last;             /* previous net_uptime() (rate limiting) */
        uint32_t frtt_seg_begin;        /* tracked segment begin SEQ */
        uint32_t frtt_seg_end;          /* tracked segment end SEQ */
        uint32_t frtt_usec;             /* avg RTT in usec */
};

#define FLOWTRACK_RTT_SAMPLE_INTERVAL   2       /* sample ACK RTT every 2 sec */
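
/*
 * Sketch of how the interval above might gate sampling (an assumption
 * about the caller, which lives outside this header; net_uptime()
 * returns seconds of uptime):
 *
 *    uint64_t now = net_uptime();
 *
 *    if (now - frtt->frtt_last >= FLOWTRACK_RTT_SAMPLE_INTERVAL) {
 *            frtt->frtt_last = now;
 *            ... start tracking a new segment for ACK RTT ...
 *    }
 */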

struct flow_track {
        /*
         * TCP specific tracking info.
         */
        uint32_t fse_seqlo;             /* max sequence number sent */
        uint32_t fse_seqhi;             /* max the other end ACKd + win */
        uint32_t fse_seqlast;           /* last sequence number (FIN) */
        uint16_t fse_max_win;           /* largest window (pre scaling) */
        uint16_t fse_mss;               /* maximum segment size option */
        uint8_t fse_state;              /* active state level (FT_STATE_*) */
        uint8_t fse_wscale;             /* window scaling factor */
        uint16_t fse_flags;             /* FLOWSTATEF_* */
        uint32_t fse_syn_ts;            /* SYN timestamp */
        uint32_t fse_syn_cnt;           /* # of SYNs per second */

        struct flow_track_rtt fse_rtt;  /* ACK RTT tracking */
#define fse_rtt_usec fse_rtt.frtt_usec
} __sk_aligned(8);

/* valid values for fse_flags */
#define FLOWSTATEF_WSCALE       0x1     /* fse_wscale is valid */

struct flow_llhdr {
        uint32_t flh_gencnt;    /* link-layer address gencnt */

        const uint8_t flh_off;
        const uint8_t flh_len;
        uint16_t flh_pad;       /* for future */

        union _flh_u {
                uint64_t _buf[2];
                struct {
                        uint16_t _eth_pad;
                        struct ether_header _eth;
                } _eth_padded;
        } __sk_aligned(8) _flh;
#define flh_eth_padded  _flh._eth_padded
#define flh_eth         _flh._eth_padded._eth
};


TAILQ_HEAD(flow_entry_list, flow_entry);

typedef void (*flow_action_t)(struct nx_flowswitch *fsw, struct flow_entry *fe);

struct flow_entry {
        /**** Common Group ****/
        os_refcnt_t fe_refcnt;
        struct flow_key fe_key;
        uint32_t fe_flags;
        uint32_t fe_key_hash;
        struct cuckoo_node fe_cnode;

        uuid_t fe_uuid __sk_aligned(8);
        nexus_port_t fe_nx_port;
        uint32_t fe_laddr_gencnt;
        uint32_t fe_want_nonviable;
        uint32_t fe_want_withdraw;
        uint8_t fe_transport_protocol;

        /**** Rx Group ****/
        uint16_t fe_rx_frag_count;
        uint32_t fe_rx_pktq_bytes;
        struct pktq fe_rx_pktq;
        TAILQ_ENTRY(flow_entry) fe_rx_link;
        flow_action_t fe_rx_process;
        uint32_t fe_rx_largest_msize;   /* used for mbuf batch allocation */
        bool fe_rx_nodelay;

        /**** Tx Group ****/
        bool fe_tx_is_cont_frag;
        uint32_t fe_tx_frag_id;
        struct pktq fe_tx_pktq;
        TAILQ_ENTRY(flow_entry) fe_tx_link;
        flow_action_t fe_tx_process;

        uuid_t fe_eproc_uuid __sk_aligned(8);
        flowadv_idx_t fe_adv_idx;
        kern_packet_svc_class_t fe_svc_class;
        uint32_t fe_policy_id;          /* policy id matched to flow */

        /**** Misc Group ****/
        struct nx_flowswitch * const fe_fsw;
        struct ns_token *fe_port_reservation;
        struct protons_token *fe_proto_reservation;
        void *fe_ipsec_reservation;

        struct flow_track fe_ltrack;    /* local endpoint state */
        struct flow_track fe_rtrack;    /* remote endpoint state */

        /*
         * Flow stats are kept externally as stand-alone objects,
         * refcnt'ed by various users (e.g. flow_entry,
         * necp_client_flow, etc.)
         */
        struct flow_stats *fe_stats;
        struct flow_route *fe_route;

        RB_ENTRY(flow_entry) fe_id_link;

        TAILQ_ENTRY(flow_entry) fe_linger_link;
        uint64_t fe_linger_expire;      /* expiration deadline */
        uint32_t fe_linger_wait;        /* linger time (seconds) */

        pid_t fe_pid;
        pid_t fe_epid;
        char fe_proc_name[FLOW_PROCESS_NAME_LENGTH];
        char fe_eproc_name[FLOW_PROCESS_NAME_LENGTH];

        uint32_t fe_inp_flowhash;       /* flowhash for looking up inpcb */

        /* Logical link related information */
        struct netif_qset *fe_qset;
};

/* valid values for fe_flags */
#define FLOWENTF_INITED         0x00000001      /* {src,dst} states initialized */
#define FLOWENTF_TRACK          0x00000010      /* enable state tracking */
#define FLOWENTF_CONNECTED      0x00000020      /* connected mode */
#define FLOWENTF_LISTENER       0x00000040      /* listener mode */
#define FLOWENTF_QOS_MARKING    0x00000100      /* flow can have qos marking */
#define FLOWENTF_LOW_LATENCY    0x00000200      /* low latency flow */
#define FLOWENTF_WAIT_CLOSE     0x00001000      /* defer free after close */
#define FLOWENTF_CLOSE_NOTIFY   0x00002000      /* notify NECP upon tear down */
#define FLOWENTF_EXTRL_PORT     0x00004000      /* port reservation is held externally */
#define FLOWENTF_EXTRL_PROTO    0x00008000      /* proto reservation is held externally */
#define FLOWENTF_ABORTED        0x01000000      /* has sent RST to peer */
#define FLOWENTF_NONVIABLE      0x02000000      /* disabled; awaiting tear down */
#define FLOWENTF_WITHDRAWN      0x04000000      /* flow has been withdrawn */
#define FLOWENTF_TORN_DOWN      0x08000000      /* torn down and awaiting destroy */
#define FLOWENTF_HALF_CLOSED    0x10000000      /* flow is half closed */
#define FLOWENTF_DESTROYED      0x40000000      /* not in RB trees anymore */
#define FLOWENTF_LINGERING      0x80000000      /* destroyed and in linger list */

#define FLOWENTF_BITS                                                     \
        "\020\01INITED\05TRACK\06CONNECTED\07LISTENER\011QOS_MARKING"     \
        "\012LOW_LATENCY\015WAIT_CLOSE\016CLOSE_NOTIFY\017EXT_PORT"       \
        "\020EXT_PROTO\031ABORTED\032NONVIABLE\033WITHDRAWN\034TORN_DOWN" \
        "\035HALF_CLOSED\037DESTROYED\040LINGERING"

TAILQ_HEAD(flow_entry_linger_head, flow_entry);

struct flow_entry_dead {
        LIST_ENTRY(flow_entry_dead) fed_link;

        boolean_t fed_want_nonviable;
        boolean_t fed_want_clonotify;

        /* rule (flow) UUID */
        union {
                uint64_t fed_uuid_64[2];
                uint32_t fed_uuid_32[4];
                uuid_t fed_uuid;
        } __sk_aligned(8);
};

/*
 * Minimum refcnt for a flow route entry to be considered idle.
 */
#define FLOW_ROUTE_MINREF       2       /* for the 2 RB trees */
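
/*
 * Sketch (an assumption about the GC logic, which lives outside this
 * header): with the two tree references always present, a flow route
 * is a pruning candidate only once no flow entry holds it and its
 * expiration deadline has passed:
 *
 *    lck_spin_lock(&fr->fr_reflock);
 *    if (fr->fr_usecnt <= FLOW_ROUTE_MINREF &&
 *        fr->fr_expire != 0 && now >= fr->fr_expire) {
 *            ... detach from both trees and free ...
 *    }
 *    lck_spin_unlock(&fr->fr_reflock);
 */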

struct flow_route {
        RB_ENTRY(flow_route) fr_link;
        RB_ENTRY(flow_route) fr_id_link;

        /*
         * fr_laddr represents the local address that the system chooses
         * for the foreign destination in fr_faddr. The flow entry that
         * is referring to this flow route object may choose a different
         * local address if it wishes.
         *
         * fr_gaddr represents the gateway address used to reach the
         * final foreign destination fr_faddr; it is valid only if the
         * destination is not directly attached (FLOWRTF_GATEWAY is set).
         *
         * The use of sockaddr for storage is for convenience; the port
         * value is not applicable for this object, as this is shared
         * among flow entries.
         */
        union sockaddr_in_4_6 fr_laddr;         /* local IP address */
        union sockaddr_in_4_6 fr_faddr;         /* remote IP address */
#define fr_af   fr_faddr.sa.sa_family
        union sockaddr_in_4_6 fr_gaddr;         /* gateway IP address */

        struct flow_llhdr fr_llhdr;
#define fr_eth_padded   fr_llhdr.flh_eth_padded
#define fr_eth          fr_llhdr.flh_eth

        /*
         * In flow_route_tree, we use the destination address as the key.
         * To speed up searches, we initialize fr_addr_key to the address
         * portion of fr_faddr depending on the address family.
         */
        void *fr_addr_key;

        /* flow route UUID */
        uuid_t fr_uuid __sk_aligned(8);

        /*
         * fr_usecnt is updated atomically; it is incremented when a
         * flow entry takes a reference on this object, and decremented
         * when the reference is released. Periodically, the flowswitch
         * instance garbage collects flow_route objects that aren't
         * being referred to by any flow entries.
         *
         * fr_expire is set when fr_usecnt reaches its minimum count,
         * and is cleared when it goes above the minimum count.
         *
         * The spin lock fr_reflock serializes accesses to both.
         */
        decl_lck_spin_data(, fr_reflock);
        uint64_t fr_expire;
        volatile uint32_t fr_usecnt;

        uint32_t fr_flags;
        uint32_t fr_laddr_gencnt;       /* local IP gencnt */
        uint32_t fr_addr_len;           /* sizeof {in,in6}_addr */

        volatile uint32_t fr_want_configure;
        volatile uint32_t fr_want_probe;

        /* lock to serialize resolver */
        decl_lck_mtx_data(, fr_lock);

        /*
         * fr_rt_dst is the route to the final destination; together
         * with fr_rt_evhdlr_tag, it is used for route event
         * registration.
         *
         * fr_rt_gw is valid only if FLOWRTF_GATEWAY is set.
         */
        eventhandler_tag fr_rt_evhdlr_tag;
        struct rtentry *fr_rt_dst;
        struct rtentry *fr_rt_gw;

        /* nexus UUID */
        uuid_t fr_nx_uuid __sk_aligned(8);

        const struct flow_mgr *fr_mgr;
        const struct flow_route_bucket *fr_frb;
        const struct flow_route_id_bucket *fr_frib;
};

/* valid values for fr_flags */
#define FLOWRTF_ATTACHED        0x00000001      /* attached to RB trees */
#define FLOWRTF_ONLINK          0x00000010      /* dst directly on the link */
#define FLOWRTF_GATEWAY         0x00000020      /* gw IP address is valid */
#define FLOWRTF_RESOLVED        0x00000040      /* flow route is resolved */
#define FLOWRTF_HAS_LLINFO      0x00000080      /* has dst link-layer address */
#define FLOWRTF_DELETED         0x00000100      /* route has been deleted */
#define FLOWRTF_DST_LL_MCAST    0x00000200      /* dst is link layer multicast */
#define FLOWRTF_DST_LL_BCAST    0x00000400      /* dst is link layer broadcast */
#define FLOWRTF_STABLE_ADDR     0x00000800      /* local address prefers stable */

#define FR_LOCK(_fr)                    \
        lck_mtx_lock(&(_fr)->fr_lock)
#define FR_TRY_LOCK(_fr)                \
        lck_mtx_try_lock(&(_fr)->fr_lock)
#define FR_LOCK_ASSERT_HELD(_fr)        \
        LCK_MTX_ASSERT(&(_fr)->fr_lock, LCK_MTX_ASSERT_OWNED)
#define FR_LOCK_ASSERT_NOTHELD(_fr)     \
        LCK_MTX_ASSERT(&(_fr)->fr_lock, LCK_MTX_ASSERT_NOTOWNED)
#define FR_UNLOCK(_fr)                  \
        lck_mtx_unlock(&(_fr)->fr_lock)

#define FLOWRT_UPD_ETH_DST(_fr, _addr) do {                             \
        bcopy((_addr), (_fr)->fr_eth.ether_dhost, ETHER_ADDR_LEN);      \
        (_fr)->fr_flags &= ~(FLOWRTF_DST_LL_MCAST|FLOWRTF_DST_LL_BCAST);\
        if (ETHER_IS_MULTICAST(_addr)) {                                \
                if (_ether_cmp(etherbroadcastaddr, (_addr)) == 0)       \
                        (_fr)->fr_flags |= FLOWRTF_DST_LL_BCAST;        \
                else                                                    \
                        (_fr)->fr_flags |= FLOWRTF_DST_LL_MCAST;        \
        }                                                               \
} while (0)
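
/*
 * Illustrative use (an assumed resolver path; the actual callers live
 * outside this header): once the next hop's link-layer address is
 * known, it is copied in under the resolver lock and the route is
 * marked resolved:
 *
 *    FR_LOCK(fr);
 *    FLOWRT_UPD_ETH_DST(fr, lladdr);
 *    fr->fr_flags |= (FLOWRTF_RESOLVED | FLOWRTF_HAS_LLINFO);
 *    FR_UNLOCK(fr);
 *
 * Note that FLOWRT_UPD_ETH_DST itself reclassifies the destination as
 * unicast, multicast or broadcast on every update.
 */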

RB_HEAD(flow_route_tree, flow_route);
RB_PROTOTYPE_SC_PREV(__private_extern__, flow_route_tree, flow_route,
    fr_link, fr_cmp);

struct flow_route_bucket {
        decl_lck_rw_data(, frb_lock);
        struct flow_route_tree frb_head;
        const uint32_t frb_idx;
};

#define FRB_WLOCK(_frb)                 \
        lck_rw_lock_exclusive(&(_frb)->frb_lock)
#define FRB_WLOCKTORLOCK(_frb)          \
        lck_rw_lock_exclusive_to_shared(&(_frb)->frb_lock)
#define FRB_WTRYLOCK(_frb)              \
        lck_rw_try_lock_exclusive(&(_frb)->frb_lock)
#define FRB_WUNLOCK(_frb)               \
        lck_rw_unlock_exclusive(&(_frb)->frb_lock)
#define FRB_RLOCK(_frb)                 \
        lck_rw_lock_shared(&(_frb)->frb_lock)
#define FRB_RLOCKTOWLOCK(_frb)          \
        lck_rw_lock_shared_to_exclusive(&(_frb)->frb_lock)
#define FRB_RTRYLOCK(_frb)              \
        lck_rw_try_lock_shared(&(_frb)->frb_lock)
#define FRB_RUNLOCK(_frb)               \
        lck_rw_unlock_shared(&(_frb)->frb_lock)
#define FRB_UNLOCK(_frb)                \
        lck_rw_done(&(_frb)->frb_lock)
#define FRB_WLOCK_ASSERT_HELD(_frb)     \
        LCK_RW_ASSERT(&(_frb)->frb_lock, LCK_RW_ASSERT_EXCLUSIVE)
#define FRB_RLOCK_ASSERT_HELD(_frb)     \
        LCK_RW_ASSERT(&(_frb)->frb_lock, LCK_RW_ASSERT_SHARED)
#define FRB_LOCK_ASSERT_HELD(_frb)      \
        LCK_RW_ASSERT(&(_frb)->frb_lock, LCK_RW_ASSERT_HELD)

RB_HEAD(flow_route_id_tree, flow_route);
RB_PROTOTYPE_SC_PREV(__private_extern__, flow_route_id_tree, flow_route,
    fr_id_link, fr_id_cmp);

struct flow_route_id_bucket {
        decl_lck_rw_data(, frib_lock);
        struct flow_route_id_tree frib_head;
        const uint32_t frib_idx;
};

#define FRIB_WLOCK(_frib)               \
        lck_rw_lock_exclusive(&(_frib)->frib_lock)
#define FRIB_WLOCKTORLOCK(_frib)        \
        lck_rw_lock_exclusive_to_shared(&(_frib)->frib_lock)
#define FRIB_WTRYLOCK(_frib)            \
        lck_rw_try_lock_exclusive(&(_frib)->frib_lock)
#define FRIB_WUNLOCK(_frib)             \
        lck_rw_unlock_exclusive(&(_frib)->frib_lock)
#define FRIB_RLOCK(_frib)               \
        lck_rw_lock_shared(&(_frib)->frib_lock)
#define FRIB_RLOCKTOWLOCK(_frib)        \
        lck_rw_lock_shared_to_exclusive(&(_frib)->frib_lock)
#define FRIB_RTRYLOCK(_frib)            \
        lck_rw_try_lock_shared(&(_frib)->frib_lock)
#define FRIB_RUNLOCK(_frib)             \
        lck_rw_unlock_shared(&(_frib)->frib_lock)
#define FRIB_UNLOCK(_frib)              \
        lck_rw_done(&(_frib)->frib_lock)
#define FRIB_WLOCK_ASSERT_HELD(_frib)   \
        LCK_RW_ASSERT(&(_frib)->frib_lock, LCK_RW_ASSERT_EXCLUSIVE)
#define FRIB_RLOCK_ASSERT_HELD(_frib)   \
        LCK_RW_ASSERT(&(_frib)->frib_lock, LCK_RW_ASSERT_SHARED)
#define FRIB_LOCK_ASSERT_HELD(_frib)    \
        LCK_RW_ASSERT(&(_frib)->frib_lock, LCK_RW_ASSERT_HELD)

struct flow_mgr {
        char fm_name[IFNAMSIZ];
        uuid_t fm_uuid;
        RB_ENTRY(flow_mgr) fm_link;

        struct cuckoo_hashtable *fm_flow_table;
        size_t fm_flow_hash_count[FKMASK_IDX_MAX];      /* # of flows with mask */
        uint16_t fm_flow_hash_masks[FKMASK_IDX_MAX];

        void *fm_owner_buckets;                 /* cache-aligned fob */
        const size_t fm_owner_buckets_cnt;      /* total # of fobs */
        const size_t fm_owner_bucket_sz;        /* size of each fob */
        const size_t fm_owner_bucket_tot_sz;    /* allocated size of each fob */

        void *fm_route_buckets;                 /* cache-aligned frb */
        const size_t fm_route_buckets_cnt;      /* total # of frb */
        const size_t fm_route_bucket_sz;        /* size of each frb */
        const size_t fm_route_bucket_tot_sz;    /* allocated size of each frb */

        void *fm_route_id_buckets;              /* cache-aligned frib */
        const size_t fm_route_id_buckets_cnt;   /* total # of frib */
        const size_t fm_route_id_bucket_sz;     /* size of each frib */
        const size_t fm_route_id_bucket_tot_sz; /* allocated size of each frib */

        struct flow_entry *fm_host_fe;
};

/*
 * This function compares @match with @key. It returns 0 as long as
 * @key (exact) matches what @match (wildcard) wants to match on, and
 * a non-zero value otherwise.
 */
static inline int
flow_key_cmp(const struct flow_key *match, const struct flow_key *key)
{
#define FK_CMP(field, mask)                                             \
        if ((match->fk_mask & mask) != 0) {                             \
                if ((key->fk_mask & mask) == 0) {                       \
                        return 1;                                       \
                }                                                       \
                int d = memcmp(&match->field, &key->field,              \
                    sizeof(match->field));                              \
                if (d != 0) {                                           \
                        return d;                                       \
                }                                                       \
        }

        FK_CMP(fk_ipver, FKMASK_IPVER);
        FK_CMP(fk_proto, FKMASK_PROTO);
        FK_CMP(fk_src, FKMASK_SRC);
        FK_CMP(fk_dst, FKMASK_DST);
        FK_CMP(fk_sport, FKMASK_SPORT);
        FK_CMP(fk_dport, FKMASK_DPORT);

        return 0;
}
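
/*
 * Example (illustrative values; the FKMASK_* composites are defined
 * elsewhere): a wildcard key that only sets the ipver, proto and
 * local port bits matches any fully-specified key that agrees on
 * those fields, regardless of addresses or remote port:
 *
 *    match.fk_mask = FKMASK_IPVER | FKMASK_PROTO | FKMASK_SPORT;
 *    key.fk_mask   = FKMASK_5TUPLE;      // same ipver/proto/sport
 *
 *    flow_key_cmp(&match, &key) == 0
 */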

/*
 * Similar to flow_key_cmp(), except that it performs the comparison
 * as a masked memory compare, using SIMD instructions where available
 * on the platform.
 */
static inline int
flow_key_cmp_mask(const struct flow_key *match,
    const struct flow_key *key, const struct flow_key *mask)
{
        _CASSERT(FLOW_KEY_LEN == 48);
        _CASSERT(FLOW_KEY_LEN == sizeof(struct flow_key));
        _CASSERT((sizeof(struct flow_entry) % 16) == 0);
        _CASSERT((offsetof(struct flow_entry, fe_key) % 16) == 0);

        return sk_memcmp_mask_48B((const uint8_t *)match,
            (const uint8_t *)key, (const uint8_t *)mask);
}
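
/*
 * Illustrative use (hypothetical caller and mask object): the mask
 * key carries all-ones bytes in the fields to compare and zeroes
 * elsewhere, so a 3-tuple lookup might read:
 *
 *    if (flow_key_cmp_mask(&fe->fe_key, &pkt_key, &fk_mask_3tuple) == 0) {
 *            ... 3-tuple match ...
 *    }
 *
 * where fk_mask_3tuple is a flow_key whose ipver, proto and local
 * port bytes are 0xff.
 */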

static inline uint32_t
flow_key_hash(const struct flow_key *key)
{
        uint32_t hash = FK_HASH_SEED;
#define FK_HASH(field, mask)                                            \
        if ((key->fk_mask & mask) != 0) {                               \
                hash = net_flowhash(&key->field, sizeof(key->field),    \
                    hash);                                              \
        }

        FK_HASH(fk_ipver, FKMASK_IPVER);
        FK_HASH(fk_proto, FKMASK_PROTO);
        FK_HASH(fk_src, FKMASK_SRC);
        FK_HASH(fk_dst, FKMASK_DST);
        FK_HASH(fk_sport, FKMASK_SPORT);
        FK_HASH(fk_dport, FKMASK_DPORT);

        return hash;
}

__attribute__((always_inline))
static inline void
flow_key_unpack(const struct flow_key *key, union sockaddr_in_4_6 *laddr,
    union sockaddr_in_4_6 *faddr, uint8_t *protocol)
{
        *protocol = key->fk_proto;
        if (key->fk_ipver == IPVERSION) {
                laddr->sa.sa_family = AF_INET;
                laddr->sin.sin_addr = key->fk_src4;
                laddr->sin.sin_port = key->fk_sport;
                faddr->sa.sa_family = AF_INET;
                faddr->sin.sin_addr = key->fk_dst4;
                faddr->sin.sin_port = key->fk_dport;
        } else if (key->fk_ipver == IPV6_VERSION) {
                laddr->sa.sa_family = AF_INET6;
                laddr->sin6.sin6_addr = key->fk_src6;
                laddr->sin6.sin6_port = key->fk_sport;
                faddr->sa.sa_family = AF_INET6;
                faddr->sin6.sin6_addr = key->fk_dst6;
                faddr->sin6.sin6_port = key->fk_dport;
        }
}

__attribute__((always_inline))
static inline int
flow_req2key(struct nx_flow_req *req, struct flow_key *key)
{
        FLOW_KEY_CLEAR(key);

        if (req->nfr_saddr.sa.sa_family == AF_INET) {
                key->fk_ipver = IPVERSION;
                key->fk_proto = req->nfr_ip_protocol;
                key->fk_mask |= FKMASK_PROTO;
                if (sk_sa_has_addr(SA(&req->nfr_saddr))) {
                        key->fk_src4 = req->nfr_saddr.sin.sin_addr;
                        key->fk_mask |= (FKMASK_IPVER | FKMASK_SRC);
                }
                if (sk_sa_has_addr(SA(&req->nfr_daddr))) {
                        key->fk_dst4 = req->nfr_daddr.sin.sin_addr;
                        key->fk_mask |= (FKMASK_IPVER | FKMASK_DST);
                }
                if (sk_sa_has_port(SA(&req->nfr_saddr))) {
                        key->fk_sport = req->nfr_saddr.sin.sin_port;
                        key->fk_mask |= FKMASK_SPORT;
                }
                if (sk_sa_has_port(SA(&req->nfr_daddr))) {
                        key->fk_dport = req->nfr_daddr.sin.sin_port;
                        key->fk_mask |= FKMASK_DPORT;
                }
        } else if (req->nfr_saddr.sa.sa_family == AF_INET6) {
                key->fk_ipver = IPV6_VERSION;
                key->fk_proto = req->nfr_ip_protocol;
                key->fk_mask |= FKMASK_PROTO;
                if (sk_sa_has_addr(SA(&req->nfr_saddr))) {
                        key->fk_src6 = req->nfr_saddr.sin6.sin6_addr;
                        key->fk_mask |= (FKMASK_IPVER | FKMASK_SRC);
                }
                if (sk_sa_has_addr(SA(&req->nfr_daddr))) {
                        key->fk_dst6 = req->nfr_daddr.sin6.sin6_addr;
                        key->fk_mask |= (FKMASK_IPVER | FKMASK_DST);
                }
                if (sk_sa_has_port(SA(&req->nfr_saddr))) {
                        key->fk_sport = req->nfr_saddr.sin6.sin6_port;
                        key->fk_mask |= FKMASK_SPORT;
                }
                if (sk_sa_has_port(SA(&req->nfr_daddr))) {
                        key->fk_dport = req->nfr_daddr.sin6.sin6_port;
                        key->fk_mask |= FKMASK_DPORT;
                }
        } else {
                SK_ERR("unknown AF %d", req->nfr_saddr.sa.sa_family);
                return ENOTSUP;
        }

        switch (key->fk_mask) {
        case FKMASK_5TUPLE:
        case FKMASK_4TUPLE:
        case FKMASK_3TUPLE:
        case FKMASK_2TUPLE:
        case FKMASK_IPFLOW3:
        case FKMASK_IPFLOW2:
        case FKMASK_IPFLOW1:
                break;
        default:
                SK_ERR("unknown flow key mask 0x%04x", key->fk_mask);
                return ENOTSUP;
        }

        return 0;
}

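/*
 * Note (descriptive, derived from the code below): for input packets
 * the source/destination fields are swapped while forming the key, so
 * fk_src/fk_sport always describe the local endpoint and
 * fk_dst/fk_dport the remote one, regardless of packet direction.
 */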
__attribute__((always_inline))
static inline void
flow_pkt2key(struct __kern_packet *pkt, boolean_t input,
    struct flow_key *key)
{
        struct __flow *flow = pkt->pkt_flow;

        FLOW_KEY_CLEAR(key);

        if (__improbable((pkt->pkt_qum_qflags & QUM_F_FLOW_CLASSIFIED) == 0)) {
                return;
        }

        ASSERT(flow->flow_l3._l3_ip_ver != 0);

        key->fk_ipver = flow->flow_l3._l3_ip_ver;
        key->fk_proto = flow->flow_ip_proto;
        if (input) {
                if (flow->flow_ip_ver == IPVERSION) {
                        key->fk_src4 = flow->flow_ipv4_dst;
                        key->fk_sport = flow->flow_tcp_dst;
                        key->fk_dst4 = flow->flow_ipv4_src;
                        key->fk_dport = flow->flow_tcp_src;
                } else {
                        key->fk_src6 = flow->flow_ipv6_dst;
                        key->fk_sport = flow->flow_tcp_dst;
                        key->fk_dst6 = flow->flow_ipv6_src;
                        key->fk_dport = flow->flow_tcp_src;
                }
        } else {
                if (flow->flow_ip_ver == IPVERSION) {
                        key->fk_src4 = flow->flow_ipv4_src;
                        key->fk_sport = flow->flow_tcp_src;
                        key->fk_dst4 = flow->flow_ipv4_dst;
                        key->fk_dport = flow->flow_tcp_dst;
                } else {
                        key->fk_src6 = flow->flow_ipv6_src;
                        key->fk_sport = flow->flow_tcp_src;
                        key->fk_dst6 = flow->flow_ipv6_dst;
                        key->fk_dport = flow->flow_tcp_dst;
                }
        }
}

__attribute__((always_inline))
static inline int
flow_ip_cmp(const void *a0, const void *b0, size_t alen)
{
        struct flow_ip_addr *a = __DECONST(struct flow_ip_addr *, a0),
            *b = __DECONST(struct flow_ip_addr *, b0);

        switch (alen) {
        case sizeof(struct in_addr):
                if (a->_addr32[0] > b->_addr32[0]) {
                        return 1;
                }
                if (a->_addr32[0] < b->_addr32[0]) {
                        return -1;
                }
                break;

        case sizeof(struct in6_addr):
                if (a->_addr64[1] > b->_addr64[1]) {
                        return 1;
                }
                if (a->_addr64[1] < b->_addr64[1]) {
                        return -1;
                }
                if (a->_addr64[0] > b->_addr64[0]) {
                        return 1;
                }
                if (a->_addr64[0] < b->_addr64[0]) {
                        return -1;
                }
                break;

        default:
                VERIFY(0);
                /* NOTREACHED */
                __builtin_unreachable();
        }
        return 0;
}

__attribute__((always_inline))
static inline struct flow_owner_bucket *
flow_mgr_get_fob_at_idx(struct flow_mgr *fm, uint32_t idx)
{
        return (struct flow_owner_bucket *)(void *)
               ((intptr_t)fm->fm_owner_buckets +
               (idx * fm->fm_owner_bucket_sz));
}

__attribute__((always_inline))
static inline struct flow_route_bucket *
flow_mgr_get_frb_at_idx(struct flow_mgr *fm, uint32_t idx)
{
        return (struct flow_route_bucket *)(void *)
               ((intptr_t)fm->fm_route_buckets +
               (idx * fm->fm_route_bucket_sz));
}

__attribute__((always_inline))
static inline struct flow_route_id_bucket *
flow_mgr_get_frib_at_idx(struct flow_mgr *fm, uint32_t idx)
{
        return (struct flow_route_id_bucket *)(void *)
               ((intptr_t)fm->fm_route_id_buckets +
               (idx * fm->fm_route_id_bucket_sz));
}

__attribute__((always_inline))
static inline uint32_t
flow_mgr_get_fob_idx(struct flow_mgr *fm,
    struct flow_owner_bucket *bkt)
{
        ASSERT(((intptr_t)bkt - (intptr_t)fm->fm_owner_buckets) %
            fm->fm_owner_bucket_sz == 0);
        return (uint32_t)(((intptr_t)bkt - (intptr_t)fm->fm_owner_buckets) /
               fm->fm_owner_bucket_sz);
}

extern unsigned int sk_fo_size;
extern struct skmem_cache *sk_fo_cache;

extern unsigned int sk_fe_size;
extern struct skmem_cache *sk_fe_cache;

extern unsigned int sk_fab_size;
extern struct skmem_cache *sk_fab_cache;

extern uint32_t flow_seed;

extern struct skmem_cache *flow_route_cache;
extern struct skmem_cache *flow_stats_cache;

__BEGIN_DECLS

typedef void (*flow_route_ctor_fn_t)(void *arg, struct flow_route *);
typedef int (*flow_route_resolve_fn_t)(void *arg, struct flow_route *,
    struct __kern_packet *);

extern int flow_init(void);
extern void flow_fini(void);

extern void flow_mgr_init(void);
extern void flow_mgr_fini(void);
extern struct flow_mgr *flow_mgr_find_lock(uuid_t);
extern void flow_mgr_unlock(void);
extern struct flow_mgr *flow_mgr_create(size_t, size_t, size_t, size_t);
extern void flow_mgr_destroy(struct flow_mgr *);
extern void flow_mgr_terminate(struct flow_mgr *);
extern int flow_mgr_flow_add(struct kern_nexus *nx, struct flow_mgr *fm,
    struct flow_owner *fo, struct ifnet *ifp, struct nx_flow_req *req,
    flow_route_ctor_fn_t fr_ctor, flow_route_resolve_fn_t fr_resolve,
    void *fr_arg);
extern struct flow_owner_bucket *flow_mgr_get_fob_by_pid(
    struct flow_mgr *, pid_t);
extern struct flow_entry *flow_mgr_get_fe_by_uuid_rlock(
    struct flow_mgr *, uuid_t);
extern struct flow_route_bucket *flow_mgr_get_frb_by_addr(
    struct flow_mgr *, union sockaddr_in_4_6 *);
extern struct flow_route_id_bucket *flow_mgr_get_frib_by_uuid(
    struct flow_mgr *, uuid_t);
extern int flow_mgr_flow_hash_mask_add(struct flow_mgr *fm, uint32_t mask);
extern int flow_mgr_flow_hash_mask_del(struct flow_mgr *fm, uint32_t mask);

extern struct flow_entry *fe_alloc(boolean_t can_block);
extern void flow_mgr_setup_host_flow(struct flow_mgr *fm,
    struct nx_flowswitch *fsw);
extern void flow_mgr_teardown_host_flow(struct flow_mgr *fm);

extern int flow_namespace_create(union sockaddr_in_4_6 *, uint8_t protocol,
    netns_token *, uint16_t, struct ns_flow_info *);
extern void flow_namespace_half_close(netns_token *token);
extern void flow_namespace_withdraw(netns_token *);
extern void flow_namespace_destroy(netns_token *);

extern struct flow_owner_bucket *flow_owner_buckets_alloc(size_t, size_t *,
    size_t *);
extern void flow_owner_buckets_free(struct flow_owner_bucket *, size_t);
extern void flow_owner_bucket_init(struct flow_owner_bucket *);
extern void flow_owner_bucket_destroy(struct flow_owner_bucket *);
extern void flow_owner_bucket_purge_all(struct flow_owner_bucket *);
extern void flow_owner_attach_nexus_port(struct flow_mgr *, boolean_t,
    pid_t, nexus_port_t);
extern uint32_t flow_owner_detach_nexus_port(struct flow_mgr *,
    boolean_t, pid_t, nexus_port_t, boolean_t);
extern struct flow_owner *flow_owner_alloc(struct flow_owner_bucket *,
    struct proc *, nexus_port_t, bool, bool, struct nx_flowswitch *,
    struct nexus_adapter *, void *, bool);
extern void flow_owner_free(struct flow_owner_bucket *, struct flow_owner *);
extern struct flow_entry *flow_owner_create_entry(struct flow_owner *,
    struct nx_flow_req *, boolean_t, uint32_t, boolean_t,
    struct flow_route *, int *);
extern int flow_owner_destroy_entry(struct flow_owner *, uuid_t, bool, void *);
extern struct flow_owner *flow_owner_find_by_pid(struct flow_owner_bucket *,
    pid_t, void *, bool);
extern int flow_owner_flowadv_index_alloc(struct flow_owner *, flowadv_idx_t *);
extern void flow_owner_flowadv_index_free(struct flow_owner *, flowadv_idx_t);
extern uint32_t flow_owner_activate_nexus_port(struct flow_mgr *,
    boolean_t, pid_t, nexus_port_t, struct nexus_adapter *,
    na_activate_mode_t);

extern struct flow_entry *flow_mgr_find_fe_by_key(struct flow_mgr *,
    struct flow_key *);
extern struct flow_entry *flow_mgr_find_conflicting_fe(struct flow_mgr *fm,
    struct flow_key *fe_key);
extern void flow_mgr_foreach_flow(struct flow_mgr *fm,
    void (^flow_handler)(struct flow_entry *fe));
extern struct flow_entry *flow_mgr_get_host_fe(struct flow_mgr *fm);
extern struct flow_entry *flow_entry_find_by_uuid(struct flow_owner *,
    uuid_t);
extern struct flow_entry *flow_entry_alloc(struct flow_owner *fo,
    struct nx_flow_req *req, int *perr);
extern void flow_entry_teardown(struct flow_owner *, struct flow_entry *);
extern void flow_entry_destroy(struct flow_owner *, struct flow_entry *, bool,
    void *);
extern void flow_entry_retain(struct flow_entry *fe);
extern void flow_entry_release(struct flow_entry **pfe);
extern uint32_t flow_entry_refcnt(struct flow_entry *fe);

extern struct flow_entry_dead *flow_entry_dead_alloc(zalloc_flags_t);
extern void flow_entry_dead_free(struct flow_entry_dead *);

extern void flow_entry_stats_get(struct flow_entry *, struct sk_stats_flow *);

extern int flow_pkt_classify(struct __kern_packet *pkt, struct ifnet *ifp,
    sa_family_t af, bool input);

extern void flow_track_stats(struct flow_entry *, uint64_t, uint64_t,
    bool, bool);
extern int flow_pkt_track(struct flow_entry *, struct __kern_packet *, bool);
extern boolean_t flow_track_tcp_want_abort(struct flow_entry *);
extern void fsw_host_rx(struct nx_flowswitch *, struct flow_entry *);
extern void fsw_host_sendup(struct ifnet *, struct mbuf *, struct mbuf *,
    uint32_t, uint32_t);

extern void flow_rx_agg_tcp(struct nx_flowswitch *fsw, struct flow_entry *fe);

extern void flow_route_init(void);
extern void flow_route_fini(void);
extern struct flow_route_bucket *flow_route_buckets_alloc(size_t, size_t *,
    size_t *);
extern void flow_route_buckets_free(struct flow_route_bucket *, size_t);
extern void flow_route_bucket_init(struct flow_route_bucket *);
extern void flow_route_bucket_destroy(struct flow_route_bucket *);
extern void flow_route_bucket_purge_all(struct flow_route_bucket *);
extern struct flow_route_id_bucket *flow_route_id_buckets_alloc(size_t,
    size_t *, size_t *);
extern void flow_route_id_buckets_free(struct flow_route_id_bucket *, size_t);
extern void flow_route_id_bucket_init(struct flow_route_id_bucket *);
extern void flow_route_id_bucket_destroy(struct flow_route_id_bucket *);

extern int flow_route_select_laddr(union sockaddr_in_4_6 *,
    union sockaddr_in_4_6 *, struct ifnet *, struct rtentry *, uint32_t *,
    int);
extern int flow_route_find(struct kern_nexus *, struct flow_mgr *,
    struct ifnet *, struct nx_flow_req *, flow_route_ctor_fn_t,
    flow_route_resolve_fn_t, void *, struct flow_route **);
extern int flow_route_configure(struct flow_route *, struct ifnet *,
    struct nx_flow_req *);
extern void flow_route_retain(struct flow_route *);
extern void flow_route_release(struct flow_route *);
extern uint32_t flow_route_prune(struct flow_mgr *, struct ifnet *,
    uint32_t *);
extern void flow_route_cleanup(struct flow_route *);
extern boolean_t flow_route_laddr_validate(union sockaddr_in_4_6 *,
    struct ifnet *, uint32_t *);
extern boolean_t flow_route_key_validate(struct flow_key *, struct ifnet *,
    uint32_t *);

extern void flow_stats_init(void);
extern void flow_stats_fini(void);
extern struct flow_stats *flow_stats_alloc(boolean_t cansleep);

#if SK_LOG
#define FLOWKEY_DBGBUF_SIZE     256
#define FLOWENTRY_DBGBUF_SIZE   512
extern char *fk_as_string(const struct flow_key *fk, char *, size_t);
extern char *fe_as_string(const struct flow_entry *fe, char *, size_t);
#endif /* SK_LOG */
__END_DECLS
#endif /* BSD_KERNEL_PRIVATE */
#endif /* !_SKYWALK_NEXUS_FLOWSWITCH_FLOW_FLOWVAR_H_ */