/*
 * Copyright (c) 2015-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SKYWALK_NEXUS_FLOWSWITCH_H_
#define _SKYWALK_NEXUS_FLOWSWITCH_H_

#include <skywalk/os_skywalk_private.h>
#include <net/ethernet.h>
#include <net/if_vlan_var.h>
#include <netinet/ip6.h>

#include <skywalk/nexus/flowswitch/flow/flow_var.h>

#if CONFIG_NEXUS_FLOWSWITCH
/* Shared declarations for the flow switch. */
#define NX_FSW_NAME		"fsw"	/* prefix for flow switch port name */

/* ring geometry defaults */
#define NX_FSW_MAXRINGS		NX_MAX_NUM_RING_PAIR
#define NX_FSW_TXRINGSIZE	256	/* default TX ring size */
#define NX_FSW_RXRINGSIZE	1024	/* default RX ring size */
#define NX_FSW_AFRINGSIZE	256	/* default Alloc/Free ring size */

/*
 * Virtual ports are allocated in chunks of NX_FSW_CHUNK; the free map
 * for a chunk is a 64-bit bitmap (all-ones == entirely free).
 */
#define NX_FSW_CHUNK		64	/* port chunk */
#define NX_FSW_CHUNK_FREE	0xffffffffffffffff /* entire chunk is free */

#define NX_FSW_VP_MIN		NX_FSW_CHUNK
#define NX_FSW_VP_MAX		4096	/* up to 4k ports */
#define NX_FSW_VP_NOPORT	(NX_FSW_VP_MAX+1)	/* sentinel: no port */
#define NX_FSW_VP_BROADCAST	NX_FSW_VP_MAX		/* broadcast pseudo-port */

#define NX_FSW_MINSLOTS		2	/* XXX unclear how many */
#define NX_FSW_MAXSLOTS		NX_MAX_NUM_SLOT_PER_RING /* max # of slots */

#define NX_FSW_TXBATCH		64	/* default TX batch size */
#if !XNU_TARGET_OS_OSX
#define NX_FSW_RXBATCH		64	/* default RX batch size */
#else /* XNU_TARGET_OS_OSX */
#define NX_FSW_RXBATCH		32	/* default RX batch size */
#endif /* XNU_TARGET_OS_OSX */

#define NX_FSW_BUFSIZE		(2 * 1024)	/* default buffer size */

#define NX_FSW_MINBUFSIZE	512		/* min buffer size */
#define NX_FSW_MAXBUFSIZE	(32 * 1024)	/* max buffer size */
#define NX_FSW_MAXBUFFERS	(4 * 1024)	/* max number of buffers */
/* max number of buffers for memory constrained device */
#define NX_FSW_MAXBUFFERS_MEM_CONSTRAINED	(2 * 1024)
/* default user buffer segment size for non memory-constrained device */
#define NX_FSW_BUF_SEG_SIZE	(32 * 1024)

/* {min, def, max} values for large buffer size */
#define NX_FSW_MIN_LARGE_BUFSIZE	0
#define NX_FSW_DEF_LARGE_BUFSIZE	(16 * 1024)
#define NX_FSW_MAX_LARGE_BUFSIZE	(32 * 1024)

/*
 * TODO: [email protected] -- minimum buflets for now; we will need to
 * have a way to adjust this based on the underlying interface's
 * parameters, e.g. jumbo MTU, large segment offload, etc.
 */
#define NX_FSW_UMD_SIZE	_USER_PACKET_SIZE(BUFLETS_MIN)
#define NX_FSW_KMD_SIZE	_KERN_PACKET_SIZE(BUFLETS_MIN)

#define NX_FSW_EVENT_RING_NUM	1	/* number of event rings */
#define NX_FSW_EVENT_RING_SIZE	32	/* default event ring size */


/*
 * macro to check if TCP RX aggregation is enabled in flowswitch.
 */
#define NX_FSW_TCP_RX_AGG_ENABLED()	(sk_fsw_rx_agg_tcp != 0)

struct nx_flowswitch;

/*
 * Virtual port nexus adapter.
 */
struct nexus_vp_adapter {
	/*
	 * This is an overlay structure on nexus_adapter;
	 * make sure it contains 'up' as the first member.
	 */
	struct nexus_adapter vpna_up;

	/*
	 * Flow switch support:
	 *
	 * If the adapter is associated with a nexus port, vpna_fsw points
	 * to the flow switch this NA is attached to; vpna_nx_port is the
	 * port number used in the flow switch. Otherwise, vpna_fsw would
	 * be NULL and vpna_nx_port would be NEXUS_PORT_ANY.
	 */
	struct nx_flowswitch *vpna_fsw;
	nexus_port_t vpna_nx_port;
	uint16_t vpna_gencnt;
	boolean_t vpna_retry;
	boolean_t vpna_pid_bound;	/* NOTE(review): presumably port is bound to vpna_pid — confirm */
	boolean_t vpna_defunct;
	pid_t vpna_pid;
};

/* cast a generic nexus_adapter to its virtual-port overlay */
#define VPNA(_na)	((struct nexus_vp_adapter *)(_na))

#define NEXUS_PROVIDER_FLOW_SWITCH	"com.apple.nexus.flowswitch"

/* fsw_state_flags */
#define FSW_STATEF_QUIESCED		0x0001
#define FSW_STATEF_NETAGENT_ADDED	0x0002
#define FSW_STATEF_NETAGENT_ENABLED	0x0004

#define FSW_QUIESCED(_fsw)	\
	(((_fsw)->fsw_state_flags & FSW_STATEF_QUIESCED) != 0)

#define FSW_NETAGENT_ADDED(_fsw)	\
	(((_fsw)->fsw_state_flags & FSW_STATEF_NETAGENT_ADDED) != 0)

#define FSW_NETAGENT_ENABLED(_fsw)	\
	(((_fsw)->fsw_state_flags & FSW_STATEF_NETAGENT_ENABLED) != 0)

#if (DEVELOPMENT || DEBUG)
#define FSW_RPS_MAX_NTHREADS	64	/* cap on RPS worker threads */
/* per-thread state for receive packet steering (DEVELOPMENT/DEBUG only) */
struct fsw_rps_thread {
	lck_mtx_t frt_lock;
	struct nx_flowswitch *frt_fsw;	/* owning flow switch */
	struct thread *frt_thread;
	struct pktq frt_pktq;

	uint32_t frt_idx;
	uint32_t frt_flags;		/* FRT_* flags below */
	uint32_t frt_requests;
};

#define FRT_RUNNING		0x00000001
#define FRT_TERMINATEBLOCK	0x10000000
#define FRT_TERMINATING		0x20000000
#define FRT_TERMINATED		0x40000000
#endif /* !DEVELOPMENT && !DEBUG */

/*
 * nx_flowswitch is a descriptor for a flow switch instance.
 * Interfaces for a flow switch are all in fsw_ports[].
 * The array has fixed size, an empty entry does not terminate
 * the search, but lookups only occur on attach/detach so we
 * don't mind if they are slow.
 *
 * The flow switch is non blocking on the transmit ports: excess
 * packets are dropped if there is no room on the output port.
 *
 * fsw_lock protects accesses to the fsw_ports array.
 * This is a rw lock (or equivalent).
 */
struct nx_flowswitch {
	decl_lck_rw_data(, fsw_lock);
	uint32_t fsw_tx_rings;
	uint32_t fsw_rx_rings;

	struct kern_nexus *fsw_nx;

	/* packet type enqueued by the class queues */
	classq_pkt_type_t fsw_classq_enq_ptype;
	boolean_t fsw_classq_enabled;

	/* packet copy routines */
	pkt_copy_from_pkt_t *fsw_pkt_copy_from_pkt;
	pkt_copy_from_mbuf_t *fsw_pkt_copy_from_mbuf;
	pkt_copy_to_mbuf_t *fsw_pkt_copy_to_mbuf;

	uint8_t fsw_frame_headroom;
	uint32_t fsw_src_lla_gencnt;
	uint32_t fsw_pending_nonviable;
	uint32_t fsw_low_power_gencnt;

	/* The following are protected by fsw_lock. */
	struct flow_mgr *fsw_flow_mgr;
	netagent_session_t fsw_agent_session;
	uuid_t fsw_agent_uuid;
	struct ifnet *fsw_ifp;		/* host interface */
	struct nexus_adapter *fsw_nifna; /* netif adapter */
	uint32_t fsw_state_flags;	/* FSW_STATEF_* */


	/*
	 * Source link-layer address, stored in a uint64_t-aligned union so
	 * it can be accessed either as raw 64-bit words (fsw_slla) or as an
	 * Ethernet source address (fsw_ether_shost).
	 */
	union {
		uint64_t _buf[1];
		uint8_t _eth_src[ETHER_ADDR_LEN];
	} __fsw_slladdr __attribute((aligned(sizeof(uint64_t))));

#define fsw_slla	__fsw_slladdr._buf
#define fsw_ether_shost	__fsw_slladdr._eth_src

	/* per-instance hooks for address resolution, framing and demux */
	int (*fsw_resolve)(struct nx_flowswitch *, struct flow_route *,
	    struct __kern_packet *);
	void (*fsw_frame)(struct nx_flowswitch *, struct flow_route *,
	    struct __kern_packet *);
	sa_family_t (*fsw_demux)(struct nx_flowswitch *,
	    struct __kern_packet *);

	struct fsw_stats fsw_stats;

	/*
	 * The host interface attachment to the flowswitch (fsw_ifp), as well
	 * as the netagent registration, are guarded by the flowswitch's RW
	 * lock. During fsw_flow_bind() time, we need to make sure they are
	 * valid before proceeding forward, but holding that RW lock across
	 * the routine is not possible since the thread may block if there
	 * are other threads performing fsw_flow_{bind,unbind} on the same
	 * flow owner bucket. To prevent fsw_dtor() from happening while
	 * fsw_flow_bind() is in progress, we need to have it wait until all
	 * pending flow binds are done. To do this we add a busy counter
	 * incremented at flow bind time, and use the lock for synchronization.
	 */
	decl_lck_mtx_data(, fsw_detach_barrier_lock);
	uint32_t fsw_detach_flags;	/* see fsw_DETACHF_* */
	uint32_t fsw_detach_barriers;
	uint32_t fsw_detach_waiters;

	uint32_t fsw_ifp_dlt;

	/*
	 * largest allocated packet size.
	 * used by: mbuf batch allocation logic during netif copy.
	 */
	uint32_t fsw_rx_largest_size;

	void (*fsw_ctor)(struct nx_flowswitch *, struct flow_route *);

	/* store stats from na that is going to be deactivated */
	struct __nx_stats_fsw *fsw_closed_na_stats;

	/* ip fragments manager */
	struct fsw_ip_frag_mgr *fsw_ipfm;

	struct skoid fsw_skoid;

	/* input network emulator */
	struct netem *fsw_input_netem;

	struct kern_channel *fsw_dev_ch;
	struct kern_channel *fsw_host_ch;

	/*
	 * The reaper thread gets scheduled on-demand, whenever there
	 * is any lingering flow entry needing to be freed or becoming
	 * nonviable. Access is protected by fsw_reap_lock.
	 */
	decl_lck_mtx_data(, fsw_reap_lock);
	uint32_t fsw_reap_flags;	/* see fsw_REAPF_* */
	uint32_t fsw_reap_requests;
	struct thread *fsw_reap_thread;
	char fsw_reap_name[MAXTHREADNAMESIZE];

	uint64_t fsw_reap_last;
	uint64_t fsw_drain_channel_chk_last;
	uint64_t fsw_drain_netif_chk_last;

	/* lingering flow entries, guarded by fsw_linger_lock */
	decl_lck_mtx_data(, fsw_linger_lock);
	struct flow_entry_linger_head fsw_linger_head;
	uint32_t fsw_linger_cnt;

#if (DEVELOPMENT || DEBUG)
	uint32_t fsw_rps_nthreads;
	struct fsw_rps_thread *fsw_rps_threads;
#endif /* !DEVELOPMENT && !DEBUG */
};

/* retrieve the flow switch instance hanging off a kern_nexus */
#define NX_FSW_PRIVATE(_nx)	((struct nx_flowswitch *)(_nx)->nx_arg)

/* convenience wrappers around the per-instance fsw_lock RW lock */
#define FSW_RWINIT(_fsw)	\
	lck_rw_init(&(_fsw)->fsw_lock, &nexus_lock_group, &nexus_lock_attr)
#define FSW_WLOCK(_fsw)		\
	lck_rw_lock_exclusive(&(_fsw)->fsw_lock)
#define FSW_WUNLOCK(_fsw)	\
	lck_rw_unlock_exclusive(&(_fsw)->fsw_lock)
#define FSW_WLOCKTORLOCK(_fsw)	\
	lck_rw_lock_exclusive_to_shared(&(_fsw)->fsw_lock)
#define FSW_RLOCK(_fsw)		\
	lck_rw_lock_shared(&(_fsw)->fsw_lock)
#define FSW_RLOCKTOWLOCK(_fsw)	\
	lck_rw_lock_shared_to_exclusive(&(_fsw)->fsw_lock)
#define FSW_RTRYLOCK(_fsw)	\
	lck_rw_try_lock(&(_fsw)->fsw_lock, LCK_RW_TYPE_SHARED)
#define FSW_RUNLOCK(_fsw)	\
	lck_rw_unlock_shared(&(_fsw)->fsw_lock)
#define FSW_UNLOCK(_fsw)	\
	lck_rw_done(&(_fsw)->fsw_lock)
#define FSW_RWDESTROY(_fsw)	\
	lck_rw_destroy(&(_fsw)->fsw_lock, &nexus_lock_group)
#define FSW_WLOCK_ASSERT_HELD(_fsw)	\
	LCK_RW_ASSERT(&(_fsw)->fsw_lock, LCK_RW_ASSERT_EXCLUSIVE)
#define FSW_RLOCK_ASSERT_HELD(_fsw)	\
	LCK_RW_ASSERT(&(_fsw)->fsw_lock, LCK_RW_ASSERT_SHARED)
#define FSW_LOCK_ASSERT_HELD(_fsw)	\
	LCK_RW_ASSERT(&(_fsw)->fsw_lock, LCK_RW_ASSERT_HELD)

extern struct nxdom nx_flowswitch_dom_s;
extern struct kern_nexus_domain_provider nx_fsw_prov_s;

SYSCTL_DECL(_kern_skywalk_flowswitch);

/* functions used by external modules to interface with flow switch */
__BEGIN_DECLS
extern int nx_fsw_na_find(struct kern_nexus *, struct kern_channel *,
    struct chreq *, struct nxbind *, struct proc *, struct nexus_adapter **,
    boolean_t);
extern boolean_t nx_fsw_dom_port_is_reserved(struct kern_nexus *nx,
    nexus_port_t nx_port);
extern int nx_fsw_netagent_add(struct kern_nexus *nx);
extern int nx_fsw_netagent_remove(struct kern_nexus *nx);
extern void nx_fsw_netagent_update(struct kern_nexus *nx);
extern void fsw_devna_rx(struct nexus_adapter *, struct __kern_packet *,
    struct nexus_pkt_stats *);
extern struct nx_flowswitch *fsw_ifp_to_fsw(struct ifnet *);

__END_DECLS
#endif /* CONFIG_NEXUS_FLOWSWITCH */
#endif /* _SKYWALK_NEXUS_FLOWSWITCH_H_ */