Home
last modified time | relevance | path

Searched refs: nif (Results 1–15 of 15) sorted by relevance

/xnu-8019.80.24/bsd/skywalk/nexus/netif/
H A Dnx_netif_filter.c83 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_filter_inject() local
84 struct netif_stats *nifs = &nif->nif_stats; in nx_netif_filter_inject()
90 lck_mtx_lock(&nif->nif_filter_lock); in nx_netif_filter_inject()
91 if ((nif->nif_filter_flags & NETIF_FILTER_FLAG_ENABLED) == 0) { in nx_netif_filter_inject()
110 f = STAILQ_FIRST(&nif->nif_filter_list); in nx_netif_filter_inject()
129 lck_mtx_unlock(&nif->nif_filter_lock); in nx_netif_filter_inject()
166 lck_mtx_lock(&nif->nif_filter_lock); in nx_netif_filter_inject()
172 lck_mtx_unlock(&nif->nif_filter_lock); in nx_netif_filter_inject()
177 nx_netif_filter_add(struct nx_netif *nif, nexus_port_t port, void *cb_arg, in nx_netif_filter_add() argument
182 struct netif_stats *nifs = &nif->nif_stats; in nx_netif_filter_add()
[all …]
H A Dnx_netif_flow.c352 nx_netif_flow_deliver(struct nx_netif *nif, struct netif_flow *f, in nx_netif_flow_deliver() argument
355 #pragma unused(nif) in nx_netif_flow_deliver()
360 nx_netif_snoop(struct nx_netif *nif, struct __kern_packet *pkt, in nx_netif_snoop() argument
364 if (!NETIF_IS_LOW_LATENCY(nif)) { in nx_netif_snoop()
368 pktap_input_packet(nif->nif_ifp, AF_INET6, DLT_EN10MB, in nx_netif_snoop()
372 pktap_output_packet(nif->nif_ifp, AF_INET6, DLT_EN10MB, in nx_netif_snoop()
384 nx_netif_validate_macaddr(struct nx_netif *nif, struct __kern_packet *pkt, in nx_netif_validate_macaddr() argument
387 struct netif_stats *nifs = &nif->nif_stats; in nx_netif_validate_macaddr()
388 struct ifnet *ifp = nif->nif_ifp; in nx_netif_validate_macaddr()
401 DTRACE_SKYWALK2(bad__pkt__sz, struct nx_netif *, nif, in nx_netif_validate_macaddr()
[all …]
H A Dnx_netif_netagent.c46 get_mac_addr(struct nx_netif *nif, struct ether_addr *addr) in get_mac_addr() argument
48 struct ifnet *ifp = nif->nif_ifp; in get_mac_addr()
69 get_ipv6_ula(struct nx_netif *nif, struct in6_addr *addr) in get_ipv6_ula() argument
79 err = get_mac_addr(nif, &ether_addr); in get_ipv6_ula()
139 get_ipv6_sockaddr(struct nx_netif *nif, struct sockaddr_in6 *sin6) in get_ipv6_sockaddr() argument
145 err = get_ipv6_ula(nif, &sin6->sin6_addr); in get_ipv6_sockaddr()
244 nx_netif_netagent_fill_port_info(struct nx_netif *nif, struct nx_flow_req *nfr, in nx_netif_netagent_fill_port_info() argument
247 #pragma unused(nif) in nx_netif_netagent_fill_port_info()
250 struct netif_stats *nifs = &nif->nif_stats; in nx_netif_netagent_fill_port_info()
300 nx_netif_netagent_flow_bind(struct nx_netif *nif, struct nx_flow_req *nfr) in nx_netif_netagent_flow_bind() argument
[all …]
H A Dnx_netif_llink.c328 nx_netif_generate_internal_llink_id(struct nx_netif *nif) in nx_netif_generate_internal_llink_id() argument
331 struct netif_stats *nifs = &nif->nif_stats; in nx_netif_generate_internal_llink_id()
336 STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) { in nx_netif_generate_internal_llink_id()
351 nx_netif_llink_initialize(struct netif_llink *llink, struct nx_netif *nif, in nx_netif_llink_initialize() argument
356 LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE); in nx_netif_llink_initialize()
358 llink->nll_nif = nif; in nx_netif_llink_initialize()
363 llink->nll_link_id_internal = nx_netif_generate_internal_llink_id(nif); in nx_netif_llink_initialize()
422 nx_netif_llink_create_locked(struct nx_netif *nif, in nx_netif_llink_create_locked() argument
426 struct netif_stats *nifs = &nif->nif_stats; in nx_netif_llink_create_locked()
428 LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE); in nx_netif_llink_create_locked()
[all …]
H A Dnx_netif_vp.c137 struct nx_netif *nif = nifna->nifna_netif; in netif_deliver_pkt() local
138 struct netif_stats *nifs = &nif->nif_stats; in netif_deliver_pkt()
171 if (NETIF_IS_LOW_LATENCY(nif)) { in netif_deliver_pkt()
318 netif_hwna_setup(struct nx_netif *nif) in netif_hwna_setup() argument
321 struct kern_nexus *nx = nif->nif_nx; in netif_hwna_setup()
326 ASSERT(NETIF_IS_LOW_LATENCY(nif)); in netif_hwna_setup()
327 if (nif->nif_hw_ch != NULL) { in netif_hwna_setup()
328 nif->nif_hw_ch_refcnt++; in netif_hwna_setup()
330 if_name(nif->nif_ifp), nif->nif_hw_ch_refcnt); in netif_hwna_setup()
333 ASSERT(nif->nif_hw_ch_refcnt == 0); in netif_hwna_setup()
[all …]
H A Dnx_netif_filter_compat.c37 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_filter_tx_mbuf_enqueue() local
38 struct netif_stats *nifs = &nif->nif_stats; in nx_netif_filter_tx_mbuf_enqueue()
40 if (nif->nif_filter_cnt == 0) { in nx_netif_filter_tx_mbuf_enqueue()
45 DTRACE_SKYWALK2(mbuf__default__drop, struct nx_netif *, nif, in nx_netif_filter_tx_mbuf_enqueue()
81 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_filter_tx_processed_mbuf_dequeue() local
104 m = get_next_mbuf(nif->nif_tx_processed_mbq, &curr, end); in nx_netif_filter_tx_processed_mbuf_dequeue()
122 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_filter_tx_processed_mbuf_enqueue() local
123 struct netif_stats *nifs = &nif->nif_stats; in nx_netif_filter_tx_processed_mbuf_enqueue()
137 q = &nif->nif_tx_processed_mbq[tc]; in nx_netif_filter_tx_processed_mbuf_enqueue()
162 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_tx_processed_mbuf_get_len() local
[all …]
H A Dnx_netif_filter_native.c37 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_filter_tx_pkt_enqueue() local
38 struct netif_stats *nifs = &nif->nif_stats; in nx_netif_filter_tx_pkt_enqueue()
40 if (nif->nif_filter_cnt == 0) { in nx_netif_filter_tx_pkt_enqueue()
44 DTRACE_SKYWALK2(pkt__default__drop, struct nx_netif *, nif, in nx_netif_filter_tx_pkt_enqueue()
80 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_filter_tx_processed_pkt_dequeue() local
103 p = get_next_pkt(nif->nif_tx_processed_pktq, &curr, end); in nx_netif_filter_tx_processed_pkt_dequeue()
122 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_filter_tx_processed_pkt_enqueue() local
123 struct netif_stats *nifs = &nif->nif_stats; in nx_netif_filter_tx_processed_pkt_enqueue()
137 q = &nif->nif_tx_processed_pktq[tc]; in nx_netif_filter_tx_processed_pkt_enqueue()
164 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_tx_processed_pkt_get_len() local
[all …]
H A Dnx_netif.c680 struct nx_netif *nif; in nx_netif_get_llink_info() local
686 nif = NX_NETIF_PRIVATE(nx); in nx_netif_get_llink_info()
687 if (!NETIF_LLINK_ENABLED(nif)) { in nx_netif_get_llink_info()
691 lck_rw_lock_shared(&nif->nif_llink_lock); in nx_netif_get_llink_info()
692 llink_cnt = nif->nif_llink_cnt; in nx_netif_get_llink_info()
725 STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) { in nx_netif_get_llink_info()
762 lck_rw_unlock_shared(&nif->nif_llink_lock); in nx_netif_get_llink_info()
954 struct nx_netif *nif = NX_NETIF_PRIVATE(nx); in __netif_mib_get_stats() local
955 struct ifnet *ifp = nif->nif_ifp; in __netif_mib_get_stats()
964 sns->sns_nifs = nif->nif_stats; in __netif_mib_get_stats()
[all …]
H A Dnx_netif_filter_vp.c311 struct nx_netif *nif = nx->nx_arg; in netif_filter_na_mem_new() local
315 NETIF_WLOCK_ASSERT_HELD(nif); in netif_filter_na_mem_new()
316 ASSERT(nif->nif_ifp != NULL); in netif_filter_na_mem_new()
327 if (nif->nif_filter_pp == NULL) { in netif_filter_na_mem_new()
333 if_name(nif->nif_ifp)); in netif_filter_na_mem_new()
345 nif->nif_filter_pp = pp; in netif_filter_na_mem_new()
348 &nif->nif_filter_pp, NULL, FALSE, FALSE, NULL, &err); in netif_filter_na_mem_new()
487 struct nx_netif *nif = NX_NETIF_PRIVATE(nx); in netif_filter_na_dtor() local
490 NETIF_WLOCK(nif); in netif_filter_na_dtor()
497 nif->nif_filter_vp_cnt--; in netif_filter_na_dtor()
[all …]
H A Dnx_netif_host.c50 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_host_na_activate() local
73 nif->nif_hwassist = ifp->if_hwassist; in nx_netif_host_na_activate()
74 nif->nif_capabilities = ifp->if_capabilities; in nx_netif_host_na_activate()
75 nif->nif_capenable = ifp->if_capenable; in nx_netif_host_na_activate()
106 (nif->nif_hwassist & in nx_netif_host_na_activate()
109 (nif->nif_capabilities & in nx_netif_host_na_activate()
112 (nif->nif_capenable & in nx_netif_host_na_activate()
147 ifp->if_hwassist |= (nif->nif_hwassist & in nx_netif_host_na_activate()
150 (nif->nif_capabilities & (SK_IFCAP_CSUM | IFCAP_TSO)); in nx_netif_host_na_activate()
152 (nif->nif_capenable & (SK_IFCAP_CSUM | IFCAP_TSO)); in nx_netif_host_na_activate()
[all …]
H A Dnx_netif_util.c156 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_mbuf_to_filter_pkt() local
157 struct netif_stats *nifs = &nif->nif_stats; in nx_netif_mbuf_to_filter_pkt()
158 struct kern_pbufpool *pp = nif->nif_filter_pp; in nx_netif_mbuf_to_filter_pkt()
159 ifnet_t ifp = nif->nif_ifp; in nx_netif_mbuf_to_filter_pkt()
223 nif->nif_pkt_copy_from_mbuf(type, fph, off, m, 0, in nx_netif_mbuf_to_filter_pkt()
334 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_pkt_to_filter_pkt() local
335 struct netif_stats *nifs = &nif->nif_stats; in nx_netif_pkt_to_filter_pkt()
336 struct kern_pbufpool *pp = nif->nif_filter_pp; in nx_netif_pkt_to_filter_pkt()
337 ifnet_t ifp = nif->nif_ifp; in nx_netif_pkt_to_filter_pkt()
419 nif->nif_pkt_copy_from_mbuf(type, fph, off, m, 0, in nx_netif_pkt_to_filter_pkt()
[all …]
H A Dnx_netif_compat.c414 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_compat_na_activate() local
522 nx_netif_mit_init(nif, na->na_ifp, in nx_netif_compat_na_activate()
535 nx_netif_mit_init(nif, na->na_ifp, in nx_netif_compat_na_activate()
1294 struct nx_netif *nif = nifna->nifna_netif; in nx_netif_compat_na_rxsync() local
1472 nif->nif_pkt_copy_from_mbuf(NR_RX, ph, 0, m, 0, in nx_netif_compat_na_rxsync()
1575 struct nx_netif *nif = NX_NETIF_PRIVATE(nx); in nx_netif_compat_attach() local
1617 devnca->nca_up.nifna_netif = nif; in nx_netif_compat_attach()
1618 nx_netif_retain(nif); in nx_netif_compat_attach()
1687 nif->nif_pkt_copy_from_mbuf = in nx_netif_compat_attach()
1689 nif->nif_pkt_copy_to_mbuf = in nx_netif_compat_attach()
[all …]
H A Dnx_netif_gso.c150 struct nx_netif *nif = NA(ifp)->nifna_netif; in netif_gso_check_netif_active() local
151 struct netif_stats *nifs = &nif->nif_stats; in netif_gso_check_netif_active()
152 struct kern_nexus *nx = nif->nif_nx; in netif_gso_check_netif_active()
H A Dnx_netif_mit.c154 nx_netif_mit_init(struct nx_netif *nif, const struct ifnet *ifp, in nx_netif_mit_init() argument
158 #pragma unused(nif) in nx_netif_mit_init()
283 skoid_create(&mit->mit_skoid, SKOID_DNODE(nif->nif_skoid), oid_name, 0); in nx_netif_mit_init()
/xnu-8019.80.24/bsd/skywalk/nexus/flowswitch/flow/
H A Dflow_entry.c194 struct nx_netif *nif; in flow_entry_alloc() local
280 nif = NX_NETIF_PRIVATE(dev_na->na_nx); in flow_entry_alloc()
281 if (NETIF_LLINK_ENABLED(nif)) { in flow_entry_alloc()
282 fe->fe_qset = nx_netif_find_qset(nif, req->nfr_qset_id); in flow_entry_alloc()