/*
 * Copyright (c) 2020-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Copyright (C) 2014, Stefano Garzarella - Universita` di Pisa.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>

#include <netinet/in.h>
#include <netinet/ip_var.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/pktap.h>
#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/netif/nx_netif.h>

#define CSUM_GSO_MASK 0x00300000
#define CSUM_GSO_OFFSET 20
#define CSUM_TO_GSO(x) ((x & CSUM_GSO_MASK) >> CSUM_GSO_OFFSET)
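/*
 * CSUM_TO_GSO() maps the TSO bits of m_pkthdr.csum_flags onto the
 * netif_gso_type enum below; the _CASSERTs in netif_gso_init() pin this
 * mapping down. Concretely, CSUM_TSO_IPV4 (bit 20) yields GSO_TCP4,
 * CSUM_TSO_IPV6 (bit 21) yields GSO_TCP6, and a packet with neither bit
 * set yields GSO_NONE.
 */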

enum netif_gso_type {
	GSO_NONE,
	GSO_TCP4,
	GSO_TCP6,
	GSO_END_OF_TYPE
};

uint32_t netif_chain_enqueue = 1;
#if (DEVELOPMENT || DEBUG)
SYSCTL_UINT(_kern_skywalk_netif, OID_AUTO, chain_enqueue,
    CTLFLAG_RW | CTLFLAG_LOCKED, &netif_chain_enqueue, 0,
    "netif chain enqueue");
#endif /* (DEVELOPMENT || DEBUG) */

/*
 * Array of function pointers that execute GSO depending on packet type
 */
int (*netif_gso_functions[GSO_END_OF_TYPE]) (struct ifnet*, struct mbuf*);
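/*
 * Populated by netif_gso_init() and cleared by netif_gso_fini();
 * netif_gso_dispatch() indexes this table directly with the value
 * returned by CSUM_TO_GSO().
 */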

/*
 * State carried across the TCP segmentation of a single packet
 */
struct netif_gso_ip_tcp_state {
	/* refreshes the IP/TCP header pointers for the current segment */
	void (*update)(struct netif_gso_ip_tcp_state*,
	    struct __kern_packet *pkt, uint8_t *__bidi_indexable baddr);
	/* finalizes the headers; computes checksums or defers them to hw */
	void (*internal)(struct netif_gso_ip_tcp_state*, uint32_t partial,
	    uint16_t payload_len, uint32_t *csum_flags);
	union {
		struct ip *ip;
		struct ip6_hdr *ip6;
	} hdr;
	int af;
	struct tcphdr *tcp;
	struct kern_pbufpool *pp;
	uint32_t psuedo_hdr_csum;
	uint32_t tcp_seq;
	uint16_t hlen;
	uint16_t mss;
	uint16_t ip_id;
	uint8_t mac_hlen;
	uint8_t ip_hlen;
	uint8_t tcp_hlen;
	boolean_t copy_data_sum;
};

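/*
 * Derive the frame (link-layer) header length as the distance from the
 * start of the mbuf data to m_pkthdr.pkt_hdr. Fails with ERANGE when the
 * mbuf is empty, pkt_hdr is unset or out of range, or the length does
 * not fit in a uint8_t.
 */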
static inline int
netif_gso_get_frame_header_len(struct mbuf *m, uint8_t *hlen)
{
	uint64_t len;
	char *__single ph = m->m_pkthdr.pkt_hdr;

	if (__improbable(m_pktlen(m) == 0 || ph == NULL ||
	    ph < (char *)m->m_data)) {
		return ERANGE;
	}
	len = (ph - m_mtod_current(m));
	if (__improbable(len > UINT8_MAX)) {
		return ERANGE;
	}
	*hlen = (uint8_t)len;
	return 0;
}

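/*
 * Check that the interface is still attached in skywalk mode and that
 * the TX ring serving this packet's service class is not in drop mode;
 * on success, return the ring's packet buffer pool through *pp.
 */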
static inline int
netif_gso_check_netif_active(struct ifnet *ifp, struct mbuf *m,
    struct kern_pbufpool **pp)
{
	struct __kern_channel_ring *kring;
	struct nx_netif *nif = NA(ifp)->nifna_netif;
	struct netif_stats *nifs = &nif->nif_stats;
	struct kern_nexus *nx = nif->nif_nx;
	struct nexus_adapter *hwna = nx_port_get_na(nx, NEXUS_PORT_NET_IF_DEV);
	uint32_t sc_idx = MBUF_SCIDX(m_get_service_class(m));

	if (__improbable(!NA_IS_ACTIVE(hwna))) {
		STATS_INC(nifs, NETIF_STATS_DROP_NA_INACTIVE);
		SK_DF(SK_VERB_NETIF,
		    "\"%s\" (0x%llx) not in skywalk mode anymore",
		    hwna->na_name, SK_KVA(hwna));
		return ENXIO;
	}

	VERIFY(sc_idx < KPKT_SC_MAX_CLASSES);
	kring = &hwna->na_tx_rings[hwna->na_kring_svc_lut[sc_idx]];
	if (__improbable(KR_DROP(kring))) {
		STATS_INC(nifs, NETIF_STATS_DROP_KRDROP_MODE);
		SK_DF(SK_VERB_NETIF,
		    "kr \"%s\" (0x%llx) krflags 0x%b or %s in drop mode",
		    kring->ckr_name, SK_KVA(kring), kring->ckr_flags,
		    CKRF_BITS, ifp->if_xname);
		return ENXIO;
	}
	*pp = kring->ckr_pp;
	return 0;
}

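/*
 * Chain enqueue is used only when enabled via the sysctl above and when
 * the interface has neither a netem handler attached nor the
 * IFEF_ENQUEUE_MULTI path enabled.
 */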
boolean_t
netif_chain_enqueue_enabled(struct ifnet *ifp)
{
	return netif_chain_enqueue != 0 && ifp->if_output_netem == NULL &&
	    (ifp->if_eflags & IFEF_ENQUEUE_MULTI) == 0;
}

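/*
 * Hand the segmented packet chain to AQM, either as a single chain or
 * packet by packet, then kick the netif transmit path. The chain is
 * consumed regardless of the outcome.
 */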
static inline int
netif_gso_send(struct ifnet *ifp, struct __kern_packet *head,
    struct __kern_packet *tail, uint32_t count, uint32_t bytes)
{
	struct nx_netif *nif = NA(ifp)->nifna_netif;
	struct netif_stats *nifs = &nif->nif_stats;
	struct netif_qset *__single qset = NULL;
	uint64_t qset_id = 0;
	int error = 0;
	boolean_t dropped;

	if (NX_LLINK_PROV(nif->nif_nx) &&
	    ifp->if_traffic_rule_count > 0 &&
	    nxctl_inet_traffic_rule_find_qset_id_with_pkt(ifp->if_xname,
	    head, &qset_id) == 0) {
		qset = nx_netif_find_qset(nif, qset_id);
		ASSERT(qset != NULL);
	}
	if (netif_chain_enqueue_enabled(ifp)) {
		dropped = false;
		if (qset != NULL) {
			head->pkt_qset_idx = qset->nqs_idx;
			error = ifnet_enqueue_ifcq_pkt_chain(ifp, qset->nqs_ifcq,
			    head, tail, count, bytes, false, &dropped);
		} else {
			error = ifnet_enqueue_pkt_chain(ifp, head, tail,
			    count, bytes, false, &dropped);
		}
		if (__improbable(dropped)) {
			STATS_ADD(nifs, NETIF_STATS_TX_DROP_ENQ_AQM, count);
			STATS_ADD(nifs, NETIF_STATS_DROP, count);
		}
	} else {
		struct __kern_packet *pkt = head, *next;
		uint32_t c = 0, b = 0;

		while (pkt != NULL) {
			int err;

			next = pkt->pkt_nextpkt;
			pkt->pkt_nextpkt = NULL;
			c++;
			b += pkt->pkt_length;

			dropped = false;
			if (qset != NULL) {
				pkt->pkt_qset_idx = qset->nqs_idx;
				err = ifnet_enqueue_ifcq_pkt(ifp, qset->nqs_ifcq,
				    pkt, false, &dropped);
			} else {
				err = ifnet_enqueue_pkt(ifp, pkt, false, &dropped);
			}
			if (error == 0 && __improbable(err != 0)) {
				error = err;
			}
			if (__improbable(dropped)) {
				STATS_INC(nifs, NETIF_STATS_TX_DROP_ENQ_AQM);
				STATS_INC(nifs, NETIF_STATS_DROP);
			}
			pkt = next;
		}
		ASSERT(c == count);
		ASSERT(b == bytes);
	}
	if (qset != NULL) {
		nx_netif_qset_release(&qset);
	}
	netif_transmit(ifp, NETIF_XMIT_FLAG_HOST);
	return error;
}

/*
 * Segment and transmit the mbuf chain pointed to by m, producing
 * packets that fit the given mss + hdr_len. This function splits the
 * payload (m->m_pkthdr.len - hdr_len) into segments of at most mss
 * bytes each and copies the first hdr_len bytes of m to the front of
 * every segment.
 */
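/*
 * Illustrative (hypothetical) numbers: with a 54-byte header stack
 * (14 Ethernet + 20 IPv4 + 20 TCP), mss = 1448 and m_pkthdr.len = 14534,
 * the 14480-byte payload yields n_pkts = 10 segments, and the byte count
 * charged to AQM is total_len + hlen * (n_pkts - 1) = 14534 + 54 * 9 =
 * 15020, since every segment carries its own copy of the headers.
 */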
static inline int
netif_gso_tcp_segment_mbuf(struct mbuf *m, struct ifnet *ifp,
    struct netif_gso_ip_tcp_state *state, struct kern_pbufpool *pp)
{
	uuid_t euuid;
	struct pktq pktq_alloc, pktq_seg;
	uint64_t timestamp = 0, m_tx_timestamp = 0;
	uint64_t pflags;
	int error = 0;
	uint32_t policy_id;
	uint32_t skip_policy_id;
	uint32_t svc_class;
	uint32_t n, n_pkts, n_bytes;
	int32_t off = 0, total_len = m->m_pkthdr.len;
	uint8_t tx_headroom = (uint8_t)ifp->if_tx_headroom;
	struct netif_stats *nifs = &NA(ifp)->nifna_netif->nif_stats;
	struct __kern_packet *pkt_chain_head, *pkt_chain_tail;
	struct m_tag *ts_tag = NULL;
	uint16_t mss = state->mss;
	bool skip_pktap;

	VERIFY(total_len > state->hlen);
	VERIFY(((tx_headroom + state->mac_hlen) & 0x1) == 0);
	VERIFY((tx_headroom + state->hlen + mss) <= PP_BUF_SIZE_DEF(pp));

	KPKTQ_INIT(&pktq_alloc);
	KPKTQ_INIT(&pktq_seg);
	/* batch allocate enough packets for all segments */
	n_pkts = (uint32_t)(SK_ROUNDUP((total_len - state->hlen), mss) / mss);
	error = pp_alloc_pktq(pp, 1, &pktq_alloc, n_pkts, NULL,
	    NULL, SKMEM_NOSLEEP);
	if (__improbable(error != 0)) {
		STATS_INC(nifs, NETIF_STATS_GSO_PKT_DROP_NOMEM);
		SK_ERR("failed to alloc %u pkts", n_pkts);
		pp_free_pktq(&pktq_alloc);
		error = ENOBUFS;
		goto done;
	}

	ASSERT(m->m_pkthdr.pkt_proto == IPPROTO_TCP);
	ASSERT((m->m_flags & M_BCAST) == 0);
	ASSERT((m->m_flags & M_MCAST) == 0);
	ASSERT(((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0));
	pflags = m->m_pkthdr.pkt_flags & PKT_F_COMMON_MASK;
	pflags |= PKTF_START_SEQ;
	pflags |= (m->m_pkthdr.pkt_ext_flags & PKTF_EXT_L4S) ? PKT_F_L4S : 0;
	(void) mbuf_get_timestamp(m, &timestamp, NULL);
	necp_get_app_uuid_from_packet(m, euuid);
	policy_id = necp_get_policy_id_from_packet(m);
	skip_policy_id = necp_get_skip_policy_id_from_packet(m);
	svc_class = m_get_service_class(m);
	skip_pktap = (m->m_pkthdr.pkt_flags & PKTF_SKIP_PKTAP) != 0 ||
	    pktap_total_tap_count == 0;

	ts_tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_AQM);
	if (ts_tag != NULL) {
		m_tx_timestamp = *(uint64_t *)(ts_tag->m_tag_data);
	}

	for (n = 1, off = state->hlen; off < total_len; off += mss, n++) {
		uint8_t *baddr, *baddr0;
		uint32_t partial = 0;
		struct __kern_packet *pkt;

		KPKTQ_DEQUEUE(&pktq_alloc, pkt);
		ASSERT(pkt != NULL);

		/* get buffer address from packet */
		MD_BUFLET_ADDR_ABS(pkt, baddr0);
		baddr = baddr0;
		baddr += tx_headroom;

		/*
		 * Copy the link-layer, IP and TCP header from the
		 * original packet.
		 */
		m_copydata(m, 0, state->hlen, baddr);
		baddr += state->hlen;

		/*
		 * Copy the payload from the original packet and
		 * compute the partial checksum on the payload.
		 */
		if (off + mss > total_len) {
			/* the last segment is shorter than mss */
			mss = (uint16_t)(total_len - off);
		}
		if (state->copy_data_sum) {
			partial = m_copydata_sum(m, off, mss, baddr, 0, NULL);
		} else {
			m_copydata(m, off, mss, baddr);
		}

		/*
		 * Update packet metadata.
		 */
		pkt->pkt_headroom = tx_headroom;
		pkt->pkt_l2_len = state->mac_hlen;
		pkt->pkt_link_flags = 0;
		pkt->pkt_csum_flags = 0;
		pkt->pkt_csum_tx_start_off = 0;
		pkt->pkt_csum_tx_stuff_off = 0;
		uuid_copy(pkt->pkt_policy_euuid, euuid);
		pkt->pkt_policy_id = policy_id;
		pkt->pkt_skip_policy_id = skip_policy_id;
		pkt->pkt_timestamp = timestamp;
		pkt->pkt_svc_class = svc_class;
		pkt->pkt_pflags |= pflags;
		pkt->pkt_flowsrc_type = m->m_pkthdr.pkt_flowsrc;
		pkt->pkt_flow_token = m->m_pkthdr.pkt_flowid;
		pkt->pkt_comp_gencnt = m->m_pkthdr.comp_gencnt;
		pkt->pkt_flow_ip_proto = IPPROTO_TCP;
		pkt->pkt_transport_protocol = IPPROTO_TCP;
		pkt->pkt_flow_tcp_seq = htonl(state->tcp_seq);
		__packet_set_tx_timestamp(SK_PKT2PH(pkt), m_tx_timestamp);

		state->update(state, pkt, baddr0);
		/*
		 * FIN or PUSH flags, if present, are set only on the
		 * last segment.
		 */
		if (n != n_pkts) {
			state->tcp->th_flags &= ~(TH_FIN | TH_PUSH);
		}
		/*
		 * The CWR flag, if present, is set only on the first
		 * segment and cleared on the subsequent ones.
		 */
		if (n != 1) {
			state->tcp->th_flags &= ~TH_CWR;
			state->tcp->th_seq = htonl(state->tcp_seq);
		}
		ASSERT(state->tcp->th_seq == pkt->pkt_flow_tcp_seq);
		state->internal(state, partial, mss, &pkt->pkt_csum_flags);
		METADATA_ADJUST_LEN(pkt, state->hlen + mss, tx_headroom);
		VERIFY(__packet_finalize(SK_PKT2PH(pkt)) == 0);
		KPKTQ_ENQUEUE(&pktq_seg, pkt);
		if (!skip_pktap) {
			nx_netif_pktap_output(ifp, state->af, pkt);
		}
	}
	ASSERT(off == total_len);
	STATS_ADD(nifs, NETIF_STATS_GSO_SEG, n_pkts);

	/* ifnet_enqueue_pkt_chain() consumes the packet chain */
	pkt_chain_head = KPKTQ_FIRST(&pktq_seg);
	pkt_chain_tail = KPKTQ_LAST(&pktq_seg);
	KPKTQ_INIT(&pktq_seg);
	n_bytes = total_len + (state->hlen * (n_pkts - 1));

	error = netif_gso_send(ifp, pkt_chain_head, pkt_chain_tail,
	    n_pkts, n_bytes);

done:
	KPKTQ_FINI(&pktq_alloc);
	return error;
}

/*
 * Update the pointers to the TCP and IPv4 headers
 */
static void
netif_gso_ipv4_tcp_update(struct netif_gso_ip_tcp_state *state,
    struct __kern_packet *pkt, uint8_t *__bidi_indexable baddr)
{
	state->hdr.ip = (struct ip *)(void *)(baddr + pkt->pkt_headroom +
	    pkt->pkt_l2_len);
	state->tcp = (struct tcphdr *)(void *)(baddr + pkt->pkt_headroom +
	    pkt->pkt_l2_len + state->ip_hlen);
}

/*
 * Finalize the TCP and IPv4 headers: assign ip_id and ip_len, compute
 * the IP header checksum in software, and complete the TCP checksum
 * from the payload partial sum, the TCP header and the pseudo-header.
 */
static void
netif_gso_ipv4_tcp_internal(struct netif_gso_ip_tcp_state *state,
    uint32_t partial, uint16_t payload_len, uint32_t *csum_flags __unused)
{
	int hlen;
	uint8_t *__sized_by(hlen) buffer;

	/*
	 * Update IP header
	 */
	state->hdr.ip->ip_id = htons((state->ip_id)++);
	state->hdr.ip->ip_len = htons(state->ip_hlen + state->tcp_hlen +
	    payload_len);
	/*
	 * IP header checksum
	 */
	state->hdr.ip->ip_sum = 0;
	buffer = (uint8_t *__bidi_indexable)(struct ip *__bidi_indexable)
	    state->hdr.ip;
	hlen = state->ip_hlen;
	state->hdr.ip->ip_sum = inet_cksum_buffer(buffer, 0, 0, hlen);
	/*
	 * TCP checksum
	 */
	state->tcp->th_sum = 0;
	partial = __packet_cksum(state->tcp, state->tcp_hlen, partial);
	partial += htons(state->tcp_hlen + IPPROTO_TCP + payload_len);
	partial += state->psuedo_hdr_csum;
	ADDCARRY(partial);
	state->tcp->th_sum = ~(uint16_t)partial;
	/*
	 * Update tcp sequence number in gso state
	 */
	state->tcp_seq += payload_len;
}

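/*
 * Finalize the TCP and IPv4 headers, deferring both checksums to the
 * hardware via PACKET_CSUM_IP | PACKET_CSUM_TCP.
 */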
static void
netif_gso_ipv4_tcp_internal_nosum(struct netif_gso_ip_tcp_state *state,
    uint32_t partial __unused, uint16_t payload_len __unused,
    uint32_t *csum_flags)
{
	/*
	 * Update IP header
	 */
	state->hdr.ip->ip_id = htons((state->ip_id)++);
	state->hdr.ip->ip_len = htons(state->ip_hlen + state->tcp_hlen +
	    payload_len);
	/*
	 * Update tcp sequence number in gso state
	 */
	state->tcp_seq += payload_len;

	/* offload csum to hardware */
	*csum_flags |= PACKET_CSUM_IP | PACKET_CSUM_TCP;
}

/*
 * Update the pointers to the TCP and IPv6 headers
 */
static void
netif_gso_ipv6_tcp_update(struct netif_gso_ip_tcp_state *state,
    struct __kern_packet *pkt, uint8_t *__bidi_indexable baddr)
{
	state->hdr.ip6 = (struct ip6_hdr *)(baddr + pkt->pkt_headroom +
	    pkt->pkt_l2_len);
	state->tcp = (struct tcphdr *)(void *)(baddr + pkt->pkt_headroom +
	    pkt->pkt_l2_len + state->ip_hlen);
}

/*
 * Finalize the TCP and IPv6 headers, deferring the TCP checksum to the
 * hardware via PACKET_CSUM_TCPIPV6.
 */
static void
netif_gso_ipv6_tcp_internal_nosum(struct netif_gso_ip_tcp_state *state,
    uint32_t partial __unused, uint16_t payload_len __unused,
    uint32_t *csum_flags)
{
	/*
	 * Update IP header
	 */
	state->hdr.ip6->ip6_plen = htons(state->tcp_hlen + payload_len);

	/*
	 * Update tcp sequence number
	 */
	state->tcp_seq += payload_len;

	/* offload csum to hardware */
	*csum_flags |= PACKET_CSUM_TCPIPV6;
}

/*
 * Finalize the TCP and IPv6 headers, computing the TCP checksum in
 * software
 */
static void
netif_gso_ipv6_tcp_internal(struct netif_gso_ip_tcp_state *state,
    uint32_t partial, uint16_t payload_len, uint32_t *csum_flags __unused)
{
	/*
	 * Update IP header
	 */
	state->hdr.ip6->ip6_plen = htons(state->tcp_hlen + payload_len);
	/*
	 * TCP checksum
	 */
	state->tcp->th_sum = 0;
	partial = __packet_cksum(state->tcp, state->tcp_hlen, partial);
	partial += htonl(state->tcp_hlen + IPPROTO_TCP + payload_len);
	partial += state->psuedo_hdr_csum;
	ADDCARRY(partial);
	state->tcp->th_sum = ~(uint16_t)partial;
	/*
	 * Update tcp sequence number
	 */
	state->tcp_seq += payload_len;
}

/*
 * Initialize the state for the TCP segmentation
 */
static inline void
netif_gso_ip_tcp_init_state(struct netif_gso_ip_tcp_state *state,
    struct mbuf *m, uint8_t mac_hlen, uint8_t ip_hlen, bool isipv6, ifnet_t ifp)
{
	if (isipv6) {
		state->af = AF_INET6;
		state->hdr.ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) +
		    mac_hlen);
		/* should be at least 16-bit aligned */
		VERIFY(((uintptr_t)state->hdr.ip6 & (uintptr_t)0x1) == 0);
		state->tcp = (struct tcphdr *)(void *)(m_mtod_current(m) +
		    mac_hlen + ip_hlen);
		state->update = netif_gso_ipv6_tcp_update;
		if (ifp->if_hwassist & IFNET_CSUM_TCPIPV6) {
			state->internal = netif_gso_ipv6_tcp_internal_nosum;
			state->copy_data_sum = false;
		} else {
			state->internal = netif_gso_ipv6_tcp_internal;
			state->copy_data_sum = true;
		}
		state->psuedo_hdr_csum = in6_pseudo(&state->hdr.ip6->ip6_src,
		    &state->hdr.ip6->ip6_dst, 0);
	} else {
		struct in_addr ip_src, ip_dst;

		state->af = AF_INET;
		state->hdr.ip = (struct ip *)(void *)(mtod(m, uint8_t *) +
		    mac_hlen);
		/* should be at least 16-bit aligned */
		VERIFY(((uintptr_t)state->hdr.ip & (uintptr_t)0x1) == 0);
		state->ip_id = ntohs(state->hdr.ip->ip_id);
		state->tcp = (struct tcphdr *)(void *)(m_mtod_current(m) +
		    mac_hlen + ip_hlen);
		state->update = netif_gso_ipv4_tcp_update;
		if ((ifp->if_hwassist & (IFNET_CSUM_IP | IFNET_CSUM_TCP)) ==
		    (IFNET_CSUM_IP | IFNET_CSUM_TCP)) {
			state->internal = netif_gso_ipv4_tcp_internal_nosum;
			state->copy_data_sum = false;
		} else {
			state->internal = netif_gso_ipv4_tcp_internal;
			state->copy_data_sum = true;
		}
		bcopy(&state->hdr.ip->ip_src, &ip_src, sizeof(ip_src));
		bcopy(&state->hdr.ip->ip_dst, &ip_dst, sizeof(ip_dst));
		state->psuedo_hdr_csum = in_pseudo(ip_src.s_addr,
		    ip_dst.s_addr, 0);
	}

	state->mac_hlen = mac_hlen;
	state->ip_hlen = ip_hlen;
	state->tcp_hlen = (uint8_t)(state->tcp->th_off << 2);
	state->hlen = mac_hlen + ip_hlen + state->tcp_hlen;
	VERIFY(m->m_pkthdr.tso_segsz != 0);
	state->mss = (uint16_t)m->m_pkthdr.tso_segsz;
	state->tcp_seq = ntohl(state->tcp->th_seq);
}

/*
 * GSO on TCP/IPv4
 */
static int
netif_gso_ipv4_tcp(struct ifnet *ifp, struct mbuf *m)
{
	struct ip *ip;
	struct kern_pbufpool *__single pp = NULL;
	struct netif_gso_ip_tcp_state state;
	uint16_t hlen;
	uint8_t ip_hlen;
	uint8_t mac_hlen;
	struct netif_stats *nifs = &NA(ifp)->nifna_netif->nif_stats;
	boolean_t pkt_dropped = false;
	int error;

	STATS_INC(nifs, NETIF_STATS_GSO_PKT);
	if (__improbable(m->m_pkthdr.pkt_proto != IPPROTO_TCP)) {
		STATS_INC(nifs, NETIF_STATS_GSO_PKT_DROP_NONTCP);
		error = ENOTSUP;
		pkt_dropped = true;
		goto done;
	}

	error = netif_gso_check_netif_active(ifp, m, &pp);
	if (__improbable(error != 0)) {
		STATS_INC(nifs, NETIF_STATS_GSO_PKT_DROP_NA_INACTIVE);
		error = ENXIO;
		pkt_dropped = true;
		goto done;
	}

	error = netif_gso_get_frame_header_len(m, &mac_hlen);
	if (__improbable(error != 0)) {
		STATS_INC(nifs, NETIF_STATS_GSO_PKT_DROP_BADLEN);
		pkt_dropped = true;
		goto done;
	}

	hlen = mac_hlen + sizeof(struct ip);
	if (__improbable(m->m_len < hlen)) {
		m = m_pullup(m, hlen);
		if (m == NULL) {
			STATS_INC(nifs, NETIF_STATS_GSO_PKT_DROP_NOMEM);
			error = ENOBUFS;
			pkt_dropped = true;
			goto done;
		}
	}
	ip = (struct ip *)(void *)(mtod(m, uint8_t *) + mac_hlen);
	ip_hlen = (uint8_t)(ip->ip_hl << 2);
	hlen = mac_hlen + ip_hlen + sizeof(struct tcphdr);
	if (__improbable(m->m_len < hlen)) {
		m = m_pullup(m, hlen);
		if (m == NULL) {
			STATS_INC(nifs, NETIF_STATS_GSO_PKT_DROP_NOMEM);
			error = ENOBUFS;
			pkt_dropped = true;
			goto done;
		}
	}
	netif_gso_ip_tcp_init_state(&state, m, mac_hlen, ip_hlen, false, ifp);
	error = netif_gso_tcp_segment_mbuf(m, ifp, &state, pp);
done:
	m_freem(m);
	if (__improbable(pkt_dropped)) {
		STATS_INC(nifs, NETIF_STATS_DROP);
	}
	return error;
}

/*
 * GSO on TCP/IPv6
 */
static int
netif_gso_ipv6_tcp(struct ifnet *ifp, struct mbuf *m)
{
	struct ip6_hdr *ip6;
	struct kern_pbufpool *__single pp = NULL;
	struct netif_gso_ip_tcp_state state;
	int lasthdr_off;
	uint16_t hlen;
	uint8_t ip_hlen;
	uint8_t mac_hlen;
	struct netif_stats *nifs = &NA(ifp)->nifna_netif->nif_stats;
	boolean_t pkt_dropped = false;
	int error;

	STATS_INC(nifs, NETIF_STATS_GSO_PKT);
	if (__improbable(m->m_pkthdr.pkt_proto != IPPROTO_TCP)) {
		STATS_INC(nifs, NETIF_STATS_GSO_PKT_DROP_NONTCP);
		error = ENOTSUP;
		pkt_dropped = true;
		goto done;
	}

	error = netif_gso_check_netif_active(ifp, m, &pp);
	if (__improbable(error != 0)) {
		STATS_INC(nifs, NETIF_STATS_GSO_PKT_DROP_NA_INACTIVE);
		error = ENXIO;
		pkt_dropped = true;
		goto done;
	}

	error = netif_gso_get_frame_header_len(m, &mac_hlen);
	if (__improbable(error != 0)) {
		STATS_INC(nifs, NETIF_STATS_GSO_PKT_DROP_BADLEN);
		pkt_dropped = true;
		goto done;
	}

	hlen = mac_hlen + sizeof(struct ip6_hdr);
	if (__improbable(m->m_len < hlen)) {
		m = m_pullup(m, hlen);
		if (m == NULL) {
			STATS_INC(nifs, NETIF_STATS_GSO_PKT_DROP_NOMEM);
			error = ENOBUFS;
			pkt_dropped = true;
			goto done;
		}
	}
	ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + mac_hlen);
	lasthdr_off = ip6_lasthdr(m, mac_hlen, IPPROTO_IPV6, NULL) - mac_hlen;
	VERIFY(lasthdr_off <= UINT8_MAX);
	ip_hlen = (uint8_t)lasthdr_off;
	hlen = mac_hlen + ip_hlen + sizeof(struct tcphdr);
	if (__improbable(m->m_len < hlen)) {
		m = m_pullup(m, hlen);
		if (m == NULL) {
			STATS_INC(nifs, NETIF_STATS_GSO_PKT_DROP_NOMEM);
			error = ENOBUFS;
			pkt_dropped = true;
			goto done;
		}
	}
	netif_gso_ip_tcp_init_state(&state, m, mac_hlen, ip_hlen, true, ifp);
	error = netif_gso_tcp_segment_mbuf(m, ifp, &state, pp);
done:
	m_freem(m);
	if (__improbable(pkt_dropped)) {
		STATS_INC(nifs, NETIF_STATS_DROP);
	}
	return error;
}

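/*
 * Dispatch an outbound mbuf to the GSO handler selected by the TSO bits
 * in its csum_flags; packets with no TSO bits set fall through to
 * nx_netif_host_output().
 */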
int
netif_gso_dispatch(struct ifnet *ifp, struct mbuf *m)
{
	int gso_flags;

	ASSERT(m->m_nextpkt == NULL);
	gso_flags = CSUM_TO_GSO(m->m_pkthdr.csum_flags);
	VERIFY(gso_flags < GSO_END_OF_TYPE);
	return netif_gso_functions[gso_flags](ifp, m);
}

void
netif_gso_init(void)
{
	_CASSERT(CSUM_TO_GSO(~(CSUM_TSO_IPV4 | CSUM_TSO_IPV6)) == GSO_NONE);
	_CASSERT(CSUM_TO_GSO(CSUM_TSO_IPV4) == GSO_TCP4);
	_CASSERT(CSUM_TO_GSO(CSUM_TSO_IPV6) == GSO_TCP6);
	netif_gso_functions[GSO_NONE] = nx_netif_host_output;
	netif_gso_functions[GSO_TCP4] = netif_gso_ipv4_tcp;
	netif_gso_functions[GSO_TCP6] = netif_gso_ipv6_tcp;
}

void
netif_gso_fini(void)
{
	netif_gso_functions[GSO_NONE] = NULL;
	netif_gso_functions[GSO_TCP4] = NULL;
	netif_gso_functions[GSO_TCP6] = NULL;
}