xref: /xnu-8792.81.2/bsd/skywalk/packet/packet_kern.c (revision 19c3b8c28c31cb8130e034cfb5df6bf9ba342d90)
1 /*
2  * Copyright (c) 2016-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <skywalk/os_skywalk_private.h>
30 #include <netinet/tcp_var.h>
31 
32 static int kern_packet_clone_internal(const kern_packet_t, kern_packet_t *,
33     uint32_t, kern_packet_copy_mode_t);
34 
#if (DEBUG || DEVELOPMENT)
/*
 * Panic helper invoked when a packet handle fails a combined
 * type/subtype assertion; reports both the expected and the actual
 * values encoded in the handle.  Never returns.
 */
__attribute__((noreturn))
void
pkt_subtype_assert_fail(const kern_packet_t ph, uint64_t type, uint64_t subtype)
{
	panic("invalid packet handle 0x%llx (type %llu != %llu || "
	    "subtype %llu != %llu)", ph, SK_PTR_TYPE(ph), type,
	    SK_PTR_SUBTYPE(ph), subtype);
	/* NOTREACHED */
	__builtin_unreachable();
}

/*
 * Panic helper invoked when a packet handle fails a type-only
 * assertion (see PKT_TYPE_ASSERT usage below).  Never returns.
 */
__attribute__((noreturn))
void
pkt_type_assert_fail(const kern_packet_t ph, uint64_t type)
{
	panic("invalid packet handle 0x%llx (type %llu != %llu)",
	    ph, SK_PTR_TYPE(ph), type);
	/* NOTREACHED */
	__builtin_unreachable();
}
#endif /* DEBUG || DEVELOPMENT */
57 
/*
 * Headroom and link-layer header accessors: thin exported wrappers
 * around the private __packet_* implementations.
 */
errno_t
kern_packet_set_headroom(const kern_packet_t ph, const uint8_t headroom)
{
	return __packet_set_headroom(ph, headroom);
}

uint8_t
kern_packet_get_headroom(const kern_packet_t ph)
{
	return __packet_get_headroom(ph);
}

/*
 * The link header offset is stored as the packet headroom; these two
 * calls are aliases of the {set,get}_headroom accessors above.
 */
errno_t
kern_packet_set_link_header_offset(const kern_packet_t ph, const uint8_t off)
{
	return __packet_set_headroom(ph, off);
}

uint16_t
kern_packet_get_link_header_offset(const kern_packet_t ph)
{
	return __packet_get_headroom(ph);
}

errno_t
kern_packet_set_link_header_length(const kern_packet_t ph, const uint8_t off)
{
	return __packet_set_link_header_length(ph, off);
}

uint8_t
kern_packet_get_link_header_length(const kern_packet_t ph)
{
	return __packet_get_link_header_length(ph);
}

/* Link-layer broadcast flag */
errno_t
kern_packet_set_link_broadcast(const kern_packet_t ph)
{
	return __packet_set_link_broadcast(ph);
}

boolean_t
kern_packet_get_link_broadcast(const kern_packet_t ph)
{
	return __packet_get_link_broadcast(ph);
}

/* Link-layer multicast flag */
errno_t
kern_packet_set_link_multicast(const kern_packet_t ph)
{
	return __packet_set_link_multicast(ph);
}

/* Ethernet FCS flag */
errno_t
kern_packet_set_link_ethfcs(const kern_packet_t ph)
{
	return __packet_set_link_ethfcs(ph);
}

boolean_t
kern_packet_get_link_multicast(const kern_packet_t ph)
{
	return __packet_get_link_multicast(ph);
}

boolean_t
kern_packet_get_link_ethfcs(const kern_packet_t ph)
{
	return __packet_get_link_ethfcs(ph);
}
129 
/* deprecated -- no effect, use set_link_header_length instead  */
errno_t
kern_packet_set_network_header_offset(const kern_packet_t ph,
    const uint16_t off)
{
#pragma unused(ph, off)
	return 0;
}

/* deprecated -- use get_link_header_length instead  */
uint16_t
kern_packet_get_network_header_offset(const kern_packet_t ph)
{
	/* network header begins where headroom plus link header end */
	return (uint16_t)__packet_get_headroom(ph) +
	       (uint16_t)__packet_get_link_header_length(ph);
}

/* deprecated -- no effect, kept for KPI compatibility */
errno_t
kern_packet_set_transport_header_offset(const kern_packet_t ph,
    const uint16_t off)
{
#pragma unused(ph, off)
	return 0;
}

/* deprecated -- always returns 0 */
uint16_t
kern_packet_get_transport_header_offset(const kern_packet_t ph)
{
#pragma unused(ph)
	return 0;
}
163 
/*
 * Transport-layer hint getters: each returns the corresponding flag
 * recorded on the packet by the transport stack.
 */
boolean_t
kern_packet_get_transport_traffic_background(const kern_packet_t ph)
{
	return __packet_get_transport_traffic_background(ph);
}

boolean_t
kern_packet_get_transport_traffic_realtime(const kern_packet_t ph)
{
	return __packet_get_transport_traffic_realtime(ph);
}

boolean_t
kern_packet_get_transport_retransmit(const kern_packet_t ph)
{
	return __packet_get_transport_retransmit(ph);
}

boolean_t
kern_packet_get_transport_new_flow(const kern_packet_t ph)
{
	return __packet_get_transport_new_flow(ph);
}

boolean_t
kern_packet_get_transport_last_packet(const kern_packet_t ph)
{
	return __packet_get_transport_last_packet(ph);
}
193 
/* Service class (QoS) accessors */
int
kern_packet_set_service_class(const kern_packet_t ph,
    const kern_packet_svc_class_t sc)
{
	return __packet_set_service_class(ph, sc);
}

kern_packet_svc_class_t
kern_packet_get_service_class(const kern_packet_t ph)
{
	return __packet_get_service_class(ph);
}

/*
 * Compression generation count accessors; this counter is consulted by
 * kern_packet_is_high_priority() below (TCP ACK compression).
 */
errno_t
kern_packet_set_compression_generation_count(const kern_packet_t ph,
    uint32_t gencnt)
{
	return __packet_set_comp_gencnt(ph, gencnt);
}

errno_t
kern_packet_get_compression_generation_count(const kern_packet_t ph, uint32_t *pgencnt)
{
	return __packet_get_comp_gencnt(ph, pgencnt);
}
219 
220 errno_t
kern_packet_get_service_class_index(const kern_packet_svc_class_t svc,uint32_t * index)221 kern_packet_get_service_class_index(const kern_packet_svc_class_t svc,
222     uint32_t *index)
223 {
224 	if (index == NULL || !KPKT_VALID_SVC(svc)) {
225 		return EINVAL;
226 	}
227 
228 	*index = KPKT_SVCIDX(svc);
229 	return 0;
230 }
231 
232 boolean_t
kern_packet_is_high_priority(const kern_packet_t ph)233 kern_packet_is_high_priority(const kern_packet_t ph)
234 {
235 	uint32_t sc;
236 	boolean_t is_hi_priority;
237 
238 	sc = __packet_get_service_class(ph);
239 
240 	switch (sc) {
241 	case PKT_SC_VI:
242 	case PKT_SC_SIG:
243 	case PKT_SC_VO:
244 	case PKT_SC_CTL:
245 		is_hi_priority = (PKT_ADDR(ph)->pkt_comp_gencnt == 0 ||
246 		    PKT_ADDR(ph)->pkt_comp_gencnt == TCP_ACK_COMPRESSION_DUMMY);
247 		break;
248 
249 	case PKT_SC_BK_SYS:
250 	case PKT_SC_BK:
251 	case PKT_SC_BE:
252 	case PKT_SC_RD:
253 	case PKT_SC_OAM:
254 	case PKT_SC_AV:
255 	case PKT_SC_RV:
256 	default:
257 		is_hi_priority = false;
258 	}
259 	return is_hi_priority;
260 }
261 
/* Traffic class accessors */
errno_t
kern_packet_set_traffic_class(const kern_packet_t ph,
    kern_packet_traffic_class_t tc)
{
	return __packet_set_traffic_class(ph, tc);
}

kern_packet_traffic_class_t
kern_packet_get_traffic_class(const kern_packet_t ph)
{
	return __packet_get_traffic_class(ph);
}

/*
 * Internet checksum offload metadata accessors; "tx" selects the
 * transmit (vs. receive) direction of the flags.
 */
errno_t
kern_packet_set_inet_checksum(const kern_packet_t ph,
    const packet_csum_flags_t flags, const uint16_t start,
    const uint16_t stuff, const boolean_t tx)
{
	return __packet_set_inet_checksum(ph, flags, start, stuff, tx);
}

packet_csum_flags_t
kern_packet_get_inet_checksum(const kern_packet_t ph, uint16_t *start,
    uint16_t *val, const boolean_t tx)
{
	return __packet_get_inet_checksum(ph, start, val, tx);
}
289 
/* Flow UUID accessors */
void
kern_packet_set_flow_uuid(const kern_packet_t ph, const uuid_t flow_uuid)
{
	__packet_set_flow_uuid(ph, flow_uuid);
}

void
kern_packet_get_flow_uuid(const kern_packet_t ph, uuid_t *flow_uuid)
{
	/* deref: the private getter takes the uuid_t array, not a pointer */
	__packet_get_flow_uuid(ph, *flow_uuid);
}

void
kern_packet_clear_flow_uuid(const kern_packet_t ph)
{
	__packet_clear_flow_uuid(ph);
}
307 
308 void
kern_packet_get_euuid(const kern_packet_t ph,uuid_t euuid)309 kern_packet_get_euuid(const kern_packet_t ph, uuid_t euuid)
310 {
311 	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
312 		uuid_copy(euuid, PKT_ADDR(ph)->pkt_policy_euuid);
313 	} else {
314 		uuid_clear(euuid);
315 	}
316 }
317 
318 void
kern_packet_set_policy_id(const kern_packet_t ph,uint32_t policy_id)319 kern_packet_set_policy_id(const kern_packet_t ph, uint32_t policy_id)
320 {
321 	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
322 		PKT_ADDR(ph)->pkt_policy_id = policy_id;
323 	}
324 }
325 
326 uint32_t
kern_packet_get_policy_id(const kern_packet_t ph)327 kern_packet_get_policy_id(const kern_packet_t ph)
328 {
329 	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
330 		return PKT_ADDR(ph)->pkt_policy_id;
331 	} else {
332 		return 0;
333 	}
334 }
335 
/* Total data length across the packet's buflets */
uint32_t
kern_packet_get_data_length(const kern_packet_t ph)
{
	return __packet_get_data_length(ph);
}

uint32_t
kern_packet_get_buflet_count(const kern_packet_t ph)
{
	return __packet_get_buflet_count(ph);
}

/* Iterate buflets: pass NULL bprev for the first, previous thereafter */
kern_buflet_t
kern_packet_get_next_buflet(const kern_packet_t ph, const kern_buflet_t bprev)
{
	return __packet_get_next_buflet(ph, bprev);
}

/* Finalize the packet (required before handing it off) */
errno_t
kern_packet_finalize(const kern_packet_t ph)
{
	return __packet_finalize(ph);
}

/* Index of the packet object within its pool */
kern_packet_idx_t
kern_packet_get_object_index(const kern_packet_t ph)
{
	return __packet_get_object_index(ph);
}

/* Timestamp accessors; *valid reports whether a timestamp is set */
errno_t
kern_packet_get_timestamp(const kern_packet_t ph, uint64_t *ts,
    boolean_t *valid)
{
	return __packet_get_timestamp(ph, ts, valid);
}

errno_t
kern_packet_set_timestamp(const kern_packet_t ph, uint64_t ts, boolean_t valid)
{
	return __packet_set_timestamp(ph, ts, valid);
}
378 
379 struct mbuf *
kern_packet_get_mbuf(const kern_packet_t pkt)380 kern_packet_get_mbuf(const kern_packet_t pkt)
381 {
382 	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(pkt);
383 
384 	if ((kpkt->pkt_pflags & PKT_F_MBUF_DATA) != 0) {
385 		return kpkt->pkt_mbuf;
386 	}
387 	return NULL;
388 }
389 
errno_t
kern_packet_get_timestamp_requested(const kern_packet_t ph,
    boolean_t *requested)
{
	return __packet_get_timestamp_requested(ph, requested);
}

/*
 * Notify interested parties that transmission of this packet has
 * completed: deliver the transmit-status channel event first, then run
 * any registered completion-timestamp callbacks if the packet carries
 * PKT_F_TX_COMPL_TS_REQ.
 */
void
kern_packet_tx_completion(const kern_packet_t ph, ifnet_t ifp)
{
	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(ph);

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	/*
	 * handling of transmit completion events.
	 */
	(void) kern_channel_event_transmit_status_with_packet(ph, ifp);

	/*
	 * handling of transmit completion timestamp request callbacks.
	 */
	if ((kpkt->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) != 0) {
		__packet_perform_tx_completion_callbacks(ph, ifp);
	}
}

errno_t
kern_packet_get_tx_completion_status(const kern_packet_t ph,
    kern_return_t *status)
{
	return __packet_get_tx_completion_status(ph, status);
}

errno_t
kern_packet_set_tx_completion_status(const kern_packet_t ph,
    kern_return_t status)
{
	return __packet_set_tx_completion_status(ph, status);
}
429 
/* Group start/end flag accessors (return values intentionally dropped) */
void
kern_packet_set_group_start(const kern_packet_t ph)
{
	(void) __packet_set_group_start(ph);
}

boolean_t
kern_packet_get_group_start(const kern_packet_t ph)
{
	return __packet_get_group_start(ph);
}

void
kern_packet_set_group_end(const kern_packet_t ph)
{
	(void) __packet_set_group_end(ph);
}

boolean_t
kern_packet_get_group_end(const kern_packet_t ph)
{
	return __packet_get_group_end(ph);
}
453 
/* Expiration time and expiry action accessors */
errno_t
kern_packet_get_expire_time(const kern_packet_t ph, uint64_t *ts)
{
	return __packet_get_expire_time(ph, ts);
}

errno_t
kern_packet_set_expire_time(const kern_packet_t ph, const uint64_t ts)
{
	return __packet_set_expire_time(ph, ts);
}

errno_t
kern_packet_get_expiry_action(const kern_packet_t ph, packet_expiry_action_t *pea)
{
	return __packet_get_expiry_action(ph, pea);
}

errno_t
kern_packet_set_expiry_action(const kern_packet_t ph, packet_expiry_action_t pea)
{
	return __packet_set_expiry_action(ph, pea);
}

/* Opaque per-packet token accessors; *len is in/out on get */
errno_t
kern_packet_get_token(const kern_packet_t ph, void *token, uint16_t *len)
{
	return __packet_get_token(ph, token, len);
}

errno_t
kern_packet_set_token(const kern_packet_t ph, const void *token,
    const uint16_t len)
{
	return __packet_set_token(ph, token, len);
}

errno_t
kern_packet_get_packetid(const kern_packet_t ph, packet_id_t *pktid)
{
	return __packet_get_packetid(ph, pktid);
}

/*
 * VLAN tag accessors; tag_in_pkt indicates whether the tag is carried
 * in the packet data itself rather than out-of-band metadata.
 */
errno_t
kern_packet_set_vlan_tag(const kern_packet_t ph, const uint16_t tag,
    const boolean_t tag_in_pkt)
{
	return __packet_set_vlan_tag(ph, tag, tag_in_pkt);
}

errno_t
kern_packet_get_vlan_tag(const kern_packet_t ph, uint16_t *tag,
    boolean_t *tag_in_pkt)
{
	return __packet_get_vlan_tag(ph, tag, tag_in_pkt);
}

/* Extract the VLAN id field from a tag value */
uint16_t
kern_packet_get_vlan_id(const uint16_t tag)
{
	return __packet_get_vlan_id(tag);
}

/* Extract the priority (PCP) field from a tag value */
uint8_t
kern_packet_get_vlan_priority(const uint16_t tag)
{
	return __packet_get_vlan_priority(tag);
}
522 
errno_t
kern_packet_get_app_metadata(const kern_packet_t ph,
    packet_app_metadata_type_t *app_type, uint8_t *app_metadata)
{
	return __packet_get_app_metadata(ph, app_type, app_metadata);
}

/* Wake flag accessors (returning a void expression is intentional) */
void
kern_packet_set_wake_flag(const kern_packet_t ph)
{
	return __packet_set_wake_flag(ph);
}

boolean_t
kern_packet_get_wake_flag(const kern_packet_t ph)
{
	return __packet_get_wake_flag(ph);
}

/*
 * One's-complement Internet checksum over "len" bytes starting at
 * "data", folded into initial sum "sum0".
 */
uint32_t
kern_inet_checksum(const void *data, uint32_t len, uint32_t sum0)
{
	return __packet_cksum(data, len, sum0);
}
547 
548 uint32_t
kern_copy_and_inet_checksum(const void * src,void * dst,uint32_t len,uint32_t sum0)549 kern_copy_and_inet_checksum(const void *src, void *dst, uint32_t len,
550     uint32_t sum0)
551 {
552 	uint32_t sum = __packet_copy_and_sum(src, dst, len, sum0);
553 	return __packet_fold_sum_final(sum);
554 }
555 
/*
 * Source packet must be finalized (not dropped); cloned packet does not
 * inherit the finalized flag, or the classified flag, so caller is
 * responsible for finalizing it and classifying it (as needed).
 *
 * KPKT_COPY_HEAVY allocates a packet with its own buffer(s), copies the
 * buffer contents plus AQM metadata, and m_dup()s any attached mbuf.
 * KPKT_COPY_LIGHT allocates a buffer-less packet and shares the source
 * buffer by reference via _KBUF_COPY().  skmflag selects blocking vs.
 * SKMEM_NOSLEEP allocation.  Returns 0 on success or an errno; on
 * failure *ph2 is reset to 0 and any partially-built clone is freed.
 */
static int
kern_packet_clone_internal(const kern_packet_t ph1, kern_packet_t *ph2,
    uint32_t skmflag, kern_packet_copy_mode_t mode)
{
	struct kern_pbufpool *pool;
	struct __kern_packet *p1 = SK_PTR_ADDR_KPKT(ph1);
	struct __kern_packet *p2 = NULL;
	struct __kern_buflet *p1_buf, *p2_buf;
	uint16_t bufs_cnt_alloc;
	int m_how;
	int err;

	/* TODO: Add quantum support */
	VERIFY(SK_PTR_TYPE(ph1) == NEXUS_META_TYPE_PACKET);

	/* Source needs to be finalized (not dropped) and with 1 buflet */
	if (__improbable((p1->pkt_qum.qum_qflags & QUM_F_FINALIZED) == 0 ||
	    (p1->pkt_qum.qum_qflags & QUM_F_DROPPED) != 0 ||
	    p1->pkt_bufs_cnt == 0)) {
		return EINVAL;
	}

	/* TODO: Add multi-buflet support */
	VERIFY(p1->pkt_bufs_cnt == 1);

	switch (mode) {
	case KPKT_COPY_HEAVY:
		/*
		 * Allocate a packet with the same number of buffers as that
		 * of the source packet's; this cannot be 0 per check above.
		 */
		bufs_cnt_alloc = p1->pkt_bufs_cnt;
		break;

	case KPKT_COPY_LIGHT:
		/*
		 * Allocate an "empty" packet with no buffers attached; this
		 * will work only on pools marked with "on-demand", which is
		 * the case today for device drivers needing shared buffers
		 * support.
		 *
		 * TODO: We could make this generic and applicable to regular
		 * pools, but it would involve detaching the buffer that comes
		 * attached to the constructed packet; this wouldn't be that
		 * lightweight in nature, but whatever.  In such a case the
		 * number of buffers requested during allocation is the same
		 * as the that of the source packet's.  For now, let it fail
		 * naturally on regular pools, as part of allocation below.
		 *
		 * XXX: This would also fail on quantums as we currently
		 * restrict quantums to have exactly one buffer.
		 */
		bufs_cnt_alloc = 0;
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	*ph2 = 0;
	/* Clone is allocated from the same pool as the source packet */
	pool = __DECONST(struct kern_pbufpool *, SK_PTR_ADDR_KQUM(ph1)->qum_pp);
	if (skmflag & SKMEM_NOSLEEP) {
		err = kern_pbufpool_alloc_nosleep(pool, bufs_cnt_alloc, ph2);
		m_how = M_NOWAIT;
	} else {
		err = kern_pbufpool_alloc(pool, bufs_cnt_alloc, ph2);
		ASSERT(err != ENOMEM);
		m_how = M_WAIT;
	}
	if (__improbable(err != 0)) {
		/* See comments above related to KPKT_COPY_{HEAVY,LIGHT} */
		goto error;
	}
	p2 = SK_PTR_ADDR_KPKT(*ph2);

	/* Copy packet metadata */
	_QUM_COPY(&(p1)->pkt_qum, &(p2)->pkt_qum);
	_PKT_COPY(p1, p2);
	ASSERT(p2->pkt_mbuf == NULL);
	ASSERT(p2->pkt_bufs_max == p1->pkt_bufs_max);

	/* clear trace id */
	p2->pkt_trace_id = 0;
	/* clear finalized and classified bits from clone */
	p2->pkt_qum.qum_qflags &= ~(QUM_F_FINALIZED | QUM_F_FLOW_CLASSIFIED);

	switch (mode) {
	case KPKT_COPY_HEAVY:
		/*
		 * Heavy: Copy buffer contents and extra metadata.
		 */
		ASSERT(p2->pkt_bufs_cnt == p1->pkt_bufs_cnt);
		if (__probable(p1->pkt_bufs_cnt != 0)) {
			uint8_t *saddr, *daddr;
			uint16_t copy_len;
			/*
			 * TODO -- [email protected]
			 * Packets from compat driver could have dlen > dlim
			 * for flowswitch flow compatibility, cleanup when we
			 * make them consistent.
			 */
			PKT_GET_FIRST_BUFLET(p1, p1->pkt_bufs_cnt, p1_buf);
			PKT_GET_FIRST_BUFLET(p2, p2->pkt_bufs_cnt, p2_buf);
			saddr = (void *)p1_buf->buf_addr;
			daddr = (void *)p2_buf->buf_addr;
			/* clamp to dlim; see compat-driver note above */
			copy_len = MIN(p1_buf->buf_dlen, p1_buf->buf_dlim);
			if (copy_len != 0) {
				bcopy(saddr, daddr, copy_len);
			}
			*__DECONST(uint16_t *, &p2_buf->buf_dlim) =
			    p1_buf->buf_dlim;
			p2_buf->buf_dlen = p1_buf->buf_dlen;
			p2_buf->buf_doff = p1_buf->buf_doff;
		}

		/* Copy AQM metadata */
		p2->pkt_flowsrc_type = p1->pkt_flowsrc_type;
		p2->pkt_flowsrc_fidx = p1->pkt_flowsrc_fidx;
		_CASSERT((offsetof(struct __flow, flow_src_id) % 8) == 0);
		_UUID_COPY(p2->pkt_flowsrc_id, p1->pkt_flowsrc_id);
		_UUID_COPY(p2->pkt_policy_euuid, p1->pkt_policy_euuid);
		p2->pkt_policy_id = p1->pkt_policy_id;

		p2->pkt_pflags = p1->pkt_pflags;
		if (p1->pkt_pflags & PKT_F_MBUF_DATA) {
			ASSERT(p1->pkt_mbuf != NULL);
			/* duplicate the attached mbuf as well */
			p2->pkt_mbuf = m_dup(p1->pkt_mbuf, m_how);
			if (p2->pkt_mbuf == NULL) {
				KPKT_CLEAR_MBUF_DATA(p2);
				err = ENOBUFS;
				goto error;
			}
		}
		break;

	case KPKT_COPY_LIGHT:
		/*
		 * Lightweight: Duplicate buflet(s) and add refs.
		 */
		ASSERT(p1->pkt_mbuf == NULL);
		ASSERT(p2->pkt_bufs_cnt == 0);
		if (__probable(p1->pkt_bufs_cnt != 0)) {
			PKT_GET_FIRST_BUFLET(p1, p1->pkt_bufs_cnt, p1_buf);
			p2_buf = &p2->pkt_qum_buf;
			*__DECONST(uint16_t *, &p2->pkt_bufs_cnt) =
			    p1->pkt_bufs_cnt;
			_KBUF_COPY(p1_buf, p2_buf);
			_CASSERT(sizeof(p2_buf->buf_flag) == sizeof(uint16_t));
			*__DECONST(uint16_t *, &p2_buf->buf_flag) &=
			    ~BUFLET_FLAG_EXTERNAL;
			ASSERT(p2_buf->buf_nbft_addr == 0);
			ASSERT(p2_buf->buf_nbft_idx == OBJ_IDX_NONE);
		}
		ASSERT(p2->pkt_bufs_cnt == p1->pkt_bufs_cnt);
		ASSERT(p2->pkt_bufs_max == p1->pkt_bufs_max);
		ASSERT(err == 0);
		break;
	}

error:
	if (err != 0 && p2 != NULL) {
		uint32_t usecnt = 0;

		ASSERT(p2->pkt_mbuf == NULL);
		if (__probable(mode == KPKT_COPY_LIGHT)) {
			/*
			 * This is undoing what _KBUF_COPY() did earlier,
			 * in case this routine is modified to handle regular
			 * pool (not on-demand), which also decrements the
			 * shared buffer's usecnt.  For regular pool, calling
			 * kern_pubfpool_free() will not yield a call to
			 * destroy the metadata.
			 */
			PKT_GET_FIRST_BUFLET(p2, p2->pkt_bufs_cnt, p2_buf);
			KBUF_DTOR(p2_buf, usecnt);
		}
		kern_pbufpool_free(pool, *ph2);
		*ph2 = 0;
	}

	return err;
}
745 
/* Clone a packet, blocking on allocation if needed */
errno_t
kern_packet_clone(const kern_packet_t ph1, kern_packet_t *ph2,
    kern_packet_copy_mode_t mode)
{
	return kern_packet_clone_internal(ph1, ph2, 0, mode);
}

/* Clone a packet without blocking (SKMEM_NOSLEEP allocation) */
errno_t
kern_packet_clone_nosleep(const kern_packet_t ph1, kern_packet_t *ph2,
    kern_packet_copy_mode_t mode)
{
	return kern_packet_clone_internal(ph1, ph2, SKMEM_NOSLEEP, mode);
}
759 
/* Attach buflet bnew to the packet after bprev */
errno_t
kern_packet_add_buflet(const kern_packet_t ph, const kern_buflet_t bprev,
    const kern_buflet_t bnew)
{
	return __packet_add_buflet(ph, bprev, bnew);
}

/* Chain ph2 after ph1; currently identical to kern_packet_set_next() */
void
kern_packet_append(const kern_packet_t ph1, const kern_packet_t ph2)
{
	/*
	 * TODO:
	 * Add assert for non-zero ph2 here after changing IOSkywalkFamily
	 * to use kern_packet_set_next() for clearing the next pointer.
	 */
	kern_packet_set_next(ph1, ph2);
}
777 
778 kern_packet_t
kern_packet_get_next(const kern_packet_t ph)779 kern_packet_get_next(const kern_packet_t ph)
780 {
781 	struct __kern_packet *p, *next;
782 
783 	p = SK_PTR_ADDR_KPKT(ph);
784 	next = p->pkt_nextpkt;
785 	return next == NULL ? 0 : SK_PKT2PH(next);
786 }
787 
788 void
kern_packet_set_next(const kern_packet_t ph1,const kern_packet_t ph2)789 kern_packet_set_next(const kern_packet_t ph1, const kern_packet_t ph2)
790 {
791 	struct __kern_packet *p1, *p2;
792 
793 	ASSERT(ph1 != 0);
794 	p1 = SK_PTR_ADDR_KPKT(ph1);
795 	p2 = (ph2 == 0 ? NULL : SK_PTR_ADDR_KPKT(ph2));
796 	p1->pkt_nextpkt = p2;
797 }
798 
799 void
kern_packet_set_chain_counts(const kern_packet_t ph,uint32_t count,uint32_t bytes)800 kern_packet_set_chain_counts(const kern_packet_t ph, uint32_t count,
801     uint32_t bytes)
802 {
803 	struct __kern_packet *p;
804 
805 	p = SK_PTR_ADDR_KPKT(ph);
806 	p->pkt_chain_count = count;
807 	p->pkt_chain_bytes = bytes;
808 }
809 
810 void
kern_packet_get_chain_counts(const kern_packet_t ph,uint32_t * count,uint32_t * bytes)811 kern_packet_get_chain_counts(const kern_packet_t ph, uint32_t *count,
812     uint32_t *bytes)
813 {
814 	struct __kern_packet *p;
815 
816 	p = SK_PTR_ADDR_KPKT(ph);
817 	*count = p->pkt_chain_count;
818 	*bytes = p->pkt_chain_bytes;
819 }
820 
/*
 * Buflet accessors: thin exported wrappers around the private
 * __buflet_* implementations.
 */
errno_t
kern_buflet_set_data_offset(const kern_buflet_t buf, const uint16_t doff)
{
	return __buflet_set_data_offset(buf, doff);
}

uint16_t
kern_buflet_get_data_offset(const kern_buflet_t buf)
{
	return __buflet_get_data_offset(buf);
}

errno_t
kern_buflet_set_data_length(const kern_buflet_t buf, const uint16_t dlen)
{
	return __buflet_set_data_length(buf, dlen);
}

uint16_t
kern_buflet_get_data_length(const kern_buflet_t buf)
{
	return __buflet_get_data_length(buf);
}

void *
kern_buflet_get_object_address(const kern_buflet_t buf)
{
	return __buflet_get_object_address(buf);
}

uint32_t
kern_buflet_get_object_limit(const kern_buflet_t buf)
{
	return __buflet_get_object_limit(buf);
}

void *
kern_buflet_get_data_address(const kern_buflet_t buf)
{
	return __buflet_get_data_address(buf);
}

errno_t
kern_buflet_set_data_address(const kern_buflet_t buf, const void *daddr)
{
	return __buflet_set_data_address(buf, daddr);
}

kern_segment_t
kern_buflet_get_object_segment(const kern_buflet_t buf,
    kern_obj_idx_seg_t *idx)
{
	return __buflet_get_object_segment(buf, idx);
}

uint16_t
kern_buflet_get_data_limit(const kern_buflet_t buf)
{
	return __buflet_get_data_limit(buf);
}

errno_t
kern_buflet_set_data_limit(const kern_buflet_t buf, const uint16_t dlim)
{
	return __buflet_set_data_limit(buf, dlim);
}

uint16_t
kern_buflet_get_buffer_offset(const kern_buflet_t buf)
{
	return __buflet_get_buffer_offset(buf);
}

errno_t
kern_buflet_set_buffer_offset(const kern_buflet_t buf, const uint16_t off)
{
	return __buflet_set_buffer_offset(buf, off);
}

/* GRO (generic receive offload) length accessors */
uint16_t
kern_buflet_get_gro_len(const kern_buflet_t buf)
{
	return __buflet_get_gro_len(buf);
}

errno_t
kern_buflet_set_gro_len(const kern_buflet_t buf, const uint16_t len)
{
	return __buflet_set_gro_len(buf, len);
}
911 
912 static int
kern_buflet_clone_internal(const kern_buflet_t buf1,kern_buflet_t * pbuf_array,uint32_t * size,kern_pbufpool_t pool,uint32_t skmflag)913 kern_buflet_clone_internal(const kern_buflet_t buf1, kern_buflet_t *pbuf_array,
914     uint32_t *size, kern_pbufpool_t pool, uint32_t skmflag)
915 {
916 	struct __kern_buflet *pbuf;
917 	uint32_t itr;
918 	int err;
919 
920 	if (skmflag & SKMEM_NOSLEEP) {
921 		err = kern_pbufpool_alloc_batch_buflet_nosleep(pool, pbuf_array,
922 		    size, FALSE);
923 	} else {
924 		err = kern_pbufpool_alloc_batch_buflet(pool, pbuf_array, size, FALSE);
925 	}
926 	if (__improbable(*size == 0)) {
927 		SK_ERR("kern_buflet_clone failed to allocated buflet (err %d)", err);
928 		return err;
929 	}
930 
931 	for (itr = 0; itr < *size; itr++) {
932 		pbuf = pbuf_array[itr];
933 		/* Copy metadata from the src buflet */
934 		_KBUF_COPY(buf1, pbuf);
935 		_CASSERT(sizeof(pbuf->buf_flag) == sizeof(uint16_t));
936 		*__DECONST(uint16_t *, &pbuf->buf_flag) |= BUFLET_FLAG_RAW;
937 		BUF_NBFT_ADDR(pbuf, 0);
938 		BUF_NBFT_IDX(pbuf, OBJ_IDX_NONE);
939 	}
940 
941 	return err;
942 }
943 
944 errno_t
kern_buflet_clone(const kern_buflet_t buf1,kern_buflet_t * pbuf_array,uint32_t * size,kern_pbufpool_t pool)945 kern_buflet_clone(const kern_buflet_t buf1, kern_buflet_t *pbuf_array,
946     uint32_t *size, kern_pbufpool_t pool)
947 {
948 	return kern_buflet_clone_internal(buf1, pbuf_array, size, pool, 0);
949 }
950 
951 errno_t
kern_buflet_clone_nosleep(const kern_buflet_t buf1,kern_buflet_t * pbuf_array,uint32_t * size,kern_pbufpool_t pool)952 kern_buflet_clone_nosleep(const kern_buflet_t buf1, kern_buflet_t *pbuf_array,
953     uint32_t *size, kern_pbufpool_t pool)
954 {
955 	return kern_buflet_clone_internal(buf1, pbuf_array, size,
956 	           pool, SKMEM_NOSLEEP);
957 }
958 
959 void *
kern_buflet_get_next_buf(const kern_buflet_t buflet,const void * prev_buf)960 kern_buflet_get_next_buf(const kern_buflet_t buflet, const void *prev_buf)
961 {
962 	return __buflet_get_next_buf(buflet, prev_buf);
963 }
964 
965 
966 packet_trace_id_t
kern_packet_get_trace_id(const kern_packet_t ph)967 kern_packet_get_trace_id(const kern_packet_t ph)
968 {
969 	return __packet_get_trace_id(ph);
970 }
971 
972 void
kern_packet_set_trace_id(const kern_packet_t ph,packet_trace_id_t trace_id)973 kern_packet_set_trace_id(const kern_packet_t ph, packet_trace_id_t trace_id)
974 {
975 	return __packet_set_trace_id(ph, trace_id);
976 }
977 
978 void
kern_packet_trace_event(const kern_packet_t ph,uint32_t event)979 kern_packet_trace_event(const kern_packet_t ph, uint32_t event)
980 {
981 	return __packet_trace_event(ph, event);
982 }
983 
984 errno_t
kern_packet_copy_bytes(kern_packet_t pkt,size_t off,size_t len,void * out_data)985 kern_packet_copy_bytes(kern_packet_t pkt, size_t off, size_t len, void* out_data)
986 {
987 	kern_buflet_t buflet = NULL;
988 	size_t count;
989 	uint8_t *addr;
990 	uint32_t buflet_len;
991 
992 	buflet = __packet_get_next_buflet(pkt, buflet);
993 	if (buflet == NULL) {
994 		return EINVAL;
995 	}
996 	buflet_len = __buflet_get_data_length(buflet);
997 	if (len > buflet_len) {
998 		return EINVAL;
999 	}
1000 	if (off > buflet_len) {
1001 		return EINVAL;
1002 	}
1003 	addr = __buflet_get_data_address(buflet);
1004 	if (addr == NULL) {
1005 		return EINVAL;
1006 	}
1007 	addr += __buflet_get_data_offset(buflet);
1008 	addr += off;
1009 	count = MIN(len, buflet_len - off);
1010 	bcopy((void *) addr, out_data, count);
1011 
1012 	return 0;
1013 }
1014 
1015 
1016 errno_t
kern_packet_get_flowid(const kern_packet_t ph,packet_flowid_t * pflowid)1017 kern_packet_get_flowid(const kern_packet_t ph, packet_flowid_t *pflowid)
1018 {
1019 	return __packet_get_flowid(ph, pflowid);
1020 }
1021 
void
kern_packet_set_trace_tag(const kern_packet_t ph, packet_trace_tag_t tag)
{
	/* Attach the caller-supplied trace tag to packet `ph'. */
	__packet_set_trace_tag(ph, tag);
}
1027 
1028 packet_trace_tag_t
kern_packet_get_trace_tag(const kern_packet_t ph)1029 kern_packet_get_trace_tag(const kern_packet_t ph)
1030 {
1031 	return __packet_get_trace_tag(ph);
1032 }
1033 
1034 errno_t
kern_packet_get_tx_nexus_port_id(const kern_packet_t ph,uint32_t * nx_port_id)1035 kern_packet_get_tx_nexus_port_id(const kern_packet_t ph, uint32_t *nx_port_id)
1036 {
1037 	return __packet_get_tx_nx_port_id(ph, nx_port_id);
1038 }
1039 
1040 errno_t
kern_packet_get_protocol_segment_size(const kern_packet_t ph,uint16_t * seg_sz)1041 kern_packet_get_protocol_segment_size(const kern_packet_t ph, uint16_t *seg_sz)
1042 {
1043 	return __packet_get_protocol_segment_size(ph, seg_sz);
1044 }
1045 
void
kern_packet_set_segment_count(const kern_packet_t ph, uint8_t segcount)
{
	/* Record the segment count on packet `ph'. */
	__packet_set_segment_count(ph, segcount);
}
1051 
1052 void *
kern_packet_get_priv(const kern_packet_t ph)1053 kern_packet_get_priv(const kern_packet_t ph)
1054 {
1055 	return __packet_get_priv(ph);
1056 }
1057 
1058 void
kern_packet_set_priv(const kern_packet_t ph,void * priv)1059 kern_packet_set_priv(const kern_packet_t ph, void *priv)
1060 {
1061 	return __packet_set_priv(ph, priv);
1062 }
1063 
1064 void
kern_packet_get_tso_flags(const kern_packet_t ph,packet_tso_flags_t * flags)1065 kern_packet_get_tso_flags(const kern_packet_t ph, packet_tso_flags_t *flags)
1066 {
1067 	return __packet_get_tso_flags(ph, flags);
1068 }
1069