xref: /xnu-8020.140.41/bsd/skywalk/packet/packet_kern.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <skywalk/os_skywalk_private.h>
30 #include <netinet/tcp_var.h>
31 
32 static int kern_packet_clone_internal(const kern_packet_t, kern_packet_t *,
33     uint32_t, kern_packet_copy_mode_t);
34 
#if (DEBUG || DEVELOPMENT)
/*
 * Panic helper invoked when a packet handle fails a combined
 * type/subtype assertion (see PKT_SUBTYPE_ASSERT).  Never returns.
 */
__attribute__((noreturn))
void
pkt_subtype_assert_fail(const kern_packet_t ph, uint64_t type, uint64_t subtype)
{
	panic("invalid packet handle 0x%llx (type %llu != %llu || "
	    "subtype %llu != %llu)", ph, SK_PTR_TYPE(ph), type,
	    SK_PTR_SUBTYPE(ph), subtype);
	/* NOTREACHED */
	__builtin_unreachable();
}

/*
 * Panic helper invoked when a packet handle fails a type-only
 * assertion (see PKT_TYPE_ASSERT).  Never returns.
 */
__attribute__((noreturn))
void
pkt_type_assert_fail(const kern_packet_t ph, uint64_t type)
{
	panic("invalid packet handle 0x%llx (type %llu != %llu)",
	    ph, SK_PTR_TYPE(ph), type);
	/* NOTREACHED */
	__builtin_unreachable();
}
#endif /* DEBUG || DEVELOPMENT */
57 
58 errno_t
kern_packet_set_headroom(const kern_packet_t ph,const uint8_t headroom)59 kern_packet_set_headroom(const kern_packet_t ph, const uint8_t headroom)
60 {
61 	return __packet_set_headroom(ph, headroom);
62 }
63 
64 uint8_t
kern_packet_get_headroom(const kern_packet_t ph)65 kern_packet_get_headroom(const kern_packet_t ph)
66 {
67 	return __packet_get_headroom(ph);
68 }
69 
/*
 * Set the link header offset of the packet.  The offset is stored as
 * the packet's headroom (the two quantities are the same value in this
 * metadata layout), hence the call to __packet_set_headroom().
 */
errno_t
kern_packet_set_link_header_offset(const kern_packet_t ph, const uint8_t off)
{
	return __packet_set_headroom(ph, off);
}
75 
/*
 * Return the link header offset of the packet; this is simply the
 * packet's headroom (see kern_packet_set_link_header_offset), widened
 * to uint16_t for the KPI signature.
 */
uint16_t
kern_packet_get_link_header_offset(const kern_packet_t ph)
{
	return __packet_get_headroom(ph);
}
81 
82 errno_t
kern_packet_set_link_header_length(const kern_packet_t ph,const uint8_t off)83 kern_packet_set_link_header_length(const kern_packet_t ph, const uint8_t off)
84 {
85 	return __packet_set_link_header_length(ph, off);
86 }
87 
88 uint8_t
kern_packet_get_link_header_length(const kern_packet_t ph)89 kern_packet_get_link_header_length(const kern_packet_t ph)
90 {
91 	return __packet_get_link_header_length(ph);
92 }
93 
94 errno_t
kern_packet_set_link_broadcast(const kern_packet_t ph)95 kern_packet_set_link_broadcast(const kern_packet_t ph)
96 {
97 	return __packet_set_link_broadcast(ph);
98 }
99 
100 boolean_t
kern_packet_get_link_broadcast(const kern_packet_t ph)101 kern_packet_get_link_broadcast(const kern_packet_t ph)
102 {
103 	return __packet_get_link_broadcast(ph);
104 }
105 
106 errno_t
kern_packet_set_link_multicast(const kern_packet_t ph)107 kern_packet_set_link_multicast(const kern_packet_t ph)
108 {
109 	return __packet_set_link_multicast(ph);
110 }
111 
112 errno_t
kern_packet_set_link_ethfcs(const kern_packet_t ph)113 kern_packet_set_link_ethfcs(const kern_packet_t ph)
114 {
115 	return __packet_set_link_ethfcs(ph);
116 }
117 
118 boolean_t
kern_packet_get_link_multicast(const kern_packet_t ph)119 kern_packet_get_link_multicast(const kern_packet_t ph)
120 {
121 	return __packet_get_link_multicast(ph);
122 }
123 
124 boolean_t
kern_packet_get_link_ethfcs(const kern_packet_t ph)125 kern_packet_get_link_ethfcs(const kern_packet_t ph)
126 {
127 	return __packet_get_link_ethfcs(ph);
128 }
129 
130 /* deprecated -- no effect, use set_link_header_length instead  */
131 errno_t
kern_packet_set_network_header_offset(const kern_packet_t ph,const uint16_t off)132 kern_packet_set_network_header_offset(const kern_packet_t ph,
133     const uint16_t off)
134 {
135 #pragma unused(ph, off)
136 	return 0;
137 }
138 
139 /* deprecated -- use get_link_header_length instead  */
140 uint16_t
kern_packet_get_network_header_offset(const kern_packet_t ph)141 kern_packet_get_network_header_offset(const kern_packet_t ph)
142 {
143 	return (uint16_t)__packet_get_headroom(ph) +
144 	       (uint16_t)__packet_get_link_header_length(ph);
145 }
146 
147 /* deprecated */
148 errno_t
kern_packet_set_transport_header_offset(const kern_packet_t ph,const uint16_t off)149 kern_packet_set_transport_header_offset(const kern_packet_t ph,
150     const uint16_t off)
151 {
152 #pragma unused(ph, off)
153 	return 0;
154 }
155 
156 /* deprecated */
157 uint16_t
kern_packet_get_transport_header_offset(const kern_packet_t ph)158 kern_packet_get_transport_header_offset(const kern_packet_t ph)
159 {
160 #pragma unused(ph)
161 	return 0;
162 }
163 
164 boolean_t
kern_packet_get_transport_traffic_background(const kern_packet_t ph)165 kern_packet_get_transport_traffic_background(const kern_packet_t ph)
166 {
167 	return __packet_get_transport_traffic_background(ph);
168 }
169 
170 boolean_t
kern_packet_get_transport_traffic_realtime(const kern_packet_t ph)171 kern_packet_get_transport_traffic_realtime(const kern_packet_t ph)
172 {
173 	return __packet_get_transport_traffic_realtime(ph);
174 }
175 
176 boolean_t
kern_packet_get_transport_retransmit(const kern_packet_t ph)177 kern_packet_get_transport_retransmit(const kern_packet_t ph)
178 {
179 	return __packet_get_transport_retransmit(ph);
180 }
181 
182 boolean_t
kern_packet_get_transport_new_flow(const kern_packet_t ph)183 kern_packet_get_transport_new_flow(const kern_packet_t ph)
184 {
185 	return __packet_get_transport_new_flow(ph);
186 }
187 
188 boolean_t
kern_packet_get_transport_last_packet(const kern_packet_t ph)189 kern_packet_get_transport_last_packet(const kern_packet_t ph)
190 {
191 	return __packet_get_transport_last_packet(ph);
192 }
193 
194 int
kern_packet_set_service_class(const kern_packet_t ph,const kern_packet_svc_class_t sc)195 kern_packet_set_service_class(const kern_packet_t ph,
196     const kern_packet_svc_class_t sc)
197 {
198 	return __packet_set_service_class(ph, sc);
199 }
200 
201 kern_packet_svc_class_t
kern_packet_get_service_class(const kern_packet_t ph)202 kern_packet_get_service_class(const kern_packet_t ph)
203 {
204 	return __packet_get_service_class(ph);
205 }
206 
207 errno_t
kern_packet_get_service_class_index(const kern_packet_svc_class_t svc,uint32_t * index)208 kern_packet_get_service_class_index(const kern_packet_svc_class_t svc,
209     uint32_t *index)
210 {
211 	if (index == NULL || !KPKT_VALID_SVC(svc)) {
212 		return EINVAL;
213 	}
214 
215 	*index = KPKT_SVCIDX(svc);
216 	return 0;
217 }
218 
219 boolean_t
kern_packet_is_high_priority(const kern_packet_t ph)220 kern_packet_is_high_priority(const kern_packet_t ph)
221 {
222 	uint32_t sc;
223 	boolean_t is_hi_priority;
224 
225 	sc = __packet_get_service_class(ph);
226 
227 	switch (sc) {
228 	case PKT_SC_VI:
229 	case PKT_SC_SIG:
230 	case PKT_SC_VO:
231 	case PKT_SC_CTL:
232 		is_hi_priority = (PKT_ADDR(ph)->pkt_comp_gencnt == 0 ||
233 		    PKT_ADDR(ph)->pkt_comp_gencnt == TCP_ACK_COMPRESSION_DUMMY);
234 		break;
235 
236 	case PKT_SC_BK_SYS:
237 	case PKT_SC_BK:
238 	case PKT_SC_BE:
239 	case PKT_SC_RD:
240 	case PKT_SC_OAM:
241 	case PKT_SC_AV:
242 	case PKT_SC_RV:
243 	default:
244 		is_hi_priority = false;
245 	}
246 	return is_hi_priority;
247 }
248 
249 errno_t
kern_packet_set_traffic_class(const kern_packet_t ph,kern_packet_traffic_class_t tc)250 kern_packet_set_traffic_class(const kern_packet_t ph,
251     kern_packet_traffic_class_t tc)
252 {
253 	return __packet_set_traffic_class(ph, tc);
254 }
255 
256 kern_packet_traffic_class_t
kern_packet_get_traffic_class(const kern_packet_t ph)257 kern_packet_get_traffic_class(const kern_packet_t ph)
258 {
259 	return __packet_get_traffic_class(ph);
260 }
261 
262 errno_t
kern_packet_set_inet_checksum(const kern_packet_t ph,const packet_csum_flags_t flags,const uint16_t start,const uint16_t stuff)263 kern_packet_set_inet_checksum(const kern_packet_t ph,
264     const packet_csum_flags_t flags, const uint16_t start,
265     const uint16_t stuff)
266 {
267 	return __packet_set_inet_checksum(ph, flags, start, stuff, FALSE);
268 }
269 
270 packet_csum_flags_t
kern_packet_get_inet_checksum(const kern_packet_t ph,uint16_t * start,uint16_t * val)271 kern_packet_get_inet_checksum(const kern_packet_t ph, uint16_t *start,
272     uint16_t *val)
273 {
274 	return __packet_get_inet_checksum(ph, start, val, TRUE);
275 }
276 
277 void
kern_packet_set_flow_uuid(const kern_packet_t ph,const uuid_t flow_uuid)278 kern_packet_set_flow_uuid(const kern_packet_t ph, const uuid_t flow_uuid)
279 {
280 	__packet_set_flow_uuid(ph, flow_uuid);
281 }
282 
283 void
kern_packet_get_flow_uuid(const kern_packet_t ph,uuid_t * flow_uuid)284 kern_packet_get_flow_uuid(const kern_packet_t ph, uuid_t *flow_uuid)
285 {
286 	__packet_get_flow_uuid(ph, *flow_uuid);
287 }
288 
289 void
kern_packet_clear_flow_uuid(const kern_packet_t ph)290 kern_packet_clear_flow_uuid(const kern_packet_t ph)
291 {
292 	__packet_clear_flow_uuid(ph);
293 }
294 
295 void
kern_packet_get_euuid(const kern_packet_t ph,uuid_t euuid)296 kern_packet_get_euuid(const kern_packet_t ph, uuid_t euuid)
297 {
298 	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
299 		uuid_copy(euuid, PKT_ADDR(ph)->pkt_policy_euuid);
300 	} else {
301 		uuid_clear(euuid);
302 	}
303 }
304 
305 void
kern_packet_set_policy_id(const kern_packet_t ph,uint32_t policy_id)306 kern_packet_set_policy_id(const kern_packet_t ph, uint32_t policy_id)
307 {
308 	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
309 		PKT_ADDR(ph)->pkt_policy_id = policy_id;
310 	}
311 }
312 
313 uint32_t
kern_packet_get_policy_id(const kern_packet_t ph)314 kern_packet_get_policy_id(const kern_packet_t ph)
315 {
316 	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
317 		return PKT_ADDR(ph)->pkt_policy_id;
318 	} else {
319 		return 0;
320 	}
321 }
322 
323 uint32_t
kern_packet_get_data_length(const kern_packet_t ph)324 kern_packet_get_data_length(const kern_packet_t ph)
325 {
326 	return __packet_get_data_length(ph);
327 }
328 
329 uint32_t
kern_packet_get_buflet_count(const kern_packet_t ph)330 kern_packet_get_buflet_count(const kern_packet_t ph)
331 {
332 	return __packet_get_buflet_count(ph);
333 }
334 
335 kern_buflet_t
kern_packet_get_next_buflet(const kern_packet_t ph,const kern_buflet_t bprev)336 kern_packet_get_next_buflet(const kern_packet_t ph, const kern_buflet_t bprev)
337 {
338 	return __packet_get_next_buflet(ph, bprev);
339 }
340 
341 errno_t
kern_packet_finalize(const kern_packet_t ph)342 kern_packet_finalize(const kern_packet_t ph)
343 {
344 	return __packet_finalize(ph);
345 }
346 
347 kern_packet_idx_t
kern_packet_get_object_index(const kern_packet_t ph)348 kern_packet_get_object_index(const kern_packet_t ph)
349 {
350 	return __packet_get_object_index(ph);
351 }
352 
353 errno_t
kern_packet_get_timestamp(const kern_packet_t ph,uint64_t * ts,boolean_t * valid)354 kern_packet_get_timestamp(const kern_packet_t ph, uint64_t *ts,
355     boolean_t *valid)
356 {
357 	return __packet_get_timestamp(ph, ts, valid);
358 }
359 
360 errno_t
kern_packet_set_timestamp(const kern_packet_t ph,uint64_t ts,boolean_t valid)361 kern_packet_set_timestamp(const kern_packet_t ph, uint64_t ts, boolean_t valid)
362 {
363 	return __packet_set_timestamp(ph, ts, valid);
364 }
365 
366 struct mbuf *
kern_packet_get_mbuf(const kern_packet_t pkt)367 kern_packet_get_mbuf(const kern_packet_t pkt)
368 {
369 	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(pkt);
370 
371 	if ((kpkt->pkt_pflags & PKT_F_MBUF_DATA) != 0) {
372 		return kpkt->pkt_mbuf;
373 	}
374 	return NULL;
375 }
376 
377 errno_t
kern_packet_get_timestamp_requested(const kern_packet_t ph,boolean_t * requested)378 kern_packet_get_timestamp_requested(const kern_packet_t ph,
379     boolean_t *requested)
380 {
381 	return __packet_get_timestamp_requested(ph, requested);
382 }
383 
/*
 * Process transmit completion for a packet: report a non-successful
 * transmit status to the owning channel, and run any TX-completion
 * timestamp callbacks that were requested on the packet.
 */
void
kern_packet_tx_completion(const kern_packet_t ph, ifnet_t ifp)
{
	kern_return_t tx_status;
	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(ph);

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	/* fetch the driver-reported transmit status stored on the packet */
	(void) __packet_get_tx_completion_status(ph, &tx_status);
	if (tx_status != KERN_SUCCESS) {
		/* only failures generate a channel transmit-status event */
		(void) kern_channel_event_transmit_status(ph, ifp);
	}
	if ((kpkt->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) != 0) {
		/* caller asked for completion timestamps; invoke callbacks */
		__packet_perform_tx_completion_callbacks(ph, ifp);
	}
}
399 
400 errno_t
kern_packet_get_tx_completion_status(const kern_packet_t ph,kern_return_t * status)401 kern_packet_get_tx_completion_status(const kern_packet_t ph,
402     kern_return_t *status)
403 {
404 	return __packet_get_tx_completion_status(ph, status);
405 }
406 
407 errno_t
kern_packet_set_tx_completion_status(const kern_packet_t ph,kern_return_t status)408 kern_packet_set_tx_completion_status(const kern_packet_t ph,
409     kern_return_t status)
410 {
411 	return __packet_set_tx_completion_status(ph, status);
412 }
413 
414 void
kern_packet_set_group_start(const kern_packet_t ph)415 kern_packet_set_group_start(const kern_packet_t ph)
416 {
417 	(void) __packet_set_group_start(ph);
418 }
419 
420 boolean_t
kern_packet_get_group_start(const kern_packet_t ph)421 kern_packet_get_group_start(const kern_packet_t ph)
422 {
423 	return __packet_get_group_start(ph);
424 }
425 
426 void
kern_packet_set_group_end(const kern_packet_t ph)427 kern_packet_set_group_end(const kern_packet_t ph)
428 {
429 	(void) __packet_set_group_end(ph);
430 }
431 
432 boolean_t
kern_packet_get_group_end(const kern_packet_t ph)433 kern_packet_get_group_end(const kern_packet_t ph)
434 {
435 	return __packet_get_group_end(ph);
436 }
437 
438 errno_t
kern_packet_get_expire_time(const kern_packet_t ph,uint64_t * ts)439 kern_packet_get_expire_time(const kern_packet_t ph, uint64_t *ts)
440 {
441 	return __packet_get_expire_time(ph, ts);
442 }
443 
444 errno_t
kern_packet_set_expire_time(const kern_packet_t ph,const uint64_t ts)445 kern_packet_set_expire_time(const kern_packet_t ph, const uint64_t ts)
446 {
447 	return __packet_set_expire_time(ph, ts);
448 }
449 
450 errno_t
kern_packet_get_token(const kern_packet_t ph,void * token,uint16_t * len)451 kern_packet_get_token(const kern_packet_t ph, void *token, uint16_t *len)
452 {
453 	return __packet_get_token(ph, token, len);
454 }
455 
456 errno_t
kern_packet_set_token(const kern_packet_t ph,const void * token,const uint16_t len)457 kern_packet_set_token(const kern_packet_t ph, const void *token,
458     const uint16_t len)
459 {
460 	return __packet_set_token(ph, token, len);
461 }
462 
463 errno_t
kern_packet_get_packetid(const kern_packet_t ph,packet_id_t * pktid)464 kern_packet_get_packetid(const kern_packet_t ph, packet_id_t *pktid)
465 {
466 	return __packet_get_packetid(ph, pktid);
467 }
468 
469 errno_t
kern_packet_set_vlan_tag(const kern_packet_t ph,const uint16_t tag,const boolean_t tag_in_pkt)470 kern_packet_set_vlan_tag(const kern_packet_t ph, const uint16_t tag,
471     const boolean_t tag_in_pkt)
472 {
473 	return __packet_set_vlan_tag(ph, tag, tag_in_pkt);
474 }
475 
476 errno_t
kern_packet_get_vlan_tag(const kern_packet_t ph,uint16_t * tag,boolean_t * tag_in_pkt)477 kern_packet_get_vlan_tag(const kern_packet_t ph, uint16_t *tag,
478     boolean_t *tag_in_pkt)
479 {
480 	return __packet_get_vlan_tag(ph, tag, tag_in_pkt);
481 }
482 
483 uint16_t
kern_packet_get_vlan_id(const uint16_t tag)484 kern_packet_get_vlan_id(const uint16_t tag)
485 {
486 	return __packet_get_vlan_id(tag);
487 }
488 
489 uint8_t
kern_packet_get_vlan_priority(const uint16_t tag)490 kern_packet_get_vlan_priority(const uint16_t tag)
491 {
492 	return __packet_get_vlan_priority(tag);
493 }
494 
495 void
kern_packet_set_wake_flag(const kern_packet_t ph)496 kern_packet_set_wake_flag(const kern_packet_t ph)
497 {
498 	return __packet_set_wake_flag(ph);
499 }
500 
501 boolean_t
kern_packet_get_wake_flag(const kern_packet_t ph)502 kern_packet_get_wake_flag(const kern_packet_t ph)
503 {
504 	return __packet_get_wake_flag(ph);
505 }
506 
507 uint32_t
kern_inet_checksum(const void * data,uint32_t len,uint32_t sum0)508 kern_inet_checksum(const void *data, uint32_t len, uint32_t sum0)
509 {
510 	return __packet_cksum(data, len, sum0);
511 }
512 
513 uint32_t
kern_copy_and_inet_checksum(const void * src,void * dst,uint32_t len,uint32_t sum0)514 kern_copy_and_inet_checksum(const void *src, void *dst, uint32_t len,
515     uint32_t sum0)
516 {
517 	uint32_t sum = __packet_copy_and_sum(src, dst, len, sum0);
518 	return __packet_fold_sum_final(sum);
519 }
520 
/*
 * Source packet must be finalized (not dropped); cloned packet does not
 * inherit the finalized flag, or the classified flag, so caller is
 * responsible for finalizing it and classifying it (as needed).
 *
 * Clone ph1 into *ph2 from the same pbufpool.  KPKT_COPY_HEAVY copies
 * buffer contents, AQM/policy metadata, and duplicates any attached
 * mbuf; KPKT_COPY_LIGHT allocates a buffer-less packet and shares the
 * source's buflet by reference (on-demand pools only).  On failure
 * any partially constructed clone is destroyed and *ph2 is reset to 0.
 * skmflag may contain SKMEM_NOSLEEP to request non-blocking allocation.
 */
static int
kern_packet_clone_internal(const kern_packet_t ph1, kern_packet_t *ph2,
    uint32_t skmflag, kern_packet_copy_mode_t mode)
{
	struct kern_pbufpool *pool;
	struct __kern_packet *p1 = SK_PTR_ADDR_KPKT(ph1);
	struct __kern_packet *p2 = NULL;
	struct __kern_buflet *p1_buf, *p2_buf;
	uint16_t bufs_cnt_alloc;
	int m_how;
	int err;

	/* TODO: Add quantum support */
	VERIFY(SK_PTR_TYPE(ph1) == NEXUS_META_TYPE_PACKET);

	/* Source needs to be finalized (not dropped) and with 1 buflet */
	if (__improbable((p1->pkt_qum.qum_qflags & QUM_F_FINALIZED) == 0 ||
	    (p1->pkt_qum.qum_qflags & QUM_F_DROPPED) != 0 ||
	    p1->pkt_bufs_cnt == 0)) {
		return EINVAL;
	}

	/* TODO: Add multi-buflet support */
	VERIFY(p1->pkt_bufs_cnt == 1);

	switch (mode) {
	case KPKT_COPY_HEAVY:
		/*
		 * Allocate a packet with the same number of buffers as that
		 * of the source packet's; this cannot be 0 per check above.
		 */
		bufs_cnt_alloc = p1->pkt_bufs_cnt;
		break;

	case KPKT_COPY_LIGHT:
		/*
		 * Allocate an "empty" packet with no buffers attached; this
		 * will work only on pools marked with "on-demand", which is
		 * the case today for device drivers needing shared buffers
		 * support.
		 *
		 * TODO: We could make this generic and applicable to regular
		 * pools, but it would involve detaching the buffer that comes
		 * attached to the constructed packet; this wouldn't be that
		 * lightweight in nature, but whatever.  In such a case the
		 * number of buffers requested during allocation is the same
		 * as the that of the source packet's.  For now, let it fail
		 * naturally on regular pools, as part of allocation below.
		 *
		 * XXX: This would also fail on quantums as we currently
		 * restrict quantums to have exactly one buffer.
		 */
		bufs_cnt_alloc = 0;
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	*ph2 = 0;
	/* allocate the clone from the same pool backing the source packet */
	pool = __DECONST(struct kern_pbufpool *, SK_PTR_ADDR_KQUM(ph1)->qum_pp);
	if (skmflag & SKMEM_NOSLEEP) {
		err = kern_pbufpool_alloc_nosleep(pool, bufs_cnt_alloc, ph2);
		m_how = M_NOWAIT;
	} else {
		err = kern_pbufpool_alloc(pool, bufs_cnt_alloc, ph2);
		ASSERT(err != ENOMEM);
		m_how = M_WAIT;
	}
	if (__improbable(err != 0)) {
		/* See comments above related to KPKT_COPY_{HEAVY,LIGHT} */
		goto error;
	}
	p2 = SK_PTR_ADDR_KPKT(*ph2);

	/* Copy packet metadata */
	_QUM_COPY(&(p1)->pkt_qum, &(p2)->pkt_qum);
	_PKT_COPY(p1, p2);
	ASSERT(p2->pkt_mbuf == NULL);
	ASSERT(p2->pkt_bufs_max == p1->pkt_bufs_max);

	/* clear trace id */
	p2->pkt_trace_id = 0;
	/* clear finalized and classified bits from clone */
	p2->pkt_qum.qum_qflags &= ~(QUM_F_FINALIZED | QUM_F_FLOW_CLASSIFIED);

	switch (mode) {
	case KPKT_COPY_HEAVY:
		/*
		 * Heavy: Copy buffer contents and extra metadata.
		 */
		ASSERT(p2->pkt_bufs_cnt == p1->pkt_bufs_cnt);
		if (__probable(p1->pkt_bufs_cnt != 0)) {
			uint8_t *saddr, *daddr;
			uint16_t copy_len;
			/*
			 * TODO -- [email protected]
			 * Packets from compat driver could have dlen > dlim
			 * for flowswitch flow compatibility, cleanup when we
			 * make them consistent.
			 */
			PKT_GET_FIRST_BUFLET(p1, p1->pkt_bufs_cnt, p1_buf);
			PKT_GET_FIRST_BUFLET(p2, p2->pkt_bufs_cnt, p2_buf);
			saddr = (void *)p1_buf->buf_addr;
			daddr = (void *)p2_buf->buf_addr;
			/* clamp to dlim; see compat-driver TODO above */
			copy_len = MIN(p1_buf->buf_dlen, p1_buf->buf_dlim);
			if (copy_len != 0) {
				bcopy(saddr, daddr, copy_len);
			}
			*__DECONST(uint16_t *, &p2_buf->buf_dlim) =
			    p1_buf->buf_dlim;
			p2_buf->buf_dlen = p1_buf->buf_dlen;
			p2_buf->buf_doff = p1_buf->buf_doff;
		}

		/* Copy AQM metadata */
		p2->pkt_flowsrc_type = p1->pkt_flowsrc_type;
		p2->pkt_flowsrc_fidx = p1->pkt_flowsrc_fidx;
		_CASSERT((offsetof(struct __flow, flow_src_id) % 8) == 0);
		_UUID_COPY(p2->pkt_flowsrc_id, p1->pkt_flowsrc_id);
		_UUID_COPY(p2->pkt_policy_euuid, p1->pkt_policy_euuid);
		p2->pkt_policy_id = p1->pkt_policy_id;

		p2->pkt_pflags = p1->pkt_pflags;
		if (p1->pkt_pflags & PKT_F_MBUF_DATA) {
			ASSERT(p1->pkt_mbuf != NULL);
			/* duplicate attached mbuf with the chosen wait mode */
			p2->pkt_mbuf = m_dup(p1->pkt_mbuf, m_how);
			if (p2->pkt_mbuf == NULL) {
				KPKT_CLEAR_MBUF_DATA(p2);
				err = ENOBUFS;
				goto error;
			}
		}
		break;

	case KPKT_COPY_LIGHT:
		/*
		 * Lightweight: Duplicate buflet(s) and add refs.
		 */
		ASSERT(p1->pkt_mbuf == NULL);
		ASSERT(p2->pkt_bufs_cnt == 0);
		if (__probable(p1->pkt_bufs_cnt != 0)) {
			PKT_GET_FIRST_BUFLET(p1, p1->pkt_bufs_cnt, p1_buf);
			p2_buf = &p2->pkt_qum_buf;
			*__DECONST(uint16_t *, &p2->pkt_bufs_cnt) =
			    p1->pkt_bufs_cnt;
			/* share the source buffer; bumps its usecnt */
			_KBUF_COPY(p1_buf, p2_buf);
			ASSERT(p2_buf->buf_nbft_addr == 0);
			ASSERT(p2_buf->buf_nbft_idx == OBJ_IDX_NONE);
		}
		ASSERT(p2->pkt_bufs_cnt == p1->pkt_bufs_cnt);
		ASSERT(p2->pkt_bufs_max == p1->pkt_bufs_max);
		ASSERT(err == 0);
		break;
	}

error:
	if (err != 0 && p2 != NULL) {
		uint32_t usecnt = 0;

		ASSERT(p2->pkt_mbuf == NULL);
		if (__probable(mode == KPKT_COPY_LIGHT)) {
			/*
			 * This is undoing what _KBUF_COPY() did earlier,
			 * in case this routine is modified to handle regular
			 * pool (not on-demand), which also decrements the
			 * shared buffer's usecnt.  For regular pool, calling
			 * kern_pubfpool_free() will not yield a call to
			 * destroy the metadata.
			 */
			PKT_GET_FIRST_BUFLET(p2, p2->pkt_bufs_cnt, p2_buf);
			KBUF_DTOR(p2_buf, usecnt);
		}
		kern_pbufpool_free(pool, *ph2);
		*ph2 = 0;
	}

	return err;
}
707 
708 errno_t
kern_packet_clone(const kern_packet_t ph1,kern_packet_t * ph2,kern_packet_copy_mode_t mode)709 kern_packet_clone(const kern_packet_t ph1, kern_packet_t *ph2,
710     kern_packet_copy_mode_t mode)
711 {
712 	return kern_packet_clone_internal(ph1, ph2, 0, mode);
713 }
714 
715 errno_t
kern_packet_clone_nosleep(const kern_packet_t ph1,kern_packet_t * ph2,kern_packet_copy_mode_t mode)716 kern_packet_clone_nosleep(const kern_packet_t ph1, kern_packet_t *ph2,
717     kern_packet_copy_mode_t mode)
718 {
719 	return kern_packet_clone_internal(ph1, ph2, SKMEM_NOSLEEP, mode);
720 }
721 
722 errno_t
kern_packet_add_buflet(const kern_packet_t ph,const kern_buflet_t bprev,const kern_buflet_t bnew)723 kern_packet_add_buflet(const kern_packet_t ph, const kern_buflet_t bprev,
724     const kern_buflet_t bnew)
725 {
726 	return __packet_add_buflet(ph, bprev, bnew);
727 }
728 
729 void
kern_packet_append(const kern_packet_t ph1,const kern_packet_t ph2)730 kern_packet_append(const kern_packet_t ph1, const kern_packet_t ph2)
731 {
732 	/*
733 	 * TODO:
734 	 * Add assert for non-zero ph2 here after changing IOSkywalkFamily
735 	 * to use kern_packet_set_next() for clearing the next pointer.
736 	 */
737 	kern_packet_set_next(ph1, ph2);
738 }
739 
740 kern_packet_t
kern_packet_get_next(const kern_packet_t ph)741 kern_packet_get_next(const kern_packet_t ph)
742 {
743 	struct __kern_packet *p, *next;
744 
745 	p = SK_PTR_ADDR_KPKT(ph);
746 	next = p->pkt_nextpkt;
747 	return next == NULL ? 0 : SK_PKT2PH(next);
748 }
749 
750 void
kern_packet_set_next(const kern_packet_t ph1,const kern_packet_t ph2)751 kern_packet_set_next(const kern_packet_t ph1, const kern_packet_t ph2)
752 {
753 	struct __kern_packet *p1, *p2;
754 
755 	ASSERT(ph1 != 0);
756 	p1 = SK_PTR_ADDR_KPKT(ph1);
757 	p2 = (ph2 == 0 ? NULL : SK_PTR_ADDR_KPKT(ph2));
758 	p1->pkt_nextpkt = p2;
759 }
760 
761 void
kern_packet_set_chain_counts(const kern_packet_t ph,uint32_t count,uint32_t bytes)762 kern_packet_set_chain_counts(const kern_packet_t ph, uint32_t count,
763     uint32_t bytes)
764 {
765 	struct __kern_packet *p;
766 
767 	p = SK_PTR_ADDR_KPKT(ph);
768 	p->pkt_chain_count = count;
769 	p->pkt_chain_bytes = bytes;
770 }
771 
772 void
kern_packet_get_chain_counts(const kern_packet_t ph,uint32_t * count,uint32_t * bytes)773 kern_packet_get_chain_counts(const kern_packet_t ph, uint32_t *count,
774     uint32_t *bytes)
775 {
776 	struct __kern_packet *p;
777 
778 	p = SK_PTR_ADDR_KPKT(ph);
779 	*count = p->pkt_chain_count;
780 	*bytes = p->pkt_chain_bytes;
781 }
782 
783 errno_t
kern_buflet_set_data_offset(const kern_buflet_t buf,const uint16_t doff)784 kern_buflet_set_data_offset(const kern_buflet_t buf, const uint16_t doff)
785 {
786 	return __buflet_set_data_offset(buf, doff);
787 }
788 
789 uint16_t
kern_buflet_get_data_offset(const kern_buflet_t buf)790 kern_buflet_get_data_offset(const kern_buflet_t buf)
791 {
792 	return __buflet_get_data_offset(buf);
793 }
794 
795 errno_t
kern_buflet_set_data_length(const kern_buflet_t buf,const uint16_t dlen)796 kern_buflet_set_data_length(const kern_buflet_t buf, const uint16_t dlen)
797 {
798 	return __buflet_set_data_length(buf, dlen);
799 }
800 
801 uint16_t
kern_buflet_get_data_length(const kern_buflet_t buf)802 kern_buflet_get_data_length(const kern_buflet_t buf)
803 {
804 	return __buflet_get_data_length(buf);
805 }
806 
807 void *
kern_buflet_get_object_address(const kern_buflet_t buf)808 kern_buflet_get_object_address(const kern_buflet_t buf)
809 {
810 	return __buflet_get_object_address(buf);
811 }
812 
813 uint32_t
kern_buflet_get_object_limit(const kern_buflet_t buf)814 kern_buflet_get_object_limit(const kern_buflet_t buf)
815 {
816 	return __buflet_get_object_limit(buf);
817 }
818 
819 void *
kern_buflet_get_data_address(const kern_buflet_t buf)820 kern_buflet_get_data_address(const kern_buflet_t buf)
821 {
822 	return __buflet_get_data_address(buf);
823 }
824 
825 errno_t
kern_buflet_set_data_address(const kern_buflet_t buf,const void * daddr)826 kern_buflet_set_data_address(const kern_buflet_t buf, const void *daddr)
827 {
828 	return __buflet_set_data_address(buf, daddr);
829 }
830 
831 kern_segment_t
kern_buflet_get_object_segment(const kern_buflet_t buf,kern_obj_idx_seg_t * idx)832 kern_buflet_get_object_segment(const kern_buflet_t buf,
833     kern_obj_idx_seg_t *idx)
834 {
835 	return __buflet_get_object_segment(buf, idx);
836 }
837 
838 uint16_t
kern_buflet_get_data_limit(const kern_buflet_t buf)839 kern_buflet_get_data_limit(const kern_buflet_t buf)
840 {
841 	return __buflet_get_data_limit(buf);
842 }
843 
844 errno_t
kern_buflet_set_data_limit(const kern_buflet_t buf,const uint16_t dlim)845 kern_buflet_set_data_limit(const kern_buflet_t buf, const uint16_t dlim)
846 {
847 	return __buflet_set_data_limit(buf, dlim);
848 }
849 
850 packet_trace_id_t
kern_packet_get_trace_id(const kern_packet_t ph)851 kern_packet_get_trace_id(const kern_packet_t ph)
852 {
853 	return __packet_get_trace_id(ph);
854 }
855 
856 void
kern_packet_set_trace_id(const kern_packet_t ph,packet_trace_id_t trace_id)857 kern_packet_set_trace_id(const kern_packet_t ph, packet_trace_id_t trace_id)
858 {
859 	return __packet_set_trace_id(ph, trace_id);
860 }
861 
862 void
kern_packet_trace_event(const kern_packet_t ph,uint32_t event)863 kern_packet_trace_event(const kern_packet_t ph, uint32_t event)
864 {
865 	return __packet_trace_event(ph, event);
866 }
867 
/*
 * Copy up to `len` bytes starting at offset `off` of the packet's data
 * into `out_data`.  Only the packet's FIRST buflet is consulted.
 *
 * NOTE(review): `len` and `off` are each validated against the buflet's
 * data length, but their sum is not; when off + len exceeds the data
 * length the copy is silently truncated to (buflet_len - off) bytes via
 * the MIN() below while still returning 0 -- confirm callers expect
 * this partial-copy-on-success behavior.
 */
errno_t
kern_packet_copy_bytes(kern_packet_t pkt, size_t off, size_t len, void* out_data)
{
	kern_buflet_t buflet = NULL;
	size_t count;
	uint8_t *addr;
	uint32_t buflet_len;

	/* NULL bprev yields the first buflet */
	buflet = __packet_get_next_buflet(pkt, buflet);
	if (buflet == NULL) {
		return EINVAL;
	}
	buflet_len = __buflet_get_data_length(buflet);
	if (len > buflet_len) {
		return EINVAL;
	}
	if (off > buflet_len) {
		return EINVAL;
	}
	addr = __buflet_get_data_address(buflet);
	if (addr == NULL) {
		return EINVAL;
	}
	/* skip the buflet's data offset, then the caller's offset */
	addr += __buflet_get_data_offset(buflet);
	addr += off;
	count = MIN(len, buflet_len - off);
	bcopy((void *) addr, out_data, count);

	return 0;
}
898