xref: /xnu-12377.41.6/bsd/skywalk/packet/packet_kern.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2016-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <skywalk/os_skywalk_private.h>
30 #include <netinet/tcp_var.h>
31 
32 static int kern_packet_clone_internal(const kern_packet_t, kern_packet_t *,
33     uint32_t, kern_packet_copy_mode_t);
34 
35 errno_t
kern_packet_set_headroom(const kern_packet_t ph,const uint8_t headroom)36 kern_packet_set_headroom(const kern_packet_t ph, const uint8_t headroom)
37 {
38 	return __packet_set_headroom(ph, headroom);
39 }
40 
41 uint8_t
kern_packet_get_headroom(const kern_packet_t ph)42 kern_packet_get_headroom(const kern_packet_t ph)
43 {
44 	return __packet_get_headroom(ph);
45 }
46 
47 errno_t
kern_packet_set_link_header_offset(const kern_packet_t ph,const uint8_t off)48 kern_packet_set_link_header_offset(const kern_packet_t ph, const uint8_t off)
49 {
50 	return __packet_set_headroom(ph, off);
51 }
52 
53 uint16_t
kern_packet_get_link_header_offset(const kern_packet_t ph)54 kern_packet_get_link_header_offset(const kern_packet_t ph)
55 {
56 	return __packet_get_headroom(ph);
57 }
58 
59 errno_t
kern_packet_set_link_header_length(const kern_packet_t ph,const uint8_t off)60 kern_packet_set_link_header_length(const kern_packet_t ph, const uint8_t off)
61 {
62 	return __packet_set_link_header_length(ph, off);
63 }
64 
65 uint8_t
kern_packet_get_link_header_length(const kern_packet_t ph)66 kern_packet_get_link_header_length(const kern_packet_t ph)
67 {
68 	return __packet_get_link_header_length(ph);
69 }
70 
71 errno_t
kern_packet_set_link_broadcast(const kern_packet_t ph)72 kern_packet_set_link_broadcast(const kern_packet_t ph)
73 {
74 	return __packet_set_link_broadcast(ph);
75 }
76 
77 boolean_t
kern_packet_get_link_broadcast(const kern_packet_t ph)78 kern_packet_get_link_broadcast(const kern_packet_t ph)
79 {
80 	return __packet_get_link_broadcast(ph);
81 }
82 
83 errno_t
kern_packet_set_link_multicast(const kern_packet_t ph)84 kern_packet_set_link_multicast(const kern_packet_t ph)
85 {
86 	return __packet_set_link_multicast(ph);
87 }
88 
89 errno_t
kern_packet_set_link_ethfcs(const kern_packet_t ph)90 kern_packet_set_link_ethfcs(const kern_packet_t ph)
91 {
92 	return __packet_set_link_ethfcs(ph);
93 }
94 
95 boolean_t
kern_packet_get_link_multicast(const kern_packet_t ph)96 kern_packet_get_link_multicast(const kern_packet_t ph)
97 {
98 	return __packet_get_link_multicast(ph);
99 }
100 
101 boolean_t
kern_packet_get_link_ethfcs(const kern_packet_t ph)102 kern_packet_get_link_ethfcs(const kern_packet_t ph)
103 {
104 	return __packet_get_link_ethfcs(ph);
105 }
106 
/*
 * deprecated -- no effect, use set_link_header_length instead.
 * Retained only for KPI binary compatibility; always succeeds.
 */
errno_t
kern_packet_set_network_header_offset(const kern_packet_t ph,
    const uint16_t off)
{
#pragma unused(ph, off)
	return 0;
}

/*
 * deprecated -- use get_link_header_length instead.
 * Synthesizes the legacy value as headroom + link-header length.
 */
uint16_t
kern_packet_get_network_header_offset(const kern_packet_t ph)
{
	return (uint16_t)__packet_get_headroom(ph) +
	       (uint16_t)__packet_get_link_header_length(ph);
}

/* deprecated -- no effect; retained for KPI compatibility. */
errno_t
kern_packet_set_transport_header_offset(const kern_packet_t ph,
    const uint16_t off)
{
#pragma unused(ph, off)
	return 0;
}

/* deprecated -- always returns 0; retained for KPI compatibility. */
uint16_t
kern_packet_get_transport_header_offset(const kern_packet_t ph)
{
#pragma unused(ph)
	return 0;
}
140 
/*
 * Transport-layer hint getters; each is a thin shim over the private
 * __packet_* accessor of the same name.
 */
boolean_t
kern_packet_get_transport_traffic_background(const kern_packet_t ph)
{
	return __packet_get_transport_traffic_background(ph);
}

boolean_t
kern_packet_get_transport_traffic_realtime(const kern_packet_t ph)
{
	return __packet_get_transport_traffic_realtime(ph);
}

boolean_t
kern_packet_get_transport_retransmit(const kern_packet_t ph)
{
	return __packet_get_transport_retransmit(ph);
}

boolean_t
kern_packet_get_transport_new_flow(const kern_packet_t ph)
{
	return __packet_get_transport_new_flow(ph);
}

boolean_t
kern_packet_get_transport_last_packet(const kern_packet_t ph)
{
	return __packet_get_transport_last_packet(ph);
}
170 
/* Set the packet's service class; returns nonzero errno on failure. */
int
kern_packet_set_service_class(const kern_packet_t ph,
    const kern_packet_svc_class_t sc)
{
	return __packet_set_service_class(ph, sc);
}

kern_packet_svc_class_t
kern_packet_get_service_class(const kern_packet_t ph)
{
	return __packet_get_service_class(ph);
}

/* Record the ACK-compression generation count on the packet. */
errno_t
kern_packet_set_compression_generation_count(const kern_packet_t ph,
    uint32_t gencnt)
{
	return __packet_set_comp_gencnt(ph, gencnt);
}

errno_t
kern_packet_get_compression_generation_count(const kern_packet_t ph, uint32_t *pgencnt)
{
	return __packet_get_comp_gencnt(ph, pgencnt);
}

/*
 * Translate a service class value to its dense index via KPKT_SVCIDX().
 * Returns EINVAL for a NULL output pointer or an invalid service class.
 */
errno_t
kern_packet_get_service_class_index(const kern_packet_svc_class_t svc,
    uint32_t *index)
{
	if (index == NULL || !KPKT_VALID_SVC(svc)) {
		return EINVAL;
	}

	*index = KPKT_SVCIDX(svc);
	return 0;
}
208 
/*
 * Return TRUE if the packet should be treated as high priority: it must
 * belong to one of the latency-sensitive service classes (VI, SIG, VO,
 * CTL) and its compression generation count must be either unset (0) or
 * the TCP_ACK_COMPRESSION_DUMMY sentinel.  All other service classes are
 * never high priority.
 */
boolean_t
kern_packet_is_high_priority(const kern_packet_t ph)
{
	uint32_t sc;
	boolean_t is_hi_priority;

	sc = __packet_get_service_class(ph);

	switch (sc) {
	case PKT_SC_VI:
	case PKT_SC_SIG:
	case PKT_SC_VO:
	case PKT_SC_CTL:
		is_hi_priority = (PKT_ADDR(ph)->pkt_comp_gencnt == 0 ||
		    PKT_ADDR(ph)->pkt_comp_gencnt == TCP_ACK_COMPRESSION_DUMMY);
		break;

	case PKT_SC_BK_SYS:
	case PKT_SC_BK:
	case PKT_SC_BE:
	case PKT_SC_RD:
	case PKT_SC_OAM:
	case PKT_SC_AV:
	case PKT_SC_RV:
	default:
		is_hi_priority = false;
	}
	return is_hi_priority;
}
238 
/* Set the packet's traffic class. */
errno_t
kern_packet_set_traffic_class(const kern_packet_t ph,
    kern_packet_traffic_class_t tc)
{
	return __packet_set_traffic_class(ph, tc);
}

kern_packet_traffic_class_t
kern_packet_get_traffic_class(const kern_packet_t ph)
{
	return __packet_get_traffic_class(ph);
}

/*
 * Record Internet checksum offload metadata (flags plus start/stuff
 * offsets); "tx" selects the transmit vs. receive variant.
 */
errno_t
kern_packet_set_inet_checksum(const kern_packet_t ph,
    const packet_csum_flags_t flags, const uint16_t start,
    const uint16_t stuff, const boolean_t tx)
{
	return __packet_set_inet_checksum(ph, flags, start, stuff, tx);
}

packet_csum_flags_t
kern_packet_get_inet_checksum(const kern_packet_t ph, uint16_t *start,
    uint16_t *val, const boolean_t tx)
{
	return __packet_get_inet_checksum(ph, start, val, tx);
}
266 
/* Associate a flow UUID with the packet. */
void
kern_packet_set_flow_uuid(const kern_packet_t ph, const uuid_t flow_uuid)
{
	__packet_set_flow_uuid(ph, flow_uuid);
}

/* Copy the packet's flow UUID into *flow_uuid. */
void
kern_packet_get_flow_uuid(const kern_packet_t ph, uuid_t *flow_uuid)
{
	__packet_get_flow_uuid(ph, *flow_uuid);
}

void
kern_packet_clear_flow_uuid(const kern_packet_t ph)
{
	__packet_clear_flow_uuid(ph);
}

/* Copy the packet's policy effective UUID (pkt_policy_euuid) out. */
void
kern_packet_get_euuid(const kern_packet_t ph, uuid_t euuid)
{
	uuid_copy(euuid, PKT_ADDR(ph)->pkt_policy_euuid);
}

/* Direct accessors for the necp policy id fields on the packet metadata. */
void
kern_packet_set_policy_id(const kern_packet_t ph, uint32_t policy_id)
{
	PKT_ADDR(ph)->pkt_policy_id = policy_id;
}

uint32_t
kern_packet_get_policy_id(const kern_packet_t ph)
{
	return PKT_ADDR(ph)->pkt_policy_id;
}

void
kern_packet_set_skip_policy_id(const kern_packet_t ph, uint32_t skip_policy_id)
{
	PKT_ADDR(ph)->pkt_skip_policy_id = skip_policy_id;
}

uint32_t
kern_packet_get_skip_policy_id(const kern_packet_t ph)
{
	return PKT_ADDR(ph)->pkt_skip_policy_id;
}
314 
/* Total data length of the packet (across its buflets). */
uint32_t
kern_packet_get_data_length(const kern_packet_t ph)
{
	return __packet_get_data_length(ph);
}

uint32_t
kern_packet_get_buflet_count(const kern_packet_t ph)
{
	return __packet_get_buflet_count(ph);
}

/*
 * Iterate the packet's buflet chain; pass NULL for bprev to obtain the
 * first buflet.
 */
kern_buflet_t
kern_packet_get_next_buflet(const kern_packet_t ph, const kern_buflet_t bprev)
{
	return __packet_get_next_buflet(ph, bprev);
}

/* Finalize the packet, making it ready for hand-off. */
errno_t
kern_packet_finalize(const kern_packet_t ph)
{
	return __packet_finalize(ph);
}

kern_packet_idx_t
kern_packet_get_object_index(const kern_packet_t ph)
{
	return __packet_get_object_index(ph);
}

/* Retrieve the packet timestamp; *valid reports whether it is set. */
errno_t
kern_packet_get_timestamp(const kern_packet_t ph, uint64_t *ts,
    boolean_t *valid)
{
	return __packet_get_timestamp(ph, ts, valid);
}

errno_t
kern_packet_set_timestamp(const kern_packet_t ph, uint64_t ts, boolean_t valid)
{
	return __packet_set_timestamp(ph, ts, valid);
}
357 
/*
 * Return the mbuf attached to the packet, or NULL if the packet does not
 * carry mbuf data (PKT_F_MBUF_DATA not set).  Ownership of the mbuf is
 * not transferred.
 */
struct mbuf *
kern_packet_get_mbuf(const kern_packet_t pkt)
{
	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(pkt);

	if ((kpkt->pkt_pflags & PKT_F_MBUF_DATA) != 0) {
		return kpkt->pkt_mbuf;
	}
	return NULL;
}
368 
/* Report whether a transmit-completion timestamp was requested. */
errno_t
kern_packet_get_timestamp_requested(const kern_packet_t ph,
    boolean_t *requested)
{
	return __packet_get_timestamp_requested(ph, requested);
}

/*
 * Process transmit completion for a packet on interface ifp: deliver the
 * transmit-status channel event, then run any registered completion
 * timestamp callbacks if PKT_F_TX_COMPL_TS_REQ is set.
 */
void
kern_packet_tx_completion(const kern_packet_t ph, ifnet_t ifp)
{
	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(ph);

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	/*
	 * handling of transmit completion events.
	 */
	(void) kern_channel_event_transmit_status_with_packet(ph, ifp);

	/*
	 * handling of transmit completion timestamp request callbacks.
	 */
	if ((kpkt->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) != 0) {
		__packet_perform_tx_completion_callbacks(ph, ifp);
	}
}

errno_t
kern_packet_get_tx_completion_status(const kern_packet_t ph,
    kern_return_t *status)
{
	return __packet_get_tx_completion_status(ph, status);
}

errno_t
kern_packet_set_tx_completion_status(const kern_packet_t ph,
    kern_return_t status)
{
	return __packet_set_tx_completion_status(ph, status);
}
408 
/* Mark this packet as the first of a group; return value discarded. */
void
kern_packet_set_group_start(const kern_packet_t ph)
{
	(void) __packet_set_group_start(ph);
}

boolean_t
kern_packet_get_group_start(const kern_packet_t ph)
{
	return __packet_get_group_start(ph);
}

/* Mark this packet as the last of a group; return value discarded. */
void
kern_packet_set_group_end(const kern_packet_t ph)
{
	(void) __packet_set_group_end(ph);
}

boolean_t
kern_packet_get_group_end(const kern_packet_t ph)
{
	return __packet_get_group_end(ph);
}

/* Accessors for the packet's expiration deadline. */
errno_t
kern_packet_get_expire_time(const kern_packet_t ph, uint64_t *ts)
{
	return __packet_get_expire_time(ph, ts);
}

errno_t
kern_packet_set_expire_time(const kern_packet_t ph, const uint64_t ts)
{
	return __packet_set_expire_time(ph, ts);
}

/* Accessors for the action taken when the packet expires. */
errno_t
kern_packet_get_expiry_action(const kern_packet_t ph, packet_expiry_action_t *pea)
{
	return __packet_get_expiry_action(ph, pea);
}

errno_t
kern_packet_set_expiry_action(const kern_packet_t ph, packet_expiry_action_t pea)
{
	return __packet_set_expiry_action(ph, pea);
}
456 
/*
 * Copy the packet's opaque token into "token"; on input *len is the
 * buffer capacity, on output the token length.
 */
errno_t
kern_packet_get_token(const kern_packet_t ph, void *__sized_by(*len)token, uint16_t *len)
{
	return __packet_get_token(ph, token, len);
}

/* Attach an opaque token of "len" bytes to the packet. */
errno_t
kern_packet_set_token(const kern_packet_t ph, const void *__sized_by(len)token,
    const uint16_t len)
{
	return __packet_set_token(ph, token, len);
}

errno_t
kern_packet_get_packetid(const kern_packet_t ph, packet_id_t *pktid)
{
	return __packet_get_packetid(ph, pktid);
}

/* Accessors for the packet's 802.1Q VLAN tag. */
errno_t
kern_packet_set_vlan_tag(const kern_packet_t ph, const uint16_t tag)
{
	return __packet_set_vlan_tag(ph, tag);
}

errno_t
kern_packet_get_vlan_tag(const kern_packet_t ph, uint16_t *tag)
{
	return __packet_get_vlan_tag(ph, tag);
}

/* Extract the VLAN id field from a raw tag value. */
uint16_t
kern_packet_get_vlan_id(const uint16_t tag)
{
	return __packet_get_vlan_id(tag);
}

/* Extract the priority (PCP) field from a raw tag value. */
uint8_t
kern_packet_get_vlan_priority(const uint16_t tag)
{
	return __packet_get_vlan_priority(tag);
}

errno_t
kern_packet_get_app_metadata(const kern_packet_t ph,
    packet_app_metadata_type_t *app_type, uint8_t *app_metadata)
{
	return __packet_get_app_metadata(ph, app_type, app_metadata);
}
506 
/* Flag the packet as one that woke the system. */
void
kern_packet_set_wake_flag(const kern_packet_t ph)
{
	return __packet_set_wake_flag(ph);
}

boolean_t
kern_packet_get_wake_flag(const kern_packet_t ph)
{
	return __packet_get_wake_flag(ph);
}

/* Accessors for the ULPN flag on the packet. */
void
kern_packet_set_ulpn_flag(const kern_packet_t ph)
{
	return __packet_set_ulpn_flag(ph);
}

boolean_t
kern_packet_get_ulpn_flag(const kern_packet_t ph)
{
	return __packet_get_ulpn_flag(ph);
}

boolean_t
kern_packet_get_lpw_flag(const kern_packet_t ph)
{
	return __packet_get_lpw_flag(ph);
}

/* Internet checksum over "len" bytes of data, seeded with sum0. */
uint32_t
kern_inet_checksum(const void *data, uint32_t len, uint32_t sum0)
{
	return __packet_cksum(data, len, sum0);
}

/*
 * Copy "len" bytes from src to dst while accumulating the Internet
 * checksum (seeded with sum0), then fold the result to its final form.
 */
uint32_t
kern_copy_and_inet_checksum(const void *__sized_by(len) src, void *__sized_by(len) dst,
    uint32_t len, uint32_t sum0)
{
	uint32_t sum = __packet_copy_and_sum(src, dst, len, sum0);
	return __packet_fold_sum_final(sum);
}
550 
/*
 * Source packet must be finalized (not dropped); cloned packet does not
 * inherit the finalized flag, or the classified flag, so caller is
 * responsible for finalizing it and classifying it (as needed).
 *
 * mode selects the clone depth: KPKT_COPY_HEAVY duplicates buffer
 * contents (and any attached mbuf via m_dup()), while KPKT_COPY_LIGHT
 * shares the source's buflet by reference (_KBUF_COPY).  skmflag may
 * carry SKMEM_NOSLEEP to request non-blocking allocation.  On success
 * *ph2 holds the clone; on failure *ph2 is 0 and an errno is returned.
 */
static int
kern_packet_clone_internal(const kern_packet_t ph1, kern_packet_t *ph2,
    uint32_t skmflag, kern_packet_copy_mode_t mode)
{
	struct kern_pbufpool *pool;
	struct __kern_packet *p1 = SK_PTR_ADDR_KPKT(ph1);
	struct __kern_packet *p2 = NULL;
	struct __kern_buflet *p1_buf, *p2_buf;
	uint16_t bufs_cnt_alloc;
	int m_how;	/* M_WAIT or M_NOWAIT, for the m_dup() below */
	int err;

	/* TODO: Add quantum support */

	/* Source needs to be finalized (not dropped) and with 1 buflet */
	if ((p1->pkt_qum.qum_qflags & QUM_F_DROPPED) != 0 ||
	    p1->pkt_bufs_cnt == 0) {
		return EINVAL;
	}

	/* TODO: Add multi-buflet support */
	VERIFY(p1->pkt_bufs_cnt == 1);

	switch (mode) {
	case KPKT_COPY_HEAVY:
		/*
		 * Allocate a packet with the same number of buffers as that
		 * of the source packet's; this cannot be 0 per check above.
		 */
		bufs_cnt_alloc = p1->pkt_bufs_cnt;
		break;

	case KPKT_COPY_LIGHT:
		/*
		 * Allocate an "empty" packet with no buffers attached; this
		 * will work only on pools marked with "on-demand", which is
		 * the case today for device drivers needing shared buffers
		 * support.
		 *
		 * TODO: We could make this generic and applicable to regular
		 * pools, but it would involve detaching the buffer that comes
		 * attached to the constructed packet; this wouldn't be that
		 * lightweight in nature, but whatever.  In such a case the
		 * number of buffers requested during allocation is the same
		 * as the that of the source packet's.  For now, let it fail
		 * naturally on regular pools, as part of allocation below.
		 *
		 * XXX: This would also fail on quantums as we currently
		 * restrict quantums to have exactly one buffer.
		 */
		bufs_cnt_alloc = 0;
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	*ph2 = 0;
	/* clone is allocated from the same pool as the source packet */
	pool = __DECONST(struct kern_pbufpool *, SK_PTR_ADDR_KQUM(ph1)->qum_pp);
	if (skmflag & SKMEM_NOSLEEP) {
		err = kern_pbufpool_alloc_nosleep(pool, bufs_cnt_alloc, ph2);
		m_how = M_NOWAIT;
	} else {
		err = kern_pbufpool_alloc(pool, bufs_cnt_alloc, ph2);
		ASSERT(err != ENOMEM);
		m_how = M_WAIT;
	}
	if (__improbable(err != 0)) {
		/* See comments above related to KPKT_COPY_{HEAVY,LIGHT} */
		goto error;
	}
	p2 = SK_PTR_ADDR_KPKT(*ph2);

	/* Copy packet metadata */
	_QUM_COPY(&(p1)->pkt_qum, &(p2)->pkt_qum);
	_PKT_COPY(p1, p2);
	ASSERT(p2->pkt_mbuf == NULL);
	ASSERT(p2->pkt_bufs_max == p1->pkt_bufs_max);

	/* clear trace id */
	p2->pkt_trace_id = 0;
	/* clear finalized and classified bits from clone */
	p2->pkt_qum.qum_qflags &= ~(QUM_F_FINALIZED | QUM_F_FLOW_CLASSIFIED);

	switch (mode) {
	case KPKT_COPY_HEAVY:
		/*
		 * Heavy: Copy buffer contents and extra metadata.
		 */
		ASSERT(p2->pkt_bufs_cnt == p1->pkt_bufs_cnt);
		if (__probable(p1->pkt_bufs_cnt != 0)) {
			uint8_t *saddr, *daddr;
			uint32_t copy_len;
			/*
			 * TODO -- [email protected]
			 * Packets from compat driver could have dlen > dlim
			 * for flowswitch flow compatibility, cleanup when we
			 * make them consistent.
			 */
			PKT_GET_FIRST_BUFLET(p1, p1->pkt_bufs_cnt, p1_buf);
			PKT_GET_FIRST_BUFLET(p2, p2->pkt_bufs_cnt, p2_buf);
			/*
			 * -fbounds-safety: buf_addr is of type
			 * mach_vm_address_t, and it's much easier to forge it
			 * here than chagne the type to a pointer in
			 * struct __buflet. Both saddr and daddr are only used
			 * for bcopy with copy_len as length, so the size of the
			 * forge is copy_len.
			 */
			copy_len = MIN(p1_buf->buf_dlen, p1_buf->buf_dlim);
			saddr = __unsafe_forge_bidi_indexable(void *,
			    p1_buf->buf_addr, copy_len);
			daddr = __unsafe_forge_bidi_indexable(void *,
			    p2_buf->buf_addr, copy_len);
			if (copy_len != 0) {
				bcopy(saddr, daddr, copy_len);
			}
			*__DECONST(uint32_t *, &p2_buf->buf_dlim) =
			    p1_buf->buf_dlim;
			p2_buf->buf_dlen = p1_buf->buf_dlen;
			p2_buf->buf_doff = p1_buf->buf_doff;
		}

		/* Copy AQM metadata */
		p2->pkt_flowsrc_type = p1->pkt_flowsrc_type;
		p2->pkt_flowsrc_fidx = p1->pkt_flowsrc_fidx;
		static_assert((offsetof(struct __flow, flow_src_id) % 8) == 0);
		_UUID_COPY(p2->pkt_flowsrc_id, p1->pkt_flowsrc_id);
		_UUID_COPY(p2->pkt_policy_euuid, p1->pkt_policy_euuid);
		p2->pkt_policy_id = p1->pkt_policy_id;
		p2->pkt_skip_policy_id = p1->pkt_skip_policy_id;

		p2->pkt_pflags = p1->pkt_pflags;
		if (p1->pkt_pflags & PKT_F_MBUF_DATA) {
			ASSERT(p1->pkt_mbuf != NULL);
			/* deep-copy the attached mbuf chain as well */
			p2->pkt_mbuf = m_dup(p1->pkt_mbuf, m_how);
			if (p2->pkt_mbuf == NULL) {
				KPKT_CLEAR_MBUF_DATA(p2);
				err = ENOBUFS;
				goto error;
			}
		}
		break;

	case KPKT_COPY_LIGHT:
		/*
		 * Lightweight: Duplicate buflet(s) and add refs.
		 */
		ASSERT(p1->pkt_mbuf == NULL);
		ASSERT(p2->pkt_bufs_cnt == 0);
		if (__probable(p1->pkt_bufs_cnt != 0)) {
			PKT_GET_FIRST_BUFLET(p1, p1->pkt_bufs_cnt, p1_buf);
			p2_buf = &p2->pkt_qum_buf;
			*__DECONST(uint16_t *, &p2->pkt_bufs_cnt) =
			    p1->pkt_bufs_cnt;
			_KBUF_COPY(p1_buf, p2_buf);
			ASSERT(p2_buf->buf_nbft_addr == 0);
			ASSERT(p2_buf->buf_nbft_idx == OBJ_IDX_NONE);
		}
		ASSERT(p2->pkt_bufs_cnt == p1->pkt_bufs_cnt);
		ASSERT(p2->pkt_bufs_max == p1->pkt_bufs_max);
		ASSERT(err == 0);
		break;
	}

error:
	/* unwind: release the partially constructed clone, if any */
	if (err != 0 && p2 != NULL) {
		uint32_t usecnt = 0;

		ASSERT(p2->pkt_mbuf == NULL);
		if (__probable(mode == KPKT_COPY_LIGHT)) {
			/*
			 * This is undoing what _KBUF_COPY() did earlier,
			 * in case this routine is modified to handle regular
			 * pool (not on-demand), which also decrements the
			 * shared buffer's usecnt.  For regular pool, calling
			 * kern_pubfpool_free() will not yield a call to
			 * destroy the metadata.
			 */
			PKT_GET_FIRST_BUFLET(p2, p2->pkt_bufs_cnt, p2_buf);
			KBUF_DTOR(p2_buf, usecnt);
		}
		kern_pbufpool_free(pool, *ph2);
		*ph2 = 0;
	}

	return err;
}
746 
/* Clone a packet; may block on allocation (see clone_nosleep variant). */
errno_t
kern_packet_clone(const kern_packet_t ph1, kern_packet_t *ph2,
    kern_packet_copy_mode_t mode)
{
	return kern_packet_clone_internal(ph1, ph2, 0, mode);
}

/* Non-blocking clone: allocation uses SKMEM_NOSLEEP and may fail. */
errno_t
kern_packet_clone_nosleep(const kern_packet_t ph1, kern_packet_t *ph2,
    kern_packet_copy_mode_t mode)
{
	return kern_packet_clone_internal(ph1, ph2, SKMEM_NOSLEEP, mode);
}
760 
/* Insert buflet bnew into the packet's buflet chain after bprev. */
errno_t
kern_packet_add_buflet(const kern_packet_t ph, const kern_buflet_t bprev,
    const kern_buflet_t bnew)
{
	return __packet_add_buflet(ph, bprev, bnew);
}

/* Append ph2 after ph1 by linking the next-packet pointer. */
void
kern_packet_append(const kern_packet_t ph1, const kern_packet_t ph2)
{
	/*
	 * TODO:
	 * Add assert for non-zero ph2 here after changing IOSkywalkFamily
	 * to use kern_packet_set_next() for clearing the next pointer.
	 */
	kern_packet_set_next(ph1, ph2);
}

/* Return the next packet in the chain, or 0 at the end of the chain. */
kern_packet_t
kern_packet_get_next(const kern_packet_t ph)
{
	struct __kern_packet *p, *next;

	p = SK_PTR_ADDR_KPKT(ph);
	next = p->pkt_nextpkt;
	return next == NULL ? 0 : SK_PKT2PH(next);
}
788 
/* Link ph2 as the packet following ph1; ph2 may be 0 to clear the link. */
void
kern_packet_set_next(const kern_packet_t ph1, const kern_packet_t ph2)
{
	struct __kern_packet *p1, *p2;

	ASSERT(ph1 != 0);
	p1 = SK_PTR_ADDR_KPKT(ph1);
	p2 = (ph2 == 0 ? NULL : SK_PTR_ADDR_KPKT(ph2));
	p1->pkt_nextpkt = p2;
}

/* Record chain totals (packet count and byte count) on the lead packet. */
void
kern_packet_set_chain_counts(const kern_packet_t ph, uint32_t count,
    uint32_t bytes)
{
	struct __kern_packet *p;

	p = SK_PTR_ADDR_KPKT(ph);
	p->pkt_chain_count = count;
	p->pkt_chain_bytes = bytes;
}

/* Read back the chain totals recorded by set_chain_counts(). */
void
kern_packet_get_chain_counts(const kern_packet_t ph, uint32_t *count,
    uint32_t *bytes)
{
	struct __kern_packet *p;

	p = SK_PTR_ADDR_KPKT(ph);
	*count = p->pkt_chain_count;
	*bytes = p->pkt_chain_bytes;
}
821 
/*
 * Buflet accessors: thin exported shims over the private __buflet_*
 * inlines for the data offset/length/limit, addresses, and backing
 * object/segment of a single buflet.
 */
errno_t
kern_buflet_set_data_offset(const kern_buflet_t buf, const uint32_t doff)
{
	return __buflet_set_data_offset(buf, doff);
}

uint32_t
kern_buflet_get_data_offset(const kern_buflet_t buf)
{
	return __buflet_get_data_offset(buf);
}

errno_t
kern_buflet_set_data_length(const kern_buflet_t buf, const uint32_t dlen)
{
	return __buflet_set_data_length(buf, dlen);
}

uint32_t
kern_buflet_get_data_length(const kern_buflet_t buf)
{
	return __buflet_get_data_length(buf);
}

/* Base address of the buflet's backing buffer object. */
void *
kern_buflet_get_object_address(const kern_buflet_t buf)
{
	return __buflet_get_object_address(buf);
}

uint32_t
kern_buflet_get_object_limit(const kern_buflet_t buf)
{
	return __buflet_get_object_limit(buf);
}

/* Start address of the buflet's data region. */
void *
kern_buflet_get_data_address(const kern_buflet_t buf)
{
	return __buflet_get_data_address(buf);
}

errno_t
kern_buflet_set_data_address(const kern_buflet_t buf, const void *daddr)
{
	return __buflet_set_data_address(buf, daddr);
}

errno_t
kern_buflet_set_buffer_offset(const kern_buflet_t buf, const uint32_t off)
{
	return __buflet_set_buffer_offset(buf, off);
}

/* Segment backing the buflet's object; *idx receives the object index. */
kern_segment_t
kern_buflet_get_object_segment(const kern_buflet_t buf,
    kern_obj_idx_seg_t *idx)
{
	return __buflet_get_object_segment(buf, idx);
}

uint32_t
kern_buflet_get_data_limit(const kern_buflet_t buf)
{
	return __buflet_get_data_limit(buf);
}

errno_t
kern_buflet_set_data_limit(const kern_buflet_t buf, const uint32_t dlim)
{
	return __buflet_set_data_limit(buf, dlim);
}
894 
/* Accessors for the packet's trace id, plus trace event emission. */
packet_trace_id_t
kern_packet_get_trace_id(const kern_packet_t ph)
{
	return __packet_get_trace_id(ph);
}

void
kern_packet_set_trace_id(const kern_packet_t ph, packet_trace_id_t trace_id)
{
	return __packet_set_trace_id(ph, trace_id);
}

/* Emit a trace event for the packet. */
void
kern_packet_trace_event(const kern_packet_t ph, uint32_t event)
{
	return __packet_trace_event(ph, event);
}
912 
/*
 * Copy up to `len` bytes of packet data starting at byte offset `off`
 * into `out_data`.
 *
 * Only the packet's FIRST buflet is consulted; data residing in any
 * subsequent buflet of a multi-buflet packet is never copied.
 *
 * Returns 0 on success; EINVAL if the packet has no buflet, if `len`
 * or `off` individually exceed the buflet's data length, or if the
 * buflet has no data address.
 *
 * NOTE(review): `len` and `off` are validated independently, so when
 * off + len > buflet_len the copy is silently truncated to
 * buflet_len - off bytes and 0 is still returned — confirm callers
 * expect this partial-copy behavior.
 */
errno_t
kern_packet_copy_bytes(kern_packet_t pkt, size_t off, size_t len,
    void *__sized_by(len)out_data)
{
	kern_buflet_t buflet = NULL;
	size_t count;
	uint8_t *addr;
	uint32_t buflet_len;

	/* Passing NULL as the iterator yields the packet's first buflet. */
	buflet = __packet_get_next_buflet(pkt, buflet);
	if (buflet == NULL) {
		return EINVAL;
	}
	buflet_len = __buflet_get_data_length(buflet);
	if (len > buflet_len) {
		return EINVAL;
	}
	if (off > buflet_len) {
		return EINVAL;
	}
	addr = __buflet_get_data_address(buflet);
	if (addr == NULL) {
		return EINVAL;
	}
	/* Skip the buflet's own data offset, then advance to the caller's offset. */
	addr += __buflet_get_data_offset(buflet);
	addr += off;
	/* Clamp to the bytes actually available past `off` (may be < len). */
	count = MIN(len, buflet_len - off);
	bcopy((void *) addr, out_data, count);

	return 0;
}
944 
945 errno_t
kern_packet_set_fpd_sequence_number(__unused const kern_packet_t ph,__unused uint32_t seq_num)946 kern_packet_set_fpd_sequence_number(__unused const kern_packet_t ph, __unused uint32_t seq_num)
947 {
948 	return 0;
949 }
950 
951 errno_t
kern_packet_set_fpd_context_id(__unused const kern_packet_t ph,__unused uint16_t ctx_id)952 kern_packet_set_fpd_context_id(__unused const kern_packet_t ph, __unused uint16_t ctx_id)
953 {
954 	return 0;
955 }
956 
957 errno_t
kern_packet_set_fpd_command(__unused const kern_packet_t ph,__unused uint8_t cmd)958 kern_packet_set_fpd_command(__unused const kern_packet_t ph, __unused uint8_t cmd)
959 {
960 	return 0;
961 }
962 
963 errno_t
kern_packet_get_flowid(const kern_packet_t ph,packet_flowid_t * pflowid)964 kern_packet_get_flowid(const kern_packet_t ph, packet_flowid_t *pflowid)
965 {
966 	return __packet_get_flowid(ph, pflowid);
967 }
968 
969 void
kern_packet_set_trace_tag(const kern_packet_t ph,packet_trace_tag_t tag)970 kern_packet_set_trace_tag(const kern_packet_t ph, packet_trace_tag_t tag)
971 {
972 	__packet_set_trace_tag(ph, tag);
973 }
974 
975 packet_trace_tag_t
kern_packet_get_trace_tag(const kern_packet_t ph)976 kern_packet_get_trace_tag(const kern_packet_t ph)
977 {
978 	return __packet_get_trace_tag(ph);
979 }
980 
981 errno_t
kern_packet_get_tx_nexus_port_id(const kern_packet_t ph,uint32_t * nx_port_id)982 kern_packet_get_tx_nexus_port_id(const kern_packet_t ph, uint32_t *nx_port_id)
983 {
984 	return __packet_get_tx_nx_port_id(ph, nx_port_id);
985 }
986 
987 uint16_t
kern_packet_get_protocol_segment_size(const kern_packet_t ph)988 kern_packet_get_protocol_segment_size(const kern_packet_t ph)
989 {
990 	return __packet_get_protocol_segment_size(ph);
991 }
992 
993 void
kern_packet_set_segment_count(const kern_packet_t ph,uint8_t segcount)994 kern_packet_set_segment_count(const kern_packet_t ph, uint8_t segcount)
995 {
996 	__packet_set_segment_count(ph, segcount);
997 }
998 
999 void *
kern_packet_get_priv(const kern_packet_t ph)1000 kern_packet_get_priv(const kern_packet_t ph)
1001 {
1002 	return __packet_get_priv(ph);
1003 }
1004 
1005 void
kern_packet_set_priv(const kern_packet_t ph,void * priv)1006 kern_packet_set_priv(const kern_packet_t ph, void *priv)
1007 {
1008 	return __packet_set_priv(ph, priv);
1009 }
1010 
1011 void
kern_packet_get_tso_flags(const kern_packet_t ph,packet_tso_flags_t * flags)1012 kern_packet_get_tso_flags(const kern_packet_t ph, packet_tso_flags_t *flags)
1013 {
1014 	return __packet_get_tso_flags(ph, flags);
1015 }
1016 
/*
 * Check whether the packet's TX expiration deadline has passed and, if
 * so and an expiry action is configured, deliver a "transmit expired"
 * channel event to the nexus port that transmitted it.
 *
 * Returns 0 both on successful notification and in the benign
 * early-out cases (no expire time set, no expiry action set, action is
 * NONE, or not yet expired); EINVAL for a NULL ifp; otherwise the
 * error from the failing lookup or from event delivery.
 */
errno_t
kern_packet_check_for_expiry_and_notify(
	const kern_packet_t ph, ifnet_t ifp, uint16_t origin, uint16_t status)
{
	errno_t err = 0;
	uint32_t nx_port_id = 0;
	packet_expiry_action_t exp_action = PACKET_EXPIRY_ACTION_NONE;
	os_channel_event_packet_transmit_expired_t exp_notif = {0};

	if (__improbable(!ifp)) {
		return EINVAL;
	}

	/*
	 * __probable(err): the branch hint marks the error path as the
	 * common case — most packets carry no expiration metadata, so
	 * ENOENT (treated as success) is expected to dominate.
	 */
	err = __packet_get_expire_time(ph, &exp_notif.packet_tx_expiration_deadline);
	if (__probable(err)) {
		if (err == ENOENT) {
			/* Expiration time is not set; can not continue; not an error. */
			return 0;
		}
		return err;
	}

	err = __packet_get_expiry_action(ph, &exp_action);
	if (__probable(err)) {
		if (err == ENOENT) {
			/* Expiry action is not set; can not continue; not an error. */
			return 0;
		}
		return err;
	}

	if (exp_action == PACKET_EXPIRY_ACTION_NONE) {
		/* Expiry action is no-op; can not continue; not an error. */
		return 0;
	}

	/* Deadline and timestamp are compared in mach absolute time units. */
	exp_notif.packet_tx_expiration_timestamp = mach_absolute_time();

	/* Check whether the packet has expired */
	if (exp_notif.packet_tx_expiration_timestamp < exp_notif.packet_tx_expiration_deadline) {
		/* The packet hasn't expired yet; can not continue; not an error */
		return 0;
	}

	/* The packet has expired and notification is requested */
	err = __packet_get_packetid(ph, &exp_notif.packet_id);
	if (__improbable(err)) {
		return err;
	}

	/* Identify the nexus port the event must be routed back to. */
	err = __packet_get_tx_nx_port_id(ph, &nx_port_id);
	if (__improbable(err)) {
		return err;
	}

	exp_notif.packet_tx_expiration_status = status;
	exp_notif.packet_tx_expiration_origin = origin;

	/* Send the notification status */
	err = kern_channel_event_transmit_expired(
		ifp, &exp_notif, nx_port_id);

	return err;
}
1081