/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <skywalk/os_skywalk_private.h>
#include <netinet/tcp_var.h>

static int kern_packet_clone_internal(const kern_packet_t, kern_packet_t *,
    uint32_t, kern_packet_copy_mode_t);

#if (DEBUG || DEVELOPMENT)
__attribute__((noreturn))
void
pkt_subtype_assert_fail(const kern_packet_t ph, uint64_t type, uint64_t subtype)
{
	panic("invalid packet handle 0x%llx (type %llu != %llu || "
	    "subtype %llu != %llu)", ph, SK_PTR_TYPE(ph), type,
	    SK_PTR_SUBTYPE(ph), subtype);
	/* NOTREACHED */
	__builtin_unreachable();
}

__attribute__((noreturn))
void
pkt_type_assert_fail(const kern_packet_t ph, uint64_t type)
{
	panic("invalid packet handle 0x%llx (type %llu != %llu)",
	    ph, SK_PTR_TYPE(ph), type);
	/* NOTREACHED */
	__builtin_unreachable();
}
#endif /* DEBUG || DEVELOPMENT */

errno_t
kern_packet_set_headroom(const kern_packet_t ph, const uint8_t headroom)
{
	return __packet_set_headroom(ph, headroom);
}

uint8_t
kern_packet_get_headroom(const kern_packet_t ph)
{
	return __packet_get_headroom(ph);
}

errno_t
kern_packet_set_link_header_offset(const kern_packet_t ph, const uint8_t off)
{
	return __packet_set_headroom(ph, off);
}

uint16_t
kern_packet_get_link_header_offset(const kern_packet_t ph)
{
	return __packet_get_headroom(ph);
}

errno_t
kern_packet_set_link_header_length(const kern_packet_t ph, const uint8_t off)
{
	return __packet_set_link_header_length(ph, off);
}

uint8_t
kern_packet_get_link_header_length(const kern_packet_t ph)
{
	return __packet_get_link_header_length(ph);
}

errno_t
kern_packet_set_link_broadcast(const kern_packet_t ph)
{
	return __packet_set_link_broadcast(ph);
}

boolean_t
kern_packet_get_link_broadcast(const kern_packet_t ph)
{
	return __packet_get_link_broadcast(ph);
}

errno_t
kern_packet_set_link_multicast(const kern_packet_t ph)
{
	return __packet_set_link_multicast(ph);
}

errno_t
kern_packet_set_link_ethfcs(const kern_packet_t ph)
{
	return __packet_set_link_ethfcs(ph);
}

boolean_t
kern_packet_get_link_multicast(const kern_packet_t ph)
{
	return __packet_get_link_multicast(ph);
}

boolean_t
kern_packet_get_link_ethfcs(const kern_packet_t ph)
{
	return __packet_get_link_ethfcs(ph);
}

/* deprecated -- no effect, use set_link_header_length instead */
errno_t
kern_packet_set_network_header_offset(const kern_packet_t ph,
    const uint16_t off)
{
#pragma unused(ph, off)
	return 0;
}

/* deprecated -- use get_link_header_length instead */
uint16_t
kern_packet_get_network_header_offset(const kern_packet_t ph)
{
	return (uint16_t)__packet_get_headroom(ph) +
	    (uint16_t)__packet_get_link_header_length(ph);
}
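
/*
 * Illustrative sketch (hypothetical caller, not part of this KPI):
 * with the network-header-offset setter deprecated, the L3 offset is
 * derived from headroom plus the link-layer header length; "eth_hlen"
 * below is an assumed local for a 14-byte Ethernet header.
 *
 *	uint8_t eth_hlen = sizeof(struct ether_header);
 *	(void) kern_packet_set_headroom(ph, 0);
 *	(void) kern_packet_set_link_header_length(ph, eth_hlen);
 *	// kern_packet_get_network_header_offset(ph) now returns 0 + 14
 */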

/* deprecated */
errno_t
kern_packet_set_transport_header_offset(const kern_packet_t ph,
    const uint16_t off)
{
#pragma unused(ph, off)
	return 0;
}

/* deprecated */
uint16_t
kern_packet_get_transport_header_offset(const kern_packet_t ph)
{
#pragma unused(ph)
	return 0;
}

boolean_t
kern_packet_get_transport_traffic_background(const kern_packet_t ph)
{
	return __packet_get_transport_traffic_background(ph);
}

boolean_t
kern_packet_get_transport_traffic_realtime(const kern_packet_t ph)
{
	return __packet_get_transport_traffic_realtime(ph);
}

boolean_t
kern_packet_get_transport_retransmit(const kern_packet_t ph)
{
	return __packet_get_transport_retransmit(ph);
}

boolean_t
kern_packet_get_transport_new_flow(const kern_packet_t ph)
{
	return __packet_get_transport_new_flow(ph);
}

boolean_t
kern_packet_get_transport_last_packet(const kern_packet_t ph)
{
	return __packet_get_transport_last_packet(ph);
}

int
kern_packet_set_service_class(const kern_packet_t ph,
    const kern_packet_svc_class_t sc)
{
	return __packet_set_service_class(ph, sc);
}

kern_packet_svc_class_t
kern_packet_get_service_class(const kern_packet_t ph)
{
	return __packet_get_service_class(ph);
}

errno_t
kern_packet_get_service_class_index(const kern_packet_svc_class_t svc,
    uint32_t *index)
{
	if (index == NULL || !KPKT_VALID_SVC(svc)) {
		return EINVAL;
	}

	*index = KPKT_SVCIDX(svc);
	return 0;
}

boolean_t
kern_packet_is_high_priority(const kern_packet_t ph)
{
	uint32_t sc;
	boolean_t is_hi_priority;

	sc = __packet_get_service_class(ph);

	switch (sc) {
	case PKT_SC_VI:
	case PKT_SC_SIG:
	case PKT_SC_VO:
	case PKT_SC_CTL:
		is_hi_priority = (PKT_ADDR(ph)->pkt_comp_gencnt == 0 ||
		    PKT_ADDR(ph)->pkt_comp_gencnt == TCP_ACK_COMPRESSION_DUMMY);
		break;

	case PKT_SC_BK_SYS:
	case PKT_SC_BK:
	case PKT_SC_BE:
	case PKT_SC_RD:
	case PKT_SC_OAM:
	case PKT_SC_AV:
	case PKT_SC_RV:
	default:
		is_hi_priority = false;
		break;
	}
	return is_hi_priority;
}
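
/*
 * Illustrative sketch (hypothetical driver code, not part of this KPI):
 * a TX path might use the priority check above to steer packets to an
 * expedited hardware ring; "txq_hi", "txq_lo" and driver_enqueue() are
 * assumed driver-side names.
 *
 *	if (kern_packet_is_high_priority(ph)) {
 *		driver_enqueue(txq_hi, ph);
 *	} else {
 *		driver_enqueue(txq_lo, ph);
 *	}
 */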

errno_t
kern_packet_set_traffic_class(const kern_packet_t ph,
    kern_packet_traffic_class_t tc)
{
	return __packet_set_traffic_class(ph, tc);
}

kern_packet_traffic_class_t
kern_packet_get_traffic_class(const kern_packet_t ph)
{
	return __packet_get_traffic_class(ph);
}

errno_t
kern_packet_set_inet_checksum(const kern_packet_t ph,
    const packet_csum_flags_t flags, const uint16_t start,
    const uint16_t stuff)
{
	return __packet_set_inet_checksum(ph, flags, start, stuff, FALSE);
}

packet_csum_flags_t
kern_packet_get_inet_checksum(const kern_packet_t ph, uint16_t *start,
    uint16_t *val)
{
	return __packet_get_inet_checksum(ph, start, val, TRUE);
}

void
kern_packet_set_flow_uuid(const kern_packet_t ph, const uuid_t flow_uuid)
{
	__packet_set_flow_uuid(ph, flow_uuid);
}

void
kern_packet_get_flow_uuid(const kern_packet_t ph, uuid_t *flow_uuid)
{
	__packet_get_flow_uuid(ph, *flow_uuid);
}

void
kern_packet_clear_flow_uuid(const kern_packet_t ph)
{
	__packet_clear_flow_uuid(ph);
}

void
kern_packet_get_euuid(const kern_packet_t ph, uuid_t euuid)
{
	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
		uuid_copy(euuid, PKT_ADDR(ph)->pkt_policy_euuid);
	} else {
		uuid_clear(euuid);
	}
}

void
kern_packet_set_policy_id(const kern_packet_t ph, uint32_t policy_id)
{
	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
		PKT_ADDR(ph)->pkt_policy_id = policy_id;
	}
}

uint32_t
kern_packet_get_policy_id(const kern_packet_t ph)
{
	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
		return PKT_ADDR(ph)->pkt_policy_id;
	} else {
		return 0;
	}
}

uint32_t
kern_packet_get_data_length(const kern_packet_t ph)
{
	return __packet_get_data_length(ph);
}

uint32_t
kern_packet_get_buflet_count(const kern_packet_t ph)
{
	return __packet_get_buflet_count(ph);
}

kern_buflet_t
kern_packet_get_next_buflet(const kern_packet_t ph, const kern_buflet_t bprev)
{
	return __packet_get_next_buflet(ph, bprev);
}

errno_t
kern_packet_finalize(const kern_packet_t ph)
{
	return __packet_finalize(ph);
}

kern_packet_idx_t
kern_packet_get_object_index(const kern_packet_t ph)
{
	return __packet_get_object_index(ph);
}

errno_t
kern_packet_get_timestamp(const kern_packet_t ph, uint64_t *ts,
    boolean_t *valid)
{
	return __packet_get_timestamp(ph, ts, valid);
}

errno_t
kern_packet_set_timestamp(const kern_packet_t ph, uint64_t ts, boolean_t valid)
{
	return __packet_set_timestamp(ph, ts, valid);
}

struct mbuf *
kern_packet_get_mbuf(const kern_packet_t pkt)
{
	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(pkt);

	if ((kpkt->pkt_pflags & PKT_F_MBUF_DATA) != 0) {
		return kpkt->pkt_mbuf;
	}
	return NULL;
}

errno_t
kern_packet_get_timestamp_requested(const kern_packet_t ph,
    boolean_t *requested)
{
	return __packet_get_timestamp_requested(ph, requested);
}

void
kern_packet_tx_completion(const kern_packet_t ph, ifnet_t ifp)
{
	uint64_t ts;
	uintptr_t cb_arg, cb_data;
	kern_return_t tx_status;
	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(ph);

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	(void) __packet_get_tx_completion_status(ph, &tx_status);
	if (tx_status != KERN_SUCCESS) {
		(void) kern_channel_event_transmit_status(ph, ifp);
	}
	if ((kpkt->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) == 0) {
		return;
	}
	__packet_get_tx_completion_data(ph, &cb_arg, &cb_data);
	__packet_get_timestamp(ph, &ts, NULL);
	while (kpkt->pkt_tx_compl_callbacks != 0) {
		mbuf_tx_compl_func cb;
		uint32_t i;

		/* Walk the callback bitmap, lowest set bit first */
		i = ffs(kpkt->pkt_tx_compl_callbacks) - 1;
		kpkt->pkt_tx_compl_callbacks &= ~(1 << i);
		cb = m_get_tx_compl_callback(i);
		if (__probable(cb != NULL)) {
			cb(kpkt->pkt_tx_compl_context, ifp, ts, cb_arg, cb_data,
			    tx_status);
		}
	}
}
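
/*
 * Illustrative sketch (hypothetical driver TX-completion path, not
 * part of this KPI): the driver records the hardware status on the
 * packet, then lets kern_packet_tx_completion() fan the event out to
 * any registered completion callbacks.
 *
 *	(void) kern_packet_set_tx_completion_status(ph, KERN_SUCCESS);
 *	kern_packet_tx_completion(ph, ifp);
 */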

errno_t
kern_packet_get_tx_completion_status(const kern_packet_t ph,
    kern_return_t *status)
{
	return __packet_get_tx_completion_status(ph, status);
}

errno_t
kern_packet_set_tx_completion_status(const kern_packet_t ph,
    kern_return_t status)
{
	return __packet_set_tx_completion_status(ph, status);
}

void
kern_packet_set_group_start(const kern_packet_t ph)
{
	(void) __packet_set_group_start(ph);
}

boolean_t
kern_packet_get_group_start(const kern_packet_t ph)
{
	return __packet_get_group_start(ph);
}

void
kern_packet_set_group_end(const kern_packet_t ph)
{
	(void) __packet_set_group_end(ph);
}

boolean_t
kern_packet_get_group_end(const kern_packet_t ph)
{
	return __packet_get_group_end(ph);
}

errno_t
kern_packet_get_expire_time(const kern_packet_t ph, uint64_t *ts)
{
	return __packet_get_expire_time(ph, ts);
}

errno_t
kern_packet_set_expire_time(const kern_packet_t ph, const uint64_t ts)
{
	return __packet_set_expire_time(ph, ts);
}

errno_t
kern_packet_get_token(const kern_packet_t ph, void *token, uint16_t *len)
{
	return __packet_get_token(ph, token, len);
}

errno_t
kern_packet_set_token(const kern_packet_t ph, const void *token,
    const uint16_t len)
{
	return __packet_set_token(ph, token, len);
}

errno_t
kern_packet_get_packetid(const kern_packet_t ph, packet_id_t *pktid)
{
	return __packet_get_packetid(ph, pktid);
}

errno_t
kern_packet_set_vlan_tag(const kern_packet_t ph, const uint16_t tag,
    const boolean_t tag_in_pkt)
{
	return __packet_set_vlan_tag(ph, tag, tag_in_pkt);
}

errno_t
kern_packet_get_vlan_tag(const kern_packet_t ph, uint16_t *tag,
    boolean_t *tag_in_pkt)
{
	return __packet_get_vlan_tag(ph, tag, tag_in_pkt);
}

uint16_t
kern_packet_get_vlan_id(const uint16_t tag)
{
	return __packet_get_vlan_id(tag);
}

uint8_t
kern_packet_get_vlan_priority(const uint16_t tag)
{
	return __packet_get_vlan_priority(tag);
}

void
kern_packet_set_wake_flag(const kern_packet_t ph)
{
	return __packet_set_wake_flag(ph);
}

boolean_t
kern_packet_get_wake_flag(const kern_packet_t ph)
{
	return __packet_get_wake_flag(ph);
}

uint32_t
kern_inet_checksum(const void *data, uint32_t len, uint32_t sum0)
{
	return __packet_cksum(data, len, sum0);
}

uint32_t
kern_copy_and_inet_checksum(const void *src, void *dst, uint32_t len,
    uint32_t sum0)
{
	uint32_t sum = __packet_copy_and_sum(src, dst, len, sum0);
	return __packet_fold_sum_final(sum);
}
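
/*
 * Illustrative sketch (hypothetical caller, not part of this KPI):
 * kern_inet_checksum() accepts a running sum as sum0, so discontiguous
 * spans can be summed in stages, while kern_copy_and_inet_checksum()
 * copies and sums in a single pass; "hdr", "payload" and the lengths
 * are assumed locals.
 *
 *	uint32_t sum;
 *	sum = kern_inet_checksum(hdr, hdr_len, 0);
 *	sum = kern_inet_checksum(payload, pay_len, sum);
 */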

/*
 * The source packet must be finalized (not dropped); the cloned packet
 * does not inherit the finalized or classified flags, so the caller is
 * responsible for finalizing and classifying it (as needed).
 */
static int
kern_packet_clone_internal(const kern_packet_t ph1, kern_packet_t *ph2,
    uint32_t skmflag, kern_packet_copy_mode_t mode)
{
	struct kern_pbufpool *pool;
	struct __kern_packet *p1 = SK_PTR_ADDR_KPKT(ph1);
	struct __kern_packet *p2 = NULL;
	struct __kern_buflet *p1_buf, *p2_buf;
	uint16_t bufs_cnt_alloc;
	int m_how;
	int err;

	/* TODO: Add quantum support */
	VERIFY(SK_PTR_TYPE(ph1) == NEXUS_META_TYPE_PACKET);

	/* Source must be finalized (not dropped) and have at least 1 buflet */
	if (__improbable((p1->pkt_qum.qum_qflags & QUM_F_FINALIZED) == 0 ||
	    (p1->pkt_qum.qum_qflags & QUM_F_DROPPED) != 0 ||
	    p1->pkt_bufs_cnt == 0)) {
		return EINVAL;
	}

	/* TODO: Add multi-buflet support */
	VERIFY(p1->pkt_bufs_cnt == 1);

	switch (mode) {
	case KPKT_COPY_HEAVY:
		/*
		 * Allocate a packet with the same number of buffers as
		 * the source packet; this cannot be 0 per the check above.
		 */
		bufs_cnt_alloc = p1->pkt_bufs_cnt;
		break;

	case KPKT_COPY_LIGHT:
		/*
		 * Allocate an "empty" packet with no buffers attached; this
		 * works only on pools marked "on-demand", which is the case
		 * today for device drivers needing shared-buffer support.
		 *
		 * TODO: We could make this generic and applicable to regular
		 * pools, but it would involve detaching the buffer that comes
		 * attached to the constructed packet; that wouldn't be
		 * lightweight in nature, but whatever. In such a case the
		 * number of buffers requested during allocation is the same
		 * as the source packet's. For now, let it fail naturally on
		 * regular pools, as part of the allocation below.
		 *
		 * XXX: This would also fail on quantums, as we currently
		 * restrict quantums to have exactly one buffer.
		 */
		bufs_cnt_alloc = 0;
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	*ph2 = 0;
	pool = __DECONST(struct kern_pbufpool *, SK_PTR_ADDR_KQUM(ph1)->qum_pp);
	if (skmflag & SKMEM_NOSLEEP) {
		err = kern_pbufpool_alloc_nosleep(pool, bufs_cnt_alloc, ph2);
		m_how = M_NOWAIT;
	} else {
		err = kern_pbufpool_alloc(pool, bufs_cnt_alloc, ph2);
		ASSERT(err != ENOMEM);
		m_how = M_WAIT;
	}
	if (__improbable(err != 0)) {
		/* See comments above related to KPKT_COPY_{HEAVY,LIGHT} */
		goto error;
	}
	p2 = SK_PTR_ADDR_KPKT(*ph2);

	/* Copy packet metadata */
	_QUM_COPY(&(p1)->pkt_qum, &(p2)->pkt_qum);
	_PKT_COPY(p1, p2);
	ASSERT(p2->pkt_mbuf == NULL);
	ASSERT(p2->pkt_bufs_max == p1->pkt_bufs_max);

	/* Clear trace id */
	p2->pkt_trace_id = 0;
	/* Clear finalized and classified bits from the clone */
	p2->pkt_qum.qum_qflags &= ~(QUM_F_FINALIZED | QUM_F_FLOW_CLASSIFIED);

	switch (mode) {
	case KPKT_COPY_HEAVY:
		/*
		 * Heavy: Copy buffer contents and extra metadata.
		 */
		ASSERT(p2->pkt_bufs_cnt == p1->pkt_bufs_cnt);
		if (__probable(p1->pkt_bufs_cnt != 0)) {
			uint8_t *saddr, *daddr;
			uint16_t copy_len;
			/*
			 * TODO -- [email protected]
			 * Packets from the compat driver could have dlen > dlim
			 * for flowswitch flow compatibility; clean up when we
			 * make them consistent.
			 */
			PKT_GET_FIRST_BUFLET(p1, p1->pkt_bufs_cnt, p1_buf);
			PKT_GET_FIRST_BUFLET(p2, p2->pkt_bufs_cnt, p2_buf);
			saddr = (void *)p1_buf->buf_addr;
			daddr = (void *)p2_buf->buf_addr;
			copy_len = MIN(p1_buf->buf_dlen, p1_buf->buf_dlim);
			if (copy_len != 0) {
				bcopy(saddr, daddr, copy_len);
			}
			*__DECONST(uint16_t *, &p2_buf->buf_dlim) =
			    p1_buf->buf_dlim;
			p2_buf->buf_dlen = p1_buf->buf_dlen;
			p2_buf->buf_doff = p1_buf->buf_doff;
		}

		/* Copy AQM metadata */
		p2->pkt_flowsrc_type = p1->pkt_flowsrc_type;
		p2->pkt_flowsrc_fidx = p1->pkt_flowsrc_fidx;
		_CASSERT((offsetof(struct __flow, flow_src_id) % 8) == 0);
		_UUID_COPY(p2->pkt_flowsrc_id, p1->pkt_flowsrc_id);
		_UUID_COPY(p2->pkt_policy_euuid, p1->pkt_policy_euuid);
		p2->pkt_policy_id = p1->pkt_policy_id;

		p2->pkt_pflags = p1->pkt_pflags;
		if (p1->pkt_pflags & PKT_F_MBUF_DATA) {
			ASSERT(p1->pkt_mbuf != NULL);
			p2->pkt_mbuf = m_dup(p1->pkt_mbuf, m_how);
			if (p2->pkt_mbuf == NULL) {
				KPKT_CLEAR_MBUF_DATA(p2);
				err = ENOBUFS;
				goto error;
			}
		}
		break;

	case KPKT_COPY_LIGHT:
		/*
		 * Lightweight: Duplicate buflet(s) and add refs.
		 */
		ASSERT(p1->pkt_mbuf == NULL);
		ASSERT(p2->pkt_bufs_cnt == 0);
		if (__probable(p1->pkt_bufs_cnt != 0)) {
			PKT_GET_FIRST_BUFLET(p1, p1->pkt_bufs_cnt, p1_buf);
			p2_buf = &p2->pkt_qum_buf;
			*__DECONST(uint16_t *, &p2->pkt_bufs_cnt) =
			    p1->pkt_bufs_cnt;
			_KBUF_COPY(p1_buf, p2_buf);
			ASSERT(p2_buf->buf_nbft_addr == 0);
			ASSERT(p2_buf->buf_nbft_idx == OBJ_IDX_NONE);
		}
		ASSERT(p2->pkt_bufs_cnt == p1->pkt_bufs_cnt);
		ASSERT(p2->pkt_bufs_max == p1->pkt_bufs_max);
		ASSERT(err == 0);
		break;
	}

error:
	if (err != 0 && p2 != NULL) {
		uint32_t usecnt = 0;

		ASSERT(p2->pkt_mbuf == NULL);
		if (__probable(mode == KPKT_COPY_LIGHT)) {
			/*
			 * This is undoing what _KBUF_COPY() did earlier,
			 * in case this routine is modified to handle a regular
			 * pool (not on-demand), which also decrements the
			 * shared buffer's usecnt. For a regular pool, calling
			 * kern_pbufpool_free() will not yield a call to
			 * destroy the metadata.
			 */
			PKT_GET_FIRST_BUFLET(p2, p2->pkt_bufs_cnt, p2_buf);
			KBUF_DTOR(p2_buf, usecnt);
		}
		kern_pbufpool_free(pool, *ph2);
		*ph2 = 0;
	}

	return err;
}

errno_t
kern_packet_clone(const kern_packet_t ph1, kern_packet_t *ph2,
    kern_packet_copy_mode_t mode)
{
	return kern_packet_clone_internal(ph1, ph2, 0, mode);
}

errno_t
kern_packet_clone_nosleep(const kern_packet_t ph1, kern_packet_t *ph2,
    kern_packet_copy_mode_t mode)
{
	return kern_packet_clone_internal(ph1, ph2, SKMEM_NOSLEEP, mode);
}
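
/*
 * Illustrative sketch (hypothetical caller, not part of this KPI):
 * a lightweight clone shares the source packet's buffer (on-demand
 * pools only), and per the contract above the caller must finalize
 * the clone before using it.
 *
 *	kern_packet_t clone = 0;
 *	errno_t err;
 *
 *	err = kern_packet_clone_nosleep(ph, &clone, KPKT_COPY_LIGHT);
 *	if (err == 0) {
 *		err = kern_packet_finalize(clone);
 *	}
 */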

errno_t
kern_packet_add_buflet(const kern_packet_t ph, const kern_buflet_t bprev,
    const kern_buflet_t bnew)
{
	return __packet_add_buflet(ph, bprev, bnew);
}

void
kern_packet_append(const kern_packet_t ph1, const kern_packet_t ph2)
{
	/*
	 * TODO:
	 * Add an assert for non-zero ph2 here after changing IOSkywalkFamily
	 * to use kern_packet_set_next() for clearing the next pointer.
	 */
	kern_packet_set_next(ph1, ph2);
}

kern_packet_t
kern_packet_get_next(const kern_packet_t ph)
{
	struct __kern_packet *p, *next;

	p = SK_PTR_ADDR_KPKT(ph);
	next = p->pkt_nextpkt;
	return next == NULL ? 0 : SK_PKT2PH(next);
}

void
kern_packet_set_next(const kern_packet_t ph1, const kern_packet_t ph2)
{
	struct __kern_packet *p1, *p2;

	ASSERT(ph1 != 0);
	p1 = SK_PTR_ADDR_KPKT(ph1);
	p2 = (ph2 == 0 ? NULL : SK_PTR_ADDR_KPKT(ph2));
	p1->pkt_nextpkt = p2;
}

void
kern_packet_set_chain_counts(const kern_packet_t ph, uint32_t count,
    uint32_t bytes)
{
	struct __kern_packet *p;

	p = SK_PTR_ADDR_KPKT(ph);
	p->pkt_chain_count = count;
	p->pkt_chain_bytes = bytes;
}

void
kern_packet_get_chain_counts(const kern_packet_t ph, uint32_t *count,
    uint32_t *bytes)
{
	struct __kern_packet *p;

	p = SK_PTR_ADDR_KPKT(ph);
	*count = p->pkt_chain_count;
	*bytes = p->pkt_chain_bytes;
}
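
/*
 * Illustrative sketch (hypothetical caller, not part of this KPI):
 * walking a chain built with kern_packet_set_next() and caching the
 * totals on the head packet; "head" is an assumed local.
 *
 *	kern_packet_t p;
 *	uint32_t cnt = 0, bytes = 0;
 *
 *	for (p = head; p != 0; p = kern_packet_get_next(p)) {
 *		cnt++;
 *		bytes += kern_packet_get_data_length(p);
 *	}
 *	kern_packet_set_chain_counts(head, cnt, bytes);
 */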

errno_t
kern_buflet_set_data_offset(const kern_buflet_t buf, const uint16_t doff)
{
	return __buflet_set_data_offset(buf, doff);
}

uint16_t
kern_buflet_get_data_offset(const kern_buflet_t buf)
{
	return __buflet_get_data_offset(buf);
}

errno_t
kern_buflet_set_data_length(const kern_buflet_t buf, const uint16_t dlen)
{
	return __buflet_set_data_length(buf, dlen);
}

uint16_t
kern_buflet_get_data_length(const kern_buflet_t buf)
{
	return __buflet_get_data_length(buf);
}

void *
kern_buflet_get_object_address(const kern_buflet_t buf)
{
	return __buflet_get_object_address(buf);
}

uint32_t
kern_buflet_get_object_limit(const kern_buflet_t buf)
{
	return __buflet_get_object_limit(buf);
}

void *
kern_buflet_get_data_address(const kern_buflet_t buf)
{
	return __buflet_get_data_address(buf);
}

errno_t
kern_buflet_set_data_address(const kern_buflet_t buf, const void *daddr)
{
	return __buflet_set_data_address(buf, daddr);
}

kern_segment_t
kern_buflet_get_object_segment(const kern_buflet_t buf,
    kern_obj_idx_seg_t *idx)
{
	return __buflet_get_object_segment(buf, idx);
}

uint16_t
kern_buflet_get_data_limit(const kern_buflet_t buf)
{
	return __buflet_get_data_limit(buf);
}

errno_t
kern_buflet_set_data_limit(const kern_buflet_t buf, const uint16_t dlim)
{
	return __buflet_set_data_limit(buf, dlim);
}

packet_trace_id_t
kern_packet_get_trace_id(const kern_packet_t ph)
{
	return __packet_get_trace_id(ph);
}

void
kern_packet_set_trace_id(const kern_packet_t ph, packet_trace_id_t trace_id)
{
	return __packet_set_trace_id(ph, trace_id);
}

void
kern_packet_trace_event(const kern_packet_t ph, uint32_t event)
{
	return __packet_trace_event(ph, event);
}

errno_t
kern_packet_copy_bytes(kern_packet_t pkt, size_t off, size_t len,
    void *out_data)
{
	kern_buflet_t buflet = NULL;
	size_t count;
	uint8_t *addr;
	uint32_t buflet_len;

	/* Only the first (and currently only) buflet is examined */
	buflet = __packet_get_next_buflet(pkt, buflet);
	if (buflet == NULL) {
		return EINVAL;
	}
	buflet_len = __buflet_get_data_length(buflet);
	if (len > buflet_len) {
		return EINVAL;
	}
	if (off > buflet_len) {
		return EINVAL;
	}
	addr = __buflet_get_data_address(buflet);
	if (addr == NULL) {
		return EINVAL;
	}
	addr += __buflet_get_data_offset(buflet);
	addr += off;
	count = MIN(len, buflet_len - off);
	bcopy((void *)addr, out_data, count);

	return 0;
}
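
/*
 * Illustrative sketch (hypothetical caller, not part of this KPI):
 * pulling the leading bytes of a packet out for inspection. Note the
 * copy can be short when off + len runs past the buflet's data length;
 * at most (data length - off) bytes are copied.
 *
 *	uint8_t hdr[14];	// an Ethernet header's worth of bytes
 *	errno_t err;
 *
 *	err = kern_packet_copy_bytes(ph, 0, sizeof(hdr), hdr);
 */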
914