1 /*
2 * Copyright (c) 1999-2025 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include "net/if_var.h"
30 #include <net/dlil_var_private.h>
31
32
/*
 * Lock attributes and groups for the DLIL subsystem.  All DLIL-internal
 * locks share dlil_lck_attributes; per-ifnet locks get their own groups
 * so contention can be attributed in lock diagnostics.
 */
LCK_ATTR_DECLARE(dlil_lck_attributes, 0, 0);

LCK_GRP_DECLARE(dlil_lock_group, "DLIL internal locks");
LCK_GRP_DECLARE(ifnet_lock_group, "ifnet locks");
LCK_GRP_DECLARE(ifnet_head_lock_group, "ifnet head lock");
LCK_GRP_DECLARE(ifnet_snd_lock_group, "ifnet snd locks");
LCK_GRP_DECLARE(ifnet_rcv_lock_group, "ifnet rcv locks");

LCK_ATTR_DECLARE(ifnet_lock_attr, 0, 0);
/* Protects the global list of interfaces (readers-writer) */
LCK_RW_DECLARE_ATTR(ifnet_head_lock, &ifnet_head_lock_group,
    &dlil_lck_attributes);
/* Serializes dlil_ifnet allocation/reuse */
LCK_MTX_DECLARE_ATTR(dlil_ifnet_lock, &dlil_lock_group,
    &dlil_lck_attributes);


/* Guards dlil_pending_thread_cnt and its wakeup channel */
LCK_MTX_DECLARE_ATTR(dlil_thread_sync_lock, &dlil_lock_group,
    &dlil_lck_attributes);

/* Number of DLIL worker threads still starting up; see incr/decr below */
uint32_t dlil_pending_thread_cnt = 0;


/*
 * Forward declarations.
 */
__private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *);
__private_extern__ void if_rtproto_del(struct ifnet *ifp, int protocol);
59
60
/*
 * Allocation zones
 *
 * The *_size globals hold the object size, while *_bufsize adds headroom
 * for a back-pointer word plus 64-bit alignment slack; both are computed
 * in dlil_allocation_zones_init() and consumed by
 * _dlil_alloc_aligned_object()/_dlil_free_aligned_object().
 */
unsigned int dlif_size;         /* size of dlil_ifnet to allocate */
unsigned int dlif_bufsize;      /* size of dlif_size + headroom */
ZONE_DECLARE(dlif_zone, struct dlil_ifnet);
#define DLIF_ZONE_NAME  "ifnet"         /* zone name */
zone_t dlif_zone;                       /* zone for dlil_ifnet */

unsigned int dlif_tcpstat_size;         /* size of tcpstat_local to allocate */
unsigned int dlif_tcpstat_bufsize;      /* size of dlif_tcpstat_size + headroom */
ZONE_DECLARE(dlif_tcpstat_zone, struct ifnet_tcpstat);
#define DLIF_TCPSTAT_ZONE_NAME  "ifnet_tcpstat"         /* zone name */
zone_t dlif_tcpstat_zone;               /* zone for tcpstat_local */

unsigned int dlif_udpstat_size;         /* size of udpstat_local to allocate */
unsigned int dlif_udpstat_bufsize;      /* size of dlif_udpstat_size + headroom */
ZONE_DECLARE(dlif_udpstat_zone, struct ifnet_udpstat);
#define DLIF_UDPSTAT_ZONE_NAME  "ifnet_udpstat"         /* zone name */
zone_t dlif_udpstat_zone;               /* zone for udpstat_local */

/* Typed zones for interface filters and attached protocols */
KALLOC_TYPE_DEFINE(dlif_filt_zone, struct ifnet_filter, NET_KT_DEFAULT);

KALLOC_TYPE_DEFINE(dlif_proto_zone, struct if_proto, NET_KT_DEFAULT);
85
86 /*
87 * Utility routines
88 */
89 kern_return_t
dlil_affinity_set(struct thread * tp,u_int32_t tag)90 dlil_affinity_set(struct thread *tp, u_int32_t tag)
91 {
92 thread_affinity_policy_data_t policy;
93
94 bzero(&policy, sizeof(policy));
95 policy.affinity_tag = tag;
96 return thread_policy_set(tp, THREAD_AFFINITY_POLICY,
97 (thread_policy_t)&policy, THREAD_AFFINITY_POLICY_COUNT);
98 }
99
/*
 * Account for one more DLIL worker thread that has been created but has
 * not finished initializing.  Caller must NOT hold dlil_thread_sync_lock.
 */
void
dlil_incr_pending_thread_count(void)
{
	LCK_MTX_ASSERT(&dlil_thread_sync_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(&dlil_thread_sync_lock);
	dlil_pending_thread_cnt++;
	lck_mtx_unlock(&dlil_thread_sync_lock);
}
108
/*
 * Account for one DLIL worker thread that finished initializing.  When
 * the pending count drops to zero, wake anyone sleeping on the counter
 * (e.g. a waiter synchronizing with thread startup).  Caller must NOT
 * hold dlil_thread_sync_lock.
 */
void
dlil_decr_pending_thread_count(void)
{
	LCK_MTX_ASSERT(&dlil_thread_sync_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(&dlil_thread_sync_lock);
	VERIFY(dlil_pending_thread_cnt > 0);
	dlil_pending_thread_cnt--;
	if (dlil_pending_thread_cnt == 0) {
		wakeup(&dlil_pending_thread_cnt);
	}
	lck_mtx_unlock(&dlil_thread_sync_lock);
}
121
122 boolean_t
packet_has_vlan_tag(struct mbuf * m)123 packet_has_vlan_tag(struct mbuf * m)
124 {
125 u_int tag = 0;
126
127 if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) != 0) {
128 tag = EVL_VLANOFTAG(m->m_pkthdr.vlan_tag);
129 if (tag == 0) {
130 /* the packet is just priority-tagged, clear the bit */
131 m->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
132 }
133 }
134 return tag != 0;
135 }
136
137 void
log_hexdump(void * __sized_by (len)data,size_t len)138 log_hexdump(void *__sized_by(len) data, size_t len)
139 {
140 size_t i, j, k;
141 unsigned char *ptr = (unsigned char *)data;
142 #define MAX_DUMP_BUF 32
143 unsigned char buf[3 * MAX_DUMP_BUF + 1];
144
145 for (i = 0; i < len; i += MAX_DUMP_BUF) {
146 for (j = i, k = 0; j < i + MAX_DUMP_BUF && j < len; j++) {
147 unsigned char msnbl = ptr[j] >> 4;
148 unsigned char lsnbl = ptr[j] & 0x0f;
149
150 buf[k++] = msnbl < 10 ? msnbl + '0' : msnbl + 'a' - 10;
151 buf[k++] = lsnbl < 10 ? lsnbl + '0' : lsnbl + 'a' - 10;
152
153 if ((j % 2) == 1) {
154 buf[k++] = ' ';
155 }
156 if ((j % MAX_DUMP_BUF) == MAX_DUMP_BUF - 1) {
157 buf[k++] = ' ';
158 }
159 }
160 buf[k] = 0;
161 os_log(OS_LOG_DEFAULT, "%3lu: %s", i, buf);
162 }
163 }
164
165 /*
166 * Monitor functions.
167 */
/*
 * Mark the interface filter list busy (a walker is active).  Caller must
 * hold if_flt_lock.  The VERIFY catches counter wraparound.
 */
void
if_flt_monitor_busy(struct ifnet *ifp)
{
	LCK_MTX_ASSERT(&ifp->if_flt_lock, LCK_MTX_ASSERT_OWNED);

	++ifp->if_flt_busy;
	VERIFY(ifp->if_flt_busy != 0);
}
176
/*
 * Drop one busy reference on the filter list; alias for
 * if_flt_monitor_leave().  Caller must hold if_flt_lock.
 */
void
if_flt_monitor_unbusy(struct ifnet *ifp)
{
	if_flt_monitor_leave(ifp);
}
182
/*
 * Enter the filter-list monitor: block (msleep drops and reacquires
 * if_flt_lock) until no other walker is active, then mark the list busy.
 * Caller must hold if_flt_lock; it is still held on return.
 */
void
if_flt_monitor_enter(struct ifnet *ifp)
{
	LCK_MTX_ASSERT(&ifp->if_flt_lock, LCK_MTX_ASSERT_OWNED);

	while (ifp->if_flt_busy) {
		++ifp->if_flt_waiters;
		(void) msleep(&ifp->if_flt_head, &ifp->if_flt_lock,
		    (PZERO - 1), "if_flt_monitor", NULL);
	}
	if_flt_monitor_busy(ifp);
}
195
/*
 * Leave the filter-list monitor: drop the busy count and, once it hits
 * zero, wake every thread waiting in if_flt_monitor_enter().  Caller
 * must hold if_flt_lock.
 */
void
if_flt_monitor_leave(struct ifnet *ifp)
{
	LCK_MTX_ASSERT(&ifp->if_flt_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(ifp->if_flt_busy != 0);
	--ifp->if_flt_busy;

	if (ifp->if_flt_busy == 0 && ifp->if_flt_waiters > 0) {
		ifp->if_flt_waiters = 0;
		wakeup(&ifp->if_flt_head);
	}
}
209
210 /*
211 * Allocation routines
212 */
213 void
dlil_allocation_zones_init(void)214 dlil_allocation_zones_init(void)
215 {
216 dlif_size = (ifnet_debug == 0) ? sizeof(struct dlil_ifnet) :
217 sizeof(struct dlil_ifnet_dbg);
218 /* Enforce 64-bit alignment for dlil_ifnet structure */
219 dlif_bufsize = dlif_size + sizeof(void *) + sizeof(u_int64_t);
220 dlif_bufsize = (uint32_t)P2ROUNDUP(dlif_bufsize, sizeof(u_int64_t));
221 dlif_zone = zone_create(DLIF_ZONE_NAME, dlif_bufsize, ZC_ZFREE_CLEARMEM);
222
223 dlif_tcpstat_size = sizeof(struct tcpstat_local);
224 /* Enforce 64-bit alignment for tcpstat_local structure */
225 dlif_tcpstat_bufsize =
226 dlif_tcpstat_size + sizeof(void *) + sizeof(u_int64_t);
227 dlif_tcpstat_bufsize = (uint32_t)
228 P2ROUNDUP(dlif_tcpstat_bufsize, sizeof(u_int64_t));
229 dlif_tcpstat_zone = zone_create(DLIF_TCPSTAT_ZONE_NAME,
230 dlif_tcpstat_bufsize, ZC_ZFREE_CLEARMEM);
231
232 dlif_udpstat_size = sizeof(struct udpstat_local);
233 /* Enforce 64-bit alignment for udpstat_local structure */
234 dlif_udpstat_bufsize =
235 dlif_udpstat_size + sizeof(void *) + sizeof(u_int64_t);
236 dlif_udpstat_bufsize = (uint32_t)
237 P2ROUNDUP(dlif_udpstat_bufsize, sizeof(u_int64_t));
238 dlif_udpstat_zone = zone_create(DLIF_UDPSTAT_ZONE_NAME,
239 dlif_udpstat_bufsize, ZC_ZFREE_CLEARMEM);
240 }
241
/*
 * Allocate one element of `buffer_size' bytes from `zone' and carve a
 * 64-bit aligned object of `object_size' bytes out of it.  The word
 * immediately preceding the aligned object stores the raw zone pointer
 * so _dlil_free_aligned_object() can recover it at free time.
 *
 * On return *pbuffer holds the raw zone buffer and *pobject the aligned
 * object.  Allocation cannot fail (Z_WAITOK | Z_NOFAIL) and the memory
 * is zeroed (Z_ZERO).
 */
static void
_dlil_alloc_aligned_object(struct zone *zone,
    size_t buffer_size, void *__indexable *__single pbuffer,
    size_t object_size, void *__indexable *__single pobject)
{
	void *base, *buf, **pbuf;

	void *__unsafe_indexable addr = __zalloc_flags(zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	__builtin_assume(addr != NULL);
	buf = __unsafe_forge_bidi_indexable(void*, addr, buffer_size);

	/*
	 * Get the 64-bit aligned base address for this object.  Rounding
	 * up from (buf + 8) guarantees at least one pointer word of
	 * headroom before `base' for the back-pointer below.
	 */
	base = (void*)((char*)buf + (P2ROUNDUP((intptr_t)buf + sizeof(u_int64_t), sizeof(u_int64_t)) - (intptr_t)buf));
	VERIFY(((intptr_t)base + object_size) <=
	    ((intptr_t)buf + buffer_size));

	/*
	 * Wind back a pointer size from the aligned base and
	 * save the original address so we can free it later.
	 */
	pbuf = __unsafe_forge_bidi_indexable(void**, (intptr_t)base - sizeof(void *), sizeof(void *));
	*pbuf = buf;
	*pbuffer = buf;
	*pobject = base;
}
267
/*
 * Free an object previously returned by _dlil_alloc_aligned_object():
 * recover the raw zone buffer from the back-pointer stored one word
 * before the aligned object, then return it to `zone'.  NULL is a no-op.
 */
static void
_dlil_free_aligned_object(struct zone *zone, void *pobject)
{
	if (pobject != NULL) {
		void *__single *pbuf;
		pbuf = __unsafe_forge_single(void**, ((intptr_t)pobject - sizeof(void*)));
		zfree(zone, *pbuf);
	}
}
277
278 struct dlil_ifnet *
dlif_ifnet_alloc(void)279 dlif_ifnet_alloc(void)
280 {
281 void *__indexable base, *__indexable buf;
282 _dlil_alloc_aligned_object(dlif_zone,
283 dlif_bufsize, &buf,
284 dlif_size, &base);
285
286 return base;
287 }
288
/*
 * Free a dlil_ifnet obtained from dlif_ifnet_alloc().  NULL is a no-op.
 */
void
dlif_ifnet_free(struct dlil_ifnet *ifnet)
{
	_dlil_free_aligned_object(dlif_zone, ifnet);
}
294
/*
 * Allocate a zeroed interface filter record.  Cannot fail.
 */
struct ifnet_filter *
dlif_filt_alloc(void)
{
	return zalloc_flags(dlif_filt_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
}
300
/*
 * Free an interface filter record from dlif_filt_alloc().  NULL is a
 * no-op.
 */
void
dlif_filt_free(struct ifnet_filter *filt)
{
	if (filt != NULL) {
		zfree(dlif_filt_zone, filt);
	}
}
308
/*
 * Allocate a zeroed if_proto record.  Cannot fail.
 */
struct if_proto *
dlif_proto_alloc(void)
{
	return zalloc_flags(dlif_proto_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
}
314
/*
 * Free an if_proto record from dlif_proto_alloc().  NULL is a no-op.
 */
void
dlif_proto_free(struct if_proto *ifproto)
{
	if (ifproto != NULL) {
		zfree(dlif_proto_zone, ifproto);
	}
}
322
/*
 * Allocate a zeroed, 64-bit aligned per-interface TCP stats block.
 * Cannot fail; release with dlif_tcpstat_free().
 */
struct tcpstat_local *
dlif_tcpstat_alloc(void)
{
	void *__indexable base, *__indexable buf;
	_dlil_alloc_aligned_object(dlif_tcpstat_zone,
	    dlif_tcpstat_bufsize, &buf,
	    dlif_tcpstat_size, &base);
	return base;
}
332
/*
 * Free a TCP stats block obtained from dlif_tcpstat_alloc().  NULL is a
 * no-op.
 */
void
dlif_tcpstat_free(struct tcpstat_local *if_tcp_stat)
{
	_dlil_free_aligned_object(dlif_tcpstat_zone, if_tcp_stat);
}
338
/*
 * Allocate a zeroed, 64-bit aligned per-interface UDP stats block.
 * Cannot fail; release with dlif_udpstat_free().
 */
struct udpstat_local *
dlif_udpstat_alloc(void)
{
	void *__indexable base, *__indexable buf;
	_dlil_alloc_aligned_object(dlif_udpstat_zone,
	    dlif_udpstat_bufsize, &buf,
	    dlif_udpstat_size, &base);
	return base;
}
348
349 void
dlif_udpstat_free(struct udpstat_local * if_udp_stat)350 dlif_udpstat_free(struct udpstat_local *if_udp_stat)
351 {
352 _dlil_free_aligned_object(dlif_tcpstat_zone, if_udp_stat);
353 }
354
/*
 * Build (or rebuild) the link-level ifaddr for `ifp', filling in the
 * AF_LINK address from `ll_addr' (may be NULL for a zero-length address)
 * and a matching all-ones netmask over the name portion.
 *
 * Storage strategy: the common case reuses the fixed-size storage
 * embedded in struct dlil_ifnet (dl_if_lladdr); when the sockaddr_dl
 * would not fit in DLIL_SDLMAXLEN, a permanently-allocated extended
 * record (dl_if_lladdr_xtra_space) is used instead, and reused on later
 * calls.  The previous lladdr, if replaced, has its reference dropped.
 *
 * Caller must hold the ifnet lock exclusively.  Returns the (referenced,
 * IFD_LINK) ifaddr now installed as ifp->if_lladdr.
 */
struct ifaddr *
dlil_alloc_lladdr(struct ifnet *ifp, const struct sockaddr_dl *ll_addr)
{
	struct ifaddr *ifa, *oifa = NULL;
	struct sockaddr_dl *addr_sdl, *mask_sdl;
	char workbuf[IFNAMSIZ * 2];
	int namelen, masklen, socksize;
	struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp;

	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
	VERIFY(ll_addr == NULL || ll_addr->sdl_alen == ifp->if_addrlen);

	/* sockaddr_dl layout: name bytes first, then the link address */
	namelen = scnprintf(workbuf, sizeof(workbuf), "%s",
	    if_name(ifp));
	masklen = offsetof(struct sockaddr_dl, sdl_data[0])
	    + ((namelen > 0) ? namelen : 0);
	socksize = masklen + ifp->if_addrlen;
#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof (u_int32_t) - 1)))
	if ((u_int32_t)socksize < sizeof(struct sockaddr_dl)) {
		socksize = sizeof(struct sockaddr_dl);
	}
	socksize = ROUNDUP(socksize);
#undef ROUNDUP

	ifa = ifp->if_lladdr;
	if (socksize > DLIL_SDLMAXLEN ||
	    (ifa != NULL && ifa != &dl_if->dl_if_lladdr.ifa)) {
		/*
		 * Rare, but in the event that the link address requires
		 * more storage space than DLIL_SDLMAXLEN, allocate the
		 * largest possible storages for address and mask, such
		 * that we can reuse the same space when if_addrlen grows.
		 * This same space will be used when if_addrlen shrinks.
		 */
		struct dl_if_lladdr_xtra_space *__single dl_if_lladdr_ext;

		if (ifa == NULL || ifa == &dl_if->dl_if_lladdr.ifa) {
			/* first time on the extended path: permanent alloc */
			dl_if_lladdr_ext = zalloc_permanent(
				sizeof(*dl_if_lladdr_ext), ZALIGN(struct ifaddr));

			ifa = &dl_if_lladdr_ext->ifa;
			ifa_lock_init(ifa);
			ifa_initref(ifa);
			/* Don't set IFD_ALLOC, as this is permanent */
			ifa->ifa_debug = IFD_LINK;
		} else {
			/* already on the extended record; recover it from ifa */
			dl_if_lladdr_ext = __unsafe_forge_single(
				struct dl_if_lladdr_xtra_space*, ifa);
			ifa = &dl_if_lladdr_ext->ifa;
		}

		IFA_LOCK(ifa);
		/* address and mask sockaddr_dl locations */
		bzero(dl_if_lladdr_ext->addr_sdl_bytes,
		    sizeof(dl_if_lladdr_ext->addr_sdl_bytes));
		bzero(dl_if_lladdr_ext->mask_sdl_bytes,
		    sizeof(dl_if_lladdr_ext->mask_sdl_bytes));
		addr_sdl = SDL(dl_if_lladdr_ext->addr_sdl_bytes);
		mask_sdl = SDL(dl_if_lladdr_ext->mask_sdl_bytes);
	} else {
		VERIFY(ifa == NULL || ifa == &dl_if->dl_if_lladdr.ifa);
		/*
		 * Use the storage areas for address and mask within the
		 * dlil_ifnet structure. This is the most common case.
		 */
		if (ifa == NULL) {
			ifa = &dl_if->dl_if_lladdr.ifa;
			ifa_lock_init(ifa);
			ifa_initref(ifa);
			/* Don't set IFD_ALLOC, as this is permanent */
			ifa->ifa_debug = IFD_LINK;
		}
		IFA_LOCK(ifa);
		/* address and mask sockaddr_dl locations */
		bzero(dl_if->dl_if_lladdr.addr_sdl_bytes,
		    sizeof(dl_if->dl_if_lladdr.addr_sdl_bytes));
		bzero(dl_if->dl_if_lladdr.mask_sdl_bytes,
		    sizeof(dl_if->dl_if_lladdr.mask_sdl_bytes));
		addr_sdl = SDL(dl_if->dl_if_lladdr.addr_sdl_bytes);
		mask_sdl = SDL(dl_if->dl_if_lladdr.mask_sdl_bytes);
	}

	/* Install the (possibly new) lladdr; old one is released below */
	if (ifp->if_lladdr != ifa) {
		oifa = ifp->if_lladdr;
		ifp->if_lladdr = ifa;
	}

	VERIFY(ifa->ifa_debug == IFD_LINK);
	ifa->ifa_ifp = ifp;
	ifa->ifa_rtrequest = link_rtrequest;
	ifa->ifa_addr = SA(addr_sdl);
	addr_sdl->sdl_len = (u_char)socksize;
	addr_sdl->sdl_family = AF_LINK;
	if (namelen > 0) {
		/*
		 * NOTE(review): copy is clamped to sizeof(sdl_data) but
		 * sdl_nlen is set to the unclamped namelen — presumably
		 * namelen always fits; confirm against IFNAMSIZ sizing.
		 */
		bcopy(workbuf, addr_sdl->sdl_data, min(namelen,
		    sizeof(addr_sdl->sdl_data)));
		addr_sdl->sdl_nlen = (u_char)namelen;
	} else {
		addr_sdl->sdl_nlen = 0;
	}
	addr_sdl->sdl_index = ifp->if_index;
	addr_sdl->sdl_type = ifp->if_type;
	if (ll_addr != NULL) {
		addr_sdl->sdl_alen = ll_addr->sdl_alen;
		bcopy(CONST_LLADDR(ll_addr), LLADDR(addr_sdl), addr_sdl->sdl_alen);
	} else {
		addr_sdl->sdl_alen = 0;
	}
	/* netmask: all-ones over the interface-name portion */
	ifa->ifa_netmask = SA(mask_sdl);
	mask_sdl->sdl_len = (u_char)masklen;
	while (namelen > 0) {
		mask_sdl->sdl_data[--namelen] = 0xff;
	}
	IFA_UNLOCK(ifa);

	/* drop the reference on the lladdr we displaced, if any */
	if (oifa != NULL) {
		ifa_remref(oifa);
	}

	return ifa;
}
476
477
/*
 * Allocate the per-interface protocol statistics blocks for `ifp':
 * TCP/UDP counters plus IPv4/IPv6 ECN stats.
 *
 * Returns 0 on success, EINVAL if ifp is NULL or if exactly one of
 * if_tcp_stat/if_udp_stat was already set (an inconsistent state — in
 * that case any present stats blocks are freed before returning).
 * The zone allocations themselves cannot fail.
 */
__private_extern__ int
dlil_alloc_local_stats(struct ifnet *ifp)
{
	int ret = EINVAL;

	if (ifp == NULL) {
		goto end;
	}

	/* only allocate when neither block exists; both are created together */
	if (ifp->if_tcp_stat == NULL && ifp->if_udp_stat == NULL) {
		ifp->if_tcp_stat = dlif_tcpstat_alloc();
		ifp->if_udp_stat = dlif_udpstat_alloc();

		VERIFY(IS_P2ALIGNED(ifp->if_tcp_stat, sizeof(u_int64_t)) &&
		    IS_P2ALIGNED(ifp->if_udp_stat, sizeof(u_int64_t)));

		ret = 0;
	}

	if (ifp->if_ipv4_stat == NULL) {
		ifp->if_ipv4_stat = kalloc_type(struct if_tcp_ecn_stat, Z_WAITOK | Z_ZERO);
	}

	if (ifp->if_ipv6_stat == NULL) {
		ifp->if_ipv6_stat = kalloc_type(struct if_tcp_ecn_stat, Z_WAITOK | Z_ZERO);
	}
end:
	/* on failure, unwind anything that is (or was already) allocated */
	if (ifp != NULL && ret != 0) {
		if (ifp->if_tcp_stat != NULL) {
			dlif_tcpstat_free(ifp->if_tcp_stat);
			ifp->if_tcp_stat = NULL;
		}
		if (ifp->if_udp_stat != NULL) {
			dlif_udpstat_free(ifp->if_udp_stat);
			ifp->if_udp_stat = NULL;
		}
		/* The macro kfree_type sets the passed pointer to NULL */
		if (ifp->if_ipv4_stat != NULL) {
			kfree_type(struct if_tcp_ecn_stat, ifp->if_ipv4_stat);
		}
		if (ifp->if_ipv6_stat != NULL) {
			kfree_type(struct if_tcp_ecn_stat, ifp->if_ipv6_stat);
		}
	}

	return ret;
}
525
526 errno_t
dlil_if_ref(struct ifnet * ifp)527 dlil_if_ref(struct ifnet *ifp)
528 {
529 struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp;
530
531 if (dl_if == NULL) {
532 return EINVAL;
533 }
534
535 lck_mtx_lock_spin(&dl_if->dl_if_lock);
536 ++dl_if->dl_if_refcnt;
537 if (dl_if->dl_if_refcnt == 0) {
538 panic("%s: wraparound refcnt for ifp=%p", __func__, ifp);
539 /* NOTREACHED */
540 }
541 if (dl_if->dl_if_trace != NULL) {
542 (*dl_if->dl_if_trace)(dl_if, TRUE);
543 }
544 lck_mtx_unlock(&dl_if->dl_if_lock);
545
546 return 0;
547 }
548
549 errno_t
dlil_if_free(struct ifnet * ifp)550 dlil_if_free(struct ifnet *ifp)
551 {
552 struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp;
553 bool need_release = FALSE;
554
555 if (dl_if == NULL) {
556 return EINVAL;
557 }
558
559 lck_mtx_lock_spin(&dl_if->dl_if_lock);
560 switch (dl_if->dl_if_refcnt) {
561 case 0:
562 panic("%s: negative refcnt for ifp=%p", __func__, ifp);
563 /* NOTREACHED */
564 break;
565 case 1:
566 if ((ifp->if_refflags & IFRF_EMBRYONIC) != 0) {
567 need_release = TRUE;
568 }
569 break;
570 default:
571 break;
572 }
573 --dl_if->dl_if_refcnt;
574 if (dl_if->dl_if_trace != NULL) {
575 (*dl_if->dl_if_trace)(dl_if, FALSE);
576 }
577 lck_mtx_unlock(&dl_if->dl_if_lock);
578 if (need_release) {
579 _dlil_if_release(ifp, true);
580 }
581 return 0;
582 }
583
/*
 * Return a dlil_ifnet record to the reusable pool: free the broadcast
 * address, move the interface name/xname into the record's dedicated
 * storage (the previous storage may be caller-owned), and optionally
 * clear the DLIF_INUSE flag when `clear_in_use' is set (embryonic
 * teardown path).  Also unwinds the net_api_stats allocation counters.
 */
void
_dlil_if_release(ifnet_t ifp, bool clear_in_use)
{
	struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;

	VERIFY(OSDecrementAtomic64(&net_api_stats.nas_ifnet_alloc_count) > 0);
	if (!(ifp->if_xflags & IFXF_ALLOC_KPI)) {
		VERIFY(OSDecrementAtomic64(&net_api_stats.nas_ifnet_alloc_os_count) > 0);
	}

	/* lock order: ifnet lock, then dl_if_lock */
	ifnet_lock_exclusive(ifp);
	kfree_data_counted_by(ifp->if_broadcast.ptr, ifp->if_broadcast.length);
	lck_mtx_lock(&dlifp->dl_if_lock);
	/* Copy the if name to the dedicated storage */
	ifp->if_name = tsnprintf(dlifp->dl_if_namestorage, sizeof(dlifp->dl_if_namestorage),
	    "%s", ifp->if_name);
	/* Reset external name (name + unit) */
	ifp->if_xname = tsnprintf(dlifp->dl_if_xnamestorage, sizeof(dlifp->dl_if_xnamestorage),
	    "%s?", ifp->if_name);
	if (clear_in_use) {
		ASSERT((dlifp->dl_if_flags & DLIF_INUSE) != 0);
		dlifp->dl_if_flags &= ~DLIF_INUSE;
	}
	lck_mtx_unlock(&dlifp->dl_if_lock);
	ifnet_lock_done(ifp);
}
610
/*
 * Public wrapper: release `ifp' without clearing DLIF_INUSE.
 */
__private_extern__ void
dlil_if_release(ifnet_t ifp)
{
	_dlil_if_release(ifp, false);
}
616
/*
 * Take a reference on an attached protocol record (relaxed atomic).
 */
void
if_proto_ref(struct if_proto *proto)
{
	os_atomic_inc(&proto->refcount, relaxed);
}
622
/*
 * Drop a reference on an attached protocol record; when the last
 * reference goes away, run the protocol's detached callback, purge its
 * routes, post KEV_DL_PROTO_DETACHED, mark the interface down if no
 * protocols remain, and free the record.  Caller must not hold the
 * ifnet lock.
 */
void
if_proto_free(struct if_proto *proto)
{
	u_int32_t oldval;
	struct ifnet *ifp = proto->ifp;
	u_int32_t proto_family = proto->protocol_family;
	struct kev_dl_proto_data ev_pr_data;

	/* oldval is the pre-decrement count; >1 means others still hold refs */
	oldval = os_atomic_dec_orig(&proto->refcount, relaxed);
	if (oldval > 1) {
		return;
	}

	/* last reference: notify the protocol of its detach, per KPI version */
	if (proto->proto_kpi == kProtoKPI_v1) {
		if (proto->kpi.v1.detached) {
			proto->kpi.v1.detached(ifp, proto->protocol_family);
		}
	}
	if (proto->proto_kpi == kProtoKPI_v2) {
		if (proto->kpi.v2.detached) {
			proto->kpi.v2.detached(ifp, proto->protocol_family);
		}
	}

	/*
	 * Cleanup routes that may still be in the routing table for that
	 * interface/protocol pair.
	 */
	if_rtproto_del(ifp, proto_family);

	ifnet_lock_shared(ifp);

	/* No more reference on this, protocol must have been detached */
	VERIFY(proto->detached);

	/*
	 * The reserved field carries the number of protocol still attached
	 * (subject to change)
	 */
	ev_pr_data.proto_family = proto_family;
	ev_pr_data.proto_remaining_count = dlil_ifp_protolist(ifp, NULL, 0);

	ifnet_lock_done(ifp);

	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
	    (struct net_event_data *)&ev_pr_data,
	    sizeof(struct kev_dl_proto_data), FALSE);

	if (ev_pr_data.proto_remaining_count == 0) {
		/*
		 * The protocol count has gone to zero, mark the interface down.
		 * This used to be done by configd.KernelEventMonitor, but that
		 * is inherently prone to races (rdar://problem/30810208).
		 */
		(void) ifnet_set_flags(ifp, 0, IFF_UP);
		(void) ifnet_ioctl(ifp, 0, SIOCSIFFLAGS, NULL);
		dlil_post_sifflags_msg(ifp);
	}

	dlif_proto_free(proto);
}
684
685 __private_extern__ u_int32_t
dlil_ifp_protolist(struct ifnet * ifp,protocol_family_t * list __counted_by (list_count),u_int32_t list_count)686 dlil_ifp_protolist(struct ifnet *ifp, protocol_family_t *list __counted_by(list_count),
687 u_int32_t list_count)
688 {
689 u_int32_t count = 0;
690 int i;
691
692 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);
693
694 if (ifp->if_proto_hash == NULL) {
695 goto done;
696 }
697
698 for (i = 0; i < PROTO_HASH_SLOTS; i++) {
699 if_proto_ref_t proto;
700 SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
701 if (list != NULL && count < list_count) {
702 list[count] = proto->protocol_family;
703 }
704 count++;
705 }
706 }
707 done:
708 return count;
709 }
710
711 __private_extern__ u_int32_t
if_get_protolist(struct ifnet * ifp,u_int32_t * __counted_by (count)protolist,u_int32_t count)712 if_get_protolist(struct ifnet * ifp, u_int32_t *__counted_by(count) protolist, u_int32_t count)
713 {
714 u_int32_t actual_count;
715 ifnet_lock_shared(ifp);
716 actual_count = dlil_ifp_protolist(ifp, protolist, count);
717 ifnet_lock_done(ifp);
718 return actual_count;
719 }
720
/*
 * Free a protocol list buffer (allocated by the caller of
 * if_get_protolist with kalloc_data; NULL is a no-op).
 */
__private_extern__ void
if_free_protolist(u_int32_t *list)
{
	kfree_data_addr(list);
}
726
727 boolean_t
dlil_is_native_netif_nexus(ifnet_t ifp)728 dlil_is_native_netif_nexus(ifnet_t ifp)
729 {
730 return (ifp->if_eflags & IFEF_SKYWALK_NATIVE) && ifp->if_na != NULL;
731 }
732
733
734 /*
735 * Caller must already be holding ifnet lock.
736 */
737 struct if_proto *
find_attached_proto(struct ifnet * ifp,u_int32_t protocol_family)738 find_attached_proto(struct ifnet *ifp, u_int32_t protocol_family)
739 {
740 struct if_proto *proto = NULL;
741 u_int32_t i = proto_hash_value(protocol_family);
742
743 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);
744
745 if (ifp->if_proto_hash != NULL) {
746 proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
747 }
748
749 while (proto != NULL && proto->protocol_family != protocol_family) {
750 proto = SLIST_NEXT(proto, next_hash);
751 }
752
753 if (proto != NULL) {
754 if_proto_ref(proto);
755 }
756
757 return proto;
758 }
759
760 /*
761 * Clat routines.
762 */
763
764 /*
765 * This routine checks if the destination address is not a loopback, link-local,
766 * multicast or broadcast address.
767 */
768 int
dlil_is_clat_needed(protocol_family_t proto_family,mbuf_t m)769 dlil_is_clat_needed(protocol_family_t proto_family, mbuf_t m)
770 {
771 int ret = 0;
772 switch (proto_family) {
773 case PF_INET: {
774 struct ip *iph = mtod(m, struct ip *);
775 if (CLAT46_NEEDED(ntohl(iph->ip_dst.s_addr))) {
776 ret = 1;
777 }
778 break;
779 }
780 case PF_INET6: {
781 struct ip6_hdr *ip6h = mtod(m, struct ip6_hdr *);
782 if ((size_t)m_pktlen(m) >= sizeof(struct ip6_hdr) &&
783 CLAT64_NEEDED(&ip6h->ip6_dst)) {
784 ret = 1;
785 }
786 break;
787 }
788 }
789
790 return ret;
791 }
792
793 /*
794 * @brief This routine translates IPv4 packet to IPv6 packet,
795 * updates protocol checksum and also translates ICMP for code
796 * along with inner header translation.
797 *
798 * @param ifp Pointer to the interface
799 * @param proto_family pointer to protocol family. It is updated if function
800 * performs the translation successfully.
801 * @param m Pointer to the pointer pointing to the packet. Needed because this
802 * routine can end up changing the mbuf to a different one.
803 *
804 * @return 0 on success or else a negative value.
805 */
/*
 * @brief This routine translates IPv4 packet to IPv6 packet,
 *     updates protocol checksum and also translates ICMP for code
 *     along with inner header translation.
 *
 * @param ifp Pointer to the interface
 * @param proto_family pointer to protocol family. It is updated if function
 *     performs the translation successfully.
 * @param m Pointer to the pointer pointing to the packet. Needed because this
 *     routine can end up changing the mbuf to a different one.
 *
 * @return 0 on success or else a negative value.  On any failure the
 *     packet may have been freed and *m set to NULL — callers must not
 *     touch the old mbuf after an error.
 */
errno_t
dlil_clat46(ifnet_t ifp, protocol_family_t *proto_family, mbuf_t *m)
{
	VERIFY(*proto_family == PF_INET);
	VERIFY(IS_INTF_CLAT46(ifp));

	pbuf_t pbuf_store, *pbuf = NULL;
	struct ip *iph = NULL;
	struct in_addr osrc, odst;
	uint8_t proto = 0;
	struct in6_addr src_storage = {};
	struct in6_addr *src = NULL;
	struct sockaddr_in6 dstsock = {};
	int error = 0;
	uint16_t off = 0;
	uint16_t tot_len = 0;
	uint16_t ip_id_val = 0;
	uint16_t ip_frag_off = 0;

	boolean_t is_frag = FALSE;
	boolean_t is_first_frag = TRUE;
	boolean_t is_last_frag = TRUE;

	/*
	 * Ensure that the incoming mbuf chain contains a valid
	 * IPv4 header in contiguous memory, or exit early.
	 */
	if ((size_t)(*m)->m_pkthdr.len < sizeof(struct ip) ||
	    ((size_t)(*m)->m_len < sizeof(struct ip) &&
	    (*m = m_pullup(*m, sizeof(struct ip))) == NULL)) {
		ip6stat.ip6s_clat464_in_tooshort_drop++;
		return -1;
	}

	/* Snapshot header fields before translation invalidates iph */
	iph = mtod(*m, struct ip *);
	osrc = iph->ip_src;
	odst = iph->ip_dst;
	proto = iph->ip_p;
	off = (uint16_t)(iph->ip_hl << 2);
	ip_id_val = iph->ip_id;
	ip_frag_off = ntohs(iph->ip_off) & IP_OFFMASK;

	tot_len = ntohs(iph->ip_len);

	/* Validate that mbuf contains IP payload equal to `iph->ip_len' */
	if ((size_t)(*m)->m_pkthdr.len < tot_len) {
		ip6stat.ip6s_clat464_in_tooshort_drop++;
		return -1;
	}

	/* pbuf takes over the mbuf; reclaimed under `cleanup' below */
	pbuf_init_mbuf(&pbuf_store, *m, ifp);
	pbuf = &pbuf_store;

	/*
	 * For packets that are not first frags
	 * we only need to adjust CSUM.
	 * For 4 to 6, Fragmentation header gets appended
	 * after proto translation.
	 */
	if (ntohs(iph->ip_off) & ~(IP_DF | IP_RF)) {
		is_frag = TRUE;

		/* If the offset is not zero, it is not first frag */
		if (ip_frag_off != 0) {
			is_first_frag = FALSE;
		}

		/* If IP_MF is set, then it is not last frag */
		if (ntohs(iph->ip_off) & IP_MF) {
			is_last_frag = FALSE;
		}
	}

	/*
	 * Translate IPv4 destination to IPv6 destination by using the
	 * prefixes learned through prior PLAT discovery.
	 */
	if ((error = nat464_synthesize_ipv6(ifp, &odst, &dstsock.sin6_addr)) != 0) {
		ip6stat.ip6s_clat464_out_v6synthfail_drop++;
		goto cleanup;
	}

	dstsock.sin6_len = sizeof(struct sockaddr_in6);
	dstsock.sin6_family = AF_INET6;

	/*
	 * Retrive the local IPv6 CLAT46 address reserved for stateless
	 * translation.
	 */
	src = in6_selectsrc_core(&dstsock, 0, ifp, 0, &src_storage, NULL, &error,
	    NULL, NULL, TRUE);

	if (src == NULL) {
		ip6stat.ip6s_clat464_out_nov6addr_drop++;
		error = -1;
		goto cleanup;
	}

	/*
	 * Translate the IP header part first.
	 * NOTE: `nat464_translate_46' handles the situation where the value
	 * `off' is past the end of the mbuf chain that is associated with
	 * the pbuf, in a graceful manner.
	 */
	error = (nat464_translate_46(pbuf, off, iph->ip_tos, iph->ip_p,
	    iph->ip_ttl, src_storage, dstsock.sin6_addr, tot_len) == NT_NAT64) ? 0 : -1;

	iph = NULL; /* Invalidate iph as pbuf has been modified */

	if (error != 0) {
		ip6stat.ip6s_clat464_out_46transfail_drop++;
		goto cleanup;
	}

	/*
	 * Translate protocol header, update checksum, checksum flags
	 * and related fields.
	 */
	error = (nat464_translate_proto(pbuf, (struct nat464_addr *)&osrc, (struct nat464_addr *)&odst,
	    proto, PF_INET, PF_INET6, NT_OUT, !is_first_frag) == NT_NAT64) ? 0 : -1;

	if (error != 0) {
		ip6stat.ip6s_clat464_out_46proto_transfail_drop++;
		goto cleanup;
	}

	/* Now insert the IPv6 fragment header */
	if (is_frag) {
		error = nat464_insert_frag46(pbuf, ip_id_val, ip_frag_off, is_last_frag);

		if (error != 0) {
			ip6stat.ip6s_clat464_out_46frag_transfail_drop++;
			goto cleanup;
		}
	}

cleanup:
	/* Hand the (possibly replaced) mbuf back to the caller */
	if (pbuf_is_valid(pbuf)) {
		*m = pbuf->pb_mbuf;
		pbuf->pb_mbuf = NULL;
		pbuf_destroy(pbuf);
	} else {
		error = -1;
		*m = NULL;
		ip6stat.ip6s_clat464_out_invalpbuf_drop++;
	}

	if (error == 0) {
		*proto_family = PF_INET6;
		ip6stat.ip6s_clat464_out_success++;
	}

	return error;
}
960
961 /*
962 * @brief This routine translates incoming IPv6 to IPv4 packet,
963 * updates protocol checksum and also translates ICMPv6 outer
964 * and inner headers
965 *
966 * @return 0 on success or else a negative value.
967 */
/*
 * @brief This routine translates incoming IPv6 to IPv4 packet,
 *     updates protocol checksum and also translates ICMPv6 outer
 *     and inner headers
 *
 * Packets whose destination does not match the interface's reserved
 * CLAT46 IPv6 address (or when no such address is configured yet) are
 * passed through untouched with a 0 return.
 *
 * @return 0 on success or else a negative value.
 */
errno_t
dlil_clat64(ifnet_t ifp, protocol_family_t *proto_family, mbuf_t *m)
{
	VERIFY(*proto_family == PF_INET6);
	VERIFY(IS_INTF_CLAT46(ifp));

	struct ip6_hdr *ip6h = NULL;
	struct in6_addr osrc, odst;
	uint8_t proto = 0;
	struct in6_ifaddr *ia6_clat_dst = NULL;
	struct in_ifaddr *ia4_clat_dst = NULL;
	struct in_addr *dst = NULL;
	struct in_addr src;
	int error = 0;
	uint32_t off = 0;
	u_int64_t tot_len = 0;
	uint8_t tos = 0;
	boolean_t is_first_frag = TRUE;

	/*
	 * Ensure that the incoming mbuf chain contains a valid
	 * IPv6 header in contiguous memory, or exit early.
	 */
	if ((size_t)(*m)->m_pkthdr.len < sizeof(struct ip6_hdr) ||
	    ((size_t)(*m)->m_len < sizeof(struct ip6_hdr) &&
	    (*m = m_pullup(*m, sizeof(struct ip6_hdr))) == NULL)) {
		ip6stat.ip6s_clat464_in_tooshort_drop++;
		return -1;
	}

	ip6h = mtod(*m, struct ip6_hdr *);
	/* Validate that mbuf contains IP payload equal to ip6_plen */
	if ((size_t)(*m)->m_pkthdr.len < ntohs(ip6h->ip6_plen) + sizeof(struct ip6_hdr)) {
		ip6stat.ip6s_clat464_in_tooshort_drop++;
		return -1;
	}

	osrc = ip6h->ip6_src;
	odst = ip6h->ip6_dst;

	/*
	 * Retrieve the local CLAT46 reserved IPv6 address.
	 * Let the packet pass if we don't find one, as the flag
	 * may get set before IPv6 configuration has taken place.
	 */
	ia6_clat_dst = in6ifa_ifpwithflag(ifp, IN6_IFF_CLAT46);
	if (ia6_clat_dst == NULL) {
		goto done;
	}

	/*
	 * Check if the original dest in the packet is same as the reserved
	 * CLAT46 IPv6 address
	 */
	if (IN6_ARE_ADDR_EQUAL(&odst, &ia6_clat_dst->ia_addr.sin6_addr)) {
		bool translate = false;
		pbuf_t pbuf_store, *pbuf = NULL;
		/* pbuf takes over the mbuf; reclaimed under `cleanup' below */
		pbuf_init_mbuf(&pbuf_store, *m, ifp);
		pbuf = &pbuf_store;

		/*
		 * Retrieve the local CLAT46 IPv4 address reserved for stateless
		 * translation.
		 */
		ia4_clat_dst = inifa_ifpclatv4(ifp);
		if (ia4_clat_dst == NULL) {
			ifa_remref(&ia6_clat_dst->ia_ifa);
			ip6stat.ip6s_clat464_in_nov4addr_drop++;
			error = -1;
			goto cleanup;
		}
		/* done with the IPv6 address; drop its reference */
		ifa_remref(&ia6_clat_dst->ia_ifa);

		/* Translate IPv6 src to IPv4 src by removing the NAT64 prefix */
		dst = &ia4_clat_dst->ia_addr.sin_addr;
		error = nat464_synthesize_ipv4(ifp, &osrc, &src, &translate);
		if (error != 0) {
			ip6stat.ip6s_clat464_in_v4synthfail_drop++;
			error = -1;
			goto cleanup;
		}
		if (!translate) {
			/* no translation required */
			if (ip6h->ip6_nxt != IPPROTO_ICMPV6) {
				/* only allow icmpv6 */
				ip6stat.ip6s_clat464_in_v4synthfail_drop++;
				error = -1;
			}
			goto cleanup;
		}

		/* re-read the header from pbuf before snapshotting fields */
		ip6h = pbuf->pb_data;
		off = sizeof(struct ip6_hdr);
		proto = ip6h->ip6_nxt;
		tos = (ntohl(ip6h->ip6_flow) >> 20) & 0xff;
		tot_len = ntohs(ip6h->ip6_plen) + sizeof(struct ip6_hdr);

		/*
		 * Translate the IP header and update the fragmentation
		 * header if needed
		 */
		error = (nat464_translate_64(pbuf, off, tos, &proto,
		    ip6h->ip6_hlim, src, *dst, tot_len, &is_first_frag) == NT_NAT64) ?
		    0 : -1;

		ip6h = NULL; /* Invalidate ip6h as pbuf has been changed */

		if (error != 0) {
			ip6stat.ip6s_clat464_in_64transfail_drop++;
			goto cleanup;
		}

		/*
		 * Translate protocol header, update checksum, checksum flags
		 * and related fields.
		 */
		error = (nat464_translate_proto(pbuf, (struct nat464_addr *)&osrc,
		    (struct nat464_addr *)&odst, proto, PF_INET6, PF_INET,
		    NT_IN, !is_first_frag) == NT_NAT64) ? 0 : -1;

		if (error != 0) {
			ip6stat.ip6s_clat464_in_64proto_transfail_drop++;
			goto cleanup;
		}

cleanup:
		if (ia4_clat_dst != NULL) {
			ifa_remref(&ia4_clat_dst->ia_ifa);
		}

		/* Hand the (possibly replaced) mbuf back to the caller */
		if (pbuf_is_valid(pbuf)) {
			*m = pbuf->pb_mbuf;
			pbuf->pb_mbuf = NULL;
			pbuf_destroy(pbuf);
		} else {
			error = -1;
			ip6stat.ip6s_clat464_in_invalpbuf_drop++;
		}

		if (error == 0 && translate) {
			*proto_family = PF_INET;
			ip6stat.ip6s_clat464_in_success++;
		}
	} /* CLAT traffic */

done:
	return error;
}
1116
1117 /*
1118 * Thread management
1119 */
/*
 * Tear down and reset a DLIL input thread's state structure so it can be
 * reused or released.  The caller is expected to have already stopped the
 * associated worker threads and drained the packet queue; the VERIFY
 * assertions below enforce that no packets, affinity settings, or
 * driver/poller thread references remain.
 */
void
dlil_clean_threading_info(struct dlil_threading_info *inp)
{
	/* Destroy the per-thread mutex, then free its dedicated lock group */
	lck_mtx_destroy(&inp->dlth_lock, inp->dlth_lock_grp);
	lck_grp_free(inp->dlth_lock_grp);
	inp->dlth_lock_grp = NULL;

	/* Clear flags, counters, name, and interface back-pointer */
	inp->dlth_flags = 0;
	inp->dlth_wtot = 0;
	bzero(inp->dlth_name_storage, sizeof(inp->dlth_name_storage));
	inp->dlth_name = NULL;
	inp->dlth_ifp = NULL;
	/* The packet queue must already be empty at this point */
	VERIFY(qhead(&inp->dlth_pkts) == NULL && qempty(&inp->dlth_pkts));
	qlimit(&inp->dlth_pkts) = 0;
	bzero(&inp->dlth_stats, sizeof(inp->dlth_stats));

	/* No affinity or worker/poller thread references may remain */
	VERIFY(!inp->dlth_affinity);
	inp->dlth_thread = THREAD_NULL;
	inp->dlth_strategy = NULL;
	VERIFY(inp->dlth_driver_thread == THREAD_NULL);
	VERIFY(inp->dlth_poller_thread == THREAD_NULL);
	VERIFY(inp->dlth_affinity_tag == 0);
#if IFNET_INPUT_SANITY_CHK
	/* Sanity-check packet counter is only present in checked builds */
	inp->dlth_pkts_cnt = 0;
#endif /* IFNET_INPUT_SANITY_CHK */
}
1146
1147 /*
1148 * Lock management
1149 */
1150 static errno_t
_dlil_get_lock_assertion_type(ifnet_lock_assert_t what,unsigned int * type)1151 _dlil_get_lock_assertion_type(ifnet_lock_assert_t what, unsigned int *type)
1152 {
1153 switch (what) {
1154 case IFNET_LCK_ASSERT_EXCLUSIVE:
1155 *type = LCK_RW_ASSERT_EXCLUSIVE;
1156 return 0;
1157
1158 case IFNET_LCK_ASSERT_SHARED:
1159 *type = LCK_RW_ASSERT_SHARED;
1160 return 0;
1161
1162 case IFNET_LCK_ASSERT_OWNED:
1163 *type = LCK_RW_ASSERT_HELD;
1164 return 0;
1165
1166 case IFNET_LCK_ASSERT_NOTOWNED:
1167 /* nothing to do here for RW lock; bypass assert */
1168 return ENOENT;
1169
1170 default:
1171 panic("bad ifnet assert type: %d", what);
1172 /* NOTREACHED */
1173 }
1174 }
1175
/* Acquire the global dlil_ifnet_lock mutex. */
__private_extern__ void
dlil_if_lock(void)
{
	lck_mtx_lock(&dlil_ifnet_lock);
}
1181
/* Release the global dlil_ifnet_lock mutex. */
__private_extern__ void
dlil_if_unlock(void)
{
	lck_mtx_unlock(&dlil_ifnet_lock);
}
1187
/* Assert that the current thread owns dlil_ifnet_lock. */
__private_extern__ void
dlil_if_lock_assert(void)
{
	LCK_MTX_ASSERT(&dlil_ifnet_lock, LCK_MTX_ASSERT_OWNED);
}
1193
1194 __private_extern__ void
ifnet_head_lock_assert(ifnet_lock_assert_t what)1195 ifnet_head_lock_assert(ifnet_lock_assert_t what)
1196 {
1197 unsigned int type = 0;
1198
1199 if (_dlil_get_lock_assertion_type(what, &type) == 0) {
1200 LCK_RW_ASSERT(&ifnet_head_lock, type);
1201 }
1202 }
1203
1204 __private_extern__ void
ifnet_lock_assert(struct ifnet * ifp,ifnet_lock_assert_t what)1205 ifnet_lock_assert(struct ifnet *ifp, ifnet_lock_assert_t what)
1206 {
1207 #if !MACH_ASSERT
1208 #pragma unused(ifp)
1209 #endif
1210 unsigned int type = 0;
1211
1212 if (_dlil_get_lock_assertion_type(what, &type) == 0) {
1213 LCK_RW_ASSERT(&ifp->if_lock, type);
1214 }
1215 }
1216
/* Take the interface's if_lock for shared (read) access. */
__private_extern__ void
ifnet_lock_shared(struct ifnet *ifp)
{
	lck_rw_lock_shared(&ifp->if_lock);
}
1222
/* Take the interface's if_lock for exclusive (write) access. */
__private_extern__ void
ifnet_lock_exclusive(struct ifnet *ifp)
{
	lck_rw_lock_exclusive(&ifp->if_lock);
}
1228
/* Release the interface's if_lock (held shared or exclusive). */
__private_extern__ void
ifnet_lock_done(struct ifnet *ifp)
{
	lck_rw_done(&ifp->if_lock);
}
1234
1235 #if INET
/* Take the interface's IPv4 data lock for shared (read) access. */
__private_extern__ void
if_inetdata_lock_shared(struct ifnet *ifp)
{
	lck_rw_lock_shared(&ifp->if_inetdata_lock);
}
1241
/* Take the interface's IPv4 data lock for exclusive (write) access. */
__private_extern__ void
if_inetdata_lock_exclusive(struct ifnet *ifp)
{
	lck_rw_lock_exclusive(&ifp->if_inetdata_lock);
}
1247
/* Release the interface's IPv4 data lock (held shared or exclusive). */
__private_extern__ void
if_inetdata_lock_done(struct ifnet *ifp)
{
	lck_rw_done(&ifp->if_inetdata_lock);
}
1253 #endif /* INET */
1254
/* Take the interface's IPv6 data lock for shared (read) access. */
__private_extern__ void
if_inet6data_lock_shared(struct ifnet *ifp)
{
	lck_rw_lock_shared(&ifp->if_inet6data_lock);
}
1260
/* Take the interface's IPv6 data lock for exclusive (write) access. */
__private_extern__ void
if_inet6data_lock_exclusive(struct ifnet *ifp)
{
	lck_rw_lock_exclusive(&ifp->if_inet6data_lock);
}
1266
/* Release the interface's IPv6 data lock (held shared or exclusive). */
__private_extern__ void
if_inet6data_lock_done(struct ifnet *ifp)
{
	lck_rw_done(&ifp->if_inet6data_lock);
}
1272
/* Take the global interface-list lock for shared (read) access. */
__private_extern__ void
ifnet_head_lock_shared(void)
{
	lck_rw_lock_shared(&ifnet_head_lock);
}
1278
/* Take the global interface-list lock for exclusive (write) access. */
__private_extern__ void
ifnet_head_lock_exclusive(void)
{
	lck_rw_lock_exclusive(&ifnet_head_lock);
}
1284
/* Release the global interface-list lock (held shared or exclusive). */
__private_extern__ void
ifnet_head_done(void)
{
	lck_rw_done(&ifnet_head_lock);
}
1290
/* Assert that the current thread holds ifnet_head_lock exclusively. */
__private_extern__ void
ifnet_head_assert_exclusive(void)
{
	LCK_RW_ASSERT(&ifnet_head_lock, LCK_RW_ASSERT_EXCLUSIVE);
}
1296
1297 static errno_t
if_mcasts_update_common(struct ifnet * ifp,bool sync)1298 if_mcasts_update_common(struct ifnet * ifp, bool sync)
1299 {
1300 errno_t err;
1301
1302 if (sync) {
1303 err = ifnet_ioctl(ifp, 0, SIOCADDMULTI, NULL);
1304 if (err == EAFNOSUPPORT) {
1305 err = 0;
1306 }
1307 } else {
1308 ifnet_ioctl_async(ifp, SIOCADDMULTI);
1309 err = 0;
1310 }
1311 DLIL_PRINTF("%s: %s %d suspended link-layer multicast membership(s) "
1312 "(err=%d)\n", if_name(ifp),
1313 (err == 0 ? "successfully restored" : "failed to restore"),
1314 ifp->if_updatemcasts, err);
1315
1316 /* just return success */
1317 return 0;
1318 }
1319
/* Restore suspended multicast memberships without blocking the caller. */
errno_t
if_mcasts_update_async(struct ifnet *ifp)
{
	return if_mcasts_update_common(ifp, false);
}
1325
/* Restore suspended multicast memberships, waiting for the ioctl to finish. */
errno_t
if_mcasts_update(struct ifnet *ifp)
{
	return if_mcasts_update_common(ifp, true);
}
1331