1 /*
2 * Copyright (c) 2004-2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include "kpi_interface.h"
30
31 #include <sys/queue.h>
32 #include <sys/param.h> /* for definition of NULL */
33 #include <kern/debug.h> /* for panic */
34 #include <sys/errno.h>
35 #include <sys/socket.h>
36 #include <sys/kern_event.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/kpi_mbuf.h>
40 #include <sys/mcache.h>
41 #include <sys/protosw.h>
42 #include <sys/syslog.h>
43 #include <net/if_var.h>
44 #include <net/if_dl.h>
45 #include <net/dlil.h>
46 #include <net/if_types.h>
47 #include <net/if_dl.h>
48 #include <net/if_arp.h>
49 #include <net/if_llreach.h>
50 #include <net/if_ether.h>
51 #include <net/net_api_stats.h>
52 #include <net/route.h>
53 #include <net/if_ports_used.h>
54 #include <libkern/libkern.h>
55 #include <libkern/OSAtomic.h>
56 #include <kern/locks.h>
57 #include <kern/clock.h>
58 #include <sys/sockio.h>
59 #include <sys/proc.h>
60 #include <sys/sysctl.h>
61 #include <sys/mbuf.h>
62 #include <netinet/ip_var.h>
63 #include <netinet/udp.h>
64 #include <netinet/udp_var.h>
65 #include <netinet/tcp.h>
66 #include <netinet/tcp_var.h>
67 #include <netinet/in_pcb.h>
68 #ifdef INET
69 #include <netinet/igmp_var.h>
70 #endif
71 #include <netinet6/mld6_var.h>
72 #include <netkey/key.h>
73 #include <stdbool.h>
74
75 #include "net/net_str_id.h"
76
77 #if CONFIG_MACF
78 #include <sys/kauth.h>
79 #include <security/mac_framework.h>
80 #endif
81
82 #if SKYWALK
83 #include <skywalk/os_skywalk_private.h>
84 #include <skywalk/nexus/netif/nx_netif.h>
85 #endif /* SKYWALK */
86
87 #undef ifnet_allocate
88 errno_t ifnet_allocate(const struct ifnet_init_params *init,
89 ifnet_t *ifp);
90
91 static errno_t ifnet_allocate_common(const struct ifnet_init_params *init,
92 ifnet_t *ifp, bool is_internal);
93
94
/*
 * Stamp an if_lastchange timeval with the current net uptime.
 * Resolution is whole seconds only; tv_usec is always zeroed.
 */
#define TOUCHLASTCHANGE(__if_lastchange) { \
	(__if_lastchange)->tv_sec = (time_t)net_uptime(); \
	(__if_lastchange)->tv_usec = 0; \
}
99
100 static errno_t ifnet_defrouter_llreachinfo(ifnet_t, sa_family_t,
101 struct ifnet_llreach_info *);
102 static void ifnet_kpi_free(ifnet_t);
103 static errno_t ifnet_list_get_common(ifnet_family_t, boolean_t, ifnet_t **,
104 u_int32_t *);
105 static errno_t ifnet_set_lladdr_internal(ifnet_t, const void *, size_t,
106 u_char, int);
107 static errno_t ifnet_awdl_check_eflags(ifnet_t, u_int32_t *, u_int32_t *);
108
109 /*
110 * Temporary work around until we have real reference counting
111 *
112 * We keep the bits about calling dlil_if_release (which should be
113 * called recycle) transparent by calling it from our if_free function
114 * pointer. We have to keep the client's original detach function
115 * somewhere so we can call it.
116 */
117 static void
ifnet_kpi_free(ifnet_t ifp)118 ifnet_kpi_free(ifnet_t ifp)
119 {
120 if ((ifp->if_refflags & IFRF_EMBRYONIC) == 0) {
121 ifnet_detached_func detach_func;
122
123 detach_func = ifp->if_detach;
124 if (detach_func != NULL) {
125 (*detach_func)(ifp);
126 }
127 }
128
129 ifnet_dispose(ifp);
130 }
131
132 errno_t
ifnet_allocate_common(const struct ifnet_init_params * init,ifnet_t * ifp,bool is_internal)133 ifnet_allocate_common(const struct ifnet_init_params *init,
134 ifnet_t *ifp, bool is_internal)
135 {
136 struct ifnet_init_eparams einit;
137
138 bzero(&einit, sizeof(einit));
139
140 einit.ver = IFNET_INIT_CURRENT_VERSION;
141 einit.len = sizeof(einit);
142 einit.flags = IFNET_INIT_LEGACY | IFNET_INIT_NX_NOAUTO;
143 if (!is_internal) {
144 einit.flags |= IFNET_INIT_ALLOC_KPI;
145 }
146 einit.uniqueid = init->uniqueid;
147 einit.uniqueid_len = init->uniqueid_len;
148 einit.name = init->name;
149 einit.unit = init->unit;
150 einit.family = init->family;
151 einit.type = init->type;
152 einit.output = init->output;
153 einit.demux = init->demux;
154 einit.add_proto = init->add_proto;
155 einit.del_proto = init->del_proto;
156 einit.check_multi = init->check_multi;
157 einit.framer = init->framer;
158 einit.softc = init->softc;
159 einit.ioctl = init->ioctl;
160 einit.set_bpf_tap = init->set_bpf_tap;
161 einit.detach = init->detach;
162 einit.event = init->event;
163 einit.broadcast_addr = init->broadcast_addr;
164 einit.broadcast_len = init->broadcast_len;
165
166 return ifnet_allocate_extended(&einit, ifp);
167 }
168
169 errno_t
ifnet_allocate_internal(const struct ifnet_init_params * init,ifnet_t * ifp)170 ifnet_allocate_internal(const struct ifnet_init_params *init, ifnet_t *ifp)
171 {
172 return ifnet_allocate_common(init, ifp, true);
173 }
174
175 errno_t
ifnet_allocate(const struct ifnet_init_params * init,ifnet_t * ifp)176 ifnet_allocate(const struct ifnet_init_params *init, ifnet_t *ifp)
177 {
178 return ifnet_allocate_common(init, ifp, false);
179 }
180
181 static void
ifnet_set_broadcast_addr(ifnet_t ifp,const void * broadcast_addr,u_int32_t broadcast_len)182 ifnet_set_broadcast_addr(ifnet_t ifp, const void * broadcast_addr,
183 u_int32_t broadcast_len)
184 {
185 if (broadcast_len == 0 || broadcast_addr == NULL) {
186 /* no broadcast address */
187 bzero(&ifp->if_broadcast, sizeof(ifp->if_broadcast));
188 } else if (broadcast_len > sizeof(ifp->if_broadcast.u.buffer)) {
189 ifp->if_broadcast.u.ptr
190 = (u_char *)kalloc_data(broadcast_len,
191 Z_WAITOK | Z_NOFAIL);
192 bcopy(broadcast_addr,
193 ifp->if_broadcast.u.ptr,
194 broadcast_len);
195 } else {
196 bcopy(broadcast_addr,
197 ifp->if_broadcast.u.buffer,
198 broadcast_len);
199 }
200 ifp->if_broadcast.length = broadcast_len;
201 }
202
/*
 * ifnet_allocate_extended
 *
 * Validate the client-supplied extended init parameters, acquire an
 * ifnet from DLIL, and populate it.  On success, *interface holds the
 * new ifnet in the embryonic state (IFRF_EMBRYONIC); the caller is
 * expected to complete bring-up via the attach path.
 *
 * Returns 0 on success, EINVAL for malformed parameters, or the error
 * from dlil_if_acquire().
 */
errno_t
ifnet_allocate_extended(const struct ifnet_init_eparams *einit0,
    ifnet_t *interface)
{
#if SKYWALK
	ifnet_start_func ostart = NULL;
#endif /* SKYWALK */
	struct ifnet_init_eparams einit;
	struct ifnet *ifp = NULL;
	char if_xname[IFXNAMSIZ] = {0};
	int error;

	/* work on a local copy; the caller's parameters are never modified */
	einit = *einit0;

	if (einit.ver != IFNET_INIT_CURRENT_VERSION ||
	    einit.len < sizeof(einit)) {
		return EINVAL;
	}

	/* name must fit if_name[]; type must be a non-zero 8-bit value */
	if (einit.family == 0 || einit.name == NULL ||
	    strlen(einit.name) >= IFNAMSIZ ||
	    (einit.type & 0xFFFFFF00) != 0 || einit.type == 0) {
		return EINVAL;
	}

#if SKYWALK
	/* headroom must be a multiple of 8 bytes */
	if ((einit.tx_headroom & 0x7) != 0) {
		return EINVAL;
	}
	if ((einit.flags & IFNET_INIT_SKYWALK_NATIVE) == 0) {
		/*
		 * Currently Interface advisory reporting is supported only
		 * for skywalk interface.
		 */
		if ((einit.flags & IFNET_INIT_IF_ADV) != 0) {
			return EINVAL;
		}
	}
#endif /* SKYWALK */

	if (einit.flags & IFNET_INIT_LEGACY) {
		/* legacy output model: if_output required, no new-model callbacks */
#if SKYWALK
		if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
			return EINVAL;
		}
#endif /* SKYWALK */
		if (einit.output == NULL ||
		    (einit.flags & IFNET_INIT_INPUT_POLL)) {
			return EINVAL;
		}
		einit.pre_enqueue = NULL;
		einit.start = NULL;
		einit.output_ctl = NULL;
		einit.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
		einit.input_poll = NULL;
		einit.input_ctl = NULL;
	} else {
#if SKYWALK
		/*
		 * For native Skywalk drivers, steer all start requests
		 * to ifp_if_start() until the netif device adapter is
		 * fully activated, at which point we will point it to
		 * nx_netif_doorbell().
		 */
		if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
			if (einit.start != NULL) {
				return EINVAL;
			}
			/* override output start callback */
			ostart = einit.start = ifp_if_start;
		} else {
			ostart = einit.start;
		}
#endif /* SKYWALK */
		/* new output model requires a start callback */
		if (einit.start == NULL) {
			return EINVAL;
		}

		einit.output = NULL;
		if (einit.output_sched_model >= IFNET_SCHED_MODEL_MAX) {
			return EINVAL;
		}

		/* opportunistic input polling needs both poll and ctl callbacks */
		if (einit.flags & IFNET_INIT_INPUT_POLL) {
			if (einit.input_poll == NULL || einit.input_ctl == NULL) {
				return EINVAL;
			}
		} else {
			einit.input_poll = NULL;
			einit.input_ctl = NULL;
		}
	}

	/* if_type is stored as u_char, if_unit as short; range-check both */
	if (einit.type > UCHAR_MAX) {
		return EINVAL;
	}

	if (einit.unit > SHRT_MAX) {
		return EINVAL;
	}

	/* Initialize external name (name + unit) */
	(void) snprintf(if_xname, sizeof(if_xname), "%s%d",
	    einit.name, einit.unit);

	/* default the unique id to the external name */
	if (einit.uniqueid == NULL) {
		einit.uniqueid = if_xname;
		einit.uniqueid_len = (uint32_t)strlen(if_xname);
	}

	error = dlil_if_acquire(einit.family, einit.uniqueid,
	    einit.uniqueid_len, if_xname, &ifp);

	if (error == 0) {
		uint64_t br;

		/*
		 * Cast ifp->if_name as non const. dlil_if_acquire sets it up
		 * to point to storage of at least IFNAMSIZ bytes. It is safe
		 * to write to this.
		 */
		strlcpy(__DECONST(char *, ifp->if_name), einit.name, IFNAMSIZ);
		ifp->if_type = (u_char)einit.type;
		ifp->if_family = einit.family;
		ifp->if_subfamily = einit.subfamily;
		ifp->if_unit = (short)einit.unit;
		ifp->if_output = einit.output;
		ifp->if_pre_enqueue = einit.pre_enqueue;
		ifp->if_start = einit.start;
		ifp->if_output_ctl = einit.output_ctl;
		ifp->if_output_sched_model = einit.output_sched_model;
		ifp->if_output_bw.eff_bw = einit.output_bw;
		ifp->if_output_bw.max_bw = einit.output_bw_max;
		ifp->if_output_lt.eff_lt = einit.output_lt;
		ifp->if_output_lt.max_lt = einit.output_lt_max;
		ifp->if_input_poll = einit.input_poll;
		ifp->if_input_ctl = einit.input_ctl;
		ifp->if_input_bw.eff_bw = einit.input_bw;
		ifp->if_input_bw.max_bw = einit.input_bw_max;
		ifp->if_input_lt.eff_lt = einit.input_lt;
		ifp->if_input_lt.max_lt = einit.input_lt_max;
		ifp->if_demux = einit.demux;
		ifp->if_add_proto = einit.add_proto;
		ifp->if_del_proto = einit.del_proto;
		ifp->if_check_multi = einit.check_multi;
		ifp->if_framer_legacy = einit.framer;
		ifp->if_framer = einit.framer_extended;
		ifp->if_softc = einit.softc;
		ifp->if_ioctl = einit.ioctl;
		ifp->if_set_bpf_tap = einit.set_bpf_tap;
		/* default free handler runs the client detach then recycles */
		ifp->if_free = (einit.free != NULL) ? einit.free : ifnet_kpi_free;
		ifp->if_event = einit.event;
		ifp->if_detach = einit.detach;

		/* Initialize Network ID */
		ifp->network_id_len = 0;
		bzero(&ifp->network_id, sizeof(ifp->network_id));

		/* Initialize external name (name + unit) */
		snprintf(__DECONST(char *, ifp->if_xname), IFXNAMSIZ,
		    "%s", if_xname);

		/*
		 * On embedded, framer() is already in the extended form;
		 * we simply use it as is, unless the caller specifies
		 * framer_extended() which will then override it.
		 *
		 * On non-embedded, framer() has long been exposed as part
		 * of the public KPI, and therefore its signature must
		 * remain the same (without the pre- and postpend length
		 * parameters.) We special case ether_frameout, such that
		 * it gets mapped to its extended variant. All other cases
		 * utilize the stub routine which will simply return zeroes
		 * for those new parameters.
		 *
		 * Internally, DLIL will only use the extended callback
		 * variant which is represented by if_framer.
		 */
#if !XNU_TARGET_OS_OSX
		if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
			ifp->if_framer = ifp->if_framer_legacy;
		}
#else /* XNU_TARGET_OS_OSX */
		if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
			if (ifp->if_framer_legacy == ether_frameout) {
				ifp->if_framer = ether_frameout_extended;
			} else {
				ifp->if_framer = ifnet_framer_stub;
			}
		}
#endif /* XNU_TARGET_OS_OSX */

		/* reconcile effective vs maximum output bandwidth */
		if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
			ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
		} else if (ifp->if_output_bw.eff_bw == 0) {
			ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
		}

		/* reconcile effective vs maximum input bandwidth */
		if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
			ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
		} else if (ifp->if_input_bw.eff_bw == 0) {
			ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
		}

		/* if only one direction was specified, mirror it */
		if (ifp->if_output_bw.max_bw == 0) {
			ifp->if_output_bw = ifp->if_input_bw;
		} else if (ifp->if_input_bw.max_bw == 0) {
			ifp->if_input_bw = ifp->if_output_bw;
		}

		/* Pin if_baudrate to 32 bits */
		br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
		if (br != 0) {
			ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
		}

		/* reconcile effective vs maximum output latency */
		if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
			ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
		} else if (ifp->if_output_lt.eff_lt == 0) {
			ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
		}

		/* reconcile effective vs maximum input latency */
		if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
			ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
		} else if (ifp->if_input_lt.eff_lt == 0) {
			ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
		}

		/* if only one direction was specified, mirror it */
		if (ifp->if_output_lt.max_lt == 0) {
			ifp->if_output_lt = ifp->if_input_lt;
		} else if (ifp->if_input_lt.max_lt == 0) {
			ifp->if_input_lt = ifp->if_output_lt;
		}

		/* fall back to the generic ioctl handler */
		if (ifp->if_ioctl == NULL) {
			ifp->if_ioctl = ifp_if_ioctl;
		}

		/* start from a clean extended-flags slate */
		if_clear_eflags(ifp, -1);
		if (ifp->if_start != NULL) {
			/* new output model: route if_output through the enqueue path */
			if_set_eflags(ifp, IFEF_TXSTART);
			if (ifp->if_pre_enqueue == NULL) {
				ifp->if_pre_enqueue = ifnet_enqueue;
			}
			ifp->if_output = ifp->if_pre_enqueue;
		}

		if (ifp->if_input_poll != NULL) {
			if_set_eflags(ifp, IFEF_RXPOLL);
		}

		ifp->if_output_dlil = dlil_output_handler;
		ifp->if_input_dlil = dlil_input_handler;

		/* legacy interfaces must carry no new-model callbacks */
		VERIFY(!(einit.flags & IFNET_INIT_LEGACY) ||
		    (ifp->if_pre_enqueue == NULL && ifp->if_start == NULL &&
		    ifp->if_output_ctl == NULL && ifp->if_input_poll == NULL &&
		    ifp->if_input_ctl == NULL));
		VERIFY(!(einit.flags & IFNET_INIT_INPUT_POLL) ||
		    (ifp->if_input_poll != NULL && ifp->if_input_ctl != NULL));

		ifnet_set_broadcast_addr(ifp, einit.broadcast_addr,
		    einit.broadcast_len);

		if_clear_xflags(ifp, -1);
#if SKYWALK
		ifp->if_tx_headroom = 0;
		ifp->if_tx_trailer = 0;
		ifp->if_rx_mit_ival = 0;
		/* remember the driver's start routine for later restoration */
		ifp->if_save_start = ostart;
		if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
			VERIFY(ifp->if_eflags & IFEF_TXSTART);
			VERIFY(!(einit.flags & IFNET_INIT_LEGACY));
			if_set_eflags(ifp, IFEF_SKYWALK_NATIVE);
			ifp->if_tx_headroom = einit.tx_headroom;
			ifp->if_tx_trailer = einit.tx_trailer;
			ifp->if_rx_mit_ival = einit.rx_mit_ival;
			/*
			 * For native Skywalk drivers, make sure packets
			 * emitted by the BSD stack get dropped until the
			 * interface is in service. When the netif host
			 * adapter is fully activated, we'll point it to
			 * nx_netif_output().
			 */
			ifp->if_output = ifp_if_output;
			/*
			 * Override driver-supplied parameters
			 * and force IFEF_ENQUEUE_MULTI?
			 */
			if (sk_netif_native_txmodel ==
			    NETIF_NATIVE_TXMODEL_ENQUEUE_MULTI) {
				einit.start_delay_qlen = sk_tx_delay_qlen;
				einit.start_delay_timeout = sk_tx_delay_timeout;
			}
			/* netif comes with native interfaces */
			VERIFY((ifp->if_xflags & IFXF_LEGACY) == 0);
		} else if (!ifnet_needs_compat(ifp)) {
			/*
			 * If we're told not to plumb in netif compat
			 * for this interface, set IFXF_NX_NOAUTO to
			 * prevent DLIL from auto-attaching the nexus.
			 */
			einit.flags |= IFNET_INIT_NX_NOAUTO;
			/* legacy (non-netif) interface */
			if_set_xflags(ifp, IFXF_LEGACY);
		}

		ifp->if_save_output = ifp->if_output;
		if ((einit.flags & IFNET_INIT_NX_NOAUTO) != 0) {
			if_set_xflags(ifp, IFXF_NX_NOAUTO);
		}
		if ((einit.flags & IFNET_INIT_IF_ADV) != 0) {
			if_set_eflags(ifp, IFEF_ADV_REPORT);
		}
#else /* !SKYWALK */
		/* legacy interface */
		if_set_xflags(ifp, IFXF_LEGACY);
#endif /* !SKYWALK */

		if ((ifp->if_snd = ifclassq_alloc()) == NULL) {
			panic_plain("%s: ifp=%p couldn't allocate class queues",
			    __func__, ifp);
			/* NOTREACHED */
		}

		/*
		 * output target queue delay is specified in millisecond
		 * convert it to nanoseconds
		 */
		IFCQ_TARGET_QDELAY(ifp->if_snd) =
		    einit.output_target_qdelay * 1000 * 1000;
		IFCQ_MAXLEN(ifp->if_snd) = einit.sndq_maxlen;

		ifnet_enqueue_multi_setup(ifp, einit.start_delay_qlen,
		    einit.start_delay_timeout);

		IFCQ_PKT_DROP_LIMIT(ifp->if_snd) = IFCQ_DEFAULT_PKT_DROP_LIMIT;

		/*
		 * Set embryonic flag; this will be cleared
		 * later when it is fully attached.
		 */
		ifp->if_refflags = IFRF_EMBRYONIC;

		/*
		 * Count the newly allocated ifnet
		 */
		OSIncrementAtomic64(&net_api_stats.nas_ifnet_alloc_count);
		INC_ATOMIC_INT64_LIM(net_api_stats.nas_ifnet_alloc_total);
		if ((einit.flags & IFNET_INIT_ALLOC_KPI) != 0) {
			if_set_xflags(ifp, IFXF_ALLOC_KPI);
		} else {
			OSIncrementAtomic64(
				&net_api_stats.nas_ifnet_alloc_os_count);
			INC_ATOMIC_INT64_LIM(
				net_api_stats.nas_ifnet_alloc_os_total);
		}

		*interface = ifp;
	}
	return error;
}
566
567 errno_t
ifnet_reference(ifnet_t ifp)568 ifnet_reference(ifnet_t ifp)
569 {
570 return dlil_if_ref(ifp);
571 }
572
573 void
ifnet_dispose(ifnet_t ifp)574 ifnet_dispose(ifnet_t ifp)
575 {
576 dlil_if_release(ifp);
577 }
578
579 errno_t
ifnet_release(ifnet_t ifp)580 ifnet_release(ifnet_t ifp)
581 {
582 return dlil_if_free(ifp);
583 }
584
585 errno_t
ifnet_interface_family_find(const char * module_string,ifnet_family_t * family_id)586 ifnet_interface_family_find(const char *module_string,
587 ifnet_family_t *family_id)
588 {
589 if (module_string == NULL || family_id == NULL) {
590 return EINVAL;
591 }
592
593 return net_str_id_find_internal(module_string, family_id,
594 NSI_IF_FAM_ID, 1);
595 }
596
597 void *
ifnet_softc(ifnet_t interface)598 ifnet_softc(ifnet_t interface)
599 {
600 return (interface == NULL) ? NULL : interface->if_softc;
601 }
602
603 const char *
ifnet_name(ifnet_t interface)604 ifnet_name(ifnet_t interface)
605 {
606 return (interface == NULL) ? NULL : interface->if_name;
607 }
608
609 ifnet_family_t
ifnet_family(ifnet_t interface)610 ifnet_family(ifnet_t interface)
611 {
612 return (interface == NULL) ? 0 : interface->if_family;
613 }
614
615 ifnet_subfamily_t
ifnet_subfamily(ifnet_t interface)616 ifnet_subfamily(ifnet_t interface)
617 {
618 return (interface == NULL) ? 0 : interface->if_subfamily;
619 }
620
621 u_int32_t
ifnet_unit(ifnet_t interface)622 ifnet_unit(ifnet_t interface)
623 {
624 return (interface == NULL) ? (u_int32_t)0xffffffff :
625 (u_int32_t)interface->if_unit;
626 }
627
628 u_int32_t
ifnet_index(ifnet_t interface)629 ifnet_index(ifnet_t interface)
630 {
631 return (interface == NULL) ? (u_int32_t)0xffffffff :
632 interface->if_index;
633 }
634
635 errno_t
ifnet_set_flags(ifnet_t interface,u_int16_t new_flags,u_int16_t mask)636 ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask)
637 {
638 bool set_IFF_UP;
639 bool change_IFF_UP;
640 uint16_t old_flags;
641
642 if (interface == NULL) {
643 return EINVAL;
644 }
645 set_IFF_UP = (new_flags & IFF_UP) != 0;
646 change_IFF_UP = (mask & IFF_UP) != 0;
647 #if SKYWALK
648 if (set_IFF_UP && change_IFF_UP) {
649 /*
650 * When a native skywalk interface is marked IFF_UP, ensure
651 * the flowswitch is attached.
652 */
653 ifnet_attach_native_flowswitch(interface);
654 }
655 #endif /* SKYWALK */
656
657 ifnet_lock_exclusive(interface);
658
659 /* If we are modifying the up/down state, call if_updown */
660 if (change_IFF_UP) {
661 if_updown(interface, set_IFF_UP);
662 }
663
664 old_flags = interface->if_flags;
665 interface->if_flags = (new_flags & mask) | (interface->if_flags & ~mask);
666 /* If we are modifying the multicast flag, set/unset the silent flag */
667 if ((old_flags & IFF_MULTICAST) !=
668 (interface->if_flags & IFF_MULTICAST)) {
669 #if INET
670 if (IGMP_IFINFO(interface) != NULL) {
671 igmp_initsilent(interface, IGMP_IFINFO(interface));
672 }
673 #endif /* INET */
674 if (MLD_IFINFO(interface) != NULL) {
675 mld6_initsilent(interface, MLD_IFINFO(interface));
676 }
677 }
678
679 ifnet_lock_done(interface);
680
681 return 0;
682 }
683
684 u_int16_t
ifnet_flags(ifnet_t interface)685 ifnet_flags(ifnet_t interface)
686 {
687 return (interface == NULL) ? 0 : interface->if_flags;
688 }
689
690 /*
691 * This routine ensures the following:
692 *
693 * If IFEF_AWDL is set by the caller, also set the rest of flags as
694 * defined in IFEF_AWDL_MASK.
695 *
696 * If IFEF_AWDL has been set on the interface and the caller attempts
697 * to clear one or more of the associated flags in IFEF_AWDL_MASK,
698 * return failure.
699 *
700 * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
701 * on the interface.
702 *
703 * All other flags not associated with AWDL are not affected.
704 *
705 * See <net/if.h> for current definition of IFEF_AWDL_MASK.
706 */
static errno_t
ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask)
{
	u_int32_t eflags;

	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);

	/* compute the flags as they would look after the proposed update */
	eflags = (*new_eflags & *mask) | (ifp->if_eflags & ~(*mask));

	if (ifp->if_eflags & IFEF_AWDL) {
		if (eflags & IFEF_AWDL) {
			/*
			 * AWDL remains set: all of its companion flags in
			 * IFEF_AWDL_MASK must remain set too.
			 */
			if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK) {
				return EINVAL;
			}
		} else {
			/* AWDL is being cleared: clear the whole group */
			*new_eflags &= ~IFEF_AWDL_MASK;
			*mask |= IFEF_AWDL_MASK;
		}
	} else if (eflags & IFEF_AWDL) {
		/* AWDL is being set: force the whole group on */
		*new_eflags |= IFEF_AWDL_MASK;
		*mask |= IFEF_AWDL_MASK;
	} else if (eflags & IFEF_AWDL_RESTRICTED &&
	    !(ifp->if_eflags & IFEF_AWDL)) {
		/*
		 * IFEF_AWDL_RESTRICTED requires IFEF_AWDL to be set.
		 * NOTE(review): the !(if_eflags & IFEF_AWDL) term is always
		 * true on this branch (we are in the else of that test);
		 * it is kept for clarity/safety.
		 */
		return EINVAL;
	}

	return 0;
}
735
/*
 * Update if_eflags under the ifnet lock, enforcing the AWDL flag-group
 * invariants (see ifnet_awdl_check_eflags) and posting a kernel event
 * when the IFEF_AWDL_RESTRICTED state changes.
 */
errno_t
ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask)
{
	uint32_t oeflags;
	struct kev_msg ev_msg;
	struct net_event_data ev_data;

	if (interface == NULL) {
		return EINVAL;
	}

	bzero(&ev_msg, sizeof(ev_msg));
	ifnet_lock_exclusive(interface);
	/*
	 * Sanity checks for IFEF_AWDL and its related flags.
	 */
	if (ifnet_awdl_check_eflags(interface, &new_flags, &mask) != 0) {
		ifnet_lock_done(interface);
		return EINVAL;
	}
	/*
	 * Currently Interface advisory reporting is supported only for
	 * skywalk interface.
	 */
	if ((((new_flags & mask) & IFEF_ADV_REPORT) != 0) &&
	    ((interface->if_eflags & IFEF_SKYWALK_NATIVE) == 0)) {
		ifnet_lock_done(interface);
		return EINVAL;
	}
	oeflags = interface->if_eflags;
	/* apply the update: clear the masked bits, then set the new ones */
	if_clear_eflags(interface, mask);
	if (new_flags != 0) {
		if_set_eflags(interface, (new_flags & mask));
	}
	ifnet_lock_done(interface);
	/*
	 * NOTE(review): if_eflags is re-read below after the lock has been
	 * dropped; a concurrent update could in principle be observed here.
	 */
	if (interface->if_eflags & IFEF_AWDL_RESTRICTED &&
	    !(oeflags & IFEF_AWDL_RESTRICTED)) {
		ev_msg.event_code = KEV_DL_AWDL_RESTRICTED;
		/*
		 * The interface is now restricted to applications that have
		 * the entitlement.
		 * The check for the entitlement will be done in the data
		 * path, so we don't have to do anything here.
		 */
	} else if (oeflags & IFEF_AWDL_RESTRICTED &&
	    !(interface->if_eflags & IFEF_AWDL_RESTRICTED)) {
		ev_msg.event_code = KEV_DL_AWDL_UNRESTRICTED;
	}
	/*
	 * Notify configd so that it has a chance to perform better
	 * reachability detection.
	 */
	if (ev_msg.event_code) {
		bzero(&ev_data, sizeof(ev_data));
		ev_msg.vendor_code = KEV_VENDOR_APPLE;
		ev_msg.kev_class = KEV_NETWORK_CLASS;
		ev_msg.kev_subclass = KEV_DL_SUBCLASS;
		strlcpy(ev_data.if_name, interface->if_name, IFNAMSIZ);
		ev_data.if_family = interface->if_family;
		ev_data.if_unit = interface->if_unit;
		ev_msg.dv[0].data_length = sizeof(struct net_event_data);
		ev_msg.dv[0].data_ptr = &ev_data;
		ev_msg.dv[1].data_length = 0;
		dlil_post_complete_msg(interface, &ev_msg);
	}

	return 0;
}
804
805 u_int32_t
ifnet_eflags(ifnet_t interface)806 ifnet_eflags(ifnet_t interface)
807 {
808 return (interface == NULL) ? 0 : interface->if_eflags;
809 }
810
811 errno_t
ifnet_set_idle_flags_locked(ifnet_t ifp,u_int32_t new_flags,u_int32_t mask)812 ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
813 {
814 if (ifp == NULL) {
815 return EINVAL;
816 }
817 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
818
819 /*
820 * If this is called prior to ifnet attach, the actual work will
821 * be done at attach time. Otherwise, if it is called after
822 * ifnet detach, then it is a no-op.
823 */
824 if (!ifnet_is_attached(ifp, 0)) {
825 ifp->if_idle_new_flags = new_flags;
826 ifp->if_idle_new_flags_mask = mask;
827 return 0;
828 } else {
829 ifp->if_idle_new_flags = ifp->if_idle_new_flags_mask = 0;
830 }
831
832 ifp->if_idle_flags = (new_flags & mask) | (ifp->if_idle_flags & ~mask);
833 return 0;
834 }
835
836 errno_t
ifnet_set_idle_flags(ifnet_t ifp,u_int32_t new_flags,u_int32_t mask)837 ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
838 {
839 errno_t err;
840
841 ifnet_lock_exclusive(ifp);
842 err = ifnet_set_idle_flags_locked(ifp, new_flags, mask);
843 ifnet_lock_done(ifp);
844
845 return err;
846 }
847
848 u_int32_t
ifnet_idle_flags(ifnet_t ifp)849 ifnet_idle_flags(ifnet_t ifp)
850 {
851 return (ifp == NULL) ? 0 : ifp->if_idle_flags;
852 }
853
854 errno_t
ifnet_set_link_quality(ifnet_t ifp,int quality)855 ifnet_set_link_quality(ifnet_t ifp, int quality)
856 {
857 errno_t err = 0;
858
859 if (ifp == NULL || quality < IFNET_LQM_MIN || quality > IFNET_LQM_MAX) {
860 err = EINVAL;
861 goto done;
862 }
863
864 if (!ifnet_is_attached(ifp, 0)) {
865 err = ENXIO;
866 goto done;
867 }
868
869 if_lqm_update(ifp, quality, 0);
870
871 done:
872 return err;
873 }
874
875 int
ifnet_link_quality(ifnet_t ifp)876 ifnet_link_quality(ifnet_t ifp)
877 {
878 int lqm;
879
880 if (ifp == NULL) {
881 return IFNET_LQM_THRESH_OFF;
882 }
883
884 ifnet_lock_shared(ifp);
885 lqm = ifp->if_interface_state.lqm_state;
886 ifnet_lock_done(ifp);
887
888 return lqm;
889 }
890
891 errno_t
ifnet_set_interface_state(ifnet_t ifp,struct if_interface_state * if_interface_state)892 ifnet_set_interface_state(ifnet_t ifp,
893 struct if_interface_state *if_interface_state)
894 {
895 errno_t err = 0;
896
897 if (ifp == NULL || if_interface_state == NULL) {
898 err = EINVAL;
899 goto done;
900 }
901
902 if (!ifnet_is_attached(ifp, 0)) {
903 err = ENXIO;
904 goto done;
905 }
906
907 if_state_update(ifp, if_interface_state);
908
909 done:
910 return err;
911 }
912
913 errno_t
ifnet_get_interface_state(ifnet_t ifp,struct if_interface_state * if_interface_state)914 ifnet_get_interface_state(ifnet_t ifp,
915 struct if_interface_state *if_interface_state)
916 {
917 errno_t err = 0;
918
919 if (ifp == NULL || if_interface_state == NULL) {
920 err = EINVAL;
921 goto done;
922 }
923
924 if (!ifnet_is_attached(ifp, 0)) {
925 err = ENXIO;
926 goto done;
927 }
928
929 if_get_state(ifp, if_interface_state);
930
931 done:
932 return err;
933 }
934
935
936 static errno_t
ifnet_defrouter_llreachinfo(ifnet_t ifp,sa_family_t af,struct ifnet_llreach_info * iflri)937 ifnet_defrouter_llreachinfo(ifnet_t ifp, sa_family_t af,
938 struct ifnet_llreach_info *iflri)
939 {
940 if (ifp == NULL || iflri == NULL) {
941 return EINVAL;
942 }
943
944 VERIFY(af == AF_INET || af == AF_INET6);
945
946 return ifnet_llreach_get_defrouter(ifp, af, iflri);
947 }
948
949 errno_t
ifnet_inet_defrouter_llreachinfo(ifnet_t ifp,struct ifnet_llreach_info * iflri)950 ifnet_inet_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
951 {
952 return ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri);
953 }
954
955 errno_t
ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp,struct ifnet_llreach_info * iflri)956 ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
957 {
958 return ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri);
959 }
960
961 errno_t
ifnet_set_capabilities_supported(ifnet_t ifp,u_int32_t new_caps,u_int32_t mask)962 ifnet_set_capabilities_supported(ifnet_t ifp, u_int32_t new_caps,
963 u_int32_t mask)
964 {
965 errno_t error = 0;
966 int tmp;
967
968 if (ifp == NULL) {
969 return EINVAL;
970 }
971
972 ifnet_lock_exclusive(ifp);
973 tmp = (new_caps & mask) | (ifp->if_capabilities & ~mask);
974 if ((tmp & ~IFCAP_VALID)) {
975 error = EINVAL;
976 } else {
977 ifp->if_capabilities = tmp;
978 }
979 ifnet_lock_done(ifp);
980
981 return error;
982 }
983
984 u_int32_t
ifnet_capabilities_supported(ifnet_t ifp)985 ifnet_capabilities_supported(ifnet_t ifp)
986 {
987 return (ifp == NULL) ? 0 : ifp->if_capabilities;
988 }
989
990
991 errno_t
ifnet_set_capabilities_enabled(ifnet_t ifp,u_int32_t new_caps,u_int32_t mask)992 ifnet_set_capabilities_enabled(ifnet_t ifp, u_int32_t new_caps,
993 u_int32_t mask)
994 {
995 errno_t error = 0;
996 int tmp;
997 struct kev_msg ev_msg;
998 struct net_event_data ev_data;
999
1000 if (ifp == NULL) {
1001 return EINVAL;
1002 }
1003
1004 ifnet_lock_exclusive(ifp);
1005 tmp = (new_caps & mask) | (ifp->if_capenable & ~mask);
1006 if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities)) {
1007 error = EINVAL;
1008 } else {
1009 ifp->if_capenable = tmp;
1010 }
1011 ifnet_lock_done(ifp);
1012
1013 /* Notify application of the change */
1014 bzero(&ev_data, sizeof(struct net_event_data));
1015 bzero(&ev_msg, sizeof(struct kev_msg));
1016 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1017 ev_msg.kev_class = KEV_NETWORK_CLASS;
1018 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
1019
1020 ev_msg.event_code = KEV_DL_IFCAP_CHANGED;
1021 strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
1022 ev_data.if_family = ifp->if_family;
1023 ev_data.if_unit = (u_int32_t)ifp->if_unit;
1024 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1025 ev_msg.dv[0].data_ptr = &ev_data;
1026 ev_msg.dv[1].data_length = 0;
1027 dlil_post_complete_msg(ifp, &ev_msg);
1028
1029 return error;
1030 }
1031
1032 u_int32_t
ifnet_capabilities_enabled(ifnet_t ifp)1033 ifnet_capabilities_enabled(ifnet_t ifp)
1034 {
1035 return (ifp == NULL) ? 0 : ifp->if_capenable;
1036 }
1037
/*
 * Set of offload bits that ifnet_set_offload() allows into
 * if_hwassist; anything outside this mask is silently dropped.
 */
static const ifnet_offload_t offload_mask =
    (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
    IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 |
    IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_CSUM_ZERO_INVERT |
    IFNET_VLAN_TAGGING | IFNET_VLAN_MTU | IFNET_MULTIPAGES |
    IFNET_TSO_IPV4 | IFNET_TSO_IPV6 | IFNET_TX_STATUS | IFNET_HW_TIMESTAMP |
    IFNET_SW_TIMESTAMP);

/* Any checksum-offload capability bit. */
static const ifnet_offload_t any_offload_csum = IFNET_CHECKSUMF;
1047
/*
 * Set the hardware offload features of an interface and derive the
 * matching interface capability (IFCAP_*) bits from them.  The
 * supported/enabled capability sets are replaced wholesale (masked by
 * IFCAP_VALID) when any capability bit is derived.
 */
errno_t
ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload)
{
	u_int32_t ifcaps = 0;

	if (interface == NULL) {
		return EINVAL;
	}

	ifnet_lock_exclusive(interface);
	/* Only bits in offload_mask are persisted into if_hwassist. */
	interface->if_hwassist = (offload & offload_mask);

#if SKYWALK
	/* preserve skywalk capability */
	if ((interface->if_capabilities & IFCAP_SKYWALK) != 0) {
		ifcaps |= IFCAP_SKYWALK;
	}
#endif /* SKYWALK */
	/*
	 * Hardware capable of partial checksum offload is
	 * flexible enough to handle any transports utilizing
	 * Internet Checksumming. Include those transports
	 * here, and leave the final decision to IP.
	 */
	if (interface->if_hwassist & IFNET_CSUM_PARTIAL) {
		interface->if_hwassist |= (IFNET_CSUM_TCP | IFNET_CSUM_UDP |
		    IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6);
	}
	if (dlil_verbose) {
		log(LOG_DEBUG, "%s: set offload flags=%b\n",
		    if_name(interface),
		    interface->if_hwassist, IFNET_OFFLOADF_BITS);
	}
	ifnet_lock_done(interface);

	/*
	 * Map the requested offload flags (pre-mask, so e.g. IFNET_LRO
	 * still counts) to the corresponding IFCAP_* capability bits.
	 */
	if ((offload & any_offload_csum)) {
		ifcaps |= IFCAP_HWCSUM;
	}
	if ((offload & IFNET_TSO_IPV4)) {
		ifcaps |= IFCAP_TSO4;
	}
	if ((offload & IFNET_TSO_IPV6)) {
		ifcaps |= IFCAP_TSO6;
	}
	if ((offload & IFNET_LRO)) {
		ifcaps |= IFCAP_LRO;
	}
	if ((offload & IFNET_VLAN_MTU)) {
		ifcaps |= IFCAP_VLAN_MTU;
	}
	if ((offload & IFNET_VLAN_TAGGING)) {
		ifcaps |= IFCAP_VLAN_HWTAGGING;
	}
	if ((offload & IFNET_TX_STATUS)) {
		ifcaps |= IFCAP_TXSTATUS;
	}
	if ((offload & IFNET_HW_TIMESTAMP)) {
		ifcaps |= IFCAP_HW_TIMESTAMP;
	}
	if ((offload & IFNET_SW_TIMESTAMP)) {
		ifcaps |= IFCAP_SW_TIMESTAMP;
	}
	if ((offload & IFNET_CSUM_PARTIAL)) {
		ifcaps |= IFCAP_CSUM_PARTIAL;
	}
	if ((offload & IFNET_CSUM_ZERO_INVERT)) {
		ifcaps |= IFCAP_CSUM_ZERO_INVERT;
	}
	if (ifcaps != 0) {
		(void) ifnet_set_capabilities_supported(interface, ifcaps,
		    IFCAP_VALID);
		(void) ifnet_set_capabilities_enabled(interface, ifcaps,
		    IFCAP_VALID);
	}

	return 0;
}
1125
1126 ifnet_offload_t
ifnet_offload(ifnet_t interface)1127 ifnet_offload(ifnet_t interface)
1128 {
1129 return (interface == NULL) ?
1130 0 : (interface->if_hwassist & offload_mask);
1131 }
1132
/*
 * Set the maximum TSO packet size for the given address family.
 * mtuLen must be at least the interface MTU and no larger than
 * IP_MAXPACKET.  Fails with EINVAL when TSO is not enabled for the
 * family in if_hwassist, EPROTONOSUPPORT for other families.
 */
errno_t
ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen)
{
	errno_t error = 0;

	if (interface == NULL || mtuLen < interface->if_mtu) {
		return EINVAL;
	}
	if (mtuLen > IP_MAXPACKET) {
		return EINVAL;
	}

	switch (family) {
	case AF_INET:
		if (interface->if_hwassist & IFNET_TSO_IPV4) {
			interface->if_tso_v4_mtu = mtuLen;
		} else {
			error = EINVAL;
		}
		break;

	case AF_INET6:
		if (interface->if_hwassist & IFNET_TSO_IPV6) {
			interface->if_tso_v6_mtu = mtuLen;
		} else {
			error = EINVAL;
		}
		break;

	default:
		error = EPROTONOSUPPORT;
		break;
	}

	if (error == 0) {
		struct ifclassq *ifq = interface->if_snd;
		ASSERT(ifq != NULL);
		/* Inform all transmit queues about the new TSO MTU */
		IFCQ_LOCK(ifq);
		ifnet_update_sndq(ifq, CLASSQ_EV_LINK_MTU);
		IFCQ_UNLOCK(ifq);
	}

	return error;
}
1178
1179 errno_t
ifnet_get_tso_mtu(ifnet_t interface,sa_family_t family,u_int32_t * mtuLen)1180 ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen)
1181 {
1182 errno_t error = 0;
1183
1184 if (interface == NULL || mtuLen == NULL) {
1185 return EINVAL;
1186 }
1187
1188 switch (family) {
1189 case AF_INET:
1190 if (interface->if_hwassist & IFNET_TSO_IPV4) {
1191 *mtuLen = interface->if_tso_v4_mtu;
1192 } else {
1193 error = EINVAL;
1194 }
1195 break;
1196
1197 case AF_INET6:
1198 if (interface->if_hwassist & IFNET_TSO_IPV6) {
1199 *mtuLen = interface->if_tso_v6_mtu;
1200 } else {
1201 error = EINVAL;
1202 }
1203 break;
1204
1205 default:
1206 error = EPROTONOSUPPORT;
1207 break;
1208 }
1209
1210 return error;
1211 }
1212
/*
 * Update the interface's wake properties.  Only the bits present in
 * 'mask' are examined; IF_WAKE_ON_MAGIC_PACKET is currently the only
 * valid flag.  Posts KEV_DL_WAKEFLAGS_CHANGED on every successful
 * call, even when nothing actually changed.
 */
errno_t
ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask)
{
	struct kev_msg ev_msg;
	struct net_event_data ev_data;

	bzero(&ev_data, sizeof(struct net_event_data));
	bzero(&ev_msg, sizeof(struct kev_msg));

	if (interface == NULL) {
		return EINVAL;
	}

	/* Do not accept wacky values */
	if ((properties & mask) & ~IF_WAKE_VALID_FLAGS) {
		return EINVAL;
	}

	/* Apply only the masked bit; unmasked state is left untouched. */
	if ((mask & IF_WAKE_ON_MAGIC_PACKET) != 0) {
		if ((properties & IF_WAKE_ON_MAGIC_PACKET) != 0) {
			if_set_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
		} else {
			if_clear_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
		}
	}

	(void) ifnet_touch_lastchange(interface);

	/* Notify application of the change */
	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_DL_SUBCLASS;

	ev_msg.event_code = KEV_DL_WAKEFLAGS_CHANGED;
	strlcpy(&ev_data.if_name[0], interface->if_name, IFNAMSIZ);
	ev_data.if_family = interface->if_family;
	ev_data.if_unit = (u_int32_t)interface->if_unit;
	ev_msg.dv[0].data_length = sizeof(struct net_event_data);
	ev_msg.dv[0].data_ptr = &ev_data;
	ev_msg.dv[1].data_length = 0;
	dlil_post_complete_msg(interface, &ev_msg);

	return 0;
}
1257
1258 u_int32_t
ifnet_get_wake_flags(ifnet_t interface)1259 ifnet_get_wake_flags(ifnet_t interface)
1260 {
1261 u_int32_t flags = 0;
1262
1263 if (interface == NULL) {
1264 return 0;
1265 }
1266
1267 if ((interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET) != 0) {
1268 flags |= IF_WAKE_ON_MAGIC_PACKET;
1269 }
1270
1271 return flags;
1272 }
1273
1274 /*
1275 * Should MIB data store a copy?
1276 */
1277 errno_t
ifnet_set_link_mib_data(ifnet_t interface,void * mibData,uint32_t mibLen)1278 ifnet_set_link_mib_data(ifnet_t interface, void *mibData, uint32_t mibLen)
1279 {
1280 if (interface == NULL) {
1281 return EINVAL;
1282 }
1283
1284 ifnet_lock_exclusive(interface);
1285 interface->if_linkmib = (void*)mibData;
1286 interface->if_linkmiblen = mibLen;
1287 ifnet_lock_done(interface);
1288 return 0;
1289 }
1290
1291 errno_t
ifnet_get_link_mib_data(ifnet_t interface,void * mibData,uint32_t * mibLen)1292 ifnet_get_link_mib_data(ifnet_t interface, void *mibData, uint32_t *mibLen)
1293 {
1294 errno_t result = 0;
1295
1296 if (interface == NULL) {
1297 return EINVAL;
1298 }
1299
1300 ifnet_lock_shared(interface);
1301 if (*mibLen < interface->if_linkmiblen) {
1302 result = EMSGSIZE;
1303 }
1304 if (result == 0 && interface->if_linkmib == NULL) {
1305 result = ENOTSUP;
1306 }
1307
1308 if (result == 0) {
1309 *mibLen = interface->if_linkmiblen;
1310 bcopy(interface->if_linkmib, mibData, *mibLen);
1311 }
1312 ifnet_lock_done(interface);
1313
1314 return result;
1315 }
1316
1317 uint32_t
ifnet_get_link_mib_data_length(ifnet_t interface)1318 ifnet_get_link_mib_data_length(ifnet_t interface)
1319 {
1320 return (interface == NULL) ? 0 : interface->if_linkmiblen;
1321 }
1322
1323 errno_t
ifnet_output(ifnet_t interface,protocol_family_t protocol_family,mbuf_t m,void * route,const struct sockaddr * dest)1324 ifnet_output(ifnet_t interface, protocol_family_t protocol_family,
1325 mbuf_t m, void *route, const struct sockaddr *dest)
1326 {
1327 if (interface == NULL || protocol_family == 0 || m == NULL) {
1328 if (m != NULL) {
1329 mbuf_freem_list(m);
1330 }
1331 return EINVAL;
1332 }
1333 return dlil_output(interface, protocol_family, m, route, dest, 0, NULL);
1334 }
1335
1336 errno_t
ifnet_output_raw(ifnet_t interface,protocol_family_t protocol_family,mbuf_t m)1337 ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m)
1338 {
1339 if (interface == NULL || m == NULL) {
1340 if (m != NULL) {
1341 mbuf_freem_list(m);
1342 }
1343 return EINVAL;
1344 }
1345 return dlil_output(interface, protocol_family, m, NULL, NULL, 1, NULL);
1346 }
1347
1348 errno_t
ifnet_set_mtu(ifnet_t interface,u_int32_t mtu)1349 ifnet_set_mtu(ifnet_t interface, u_int32_t mtu)
1350 {
1351 if (interface == NULL) {
1352 return EINVAL;
1353 }
1354
1355 interface->if_mtu = mtu;
1356 return 0;
1357 }
1358
1359 u_int32_t
ifnet_mtu(ifnet_t interface)1360 ifnet_mtu(ifnet_t interface)
1361 {
1362 return (interface == NULL) ? 0 : interface->if_mtu;
1363 }
1364
1365 u_char
ifnet_type(ifnet_t interface)1366 ifnet_type(ifnet_t interface)
1367 {
1368 return (interface == NULL) ? 0 : interface->if_data.ifi_type;
1369 }
1370
1371 errno_t
ifnet_set_addrlen(ifnet_t interface,u_char addrlen)1372 ifnet_set_addrlen(ifnet_t interface, u_char addrlen)
1373 {
1374 if (interface == NULL) {
1375 return EINVAL;
1376 }
1377
1378 interface->if_data.ifi_addrlen = addrlen;
1379 return 0;
1380 }
1381
1382 u_char
ifnet_addrlen(ifnet_t interface)1383 ifnet_addrlen(ifnet_t interface)
1384 {
1385 return (interface == NULL) ? 0 : interface->if_data.ifi_addrlen;
1386 }
1387
1388 errno_t
ifnet_set_hdrlen(ifnet_t interface,u_char hdrlen)1389 ifnet_set_hdrlen(ifnet_t interface, u_char hdrlen)
1390 {
1391 if (interface == NULL) {
1392 return EINVAL;
1393 }
1394
1395 interface->if_data.ifi_hdrlen = hdrlen;
1396 return 0;
1397 }
1398
1399 u_char
ifnet_hdrlen(ifnet_t interface)1400 ifnet_hdrlen(ifnet_t interface)
1401 {
1402 return (interface == NULL) ? 0 : interface->if_data.ifi_hdrlen;
1403 }
1404
1405 errno_t
ifnet_set_metric(ifnet_t interface,u_int32_t metric)1406 ifnet_set_metric(ifnet_t interface, u_int32_t metric)
1407 {
1408 if (interface == NULL) {
1409 return EINVAL;
1410 }
1411
1412 interface->if_data.ifi_metric = metric;
1413 return 0;
1414 }
1415
1416 u_int32_t
ifnet_metric(ifnet_t interface)1417 ifnet_metric(ifnet_t interface)
1418 {
1419 return (interface == NULL) ? 0 : interface->if_data.ifi_metric;
1420 }
1421
1422 errno_t
ifnet_set_baudrate(struct ifnet * ifp,uint64_t baudrate)1423 ifnet_set_baudrate(struct ifnet *ifp, uint64_t baudrate)
1424 {
1425 if (ifp == NULL) {
1426 return EINVAL;
1427 }
1428
1429 ifp->if_output_bw.max_bw = ifp->if_input_bw.max_bw =
1430 ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate;
1431
1432 /* Pin if_baudrate to 32 bits until we can change the storage size */
1433 ifp->if_baudrate = (baudrate > UINT32_MAX) ? UINT32_MAX : (uint32_t)baudrate;
1434
1435 return 0;
1436 }
1437
1438 u_int64_t
ifnet_baudrate(struct ifnet * ifp)1439 ifnet_baudrate(struct ifnet *ifp)
1440 {
1441 return (ifp == NULL) ? 0 : ifp->if_baudrate;
1442 }
1443
1444 errno_t
ifnet_set_bandwidths(struct ifnet * ifp,struct if_bandwidths * output_bw,struct if_bandwidths * input_bw)1445 ifnet_set_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1446 struct if_bandwidths *input_bw)
1447 {
1448 if (ifp == NULL) {
1449 return EINVAL;
1450 }
1451
1452 /* set input values first (if any), as output values depend on them */
1453 if (input_bw != NULL) {
1454 (void) ifnet_set_input_bandwidths(ifp, input_bw);
1455 }
1456
1457 if (output_bw != NULL) {
1458 (void) ifnet_set_output_bandwidths(ifp, output_bw, FALSE);
1459 }
1460
1461 return 0;
1462 }
1463
/*
 * Mirror the current output bandwidth figures into the Wi-Fi v1 link
 * status record, clamping 64-bit values to 32 bits.  Zero values are
 * skipped and their valid bits left unset.  Caller holds
 * if_link_status_lock exclusively and has verified that
 * ifp->if_link_status is non-NULL.
 */
static void
ifnet_set_link_status_outbw(struct ifnet *ifp)
{
	struct if_wifi_status_v1 *sr;
	sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
	if (ifp->if_output_bw.eff_bw != 0) {
		sr->valid_bitmask |=
		    IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
		sr->ul_effective_bandwidth =
		    ifp->if_output_bw.eff_bw > UINT32_MAX ?
		    UINT32_MAX :
		    (uint32_t)ifp->if_output_bw.eff_bw;
	}
	if (ifp->if_output_bw.max_bw != 0) {
		sr->valid_bitmask |=
		    IF_WIFI_UL_MAX_BANDWIDTH_VALID;
		sr->ul_max_bandwidth =
		    ifp->if_output_bw.max_bw > UINT32_MAX ?
		    UINT32_MAX :
		    (uint32_t)ifp->if_output_bw.max_bw;
	}
}
1486
/*
 * Update output bandwidth.  Zero fields in 'bw' mean "keep the current
 * value".  Values are normalized so that eff_bw <= max_bw and eff_bw
 * is nonzero whenever max_bw is.  'locked' indicates the caller
 * already holds the IFCQ lock.
 */
errno_t
ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw,
    boolean_t locked)
{
	struct if_bandwidths old_bw;
	struct ifclassq *ifq;
	u_int64_t br;

	VERIFY(ifp != NULL && bw != NULL);

	ifq = ifp->if_snd;
	if (!locked) {
		IFCQ_LOCK(ifq);
	}
	IFCQ_LOCK_ASSERT_HELD(ifq);

	old_bw = ifp->if_output_bw;
	if (bw->eff_bw != 0) {
		ifp->if_output_bw.eff_bw = bw->eff_bw;
	}
	if (bw->max_bw != 0) {
		ifp->if_output_bw.max_bw = bw->max_bw;
	}
	/* Normalize: effective never exceeds max; backfill a zero eff. */
	if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
		ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
	} else if (ifp->if_output_bw.eff_bw == 0) {
		ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
	}

	/* Pin if_baudrate to 32 bits */
	br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
	if (br != 0) {
		ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
	}

	/* Adjust queue parameters if needed */
	if (old_bw.eff_bw != ifp->if_output_bw.eff_bw ||
	    old_bw.max_bw != ifp->if_output_bw.max_bw) {
		ifnet_update_sndq(ifq, CLASSQ_EV_LINK_BANDWIDTH);
	}

	if (!locked) {
		IFCQ_UNLOCK(ifq);
	}

	/*
	 * If this is a Wifi interface, update the values in
	 * if_link_status structure also.
	 */
	if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
		lck_rw_lock_exclusive(&ifp->if_link_status_lock);
		ifnet_set_link_status_outbw(ifp);
		lck_rw_done(&ifp->if_link_status_lock);
	}

	return 0;
}
1544
/*
 * Mirror the current input bandwidth figures into the Wi-Fi v1 link
 * status record, clamping 64-bit values to 32 bits.  Zero values are
 * skipped.  Caller holds if_link_status_lock exclusively and has
 * verified that ifp->if_link_status is non-NULL.
 */
static void
ifnet_set_link_status_inbw(struct ifnet *ifp)
{
	struct if_wifi_status_v1 *sr;

	sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
	if (ifp->if_input_bw.eff_bw != 0) {
		sr->valid_bitmask |=
		    IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
		sr->dl_effective_bandwidth =
		    ifp->if_input_bw.eff_bw > UINT32_MAX ?
		    UINT32_MAX :
		    (uint32_t)ifp->if_input_bw.eff_bw;
	}
	if (ifp->if_input_bw.max_bw != 0) {
		sr->valid_bitmask |=
		    IF_WIFI_DL_MAX_BANDWIDTH_VALID;
		sr->dl_max_bandwidth = ifp->if_input_bw.max_bw > UINT32_MAX ?
		    UINT32_MAX :
		    (uint32_t)ifp->if_input_bw.max_bw;
	}
}
1567
/*
 * Update input bandwidth.  Zero fields in 'bw' mean "keep the current
 * value"; values are normalized so eff_bw <= max_bw.  Notifies the
 * receive path only when something actually changed.
 */
errno_t
ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw)
{
	struct if_bandwidths old_bw;

	VERIFY(ifp != NULL && bw != NULL);

	old_bw = ifp->if_input_bw;
	if (bw->eff_bw != 0) {
		ifp->if_input_bw.eff_bw = bw->eff_bw;
	}
	if (bw->max_bw != 0) {
		ifp->if_input_bw.max_bw = bw->max_bw;
	}
	/* Normalize: effective never exceeds max; backfill a zero eff. */
	if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
		ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
	} else if (ifp->if_input_bw.eff_bw == 0) {
		ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
	}

	/* Reflect the new figures into the Wi-Fi link status, if any. */
	if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
		lck_rw_lock_exclusive(&ifp->if_link_status_lock);
		ifnet_set_link_status_inbw(ifp);
		lck_rw_done(&ifp->if_link_status_lock);
	}

	if (old_bw.eff_bw != ifp->if_input_bw.eff_bw ||
	    old_bw.max_bw != ifp->if_input_bw.max_bw) {
		ifnet_update_rcv(ifp, CLASSQ_EV_LINK_BANDWIDTH);
	}

	return 0;
}
1601
1602 u_int64_t
ifnet_output_linkrate(struct ifnet * ifp)1603 ifnet_output_linkrate(struct ifnet *ifp)
1604 {
1605 struct ifclassq *ifq = ifp->if_snd;
1606 u_int64_t rate;
1607
1608 IFCQ_LOCK_ASSERT_HELD(ifq);
1609
1610 rate = ifp->if_output_bw.eff_bw;
1611 if (IFCQ_TBR_IS_ENABLED(ifq)) {
1612 u_int64_t tbr_rate = ifq->ifcq_tbr.tbr_rate_raw;
1613 VERIFY(tbr_rate > 0);
1614 rate = MIN(rate, ifq->ifcq_tbr.tbr_rate_raw);
1615 }
1616
1617 return rate;
1618 }
1619
1620 u_int64_t
ifnet_input_linkrate(struct ifnet * ifp)1621 ifnet_input_linkrate(struct ifnet *ifp)
1622 {
1623 return ifp->if_input_bw.eff_bw;
1624 }
1625
1626 errno_t
ifnet_bandwidths(struct ifnet * ifp,struct if_bandwidths * output_bw,struct if_bandwidths * input_bw)1627 ifnet_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1628 struct if_bandwidths *input_bw)
1629 {
1630 if (ifp == NULL) {
1631 return EINVAL;
1632 }
1633
1634 if (output_bw != NULL) {
1635 *output_bw = ifp->if_output_bw;
1636 }
1637 if (input_bw != NULL) {
1638 *input_bw = ifp->if_input_bw;
1639 }
1640
1641 return 0;
1642 }
1643
1644 errno_t
ifnet_set_latencies(struct ifnet * ifp,struct if_latencies * output_lt,struct if_latencies * input_lt)1645 ifnet_set_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1646 struct if_latencies *input_lt)
1647 {
1648 if (ifp == NULL) {
1649 return EINVAL;
1650 }
1651
1652 if (output_lt != NULL) {
1653 (void) ifnet_set_output_latencies(ifp, output_lt, FALSE);
1654 }
1655
1656 if (input_lt != NULL) {
1657 (void) ifnet_set_input_latencies(ifp, input_lt);
1658 }
1659
1660 return 0;
1661 }
1662
/*
 * Update output latency.  Zero fields in 'lt' mean "keep the current
 * value"; values are normalized so eff_lt <= max_lt.  'locked'
 * indicates the caller already holds the IFCQ lock.
 */
errno_t
ifnet_set_output_latencies(struct ifnet *ifp, struct if_latencies *lt,
    boolean_t locked)
{
	struct if_latencies old_lt;
	struct ifclassq *ifq;

	VERIFY(ifp != NULL && lt != NULL);

	ifq = ifp->if_snd;
	if (!locked) {
		IFCQ_LOCK(ifq);
	}
	IFCQ_LOCK_ASSERT_HELD(ifq);

	old_lt = ifp->if_output_lt;
	if (lt->eff_lt != 0) {
		ifp->if_output_lt.eff_lt = lt->eff_lt;
	}
	if (lt->max_lt != 0) {
		ifp->if_output_lt.max_lt = lt->max_lt;
	}
	/* Normalize: effective never exceeds max; backfill a zero eff. */
	if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
		ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
	} else if (ifp->if_output_lt.eff_lt == 0) {
		ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
	}

	/* Adjust queue parameters if needed */
	if (old_lt.eff_lt != ifp->if_output_lt.eff_lt ||
	    old_lt.max_lt != ifp->if_output_lt.max_lt) {
		ifnet_update_sndq(ifq, CLASSQ_EV_LINK_LATENCY);
	}

	if (!locked) {
		IFCQ_UNLOCK(ifq);
	}

	return 0;
}
1703
/*
 * Update input latency.  Zero fields in 'lt' mean "keep the current
 * value"; values are normalized so eff_lt <= max_lt.  Notifies the
 * receive path only when something actually changed.
 */
errno_t
ifnet_set_input_latencies(struct ifnet *ifp, struct if_latencies *lt)
{
	struct if_latencies old_lt;

	VERIFY(ifp != NULL && lt != NULL);

	old_lt = ifp->if_input_lt;
	if (lt->eff_lt != 0) {
		ifp->if_input_lt.eff_lt = lt->eff_lt;
	}
	if (lt->max_lt != 0) {
		ifp->if_input_lt.max_lt = lt->max_lt;
	}
	/* Normalize: effective never exceeds max; backfill a zero eff. */
	if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
		ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
	} else if (ifp->if_input_lt.eff_lt == 0) {
		ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
	}

	if (old_lt.eff_lt != ifp->if_input_lt.eff_lt ||
	    old_lt.max_lt != ifp->if_input_lt.max_lt) {
		ifnet_update_rcv(ifp, CLASSQ_EV_LINK_LATENCY);
	}

	return 0;
}
1731
1732 errno_t
ifnet_latencies(struct ifnet * ifp,struct if_latencies * output_lt,struct if_latencies * input_lt)1733 ifnet_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1734 struct if_latencies *input_lt)
1735 {
1736 if (ifp == NULL) {
1737 return EINVAL;
1738 }
1739
1740 if (output_lt != NULL) {
1741 *output_lt = ifp->if_output_lt;
1742 }
1743 if (input_lt != NULL) {
1744 *input_lt = ifp->if_input_lt;
1745 }
1746
1747 return 0;
1748 }
1749
/*
 * Install RX poll parameters for the interface.  An io reference is
 * taken via ifnet_is_attached(ifp, 1) to keep the interface from
 * detaching mid-call, and released on every exit path.
 */
errno_t
ifnet_set_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
{
	errno_t err;

	if (ifp == NULL) {
		return EINVAL;
	} else if (!ifnet_is_attached(ifp, 1)) {
		return ENXIO;
	}

#if SKYWALK
	/* Skywalk-capable interfaces take the netif path, not DLIL. */
	if (SKYWALK_CAPABLE(ifp)) {
		err = netif_rxpoll_set_params(ifp, p, FALSE);
		ifnet_decr_iorefcnt(ifp);
		return err;
	}
#endif /* SKYWALK */
	err = dlil_rxpoll_set_params(ifp, p, FALSE);

	/* Release the io ref count */
	ifnet_decr_iorefcnt(ifp);

	return err;
}
1775
1776 errno_t
ifnet_poll_params(struct ifnet * ifp,struct ifnet_poll_params * p)1777 ifnet_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1778 {
1779 errno_t err;
1780
1781 if (ifp == NULL || p == NULL) {
1782 return EINVAL;
1783 } else if (!ifnet_is_attached(ifp, 1)) {
1784 return ENXIO;
1785 }
1786
1787 err = dlil_rxpoll_get_params(ifp, p);
1788
1789 /* Release the io ref count */
1790 ifnet_decr_iorefcnt(ifp);
1791
1792 return err;
1793 }
1794
/*
 * Atomically add a batch of driver-reported deltas to the interface's
 * 64-bit counters.  Zero-valued fields are skipped so no atomic op is
 * issued for them.  's' must be non-NULL (not validated here).  Note
 * that 'dropped' accumulates into the input-queue drop counter.
 */
errno_t
ifnet_stat_increment(struct ifnet *ifp,
    const struct ifnet_stat_increment_param *s)
{
	if (ifp == NULL) {
		return EINVAL;
	}

	if (s->packets_in != 0) {
		atomic_add_64(&ifp->if_data.ifi_ipackets, s->packets_in);
	}
	if (s->bytes_in != 0) {
		atomic_add_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
	}
	if (s->errors_in != 0) {
		atomic_add_64(&ifp->if_data.ifi_ierrors, s->errors_in);
	}

	if (s->packets_out != 0) {
		atomic_add_64(&ifp->if_data.ifi_opackets, s->packets_out);
	}
	if (s->bytes_out != 0) {
		atomic_add_64(&ifp->if_data.ifi_obytes, s->bytes_out);
	}
	if (s->errors_out != 0) {
		atomic_add_64(&ifp->if_data.ifi_oerrors, s->errors_out);
	}

	if (s->collisions != 0) {
		atomic_add_64(&ifp->if_data.ifi_collisions, s->collisions);
	}
	if (s->dropped != 0) {
		atomic_add_64(&ifp->if_data.ifi_iqdrops, s->dropped);
	}

	/* Touch the last change time. */
	TOUCHLASTCHANGE(&ifp->if_lastchange);

	if (ifp->if_data_threshold != 0) {
		ifnet_notify_data_threshold(ifp);
	}

	return 0;
}
1839
/*
 * Atomically add input-side deltas (packets/bytes/errors) to the
 * interface counters.  Zero-valued arguments are skipped.
 */
errno_t
ifnet_stat_increment_in(struct ifnet *ifp, u_int32_t packets_in,
    u_int32_t bytes_in, u_int32_t errors_in)
{
	if (ifp == NULL) {
		return EINVAL;
	}

	if (packets_in != 0) {
		atomic_add_64(&ifp->if_data.ifi_ipackets, packets_in);
	}
	if (bytes_in != 0) {
		atomic_add_64(&ifp->if_data.ifi_ibytes, bytes_in);
	}
	if (errors_in != 0) {
		atomic_add_64(&ifp->if_data.ifi_ierrors, errors_in);
	}

	/* Stamp the last-change time. */
	TOUCHLASTCHANGE(&ifp->if_lastchange);

	if (ifp->if_data_threshold != 0) {
		ifnet_notify_data_threshold(ifp);
	}

	return 0;
}
1866
/*
 * Atomically add output-side deltas (packets/bytes/errors) to the
 * interface counters.  Zero-valued arguments are skipped.
 */
errno_t
ifnet_stat_increment_out(struct ifnet *ifp, u_int32_t packets_out,
    u_int32_t bytes_out, u_int32_t errors_out)
{
	if (ifp == NULL) {
		return EINVAL;
	}

	if (packets_out != 0) {
		atomic_add_64(&ifp->if_data.ifi_opackets, packets_out);
	}
	if (bytes_out != 0) {
		atomic_add_64(&ifp->if_data.ifi_obytes, bytes_out);
	}
	if (errors_out != 0) {
		atomic_add_64(&ifp->if_data.ifi_oerrors, errors_out);
	}

	/* Stamp the last-change time. */
	TOUCHLASTCHANGE(&ifp->if_lastchange);

	if (ifp->if_data_threshold != 0) {
		ifnet_notify_data_threshold(ifp);
	}

	return 0;
}
1893
/*
 * Overwrite all interface counters with the absolute values supplied
 * in 's'.  's' must be non-NULL (not validated here).
 */
errno_t
ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s)
{
	if (ifp == NULL) {
		return EINVAL;
	}

	atomic_set_64(&ifp->if_data.ifi_ipackets, s->packets_in);
	atomic_set_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
	atomic_set_64(&ifp->if_data.ifi_imcasts, s->multicasts_in);
	atomic_set_64(&ifp->if_data.ifi_ierrors, s->errors_in);

	atomic_set_64(&ifp->if_data.ifi_opackets, s->packets_out);
	atomic_set_64(&ifp->if_data.ifi_obytes, s->bytes_out);
	atomic_set_64(&ifp->if_data.ifi_omcasts, s->multicasts_out);
	atomic_set_64(&ifp->if_data.ifi_oerrors, s->errors_out);

	atomic_set_64(&ifp->if_data.ifi_collisions, s->collisions);
	atomic_set_64(&ifp->if_data.ifi_iqdrops, s->dropped);
	atomic_set_64(&ifp->if_data.ifi_noproto, s->no_protocol);

	/* Touch the last change time. */
	TOUCHLASTCHANGE(&ifp->if_lastchange);

	if (ifp->if_data_threshold != 0) {
		ifnet_notify_data_threshold(ifp);
	}

	return 0;
}
1924
/*
 * Read a snapshot of all interface counters into 's'.  's' must be
 * non-NULL (not validated here).  Counters are read individually, so
 * the snapshot is not atomic as a whole.
 */
errno_t
ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s)
{
	if (ifp == NULL) {
		return EINVAL;
	}

	atomic_get_64(s->packets_in, &ifp->if_data.ifi_ipackets);
	atomic_get_64(s->bytes_in, &ifp->if_data.ifi_ibytes);
	atomic_get_64(s->multicasts_in, &ifp->if_data.ifi_imcasts);
	atomic_get_64(s->errors_in, &ifp->if_data.ifi_ierrors);

	atomic_get_64(s->packets_out, &ifp->if_data.ifi_opackets);
	atomic_get_64(s->bytes_out, &ifp->if_data.ifi_obytes);
	atomic_get_64(s->multicasts_out, &ifp->if_data.ifi_omcasts);
	atomic_get_64(s->errors_out, &ifp->if_data.ifi_oerrors);

	atomic_get_64(s->collisions, &ifp->if_data.ifi_collisions);
	atomic_get_64(s->dropped, &ifp->if_data.ifi_iqdrops);
	atomic_get_64(s->no_protocol, &ifp->if_data.ifi_noproto);

	if (ifp->if_data_threshold != 0) {
		ifnet_notify_data_threshold(ifp);
	}

	return 0;
}
1952
1953 errno_t
ifnet_touch_lastchange(ifnet_t interface)1954 ifnet_touch_lastchange(ifnet_t interface)
1955 {
1956 if (interface == NULL) {
1957 return EINVAL;
1958 }
1959
1960 TOUCHLASTCHANGE(&interface->if_lastchange);
1961
1962 return 0;
1963 }
1964
1965 errno_t
ifnet_lastchange(ifnet_t interface,struct timeval * last_change)1966 ifnet_lastchange(ifnet_t interface, struct timeval *last_change)
1967 {
1968 if (interface == NULL) {
1969 return EINVAL;
1970 }
1971
1972 *last_change = interface->if_data.ifi_lastchange;
1973 /* Crude conversion from uptime to calendar time */
1974 last_change->tv_sec += boottime_sec();
1975
1976 return 0;
1977 }
1978
1979 errno_t
ifnet_touch_lastupdown(ifnet_t interface)1980 ifnet_touch_lastupdown(ifnet_t interface)
1981 {
1982 if (interface == NULL) {
1983 return EINVAL;
1984 }
1985
1986 TOUCHLASTCHANGE(&interface->if_lastupdown);
1987
1988 return 0;
1989 }
1990
1991 errno_t
ifnet_updown_delta(ifnet_t interface,struct timeval * updown_delta)1992 ifnet_updown_delta(ifnet_t interface, struct timeval *updown_delta)
1993 {
1994 if (interface == NULL) {
1995 return EINVAL;
1996 }
1997
1998 /* Calculate the delta */
1999 updown_delta->tv_sec = (time_t)net_uptime();
2000 if (updown_delta->tv_sec > interface->if_data.ifi_lastupdown.tv_sec) {
2001 updown_delta->tv_sec -= interface->if_data.ifi_lastupdown.tv_sec;
2002 } else {
2003 updown_delta->tv_sec = 0;
2004 }
2005 updown_delta->tv_usec = 0;
2006
2007 return 0;
2008 }
2009
2010 errno_t
ifnet_get_address_list(ifnet_t interface,ifaddr_t ** addresses)2011 ifnet_get_address_list(ifnet_t interface, ifaddr_t **addresses)
2012 {
2013 return addresses == NULL ? EINVAL :
2014 ifnet_get_address_list_family(interface, addresses, 0);
2015 }
2016
/*
 * Temporary singly-linked list node used while snapshotting an
 * interface's address list with a reference held on each ifaddr.
 */
struct ifnet_addr_list {
	SLIST_ENTRY(ifnet_addr_list) ifal_le;	/* list linkage */
	struct ifaddr *ifal_ifa;		/* referenced address */
};
2021
/*
 * Snapshot the addresses of the given family (0 = all) on 'interface'
 * (NULL = all interfaces), using a blocking allocation and without
 * in-use filtering.
 */
errno_t
ifnet_get_address_list_family(ifnet_t interface, ifaddr_t **addresses,
    sa_family_t family)
{
	return ifnet_get_address_list_family_internal(interface, addresses,
	    family, 0, Z_WAITOK, 0);
}
2029
2030 errno_t
ifnet_get_inuse_address_list(ifnet_t interface,ifaddr_t ** addresses)2031 ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t **addresses)
2032 {
2033 return addresses == NULL ? EINVAL :
2034 ifnet_get_address_list_family_internal(interface, addresses,
2035 0, 0, Z_WAITOK, 1);
2036 }
2037
/* Defined in the TCP/UDP stacks; used to test whether any PCB is bound to ifa. */
extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa);

extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *ifa);
2041
/*
 * Build a NULL-terminated, refcounted array of ifaddr pointers.
 *
 * interface == NULL means "all attached interfaces"; family == 0
 * means "all families".  'detached' skips the ifnet_head lookup and
 * walks only 'interface' (for interfaces no longer on the list).
 * 'how' is the kalloc flag (e.g. Z_WAITOK).  With return_inuse_addrs,
 * only addresses bound by at least one TCP/UDP PCB are kept.
 *
 * On success the caller releases *addresses with
 * ifnet_free_address_list().  Returns ENXIO when nothing matched,
 * ENOMEM on allocation failure, EINVAL on bad arguments.
 */
__private_extern__ errno_t
ifnet_get_address_list_family_internal(ifnet_t interface, ifaddr_t **addresses,
    sa_family_t family, int detached, int how, int return_inuse_addrs)
{
	SLIST_HEAD(, ifnet_addr_list) ifal_head;
	struct ifnet_addr_list *ifal, *ifal_tmp;
	struct ifnet *ifp;
	int count = 0;
	errno_t err = 0;
	int usecount = 0;
	int index = 0;

	SLIST_INIT(&ifal_head);

	if (addresses == NULL) {
		err = EINVAL;
		goto done;
	}
	*addresses = NULL;

	if (detached) {
		/*
		 * Interface has been detached, so skip the lookup
		 * at ifnet_head and go directly to inner loop.
		 */
		ifp = interface;
		if (ifp == NULL) {
			err = EINVAL;
			goto done;
		}
		goto one;
	}

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (interface != NULL && ifp != interface) {
			continue;
		}
one:
		/* Pass 1: collect matching addresses, one ref each. */
		ifnet_lock_shared(ifp);
		if (interface == NULL || interface == ifp) {
			struct ifaddr *ifa;
			TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
				IFA_LOCK(ifa);
				if (family != 0 &&
				    ifa->ifa_addr->sa_family != family) {
					IFA_UNLOCK(ifa);
					continue;
				}
				ifal = kalloc_type(struct ifnet_addr_list, how);
				if (ifal == NULL) {
					IFA_UNLOCK(ifa);
					ifnet_lock_done(ifp);
					if (!detached) {
						ifnet_head_done();
					}
					err = ENOMEM;
					goto done;
				}
				ifal->ifal_ifa = ifa;
				IFA_ADDREF_LOCKED(ifa);
				SLIST_INSERT_HEAD(&ifal_head, ifal, ifal_le);
				++count;
				IFA_UNLOCK(ifa);
			}
		}
		ifnet_lock_done(ifp);
		if (detached) {
			break;
		}
	}
	if (!detached) {
		ifnet_head_done();
	}

	if (count == 0) {
		err = ENXIO;
		goto done;
	}

	/* Extra slot for the NULL terminator (zeroed by Z_ZERO). */
	*addresses = kalloc_type(ifaddr_t, count + 1, how | Z_ZERO);
	if (*addresses == NULL) {
		err = ENOMEM;
		goto done;
	}

done:
	/* Pass 2: drain the list — publish each address or drop its ref. */
	SLIST_FOREACH_SAFE(ifal, &ifal_head, ifal_le, ifal_tmp) {
		SLIST_REMOVE(&ifal_head, ifal, ifnet_addr_list, ifal_le);
		if (err == 0) {
			if (return_inuse_addrs) {
				usecount = tcp_find_anypcb_byaddr(ifal->ifal_ifa);
				usecount += udp_find_anypcb_byaddr(ifal->ifal_ifa);
				if (usecount) {
					(*addresses)[index] = ifal->ifal_ifa;
					index++;
				} else {
					IFA_REMREF(ifal->ifal_ifa);
				}
			} else {
				/*
				 * The list is in reverse traversal order;
				 * fill from the tail to restore it.
				 */
				(*addresses)[--count] = ifal->ifal_ifa;
			}
		} else {
			IFA_REMREF(ifal->ifal_ifa);
		}
		kfree_type(struct ifnet_addr_list, ifal);
	}

	VERIFY(err == 0 || *addresses == NULL);
	/* In-use filtering may have kept nothing; report that as ENXIO. */
	if ((err == 0) && (count) && ((*addresses)[0] == NULL)) {
		VERIFY(return_inuse_addrs == 1);
		kfree_type(ifaddr_t, count + 1, *addresses);
		err = ENXIO;
	}
	return err;
}
2158
2159 void
ifnet_free_address_list(ifaddr_t * addresses)2160 ifnet_free_address_list(ifaddr_t *addresses)
2161 {
2162 int i;
2163
2164 if (addresses == NULL) {
2165 return;
2166 }
2167
2168 for (i = 0; addresses[i] != NULL; i++) {
2169 IFA_REMREF(addresses[i]);
2170 }
2171
2172 kfree_type(ifaddr_t, i + 1, addresses);
2173 }
2174
2175 void *
ifnet_lladdr(ifnet_t interface)2176 ifnet_lladdr(ifnet_t interface)
2177 {
2178 struct ifaddr *ifa;
2179 void *lladdr;
2180
2181 if (interface == NULL) {
2182 return NULL;
2183 }
2184
2185 /*
2186 * if_lladdr points to the permanent link address of
2187 * the interface and it never gets deallocated; internal
2188 * code should simply use IF_LLADDR() for performance.
2189 */
2190 ifa = interface->if_lladdr;
2191 IFA_LOCK_SPIN(ifa);
2192 lladdr = LLADDR(SDL((void *)ifa->ifa_addr));
2193 IFA_UNLOCK(ifa);
2194
2195 return lladdr;
2196 }
2197
2198 errno_t
ifnet_llbroadcast_copy_bytes(ifnet_t interface,void * addr,size_t buffer_len,size_t * out_len)2199 ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *addr, size_t buffer_len,
2200 size_t *out_len)
2201 {
2202 if (interface == NULL || addr == NULL || out_len == NULL) {
2203 return EINVAL;
2204 }
2205
2206 *out_len = interface->if_broadcast.length;
2207
2208 if (buffer_len < interface->if_broadcast.length) {
2209 return EMSGSIZE;
2210 }
2211
2212 if (interface->if_broadcast.length == 0) {
2213 return ENXIO;
2214 }
2215
2216 if (interface->if_broadcast.length <=
2217 sizeof(interface->if_broadcast.u.buffer)) {
2218 bcopy(interface->if_broadcast.u.buffer, addr,
2219 interface->if_broadcast.length);
2220 } else {
2221 bcopy(interface->if_broadcast.u.ptr, addr,
2222 interface->if_broadcast.length);
2223 }
2224
2225 return 0;
2226 }
2227
/*
 * Copy the link-layer address of `interface' into `lladdr'.
 *
 * `lladdr_len' must exactly match the address length reported by
 * dlil_ifaddr_bytes(); on mismatch the caller's buffer is zeroed and
 * EMSGSIZE is returned.  A non-NULL `credp' is forwarded to
 * dlil_ifaddr_bytes(), which may substitute the returned bytes based
 * on the credential (used by the guarded/MACF entry point).
 */
static errno_t
ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *lladdr,
    size_t lladdr_len, kauth_cred_t *credp)
{
	const u_int8_t *bytes;
	size_t bytes_len;
	struct ifaddr *ifa;
	uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
	errno_t error = 0;

	/*
	 * Make sure to accommodate the largest possible
	 * size of SA(if_lladdr)->sa_len.
	 */
	_CASSERT(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1));

	if (interface == NULL || lladdr == NULL) {
		return EINVAL;
	}

	/*
	 * Snapshot the sockaddr_dl into a local buffer while holding the
	 * address lock so that dlil_ifaddr_bytes() can run unlocked below.
	 */
	ifa = interface->if_lladdr;
	IFA_LOCK_SPIN(ifa);
	bcopy(ifa->ifa_addr, &sdlbuf, SDL(ifa->ifa_addr)->sdl_len);
	IFA_UNLOCK(ifa);

	bytes = dlil_ifaddr_bytes(SDL(&sdlbuf), &bytes_len, credp);
	if (bytes_len != lladdr_len) {
		/* size mismatch: do not leak a partial address to the caller */
		bzero(lladdr, lladdr_len);
		error = EMSGSIZE;
	} else {
		bcopy(bytes, lladdr, bytes_len);
	}

	return error;
}
2263
/*
 * Public, unguarded variant: copy the interface's link-layer address
 * without credential-based filtering (credp == NULL).
 */
errno_t
ifnet_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
{
	return ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
	           NULL);
}
2270
2271 errno_t
ifnet_guarded_lladdr_copy_bytes(ifnet_t interface,void * lladdr,size_t length)2272 ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
2273 {
2274 #if CONFIG_MACF
2275 kauth_cred_t cred;
2276 net_thread_marks_t marks;
2277 #endif
2278 kauth_cred_t *credp;
2279 errno_t error;
2280
2281 credp = NULL;
2282 #if CONFIG_MACF
2283 marks = net_thread_marks_push(NET_THREAD_CKREQ_LLADDR);
2284 cred = kauth_cred_proc_ref(current_proc());
2285 credp = &cred;
2286 #else
2287 credp = NULL;
2288 #endif
2289
2290 error = ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
2291 credp);
2292
2293 #if CONFIG_MACF
2294 kauth_cred_unref(credp);
2295 net_thread_marks_pop(marks);
2296 #endif
2297
2298 return error;
2299 }
2300
/*
 * Set the interface's link-layer address (and optionally its sdl_type).
 *
 * A non-zero `lladdr_len' must equal if_addrlen and `lladdr' must be
 * non-NULL; a zero `lladdr_len' clears the address.  On success a
 * KEV_DL_LINK_ADDRESS_CHANGED kernel event is posted.  Returns EINVAL
 * on bad arguments and ENXIO if the interface has no link address.
 */
static errno_t
ifnet_set_lladdr_internal(ifnet_t interface, const void *lladdr,
    size_t lladdr_len, u_char new_type, int apply_type)
{
	struct ifaddr *ifa;
	errno_t error = 0;

	if (interface == NULL) {
		return EINVAL;
	}

	/* lock order: ifnet head (shared), then this ifnet (exclusive) */
	ifnet_head_lock_shared();
	ifnet_lock_exclusive(interface);
	if (lladdr_len != 0 &&
	    (lladdr_len != interface->if_addrlen || lladdr == 0)) {
		ifnet_lock_done(interface);
		ifnet_head_done();
		return EINVAL;
	}
	ifa = ifnet_addrs[interface->if_index - 1];
	if (ifa != NULL) {
		struct sockaddr_dl *sdl;

		IFA_LOCK_SPIN(ifa);
		sdl = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
		if (lladdr_len != 0) {
			bcopy(lladdr, LLADDR(sdl), lladdr_len);
		} else {
			/* zero-length request clears the stored address */
			bzero(LLADDR(sdl), interface->if_addrlen);
		}
		/* lladdr_len-check with if_addrlen makes sure it fits in u_char */
		sdl->sdl_alen = (u_char)lladdr_len;

		if (apply_type) {
			sdl->sdl_type = new_type;
		}
		IFA_UNLOCK(ifa);
	} else {
		error = ENXIO;
	}
	ifnet_lock_done(interface);
	ifnet_head_done();

	/* Generate a kernel event */
	if (error == 0) {
		intf_event_enqueue_nwk_wq_entry(interface, NULL,
		    INTF_EVENT_CODE_LLADDR_UPDATE);
		dlil_post_msg(interface, KEV_DL_SUBCLASS,
		    KEV_DL_LINK_ADDRESS_CHANGED, NULL, 0, FALSE);
	}

	return error;
}
2354
/*
 * Set the interface's link-layer address, leaving sdl_type untouched.
 */
errno_t
ifnet_set_lladdr(ifnet_t interface, const void* lladdr, size_t lladdr_len)
{
	return ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, 0, 0);
}
2360
/*
 * Set the interface's link-layer address and also update sdl_type.
 */
errno_t
ifnet_set_lladdr_and_type(ifnet_t interface, const void* lladdr,
    size_t lladdr_len, u_char type)
{
	return ifnet_set_lladdr_internal(interface, lladdr,
	           lladdr_len, type, 1);
}
2368
2369 errno_t
ifnet_add_multicast(ifnet_t interface,const struct sockaddr * maddr,ifmultiaddr_t * ifmap)2370 ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr,
2371 ifmultiaddr_t *ifmap)
2372 {
2373 if (interface == NULL || maddr == NULL) {
2374 return EINVAL;
2375 }
2376
2377 /* Don't let users screw up protocols' entries. */
2378 switch (maddr->sa_family) {
2379 case AF_LINK: {
2380 const struct sockaddr_dl *sdl =
2381 (const struct sockaddr_dl *)(uintptr_t)maddr;
2382 if (sdl->sdl_len < sizeof(struct sockaddr_dl) ||
2383 (sdl->sdl_nlen + sdl->sdl_alen + sdl->sdl_slen +
2384 offsetof(struct sockaddr_dl, sdl_data) > sdl->sdl_len)) {
2385 return EINVAL;
2386 }
2387 break;
2388 }
2389 case AF_UNSPEC:
2390 if (maddr->sa_len < ETHER_ADDR_LEN +
2391 offsetof(struct sockaddr, sa_data)) {
2392 return EINVAL;
2393 }
2394 break;
2395 default:
2396 return EINVAL;
2397 }
2398
2399 return if_addmulti_anon(interface, maddr, ifmap);
2400 }
2401
2402 errno_t
ifnet_remove_multicast(ifmultiaddr_t ifma)2403 ifnet_remove_multicast(ifmultiaddr_t ifma)
2404 {
2405 struct sockaddr *maddr;
2406
2407 if (ifma == NULL) {
2408 return EINVAL;
2409 }
2410
2411 maddr = ifma->ifma_addr;
2412 /* Don't let users screw up protocols' entries. */
2413 if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK) {
2414 return EINVAL;
2415 }
2416
2417 return if_delmulti_anon(ifma->ifma_ifp, maddr);
2418 }
2419
/*
 * Return a NULL-terminated, referenced snapshot of the interface's
 * multicast address list.  The caller must release it with
 * ifnet_free_multicast_list().
 */
errno_t
ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t **addresses)
{
	int count = 0;
	int cmax = 0;
	struct ifmultiaddr *addr;

	if (ifp == NULL || addresses == NULL) {
		return EINVAL;
	}

	/* hold the ifnet lock across count + copy so the list is stable */
	ifnet_lock_shared(ifp);
	LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
		cmax++;
	}

	/*
	 * NOTE(review): Z_WAITOK allocation while holding the shared ifnet
	 * lock — presumably acceptable for this lock class; confirm.
	 */
	*addresses = kalloc_type(ifmultiaddr_t, cmax + 1, Z_WAITOK);
	if (*addresses == NULL) {
		ifnet_lock_done(ifp);
		return ENOMEM;
	}

	/*
	 * Since the lock is held throughout, the list cannot have grown;
	 * the bound check is defensive and count ends up equal to cmax.
	 */
	LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
		if (count + 1 > cmax) {
			break;
		}
		(*addresses)[count] = (ifmultiaddr_t)addr;
		ifmaddr_reference((*addresses)[count]);
		count++;
	}
	(*addresses)[cmax] = NULL; /* NULL terminator */
	ifnet_lock_done(ifp);

	return 0;
}
2455
2456 void
ifnet_free_multicast_list(ifmultiaddr_t * addresses)2457 ifnet_free_multicast_list(ifmultiaddr_t *addresses)
2458 {
2459 int i;
2460
2461 if (addresses == NULL) {
2462 return;
2463 }
2464
2465 for (i = 0; addresses[i] != NULL; i++) {
2466 ifmaddr_release(addresses[i]);
2467 }
2468
2469 kfree_type(ifmultiaddr_t, i + 1, addresses);
2470 }
2471
/*
 * Look up an attached interface by its full name (e.g. "en0") as stored
 * in the link-level sockaddr_dl.  On success a reference is taken on the
 * interface and returned via `ifpp'; returns ENXIO when not found.
 */
errno_t
ifnet_find_by_name(const char *ifname, ifnet_t *ifpp)
{
	struct ifnet *ifp;
	size_t namelen;

	if (ifname == NULL) {
		return EINVAL;
	}

	namelen = strlen(ifname);

	*ifpp = NULL;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		struct ifaddr *ifa;
		struct sockaddr_dl *ll_addr;

		/* interfaces without a link-level address are skipped */
		ifa = ifnet_addrs[ifp->if_index - 1];
		if (ifa == NULL) {
			continue;
		}

		IFA_LOCK(ifa);
		ll_addr = (struct sockaddr_dl *)(void *)ifa->ifa_addr;

		/* exact-length match against the sdl-embedded name */
		if (namelen == ll_addr->sdl_nlen && strncmp(ll_addr->sdl_data,
		    ifname, ll_addr->sdl_nlen) == 0) {
			IFA_UNLOCK(ifa);
			*ifpp = ifp;
			ifnet_reference(*ifpp);
			break;
		}
		IFA_UNLOCK(ifa);
	}
	ifnet_head_done();

	/* ifp is NULL iff the loop completed without a match */
	return (ifp == NULL) ? ENXIO : 0;
}
2512
/*
 * Return the list of attached interfaces of `family' (excluding those
 * not yet fully attached); see ifnet_list_get_common().
 */
errno_t
ifnet_list_get(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
{
	return ifnet_list_get_common(family, FALSE, list, count);
}
2518
/*
 * Kernel-private variant of ifnet_list_get() that includes all
 * interfaces; see ifnet_list_get_common().
 */
__private_extern__ errno_t
ifnet_list_get_all(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
{
	return ifnet_list_get_common(family, TRUE, list, count);
}
2524
/* Temporary singly-linked node used by ifnet_list_get_common() to
 * collect referenced interfaces before sizing the output array. */
struct ifnet_list {
	SLIST_ENTRY(ifnet_list) ifl_le; /* list linkage */
	struct ifnet *ifl_ifp;          /* referenced interface */
};
2529
/*
 * Build a NULL-terminated, referenced array of attached interfaces
 * matching `family' (IFNET_FAMILY_ANY matches all).  The interfaces are
 * first collected on a temporary SLIST while the ifnet head lock is
 * held, then transferred into a freshly allocated array; on any error
 * every reference taken so far is dropped in the cleanup loop.
 * Caller frees the result with ifnet_list_free().
 */
static errno_t
ifnet_list_get_common(ifnet_family_t family, boolean_t get_all, ifnet_t **list,
    u_int32_t *count)
{
#pragma unused(get_all)
	SLIST_HEAD(, ifnet_list) ifl_head;
	struct ifnet_list *ifl, *ifl_tmp;
	struct ifnet *ifp;
	int cnt = 0;
	errno_t err = 0;

	SLIST_INIT(&ifl_head);

	if (list == NULL || count == NULL) {
		err = EINVAL;
		goto done;
	}
	*count = 0;
	*list = NULL;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (family == IFNET_FAMILY_ANY || ifp->if_family == family) {
			/* Z_NOWAIT: cannot block while the head lock is held */
			ifl = kalloc_type(struct ifnet_list, Z_NOWAIT);
			if (ifl == NULL) {
				ifnet_head_done();
				err = ENOMEM;
				goto done;
			}
			ifl->ifl_ifp = ifp;
			ifnet_reference(ifp);
			SLIST_INSERT_HEAD(&ifl_head, ifl, ifl_le);
			++cnt;
		}
	}
	ifnet_head_done();

	if (cnt == 0) {
		err = ENXIO;
		goto done;
	}

	/* one extra zeroed slot acts as the NULL terminator */
	*list = kalloc_type(ifnet_t, cnt + 1, Z_WAITOK | Z_ZERO);
	if (*list == NULL) {
		err = ENOMEM;
		goto done;
	}
	*count = cnt;

done:
	/*
	 * On success, move each reference into the array (filling from the
	 * back since the SLIST reversed insertion order); on failure, drop
	 * the references.  Either way the temporary nodes are freed.
	 */
	SLIST_FOREACH_SAFE(ifl, &ifl_head, ifl_le, ifl_tmp) {
		SLIST_REMOVE(&ifl_head, ifl, ifnet_list, ifl_le);
		if (err == 0) {
			(*list)[--cnt] = ifl->ifl_ifp;
		} else {
			ifnet_release(ifl->ifl_ifp);
		}
		kfree_type(struct ifnet_list, ifl);
	}

	return err;
}
2592
2593 void
ifnet_list_free(ifnet_t * interfaces)2594 ifnet_list_free(ifnet_t *interfaces)
2595 {
2596 int i;
2597
2598 if (interfaces == NULL) {
2599 return;
2600 }
2601
2602 for (i = 0; interfaces[i]; i++) {
2603 ifnet_release(interfaces[i]);
2604 }
2605
2606 kfree_type(ifnet_t, i + 1, interfaces);
2607 }
2608
2609 /*************************************************************************/
2610 /* ifaddr_t accessors */
2611 /*************************************************************************/
2612
2613 errno_t
ifaddr_reference(ifaddr_t ifa)2614 ifaddr_reference(ifaddr_t ifa)
2615 {
2616 if (ifa == NULL) {
2617 return EINVAL;
2618 }
2619
2620 IFA_ADDREF(ifa);
2621 return 0;
2622 }
2623
2624 errno_t
ifaddr_release(ifaddr_t ifa)2625 ifaddr_release(ifaddr_t ifa)
2626 {
2627 if (ifa == NULL) {
2628 return EINVAL;
2629 }
2630
2631 IFA_REMREF(ifa);
2632 return 0;
2633 }
2634
2635 sa_family_t
ifaddr_address_family(ifaddr_t ifa)2636 ifaddr_address_family(ifaddr_t ifa)
2637 {
2638 sa_family_t family = 0;
2639
2640 if (ifa != NULL) {
2641 IFA_LOCK_SPIN(ifa);
2642 if (ifa->ifa_addr != NULL) {
2643 family = ifa->ifa_addr->sa_family;
2644 }
2645 IFA_UNLOCK(ifa);
2646 }
2647 return family;
2648 }
2649
2650 errno_t
ifaddr_address(ifaddr_t ifa,struct sockaddr * out_addr,u_int32_t addr_size)2651 ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2652 {
2653 u_int32_t copylen;
2654
2655 if (ifa == NULL || out_addr == NULL) {
2656 return EINVAL;
2657 }
2658
2659 IFA_LOCK_SPIN(ifa);
2660 if (ifa->ifa_addr == NULL) {
2661 IFA_UNLOCK(ifa);
2662 return ENOTSUP;
2663 }
2664
2665 copylen = (addr_size >= ifa->ifa_addr->sa_len) ?
2666 ifa->ifa_addr->sa_len : addr_size;
2667 bcopy(ifa->ifa_addr, out_addr, copylen);
2668
2669 if (ifa->ifa_addr->sa_len > addr_size) {
2670 IFA_UNLOCK(ifa);
2671 return EMSGSIZE;
2672 }
2673
2674 IFA_UNLOCK(ifa);
2675 return 0;
2676 }
2677
2678 errno_t
ifaddr_dstaddress(ifaddr_t ifa,struct sockaddr * out_addr,u_int32_t addr_size)2679 ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2680 {
2681 u_int32_t copylen;
2682
2683 if (ifa == NULL || out_addr == NULL) {
2684 return EINVAL;
2685 }
2686
2687 IFA_LOCK_SPIN(ifa);
2688 if (ifa->ifa_dstaddr == NULL) {
2689 IFA_UNLOCK(ifa);
2690 return ENOTSUP;
2691 }
2692
2693 copylen = (addr_size >= ifa->ifa_dstaddr->sa_len) ?
2694 ifa->ifa_dstaddr->sa_len : addr_size;
2695 bcopy(ifa->ifa_dstaddr, out_addr, copylen);
2696
2697 if (ifa->ifa_dstaddr->sa_len > addr_size) {
2698 IFA_UNLOCK(ifa);
2699 return EMSGSIZE;
2700 }
2701
2702 IFA_UNLOCK(ifa);
2703 return 0;
2704 }
2705
2706 errno_t
ifaddr_netmask(ifaddr_t ifa,struct sockaddr * out_addr,u_int32_t addr_size)2707 ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2708 {
2709 u_int32_t copylen;
2710
2711 if (ifa == NULL || out_addr == NULL) {
2712 return EINVAL;
2713 }
2714
2715 IFA_LOCK_SPIN(ifa);
2716 if (ifa->ifa_netmask == NULL) {
2717 IFA_UNLOCK(ifa);
2718 return ENOTSUP;
2719 }
2720
2721 copylen = addr_size >= ifa->ifa_netmask->sa_len ?
2722 ifa->ifa_netmask->sa_len : addr_size;
2723 bcopy(ifa->ifa_netmask, out_addr, copylen);
2724
2725 if (ifa->ifa_netmask->sa_len > addr_size) {
2726 IFA_UNLOCK(ifa);
2727 return EMSGSIZE;
2728 }
2729
2730 IFA_UNLOCK(ifa);
2731 return 0;
2732 }
2733
2734 ifnet_t
ifaddr_ifnet(ifaddr_t ifa)2735 ifaddr_ifnet(ifaddr_t ifa)
2736 {
2737 struct ifnet *ifp;
2738
2739 if (ifa == NULL) {
2740 return NULL;
2741 }
2742
2743 /* ifa_ifp is set once at creation time; it is never changed */
2744 ifp = ifa->ifa_ifp;
2745
2746 return ifp;
2747 }
2748
2749 ifaddr_t
ifaddr_withaddr(const struct sockaddr * address)2750 ifaddr_withaddr(const struct sockaddr *address)
2751 {
2752 if (address == NULL) {
2753 return NULL;
2754 }
2755
2756 return ifa_ifwithaddr(address);
2757 }
2758
2759 ifaddr_t
ifaddr_withdstaddr(const struct sockaddr * address)2760 ifaddr_withdstaddr(const struct sockaddr *address)
2761 {
2762 if (address == NULL) {
2763 return NULL;
2764 }
2765
2766 return ifa_ifwithdstaddr(address);
2767 }
2768
2769 ifaddr_t
ifaddr_withnet(const struct sockaddr * net)2770 ifaddr_withnet(const struct sockaddr *net)
2771 {
2772 if (net == NULL) {
2773 return NULL;
2774 }
2775
2776 return ifa_ifwithnet(net);
2777 }
2778
2779 ifaddr_t
ifaddr_withroute(int flags,const struct sockaddr * destination,const struct sockaddr * gateway)2780 ifaddr_withroute(int flags, const struct sockaddr *destination,
2781 const struct sockaddr *gateway)
2782 {
2783 if (destination == NULL || gateway == NULL) {
2784 return NULL;
2785 }
2786
2787 return ifa_ifwithroute(flags, destination, gateway);
2788 }
2789
2790 ifaddr_t
ifaddr_findbestforaddr(const struct sockaddr * addr,ifnet_t interface)2791 ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface)
2792 {
2793 if (addr == NULL || interface == NULL) {
2794 return NULL;
2795 }
2796
2797 return ifaof_ifpforaddr_select(addr, interface);
2798 }
2799
2800 errno_t
ifmaddr_reference(ifmultiaddr_t ifmaddr)2801 ifmaddr_reference(ifmultiaddr_t ifmaddr)
2802 {
2803 if (ifmaddr == NULL) {
2804 return EINVAL;
2805 }
2806
2807 IFMA_ADDREF(ifmaddr);
2808 return 0;
2809 }
2810
2811 errno_t
ifmaddr_release(ifmultiaddr_t ifmaddr)2812 ifmaddr_release(ifmultiaddr_t ifmaddr)
2813 {
2814 if (ifmaddr == NULL) {
2815 return EINVAL;
2816 }
2817
2818 IFMA_REMREF(ifmaddr);
2819 return 0;
2820 }
2821
2822 errno_t
ifmaddr_address(ifmultiaddr_t ifma,struct sockaddr * out_addr,u_int32_t addr_size)2823 ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2824 u_int32_t addr_size)
2825 {
2826 u_int32_t copylen;
2827
2828 if (ifma == NULL || out_addr == NULL) {
2829 return EINVAL;
2830 }
2831
2832 IFMA_LOCK(ifma);
2833 if (ifma->ifma_addr == NULL) {
2834 IFMA_UNLOCK(ifma);
2835 return ENOTSUP;
2836 }
2837
2838 copylen = (addr_size >= ifma->ifma_addr->sa_len ?
2839 ifma->ifma_addr->sa_len : addr_size);
2840 bcopy(ifma->ifma_addr, out_addr, copylen);
2841
2842 if (ifma->ifma_addr->sa_len > addr_size) {
2843 IFMA_UNLOCK(ifma);
2844 return EMSGSIZE;
2845 }
2846 IFMA_UNLOCK(ifma);
2847 return 0;
2848 }
2849
2850 errno_t
ifmaddr_lladdress(ifmultiaddr_t ifma,struct sockaddr * out_addr,u_int32_t addr_size)2851 ifmaddr_lladdress(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2852 u_int32_t addr_size)
2853 {
2854 struct ifmultiaddr *ifma_ll;
2855
2856 if (ifma == NULL || out_addr == NULL) {
2857 return EINVAL;
2858 }
2859 if ((ifma_ll = ifma->ifma_ll) == NULL) {
2860 return ENOTSUP;
2861 }
2862
2863 return ifmaddr_address(ifma_ll, out_addr, addr_size);
2864 }
2865
2866 ifnet_t
ifmaddr_ifnet(ifmultiaddr_t ifma)2867 ifmaddr_ifnet(ifmultiaddr_t ifma)
2868 {
2869 return (ifma == NULL) ? NULL : ifma->ifma_ifp;
2870 }
2871
2872 /**************************************************************************/
2873 /* interface cloner */
2874 /**************************************************************************/
2875
/*
 * Register an interface cloner (e.g. for "foo0"-style on-demand
 * interfaces).  Validates the parameters, rejects duplicate cloner
 * names, allocates and fills a struct if_clone, and attaches it.
 * On success the opaque cloner handle is returned via `ifcloner'.
 */
errno_t
ifnet_clone_attach(struct ifnet_clone_params *cloner_params,
    if_clone_t *ifcloner)
{
	errno_t error = 0;
	struct if_clone *ifc = NULL;
	size_t namelen;

	/* name must be non-NULL and strictly shorter than IFNAMSIZ */
	if (cloner_params == NULL || ifcloner == NULL ||
	    cloner_params->ifc_name == NULL ||
	    cloner_params->ifc_create == NULL ||
	    cloner_params->ifc_destroy == NULL ||
	    (namelen = strlen(cloner_params->ifc_name)) >= IFNAMSIZ) {
		error = EINVAL;
		goto fail;
	}

	if (if_clone_lookup(cloner_params->ifc_name, NULL) != NULL) {
		printf("%s: already a cloner for %s\n", __func__,
		    cloner_params->ifc_name);
		error = EEXIST;
		goto fail;
	}

	ifc = kalloc_type(struct if_clone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	/* assumes ifc_name is IFNAMSIZ + 1 bytes wide — confirm against
	 * the struct if_clone declaration */
	strlcpy(ifc->ifc_name, cloner_params->ifc_name, IFNAMSIZ + 1);
	ifc->ifc_namelen = (uint8_t)namelen;
	ifc->ifc_maxunit = IF_MAXUNIT;
	ifc->ifc_create = cloner_params->ifc_create;
	ifc->ifc_destroy = cloner_params->ifc_destroy;

	error = if_clone_attach(ifc);
	if (error != 0) {
		printf("%s: if_clone_attach failed %d\n", __func__, error);
		goto fail;
	}
	*ifcloner = ifc;

	return 0;
fail:
	/* ifc is only non-NULL when allocation succeeded but attach failed */
	if (ifc != NULL) {
		kfree_type(struct if_clone, ifc);
	}
	return error;
}
2921
2922 errno_t
ifnet_clone_detach(if_clone_t ifcloner)2923 ifnet_clone_detach(if_clone_t ifcloner)
2924 {
2925 errno_t error = 0;
2926 struct if_clone *ifc = ifcloner;
2927
2928 if (ifc == NULL) {
2929 return EINVAL;
2930 }
2931
2932 if ((if_clone_lookup(ifc->ifc_name, NULL)) == NULL) {
2933 printf("%s: no cloner for %s\n", __func__, ifc->ifc_name);
2934 error = EINVAL;
2935 goto fail;
2936 }
2937
2938 if_clone_detach(ifc);
2939
2940 kfree_type(struct if_clone, ifc);
2941
2942 fail:
2943 return error;
2944 }
2945
2946 /**************************************************************************/
2947 /* misc */
2948 /**************************************************************************/
2949
2950 errno_t
ifnet_get_local_ports_extended(ifnet_t ifp,protocol_family_t protocol,u_int32_t flags,u_int8_t * bitfield)2951 ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol,
2952 u_int32_t flags, u_int8_t *bitfield)
2953 {
2954 u_int32_t ifindex;
2955
2956 if (bitfield == NULL) {
2957 return EINVAL;
2958 }
2959
2960 switch (protocol) {
2961 case PF_UNSPEC:
2962 case PF_INET:
2963 case PF_INET6:
2964 break;
2965 default:
2966 return EINVAL;
2967 }
2968
2969 /* bit string is long enough to hold 16-bit port values */
2970 bzero(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
2971
2972 /* no point in continuing if no address is assigned */
2973 if (ifp != NULL && TAILQ_EMPTY(&ifp->if_addrhead)) {
2974 return 0;
2975 }
2976
2977 if_ports_used_update_wakeuuid(ifp);
2978
2979 #if SKYWALK
2980 if (netns_is_enabled()) {
2981 netns_get_local_ports(ifp, protocol, flags, bitfield);
2982 }
2983 #endif /* SKYWALK */
2984
2985 ifindex = (ifp != NULL) ? ifp->if_index : 0;
2986
2987 if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY)) {
2988 udp_get_ports_used(ifp, protocol, flags,
2989 bitfield);
2990 }
2991
2992 if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY)) {
2993 tcp_get_ports_used(ifp, protocol, flags,
2994 bitfield);
2995 }
2996
2997 return 0;
2998 }
2999
3000 errno_t
ifnet_get_local_ports(ifnet_t ifp,u_int8_t * bitfield)3001 ifnet_get_local_ports(ifnet_t ifp, u_int8_t *bitfield)
3002 {
3003 u_int32_t flags = IFNET_GET_LOCAL_PORTS_WILDCARDOK;
3004 return ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags,
3005 bitfield);
3006 }
3007
3008 errno_t
ifnet_notice_node_presence(ifnet_t ifp,struct sockaddr * sa,int32_t rssi,int lqm,int npm,u_int8_t srvinfo[48])3009 ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi,
3010 int lqm, int npm, u_int8_t srvinfo[48])
3011 {
3012 if (ifp == NULL || sa == NULL || srvinfo == NULL) {
3013 return EINVAL;
3014 }
3015 if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3016 return EINVAL;
3017 }
3018 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
3019 return EINVAL;
3020 }
3021
3022 return dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo);
3023 }
3024
3025 errno_t
ifnet_notice_node_presence_v2(ifnet_t ifp,struct sockaddr * sa,struct sockaddr_dl * sdl,int32_t rssi,int lqm,int npm,u_int8_t srvinfo[48])3026 ifnet_notice_node_presence_v2(ifnet_t ifp, struct sockaddr *sa, struct sockaddr_dl *sdl,
3027 int32_t rssi, int lqm, int npm, u_int8_t srvinfo[48])
3028 {
3029 /* Support older version if sdl is NULL */
3030 if (sdl == NULL) {
3031 return ifnet_notice_node_presence(ifp, sa, rssi, lqm, npm, srvinfo);
3032 }
3033
3034 if (ifp == NULL || sa == NULL || srvinfo == NULL) {
3035 return EINVAL;
3036 }
3037 if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3038 return EINVAL;
3039 }
3040
3041 if (sa->sa_family != AF_INET6) {
3042 return EINVAL;
3043 }
3044
3045 if (sdl->sdl_family != AF_LINK) {
3046 return EINVAL;
3047 }
3048
3049 return dlil_node_present_v2(ifp, sa, sdl, rssi, lqm, npm, srvinfo);
3050 }
3051
3052 errno_t
ifnet_notice_node_absence(ifnet_t ifp,struct sockaddr * sa)3053 ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa)
3054 {
3055 if (ifp == NULL || sa == NULL) {
3056 return EINVAL;
3057 }
3058 if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3059 return EINVAL;
3060 }
3061 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
3062 return EINVAL;
3063 }
3064
3065 dlil_node_absent(ifp, sa);
3066 return 0;
3067 }
3068
3069 errno_t
ifnet_notice_primary_elected(ifnet_t ifp)3070 ifnet_notice_primary_elected(ifnet_t ifp)
3071 {
3072 if (ifp == NULL) {
3073 return EINVAL;
3074 }
3075
3076 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PRIMARY_ELECTED, NULL, 0, FALSE);
3077 return 0;
3078 }
3079
/*
 * Driver notification of a transmit-completion status for `m'.
 * The status value itself is currently unused; the mbuf's completion
 * callback is invoked unconditionally.
 */
errno_t
ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val)
{
#pragma unused(val)

	m_do_tx_compl_callback(m, ifp);

	return 0;
}
3089
/*
 * Run the transmit-completion callback attached to `m', if any.
 */
errno_t
ifnet_tx_compl(ifnet_t ifp, mbuf_t m)
{
	m_do_tx_compl_callback(m, ifp);

	return 0;
}
3097
3098 errno_t
ifnet_report_issues(ifnet_t ifp,u_int8_t modid[IFNET_MODIDLEN],u_int8_t info[IFNET_MODARGLEN])3099 ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN],
3100 u_int8_t info[IFNET_MODARGLEN])
3101 {
3102 if (ifp == NULL || modid == NULL) {
3103 return EINVAL;
3104 }
3105
3106 dlil_report_issues(ifp, modid, info);
3107 return 0;
3108 }
3109
/*
 * Set (or clear, with delegated_ifp == NULL) the delegate interface of
 * `ifp'.  The delegate's type/family/expensive/constrained attributes
 * are cached in if_delegated, ECN-related eflags are propagated, and a
 * KEV_DL_IFDELEGATE_CHANGED event is posted.  A reference is held on
 * the new delegate and the previous one is released.
 */
errno_t
ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp)
{
	ifnet_t odifp = NULL;

	if (ifp == NULL) {
		return EINVAL;
	} else if (!ifnet_is_attached(ifp, 1)) {
		/* takes an io refcount on success; released at `done' */
		return ENXIO;
	}

	ifnet_lock_exclusive(ifp);
	odifp = ifp->if_delegated.ifp;
	if (odifp != NULL && odifp == delegated_ifp) {
		/* delegate info is unchanged; nothing more to do */
		ifnet_lock_done(ifp);
		goto done;
	}
	// Test if this delegate interface would cause a loop
	ifnet_t delegate_check_ifp = delegated_ifp;
	while (delegate_check_ifp != NULL) {
		if (delegate_check_ifp == ifp) {
			/*
			 * NOTE(review): the loop case logs and bails out but
			 * still returns 0 to the caller — confirm intended.
			 */
			printf("%s: delegating to %s would cause a loop\n",
			    ifp->if_xname, delegated_ifp->if_xname);
			ifnet_lock_done(ifp);
			goto done;
		}
		delegate_check_ifp = delegate_check_ifp->if_delegated.ifp;
	}
	/* clear the cached attributes before installing the new delegate */
	bzero(&ifp->if_delegated, sizeof(ifp->if_delegated));
	if (delegated_ifp != NULL && ifp != delegated_ifp) {
		uint32_t set_eflags;

		ifp->if_delegated.ifp = delegated_ifp;
		ifnet_reference(delegated_ifp);
		ifp->if_delegated.type = delegated_ifp->if_type;
		ifp->if_delegated.family = delegated_ifp->if_family;
		ifp->if_delegated.subfamily = delegated_ifp->if_subfamily;
		ifp->if_delegated.expensive =
		    delegated_ifp->if_eflags & IFEF_EXPENSIVE ? 1 : 0;
		ifp->if_delegated.constrained =
		    delegated_ifp->if_xflags & IFXF_CONSTRAINED ? 1 : 0;

		/*
		 * Propogate flags related to ECN from delegated interface
		 */
		if_clear_eflags(ifp, IFEF_ECN_ENABLE | IFEF_ECN_DISABLE);
		set_eflags = (delegated_ifp->if_eflags &
		    (IFEF_ECN_ENABLE | IFEF_ECN_DISABLE));
		if_set_eflags(ifp, set_eflags);
		printf("%s: is now delegating %s (type 0x%x, family %u, "
		    "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname,
		    delegated_ifp->if_type, delegated_ifp->if_family,
		    delegated_ifp->if_subfamily);
	}

	ifnet_lock_done(ifp);

	/* drop the reference held on the previous delegate, if any */
	if (odifp != NULL) {
		if (odifp != delegated_ifp) {
			printf("%s: is no longer delegating %s\n",
			    ifp->if_xname, odifp->if_xname);
		}
		ifnet_release(odifp);
	}

	/* Generate a kernel event */
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IFDELEGATE_CHANGED, NULL, 0, FALSE);

done:
	/* Release the io ref count */
	ifnet_decr_iorefcnt(ifp);

	return 0;
}
3185
3186 errno_t
ifnet_get_delegate(ifnet_t ifp,ifnet_t * pdelegated_ifp)3187 ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp)
3188 {
3189 if (ifp == NULL || pdelegated_ifp == NULL) {
3190 return EINVAL;
3191 } else if (!ifnet_is_attached(ifp, 1)) {
3192 return ENXIO;
3193 }
3194
3195 ifnet_lock_shared(ifp);
3196 if (ifp->if_delegated.ifp != NULL) {
3197 ifnet_reference(ifp->if_delegated.ifp);
3198 }
3199 *pdelegated_ifp = ifp->if_delegated.ifp;
3200 ifnet_lock_done(ifp);
3201
3202 /* Release the io ref count */
3203 ifnet_decr_iorefcnt(ifp);
3204
3205 return 0;
3206 }
3207
/*
 * Collect keep-alive offload frames for `ifp' into `frames_array'.
 * Frames are gathered in priority order — IPsec first, then UDP, then
 * TCP — until the array is full.  `frame_data_offset' is where each
 * frame's payload begins within its data buffer and must be 32-bit
 * aligned.  The number of populated entries is returned via
 * `used_frames_count'.
 */
errno_t
ifnet_get_keepalive_offload_frames(ifnet_t ifp,
    struct ifnet_keepalive_offload_frame *frames_array,
    u_int32_t frames_array_count, size_t frame_data_offset,
    u_int32_t *used_frames_count)
{
	u_int32_t i;

	if (frames_array == NULL || used_frames_count == NULL ||
	    frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
		return EINVAL;
	}

	/* frame_data_offset should be 32-bit aligned */
	if (P2ROUNDUP(frame_data_offset, sizeof(u_int32_t)) !=
	    frame_data_offset) {
		return EINVAL;
	}

	*used_frames_count = 0;
	if (frames_array_count == 0) {
		return 0;
	}

	/* Keep-alive offload not required for CLAT interface */
	if (IS_INTF_CLAT46(ifp)) {
		return 0;
	}

	/* start from a clean slate so unused entries are all-zero */
	for (i = 0; i < frames_array_count; i++) {
		struct ifnet_keepalive_offload_frame *frame = frames_array + i;

		bzero(frame, sizeof(struct ifnet_keepalive_offload_frame));
	}

	/* First collect IPsec related keep-alive frames */
	*used_frames_count = key_fill_offload_frames_for_savs(ifp,
	    frames_array, frames_array_count, frame_data_offset);

	/* If there is more room, collect other UDP keep-alive frames */
	if (*used_frames_count < frames_array_count) {
		udp_fill_keepalive_offload_frames(ifp, frames_array,
		    frames_array_count, frame_data_offset,
		    used_frames_count);
	}

	/* If there is more room, collect other TCP keep-alive frames */
	if (*used_frames_count < frames_array_count) {
		tcp_fill_keepalive_offload_frames(ifp, frames_array,
		    frames_array_count, frame_data_offset,
		    used_frames_count);
	}

	VERIFY(*used_frames_count <= frames_array_count);

	return 0;
}
3265
3266 errno_t
ifnet_notify_tcp_keepalive_offload_timeout(ifnet_t ifp,struct ifnet_keepalive_offload_frame * frame)3267 ifnet_notify_tcp_keepalive_offload_timeout(ifnet_t ifp,
3268 struct ifnet_keepalive_offload_frame *frame)
3269 {
3270 errno_t error = 0;
3271
3272 if (ifp == NULL || frame == NULL) {
3273 return EINVAL;
3274 }
3275
3276 if (frame->type != IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP) {
3277 return EINVAL;
3278 }
3279 if (frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 &&
3280 frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6) {
3281 return EINVAL;
3282 }
3283 if (frame->local_port == 0 || frame->remote_port == 0) {
3284 return EINVAL;
3285 }
3286
3287 error = tcp_notify_kao_timeout(ifp, frame);
3288
3289 return error;
3290 }
3291
/*
 * ifnet_link_status_report - cache a driver-supplied link status report on
 * the interface and react to any notifications the new report carries.
 *
 * ifp        - interface the report applies to (call comes from its driver).
 * buffer     - caller-owned struct if_link_status; contents are copied into
 *              ifp->if_link_status, so the caller keeps ownership.
 * buffer_len - size of buffer; must be non-zero.
 *
 * Returns 0 on success, EINVAL on bad arguments or a report whose ifsr_len
 * does not match the expected per-type status structure, ENXIO if the
 * interface is not attached, ENOTSUP for an unrecognized report version,
 * and ENOMEM if the cached status block cannot be allocated.
 *
 * NOTE(review): buffer_len is only checked for being non-zero; the fields
 * read through ifsr assume the buffer is at least sizeof(struct
 * if_link_status) — presumably acceptable because the caller is the
 * in-kernel driver, but worth confirming.
 */
errno_t
ifnet_link_status_report(ifnet_t ifp, const void *buffer,
    size_t buffer_len)
{
	struct if_link_status *ifsr;
	errno_t err = 0;

	if (ifp == NULL || buffer == NULL || buffer_len == 0) {
		return EINVAL;
	}

	ifnet_lock_shared(ifp);

	/*
	 * Make sure that the interface is attached but there is no need
	 * to take a reference because this call is coming from the driver.
	 */
	if (!ifnet_is_attached(ifp, 0)) {
		ifnet_lock_done(ifp);
		return ENXIO;
	}

	/* Writers of if_link_status serialize on this rwlock. */
	lck_rw_lock_exclusive(&ifp->if_link_status_lock);

	/*
	 * If this is the first status report then allocate memory
	 * to store it.
	 */
	if (ifp->if_link_status == NULL) {
		ifp->if_link_status = kalloc_type(struct if_link_status, Z_ZERO);
		if (ifp->if_link_status == NULL) {
			err = ENOMEM;
			goto done;
		}
	}

	/* The buffer is only read below; the const is dropped, not violated. */
	ifsr = __DECONST(struct if_link_status *, buffer);

	if (ifp->if_type == IFT_CELLULAR) {
		struct if_cellular_status_v1 *if_cell_sr, *new_cell_sr;
		/*
		 * Currently we have a single version -- if it does
		 * not match, just return.
		 */
		if (ifsr->ifsr_version !=
		    IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION) {
			err = ENOTSUP;
			goto done;
		}

		if (ifsr->ifsr_len != sizeof(*if_cell_sr)) {
			err = EINVAL;
			goto done;
		}

		/* if_cell_sr = cached copy, new_cell_sr = incoming report */
		if_cell_sr =
		    &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
		new_cell_sr = &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
		/* Check if we need to act on any new notifications */
		if ((new_cell_sr->valid_bitmask &
		    IF_CELL_UL_MSS_RECOMMENDED_VALID) &&
		    new_cell_sr->mss_recommended !=
		    if_cell_sr->mss_recommended) {
			/*
			 * Recommended MSS changed: flag the TCP PCB info so
			 * connections re-evaluate their MSS, and kick the
			 * fast timer to make that happen promptly.
			 */
			atomic_bitset_32(&tcbinfo.ipi_flags,
			    INPCBINFO_UPDATE_MSS);
			inpcb_timer_sched(&tcbinfo, INPCB_TIMER_FAST);
#if NECP
			necp_update_all_clients();
#endif
		}

		/* Finally copy the new information */
		ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
		ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
		/*
		 * NOTE(review): this zeroing looks redundant — the bcopy()
		 * below overwrites the whole structure, valid_bitmask
		 * included. Kept as-is; confirm before removing.
		 */
		if_cell_sr->valid_bitmask = 0;
		bcopy(new_cell_sr, if_cell_sr, sizeof(*if_cell_sr));
	} else if (IFNET_IS_WIFI(ifp)) {
		struct if_wifi_status_v1 *if_wifi_sr, *new_wifi_sr;

		/* Check version */
		if (ifsr->ifsr_version !=
		    IF_WIFI_STATUS_REPORT_CURRENT_VERSION) {
			err = ENOTSUP;
			goto done;
		}

		if (ifsr->ifsr_len != sizeof(*if_wifi_sr)) {
			err = EINVAL;
			goto done;
		}

		/* if_wifi_sr = cached copy, new_wifi_sr = incoming report */
		if_wifi_sr =
		    &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
		new_wifi_sr =
		    &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
		ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
		ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
		if_wifi_sr->valid_bitmask = 0;
		bcopy(new_wifi_sr, if_wifi_sr, sizeof(*if_wifi_sr));

		/*
		 * Update the bandwidth values if we got recent values
		 * reported through the other KPI.
		 */
		/*
		 * Each branch below backfills one bandwidth field the driver
		 * did not report, using the interface's cached bandwidth,
		 * clamped to UINT32_MAX (if_*_bw fields are 64-bit).
		 */
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_UL_MAX_BANDWIDTH_VALID) &&
		    ifp->if_output_bw.max_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_UL_MAX_BANDWIDTH_VALID;
			if_wifi_sr->ul_max_bandwidth =
			    ifp->if_output_bw.max_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_output_bw.max_bw;
		}
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) &&
		    ifp->if_output_bw.eff_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
			if_wifi_sr->ul_effective_bandwidth =
			    ifp->if_output_bw.eff_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_output_bw.eff_bw;
		}
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_DL_MAX_BANDWIDTH_VALID) &&
		    ifp->if_input_bw.max_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_DL_MAX_BANDWIDTH_VALID;
			if_wifi_sr->dl_max_bandwidth =
			    ifp->if_input_bw.max_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_input_bw.max_bw;
		}
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) &&
		    ifp->if_input_bw.eff_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
			if_wifi_sr->dl_effective_bandwidth =
			    ifp->if_input_bw.eff_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_input_bw.eff_bw;
		}
	}
	/* Unknown interface type: report is silently ignored, err stays 0. */

done:
	lck_rw_done(&ifp->if_link_status_lock);
	ifnet_lock_done(ifp);
	return err;
}
3443
3444 /*************************************************************************/
3445 /* Fastlane QoS Ca */
3446 /*************************************************************************/
3447
3448 errno_t
ifnet_set_fastlane_capable(ifnet_t interface,boolean_t capable)3449 ifnet_set_fastlane_capable(ifnet_t interface, boolean_t capable)
3450 {
3451 if (interface == NULL) {
3452 return EINVAL;
3453 }
3454
3455 if_set_qosmarking_mode(interface,
3456 capable ? IFRTYPE_QOSMARKING_FASTLANE : IFRTYPE_QOSMARKING_MODE_NONE);
3457
3458 return 0;
3459 }
3460
3461 errno_t
ifnet_get_fastlane_capable(ifnet_t interface,boolean_t * capable)3462 ifnet_get_fastlane_capable(ifnet_t interface, boolean_t *capable)
3463 {
3464 if (interface == NULL || capable == NULL) {
3465 return EINVAL;
3466 }
3467 if (interface->if_qosmarking_mode == IFRTYPE_QOSMARKING_FASTLANE) {
3468 *capable = true;
3469 } else {
3470 *capable = false;
3471 }
3472 return 0;
3473 }
3474
3475 errno_t
ifnet_get_unsent_bytes(ifnet_t interface,int64_t * unsent_bytes)3476 ifnet_get_unsent_bytes(ifnet_t interface, int64_t *unsent_bytes)
3477 {
3478 int64_t bytes;
3479
3480 if (interface == NULL || unsent_bytes == NULL) {
3481 return EINVAL;
3482 }
3483
3484 bytes = *unsent_bytes = 0;
3485
3486 if (!IF_FULLY_ATTACHED(interface)) {
3487 return ENXIO;
3488 }
3489
3490 bytes = interface->if_sndbyte_unsent;
3491
3492 if (interface->if_eflags & IFEF_TXSTART) {
3493 bytes += IFCQ_BYTES(interface->if_snd);
3494 }
3495 *unsent_bytes = bytes;
3496
3497 return 0;
3498 }
3499
3500 errno_t
ifnet_get_buffer_status(const ifnet_t ifp,ifnet_buffer_status_t * buf_status)3501 ifnet_get_buffer_status(const ifnet_t ifp, ifnet_buffer_status_t *buf_status)
3502 {
3503 if (ifp == NULL || buf_status == NULL) {
3504 return EINVAL;
3505 }
3506
3507 bzero(buf_status, sizeof(*buf_status));
3508
3509 if (!IF_FULLY_ATTACHED(ifp)) {
3510 return ENXIO;
3511 }
3512
3513 if (ifp->if_eflags & IFEF_TXSTART) {
3514 buf_status->buf_interface = IFCQ_BYTES(ifp->if_snd);
3515 }
3516
3517 buf_status->buf_sndbuf = ((buf_status->buf_interface != 0) ||
3518 (ifp->if_sndbyte_unsent != 0)) ? 1 : 0;
3519
3520 return 0;
3521 }
3522
3523 void
ifnet_normalise_unsent_data(void)3524 ifnet_normalise_unsent_data(void)
3525 {
3526 struct ifnet *ifp;
3527
3528 ifnet_head_lock_shared();
3529 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
3530 ifnet_lock_exclusive(ifp);
3531 if (!IF_FULLY_ATTACHED(ifp)) {
3532 ifnet_lock_done(ifp);
3533 continue;
3534 }
3535 if (!(ifp->if_eflags & IFEF_TXSTART)) {
3536 ifnet_lock_done(ifp);
3537 continue;
3538 }
3539
3540 if (ifp->if_sndbyte_total > 0 ||
3541 IFCQ_BYTES(ifp->if_snd) > 0) {
3542 ifp->if_unsent_data_cnt++;
3543 }
3544
3545 ifnet_lock_done(ifp);
3546 }
3547 ifnet_head_done();
3548 }
3549
3550 errno_t
ifnet_set_low_power_mode(ifnet_t ifp,boolean_t on)3551 ifnet_set_low_power_mode(ifnet_t ifp, boolean_t on)
3552 {
3553 errno_t error;
3554
3555 error = if_set_low_power(ifp, on);
3556
3557 return error;
3558 }
3559
3560 errno_t
ifnet_get_low_power_mode(ifnet_t ifp,boolean_t * on)3561 ifnet_get_low_power_mode(ifnet_t ifp, boolean_t *on)
3562 {
3563 if (ifp == NULL || on == NULL) {
3564 return EINVAL;
3565 }
3566
3567 *on = ((ifp->if_xflags & IFXF_LOW_POWER) != 0);
3568 return 0;
3569 }
3570