xref: /xnu-12377.41.6/bsd/net/kpi_interface.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2004-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include "kpi_interface.h"
30 
31 #include <sys/queue.h>
32 #include <sys/param.h>  /* for definition of NULL */
33 #include <kern/debug.h> /* for panic */
34 #include <sys/errno.h>
35 #include <sys/socket.h>
36 #include <sys/kern_event.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/kpi_mbuf.h>
40 #include <sys/mcache.h>
41 #include <sys/protosw.h>
42 #include <sys/syslog.h>
43 #include <net/if_var.h>
44 #include <net/if_dl.h>
45 #include <net/dlil.h>
46 #include <net/if_types.h>
47 #include <net/if_dl.h>
48 #include <net/if_arp.h>
49 #include <net/if_llreach.h>
50 #include <net/if_ether.h>
51 #include <net/net_api_stats.h>
52 #include <net/route.h>
53 #include <net/if_ports_used.h>
54 #include <libkern/libkern.h>
55 #include <libkern/OSAtomic.h>
56 #include <kern/locks.h>
57 #include <kern/clock.h>
58 #include <kern/uipc_domain.h>
59 #include <sys/sockio.h>
60 #include <sys/proc.h>
61 #include <sys/sysctl.h>
62 #include <sys/mbuf.h>
63 #include <netinet/ip_var.h>
64 #include <netinet/udp.h>
65 #include <netinet/udp_var.h>
66 #include <netinet/tcp.h>
67 #include <netinet/tcp_var.h>
68 #include <netinet/in_pcb.h>
69 #ifdef INET
70 #include <netinet/igmp_var.h>
71 #endif
72 #include <netinet6/mld6_var.h>
73 #include <netkey/key.h>
74 #include <stdbool.h>
75 
76 #include "net/net_str_id.h"
77 #include <net/sockaddr_utils.h>
78 
79 #if CONFIG_MACF
80 #include <sys/kauth.h>
81 #include <security/mac_framework.h>
82 #endif
83 
84 #if SKYWALK
85 #include <skywalk/os_skywalk_private.h>
86 #include <skywalk/nexus/netif/nx_netif.h>
87 #endif /* SKYWALK */
88 
89 extern uint64_t if_creation_generation_count;
90 
91 #undef ifnet_allocate
92 errno_t ifnet_allocate(const struct ifnet_init_params *init,
93     ifnet_t *ifp);
94 
95 static errno_t ifnet_allocate_common(const struct ifnet_init_params *init,
96     ifnet_t *ifp, bool is_internal);
97 
98 
/*
 * Stamp an if_lastchange-style timeval with the current net uptime.
 * Sub-second precision is intentionally not tracked (tv_usec is zeroed).
 */
#define TOUCHLASTCHANGE(__if_lastchange) {                              \
	(__if_lastchange)->tv_sec = (time_t)net_uptime();               \
	(__if_lastchange)->tv_usec = 0;                                 \
}
103 
104 static errno_t ifnet_defrouter_llreachinfo(ifnet_t, sa_family_t,
105     struct ifnet_llreach_info *);
106 static void ifnet_kpi_free(ifnet_t);
107 static errno_t ifnet_list_get_common(ifnet_family_t, boolean_t, ifnet_t *__counted_by(*count) *list,
108     u_int32_t *count);
109 static errno_t ifnet_set_lladdr_internal(ifnet_t,
110     const void *__sized_by(lladdr_len) lladdr, size_t lladdr_len,
111     u_char, int);
112 static errno_t ifnet_awdl_check_eflags(ifnet_t, u_int32_t *, u_int32_t *);
113 
114 
115 /*
116  * Temporary work around until we have real reference counting
117  *
118  * We keep the bits about calling dlil_if_release (which should be
119  * called recycle) transparent by calling it from our if_free function
120  * pointer. We have to keep the client's original detach function
121  * somewhere so we can call it.
122  */
123 static void
ifnet_kpi_free(ifnet_t ifp)124 ifnet_kpi_free(ifnet_t ifp)
125 {
126 	if ((ifp->if_refflags & IFRF_EMBRYONIC) == 0) {
127 		ifnet_detached_func detach_func;
128 
129 		detach_func = ifp->if_detach;
130 		if (detach_func != NULL) {
131 			(*detach_func)(ifp);
132 		}
133 	}
134 
135 	ifnet_dispose(ifp);
136 }
137 
138 errno_t
ifnet_allocate_common(const struct ifnet_init_params * init,ifnet_t * ifp,bool is_internal)139 ifnet_allocate_common(const struct ifnet_init_params *init,
140     ifnet_t *ifp, bool is_internal)
141 {
142 	struct ifnet_init_eparams einit;
143 
144 	bzero(&einit, sizeof(einit));
145 
146 	einit.ver               = IFNET_INIT_CURRENT_VERSION;
147 	einit.len               = sizeof(einit);
148 	einit.flags             = IFNET_INIT_LEGACY | IFNET_INIT_NX_NOAUTO;
149 	if (!is_internal) {
150 		einit.flags |= IFNET_INIT_ALLOC_KPI;
151 	}
152 	einit.uniqueid          = init->uniqueid;
153 	einit.uniqueid_len      = init->uniqueid_len;
154 	einit.name              = init->name;
155 	einit.unit              = init->unit;
156 	einit.family            = init->family;
157 	einit.type              = init->type;
158 	einit.output            = init->output;
159 	einit.demux             = init->demux;
160 	einit.add_proto         = init->add_proto;
161 	einit.del_proto         = init->del_proto;
162 	einit.check_multi       = init->check_multi;
163 	einit.framer            = init->framer;
164 	einit.softc             = init->softc;
165 	einit.ioctl             = init->ioctl;
166 	einit.set_bpf_tap       = init->set_bpf_tap;
167 	einit.detach            = init->detach;
168 	einit.event             = init->event;
169 	einit.broadcast_addr    = init->broadcast_addr;
170 	einit.broadcast_len     = init->broadcast_len;
171 
172 	return ifnet_allocate_extended(&einit, ifp);
173 }
174 
/*
 * Allocate an ifnet on behalf of an in-kernel (xnu-internal) client;
 * such interfaces are not tagged with IFNET_INIT_ALLOC_KPI.
 */
errno_t
ifnet_allocate_internal(const struct ifnet_init_params *init, ifnet_t *ifp)
{
	return ifnet_allocate_common(init, ifp, true);
}
180 
/*
 * Public KPI entry point for interface allocation (legacy parameter form).
 * The #undef above ensures this defines the un-renamed symbol exported to
 * third-party kexts; allocations made here are counted as KPI allocations.
 */
errno_t
ifnet_allocate(const struct ifnet_init_params *init, ifnet_t *ifp)
{
	return ifnet_allocate_common(init, ifp, false);
}
186 
187 static void
ifnet_set_broadcast_addr(ifnet_t ifp,const void * __sized_by (broadcast_len)broadcast_addr,u_int32_t broadcast_len)188 ifnet_set_broadcast_addr(ifnet_t ifp,
189     const void *__sized_by(broadcast_len) broadcast_addr,
190     u_int32_t broadcast_len)
191 {
192 	if (ifp->if_broadcast.length != 0) {
193 		kfree_data_counted_by(ifp->if_broadcast.ptr,
194 		    ifp->if_broadcast.length);
195 	}
196 	if (broadcast_len != 0 && broadcast_addr != NULL) {
197 		ifp->if_broadcast.ptr = kalloc_data(broadcast_len,
198 		    Z_WAITOK | Z_NOFAIL);
199 		ifp->if_broadcast.length = broadcast_len;
200 		bcopy(broadcast_addr, ifp->if_broadcast.ptr,
201 		    broadcast_len);
202 	}
203 }
204 
/*
 * Extended interface allocation.  Validates *einit0, acquires an ifnet via
 * dlil_if_acquire() (which may recycle a previously detached ifnet with the
 * same uniqueid), then seeds the driver callbacks, bandwidth/latency
 * estimates, output model, Skywalk state and the send class queue.  On
 * success the ifnet is returned in *interface in the embryonic state; it
 * only becomes usable after ifnet_attach().
 */
errno_t
ifnet_allocate_extended(const struct ifnet_init_eparams *einit0,
    ifnet_t *interface)
{
#if SKYWALK
	ifnet_start_func ostart = NULL;
#endif /* SKYWALK */
	struct ifnet_init_eparams einit;
	ifnet_ref_t ifp = NULL;
	char if_xname[IFXNAMSIZ] = {0};
	int error;

	/* Work on a local copy; several fields are normalized below. */
	einit = *einit0;

	if (einit.ver != IFNET_INIT_CURRENT_VERSION ||
	    einit.len < sizeof(einit)) {
		return EINVAL;
	}

	/* if_type is stored in a u_char, so reject types wider than 8 bits */
	if (einit.family == 0 || einit.name == NULL ||
	    strlen(einit.name) >= IFNAMSIZ ||
	    (einit.type & 0xFFFFFF00) != 0 || einit.type == 0) {
		return EINVAL;
	}

#if SKYWALK
	/* headroom must be a multiple of 8 bytes */
	if ((einit.tx_headroom & 0x7) != 0) {
		return EINVAL;
	}
	if ((einit.flags & IFNET_INIT_SKYWALK_NATIVE) == 0) {
		/*
		 * Currently Interface advisory reporting is supported only
		 * for skywalk interface.
		 */
		if ((einit.flags & IFNET_INIT_IF_ADV) != 0) {
			return EINVAL;
		}
	}
#endif /* SKYWALK */

	if (einit.flags & IFNET_INIT_LEGACY) {
		/*
		 * Legacy model: the driver supplies if_output directly;
		 * the start/pre-enqueue/poll callbacks must not be used.
		 */
#if SKYWALK
		if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
			return EINVAL;
		}
#endif /* SKYWALK */
		if (einit.output == NULL ||
		    (einit.flags & IFNET_INIT_INPUT_POLL)) {
			return EINVAL;
		}
		einit.pre_enqueue = NULL;
		einit.start = NULL;
		einit.output_ctl = NULL;
		einit.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
		einit.input_poll = NULL;
		einit.input_ctl = NULL;
	} else {
#if SKYWALK
		/*
		 * For native Skywalk drivers, steer all start requests
		 * to ifp_if_start() until the netif device adapter is
		 * fully activated, at which point we will point it to
		 * nx_netif_doorbell().
		 */
		if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
			if (einit.start != NULL) {
				return EINVAL;
			}
			/* override output start callback */
			ostart = einit.start = ifp_if_start;
		} else {
			ostart = einit.start;
		}
#endif /* SKYWALK */
		/* New model: a start callback is mandatory. */
		if (einit.start == NULL) {
			return EINVAL;
		}

		einit.output = NULL;
		if (!IFNET_MODEL_IS_VALID(einit.output_sched_model)) {
			panic("wrong model %u", einit.output_sched_model);
			return EINVAL;
		}

		if (einit.flags & IFNET_INIT_INPUT_POLL) {
			if (einit.input_poll == NULL || einit.input_ctl == NULL) {
				return EINVAL;
			}
		} else {
			einit.input_poll = NULL;
			einit.input_ctl = NULL;
		}
	}

	/* if_type and if_unit are stored in narrow fields; range-check */
	if (einit.type > UCHAR_MAX) {
		return EINVAL;
	}

	if (einit.unit > SHRT_MAX) {
		return EINVAL;
	}

	/* Initialize external name (name + unit) */
	snprintf(if_xname, sizeof(if_xname), "%s%d",
	    einit.name, einit.unit);

	/* Default the uniqueid to the external name when none was given */
	if (einit.uniqueid == NULL) {
		einit.uniqueid_len = (uint32_t)strbuflen(if_xname);
		einit.uniqueid = if_xname;
	}

	error = dlil_if_acquire(einit.family, einit.uniqueid,
	    einit.uniqueid_len,
	    __unsafe_null_terminated_from_indexable(if_xname), &ifp);

	if (error == 0) {
		uint64_t br;

		/*
		 * Cast ifp->if_name as non const. dlil_if_acquire sets it up
		 * to point to storage of at least IFNAMSIZ bytes. It is safe
		 * to write to this.
		 */
		char *ifname = __unsafe_forge_bidi_indexable(char *, __DECONST(char *, ifp->if_name), IFNAMSIZ);
		const char *einit_name = __unsafe_forge_bidi_indexable(const char *, einit.name, IFNAMSIZ);
		strbufcpy(ifname, IFNAMSIZ, einit_name, IFNAMSIZ);
		ifp->if_type            = (u_char)einit.type;
		ifp->if_family          = einit.family;
		ifp->if_subfamily       = einit.subfamily;
		ifp->if_unit            = (short)einit.unit;
		ifp->if_output          = einit.output;
		ifp->if_pre_enqueue     = einit.pre_enqueue;
		ifp->if_start           = einit.start;
		ifp->if_output_ctl      = einit.output_ctl;
		ifp->if_output_sched_model = einit.output_sched_model;
		ifp->if_output_bw.eff_bw = einit.output_bw;
		ifp->if_output_bw.max_bw = einit.output_bw_max;
		ifp->if_output_lt.eff_lt = einit.output_lt;
		ifp->if_output_lt.max_lt = einit.output_lt_max;
		ifp->if_input_poll      = einit.input_poll;
		ifp->if_input_ctl       = einit.input_ctl;
		ifp->if_input_bw.eff_bw = einit.input_bw;
		ifp->if_input_bw.max_bw = einit.input_bw_max;
		ifp->if_input_lt.eff_lt = einit.input_lt;
		ifp->if_input_lt.max_lt = einit.input_lt_max;
		ifp->if_demux           = einit.demux;
		ifp->if_add_proto       = einit.add_proto;
		ifp->if_del_proto       = einit.del_proto;
		ifp->if_check_multi     = einit.check_multi;
		ifp->if_framer_legacy   = einit.framer;
		ifp->if_framer          = einit.framer_extended;
		ifp->if_softc           = einit.softc;
		ifp->if_ioctl           = einit.ioctl;
		ifp->if_set_bpf_tap     = einit.set_bpf_tap;
		/* Fall back to the KPI default free routine (see above). */
		ifp->if_free            = (einit.free != NULL) ? einit.free : ifnet_kpi_free;
		ifp->if_event           = einit.event;
		ifp->if_detach          = einit.detach;

		/* Initialize Network ID */
		ifp->network_id_len     = 0;
		bzero(&ifp->network_id, sizeof(ifp->network_id));

		/* Initialize external name (name + unit) */
		char *ifxname = __unsafe_forge_bidi_indexable(char *, __DECONST(char *, ifp->if_xname), IFXNAMSIZ);
		snprintf(ifxname, IFXNAMSIZ, "%s", if_xname);

		/*
		 * On embedded, framer() is already in the extended form;
		 * we simply use it as is, unless the caller specifies
		 * framer_extended() which will then override it.
		 *
		 * On non-embedded, framer() has long been exposed as part
		 * of the public KPI, and therefore its signature must
		 * remain the same (without the pre- and postpend length
		 * parameters.)  We special case ether_frameout, such that
		 * it gets mapped to its extended variant.  All other cases
		 * utilize the stub routine which will simply return zeroes
		 * for those new parameters.
		 *
		 * Internally, DLIL will only use the extended callback
		 * variant which is represented by if_framer.
		 */
#if !XNU_TARGET_OS_OSX
		if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
			ifp->if_framer = ifp->if_framer_legacy;
		}
#else /* XNU_TARGET_OS_OSX */
		if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
			if (ifp->if_framer_legacy == ether_frameout) {
				ifp->if_framer = ether_frameout_extended;
			} else {
				ifp->if_framer = ifnet_framer_stub;
			}
		}
#endif /* XNU_TARGET_OS_OSX */

		/*
		 * Normalize bandwidth estimates: effective never exceeds
		 * max, and a zero effective value inherits the max.
		 */
		if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
			ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
		} else if (ifp->if_output_bw.eff_bw == 0) {
			ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
		}

		if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
			ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
		} else if (ifp->if_input_bw.eff_bw == 0) {
			ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
		}

		/* If only one direction is known, mirror it to the other. */
		if (ifp->if_output_bw.max_bw == 0) {
			ifp->if_output_bw = ifp->if_input_bw;
		} else if (ifp->if_input_bw.max_bw == 0) {
			ifp->if_input_bw = ifp->if_output_bw;
		}

		/* Pin if_baudrate to 32 bits */
		br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
		if (br != 0) {
			ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
		}

		/* Normalize latency estimates the same way as bandwidth. */
		if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
			ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
		} else if (ifp->if_output_lt.eff_lt == 0) {
			ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
		}

		if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
			ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
		} else if (ifp->if_input_lt.eff_lt == 0) {
			ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
		}

		if (ifp->if_output_lt.max_lt == 0) {
			ifp->if_output_lt = ifp->if_input_lt;
		} else if (ifp->if_input_lt.max_lt == 0) {
			ifp->if_input_lt = ifp->if_output_lt;
		}

		if (ifp->if_ioctl == NULL) {
			ifp->if_ioctl = ifp_if_ioctl;
		}

		/*
		 * New output model: route all output through the enqueue
		 * path (ifnet_enqueue by default) when if_start is in use.
		 */
		if_clear_eflags(ifp, -1);
		if (ifp->if_start != NULL) {
			if_set_eflags(ifp, IFEF_TXSTART);
			if (ifp->if_pre_enqueue == NULL) {
				ifp->if_pre_enqueue = ifnet_enqueue;
			}
			ifp->if_output = ifp->if_pre_enqueue;
		}

		if (ifp->if_input_poll != NULL) {
			if_set_eflags(ifp, IFEF_RXPOLL);
		}

		ifp->if_output_dlil = dlil_output_handler;
		ifp->if_input_dlil = dlil_input_handler;

		/* Cross-check the invariants established above. */
		VERIFY(!(einit.flags & IFNET_INIT_LEGACY) ||
		    (ifp->if_pre_enqueue == NULL && ifp->if_start == NULL &&
		    ifp->if_output_ctl == NULL && ifp->if_input_poll == NULL &&
		    ifp->if_input_ctl == NULL));
		VERIFY(!(einit.flags & IFNET_INIT_INPUT_POLL) ||
		    (ifp->if_input_poll != NULL && ifp->if_input_ctl != NULL));

		ifnet_set_broadcast_addr(ifp, einit.broadcast_addr,
		    einit.broadcast_len);

		if_clear_xflags(ifp, -1);
#if SKYWALK
		ifp->if_tx_headroom = 0;
		ifp->if_tx_trailer = 0;
		ifp->if_rx_mit_ival = 0;
		ifp->if_save_start = ostart;
		if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
			VERIFY(ifp->if_eflags & IFEF_TXSTART);
			VERIFY(!(einit.flags & IFNET_INIT_LEGACY));
			if_set_eflags(ifp, IFEF_SKYWALK_NATIVE);
			ifp->if_tx_headroom = einit.tx_headroom;
			ifp->if_tx_trailer = einit.tx_trailer;
			ifp->if_rx_mit_ival = einit.rx_mit_ival;
			/*
			 * For native Skywalk drivers, make sure packets
			 * emitted by the BSD stack get dropped until the
			 * interface is in service.  When the netif host
			 * adapter is fully activated, we'll point it to
			 * nx_netif_output().
			 */
			ifp->if_output = ifp_if_output;
			/*
			 * Override driver-supplied parameters
			 * and force IFEF_ENQUEUE_MULTI?
			 */
			if (sk_netif_native_txmodel ==
			    NETIF_NATIVE_TXMODEL_ENQUEUE_MULTI) {
				einit.start_delay_qlen = sk_tx_delay_qlen;
				einit.start_delay_timeout = sk_tx_delay_timeout;
			}
			/* netif comes with native interfaces */
			VERIFY((ifp->if_xflags & IFXF_LEGACY) == 0);
		} else if (!ifnet_needs_compat(ifp)) {
			/*
			 * If we're told not to plumb in netif compat
			 * for this interface, set IFXF_NX_NOAUTO to
			 * prevent DLIL from auto-attaching the nexus.
			 */
			einit.flags |= IFNET_INIT_NX_NOAUTO;
			/* legacy (non-netif) interface */
			if_set_xflags(ifp, IFXF_LEGACY);
		}

		ifp->if_save_output = ifp->if_output;
		if ((einit.flags & IFNET_INIT_NX_NOAUTO) != 0) {
			if_set_xflags(ifp, IFXF_NX_NOAUTO);
		}
		if ((einit.flags & IFNET_INIT_IF_ADV) != 0) {
			if_set_eflags(ifp, IFEF_ADV_REPORT);
		}
#else /* !SKYWALK */
		/* legacy interface */
		if_set_xflags(ifp, IFXF_LEGACY);
#endif /* !SKYWALK */

		if ((ifp->if_snd = ifclassq_alloc()) == NULL) {
			panic_plain("%s: ifp=%p couldn't allocate class queues",
			    __func__, ifp);
			/* NOTREACHED */
		}

		/*
		 * output target queue delay is specified in millisecond
		 * convert it to nanoseconds
		 */
		IFCQ_TARGET_QDELAY(ifp->if_snd) =
		    einit.output_target_qdelay * 1000 * 1000;
		IFCQ_MAXLEN(ifp->if_snd) = einit.sndq_maxlen;

		ifnet_enqueue_multi_setup(ifp, einit.start_delay_qlen,
		    einit.start_delay_timeout);

		IFCQ_PKT_DROP_LIMIT(ifp->if_snd) = IFCQ_DEFAULT_PKT_DROP_LIMIT;

		/*
		 * Set embryonic flag; this will be cleared
		 * later when it is fully attached.
		 */
		ifp->if_refflags = IFRF_EMBRYONIC;

		/*
		 * Count the newly allocated ifnet
		 */
		OSIncrementAtomic64(&net_api_stats.nas_ifnet_alloc_count);
		INC_ATOMIC_INT64_LIM(net_api_stats.nas_ifnet_alloc_total);
		if ((einit.flags & IFNET_INIT_ALLOC_KPI) != 0) {
			if_set_xflags(ifp, IFXF_ALLOC_KPI);
		} else {
			OSIncrementAtomic64(
				&net_api_stats.nas_ifnet_alloc_os_count);
			INC_ATOMIC_INT64_LIM(
				net_api_stats.nas_ifnet_alloc_os_total);
		}

		if (ifp->if_subfamily == IFNET_SUBFAMILY_MANAGEMENT) {
			if_set_xflags(ifp, IFXF_MANAGEMENT);
			if_management_interface_check_needed = true;
		}

		/*
		 * Set the default inband wake packet tagging for the interface family
		 */
		init_inband_wake_pkt_tagging_for_family(ifp);

		/*
		 * Increment the generation count on interface creation
		 */
		ifp->if_creation_generation_id = os_atomic_inc(&if_creation_generation_count, relaxed);

		*interface = ifp;
	}
	return error;
}
587 
/* Take an additional reference on the ifnet (thin wrapper over DLIL). */
errno_t
ifnet_reference(ifnet_t ifp)
{
	return dlil_if_ref(ifp);
}
593 
/*
 * Return the ifnet to DLIL for recycling.  Called from if_free handlers
 * (e.g. ifnet_kpi_free above) once client detach processing is complete.
 */
void
ifnet_dispose(ifnet_t ifp)
{
	dlil_if_release(ifp);
}
599 
/* Drop a reference previously taken with ifnet_reference(). */
errno_t
ifnet_release(ifnet_t ifp)
{
	return dlil_if_free(ifp);
}
605 
/*
 * Look up the numeric interface family identifier for a module string.
 * NOTE(review): the trailing argument (1) presumably requests creation of
 * a new id when the string is unknown — confirm against
 * net_str_id_find_internal().
 */
errno_t
ifnet_interface_family_find(const char *module_string,
    ifnet_family_t *family_id)
{
	if (module_string == NULL || family_id == NULL) {
		return EINVAL;
	}

	return net_str_id_find_internal(module_string, family_id,
	           NSI_IF_FAM_ID, 1);
}
617 
618 void *
ifnet_softc(ifnet_t interface)619 ifnet_softc(ifnet_t interface)
620 {
621 	return (interface == NULL) ? NULL : interface->if_softc;
622 }
623 
624 const char *
ifnet_name(ifnet_t interface)625 ifnet_name(ifnet_t interface)
626 {
627 	return (interface == NULL) ? NULL : interface->if_name;
628 }
629 
630 ifnet_family_t
ifnet_family(ifnet_t interface)631 ifnet_family(ifnet_t interface)
632 {
633 	return (interface == NULL) ? 0 : interface->if_family;
634 }
635 
636 ifnet_subfamily_t
ifnet_subfamily(ifnet_t interface)637 ifnet_subfamily(ifnet_t interface)
638 {
639 	return (interface == NULL) ? 0 : interface->if_subfamily;
640 }
641 
642 u_int32_t
ifnet_unit(ifnet_t interface)643 ifnet_unit(ifnet_t interface)
644 {
645 	return (interface == NULL) ? (u_int32_t)0xffffffff :
646 	       (u_int32_t)interface->if_unit;
647 }
648 
649 u_int32_t
ifnet_index(ifnet_t interface)650 ifnet_index(ifnet_t interface)
651 {
652 	return (interface == NULL) ? (u_int32_t)0xffffffff :
653 	       interface->if_index;
654 }
655 
/*
 * Set/clear interface flags (IFF_*) under the ifnet lock.  IFF_UP
 * transitions are routed through if_updown(), and toggling IFF_MULTICAST
 * re-initializes the IGMP/MLD "silent" state for the interface.
 */
errno_t
ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask)
{
	bool set_IFF_UP;
	bool change_IFF_UP;
	uint16_t old_flags;

	if (interface == NULL) {
		return EINVAL;
	}
	set_IFF_UP = (new_flags & IFF_UP) != 0;
	change_IFF_UP = (mask & IFF_UP) != 0;
#if SKYWALK
	if (set_IFF_UP && change_IFF_UP) {
		/*
		 * When a native skywalk interface is marked IFF_UP, ensure
		 * the flowswitch is attached.
		 */
		ifnet_attach_native_flowswitch(interface);
	}
#endif /* SKYWALK */

	ifnet_lock_exclusive(interface);

	/* If we are modifying the up/down state, call if_updown */
	if (change_IFF_UP) {
		if_updown(interface, set_IFF_UP);
	}

	old_flags = interface->if_flags;
	/* Apply only the masked bits; preserve the rest. */
	interface->if_flags = (new_flags & mask) | (interface->if_flags & ~mask);
	/* If we are modifying the multicast flag, set/unset the silent flag */
	if ((old_flags & IFF_MULTICAST) !=
	    (interface->if_flags & IFF_MULTICAST)) {
#if INET
		if (IGMP_IFINFO(interface) != NULL) {
			igmp_initsilent(interface, IGMP_IFINFO(interface));
		}
#endif /* INET */
		if (MLD_IFINFO(interface) != NULL) {
			mld6_initsilent(interface, MLD_IFINFO(interface));
		}
	}

	ifnet_lock_done(interface);

	return 0;
}
704 
705 u_int16_t
ifnet_flags(ifnet_t interface)706 ifnet_flags(ifnet_t interface)
707 {
708 	return (interface == NULL) ? 0 : interface->if_flags;
709 }
710 
711 /*
712  * This routine ensures the following:
713  *
714  * If IFEF_AWDL is set by the caller, also set the rest of flags as
715  * defined in IFEF_AWDL_MASK.
716  *
717  * If IFEF_AWDL has been set on the interface and the caller attempts
718  * to clear one or more of the associated flags in IFEF_AWDL_MASK,
719  * return failure.
720  *
721  * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
722  * on the interface.
723  *
724  * All other flags not associated with AWDL are not affected.
725  *
726  * See <net/if.h> for current definition of IFEF_AWDL_MASK.
727  */
static errno_t
ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask)
{
	u_int32_t eflags;

	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);

	/* Compute what if_eflags would become if the request were applied. */
	eflags = (*new_eflags & *mask) | (ifp->if_eflags & ~(*mask));

	if (ifp->if_eflags & IFEF_AWDL) {
		if (eflags & IFEF_AWDL) {
			/* AWDL stays on: the full mask must remain intact. */
			if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK) {
				return EINVAL;
			}
		} else {
			/* AWDL turning off: clear all associated bits too. */
			*new_eflags &= ~IFEF_AWDL_MASK;
			*mask |= IFEF_AWDL_MASK;
		}
	} else if (eflags & IFEF_AWDL) {
		/* AWDL turning on: force all associated bits on as well. */
		*new_eflags |= IFEF_AWDL_MASK;
		*mask |= IFEF_AWDL_MASK;
	} else if (eflags & IFEF_AWDL_RESTRICTED &&
	    !(ifp->if_eflags & IFEF_AWDL)) {
		/* RESTRICTED requires AWDL to already be set. */
		return EINVAL;
	}

	return 0;
}
756 
/*
 * Set/clear extended interface flags (IFEF_*) under the ifnet lock.
 * AWDL-related bits are validated and expanded by ifnet_awdl_check_eflags()
 * (which may widen new_flags/mask), and IFEF_ADV_REPORT is permitted only
 * on native Skywalk interfaces.  Transitions of IFEF_AWDL_RESTRICTED are
 * announced to user space as kernel events.
 */
errno_t
ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask)
{
	uint32_t oeflags;
	struct kev_msg ev_msg;
	struct net_event_data ev_data;

	if (interface == NULL) {
		return EINVAL;
	}

	bzero(&ev_msg, sizeof(ev_msg));
	ifnet_lock_exclusive(interface);
	/*
	 * Sanity checks for IFEF_AWDL and its related flags.
	 */
	if (ifnet_awdl_check_eflags(interface, &new_flags, &mask) != 0) {
		ifnet_lock_done(interface);
		return EINVAL;
	}
	/*
	 * Currently Interface advisory reporting is supported only for
	 * skywalk interface.
	 */
	if ((((new_flags & mask) & IFEF_ADV_REPORT) != 0) &&
	    ((interface->if_eflags & IFEF_SKYWALK_NATIVE) == 0)) {
		ifnet_lock_done(interface);
		return EINVAL;
	}
	oeflags = interface->if_eflags;
	if_clear_eflags(interface, mask);
	if (new_flags != 0) {
		if_set_eflags(interface, (new_flags & mask));
	}
	ifnet_lock_done(interface);
	/* Detect IFEF_AWDL_RESTRICTED edges by comparing old vs. new. */
	if (interface->if_eflags & IFEF_AWDL_RESTRICTED &&
	    !(oeflags & IFEF_AWDL_RESTRICTED)) {
		ev_msg.event_code = KEV_DL_AWDL_RESTRICTED;
		/*
		 * The interface is now restricted to applications that have
		 * the entitlement.
		 * The check for the entitlement will be done in the data
		 * path, so we don't have to do anything here.
		 */
	} else if (oeflags & IFEF_AWDL_RESTRICTED &&
	    !(interface->if_eflags & IFEF_AWDL_RESTRICTED)) {
		ev_msg.event_code = KEV_DL_AWDL_UNRESTRICTED;
	}
	/*
	 * Notify configd so that it has a chance to perform better
	 * reachability detection.
	 */
	if (ev_msg.event_code) {
		bzero(&ev_data, sizeof(ev_data));
		ev_msg.vendor_code = KEV_VENDOR_APPLE;
		ev_msg.kev_class = KEV_NETWORK_CLASS;
		ev_msg.kev_subclass = KEV_DL_SUBCLASS;
		strlcpy(ev_data.if_name, interface->if_name, IFNAMSIZ);
		ev_data.if_family = interface->if_family;
		ev_data.if_unit = interface->if_unit;
		ev_msg.dv[0].data_length = sizeof(struct net_event_data);
		ev_msg.dv[0].data_ptr = &ev_data;
		ev_msg.dv[1].data_length = 0;
		dlil_post_complete_msg(interface, &ev_msg);
	}

	return 0;
}
825 
826 u_int32_t
ifnet_eflags(ifnet_t interface)827 ifnet_eflags(ifnet_t interface)
828 {
829 	return (interface == NULL) ? 0 : interface->if_eflags;
830 }
831 
832 errno_t
ifnet_set_idle_flags_locked(ifnet_t ifp,u_int32_t new_flags,u_int32_t mask)833 ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
834 {
835 	if (ifp == NULL) {
836 		return EINVAL;
837 	}
838 	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
839 
840 	/*
841 	 * If this is called prior to ifnet attach, the actual work will
842 	 * be done at attach time.  Otherwise, if it is called after
843 	 * ifnet detach, then it is a no-op.
844 	 */
845 	if (!ifnet_is_fully_attached(ifp)) {
846 		ifp->if_idle_new_flags = new_flags;
847 		ifp->if_idle_new_flags_mask = mask;
848 		return 0;
849 	} else {
850 		ifp->if_idle_new_flags = ifp->if_idle_new_flags_mask = 0;
851 	}
852 
853 	ifp->if_idle_flags = (new_flags & mask) | (ifp->if_idle_flags & ~mask);
854 	return 0;
855 }
856 
857 errno_t
ifnet_set_idle_flags(ifnet_t ifp,u_int32_t new_flags,u_int32_t mask)858 ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
859 {
860 	errno_t err;
861 
862 	ifnet_lock_exclusive(ifp);
863 	err = ifnet_set_idle_flags_locked(ifp, new_flags, mask);
864 	ifnet_lock_done(ifp);
865 
866 	return err;
867 }
868 
869 u_int32_t
ifnet_idle_flags(ifnet_t ifp)870 ifnet_idle_flags(ifnet_t ifp)
871 {
872 	return (ifp == NULL) ? 0 : ifp->if_idle_flags;
873 }
874 
875 errno_t
ifnet_set_link_quality(ifnet_t ifp,int quality)876 ifnet_set_link_quality(ifnet_t ifp, int quality)
877 {
878 	errno_t err = 0;
879 
880 	if (ifp == NULL || quality < IFNET_LQM_MIN || quality > IFNET_LQM_MAX) {
881 		err = EINVAL;
882 		goto done;
883 	}
884 
885 	if (!ifnet_is_fully_attached(ifp)) {
886 		err = ENXIO;
887 		goto done;
888 	}
889 
890 	if_lqm_update(ifp, quality, 0);
891 
892 done:
893 	return err;
894 }
895 
896 int
ifnet_link_quality(ifnet_t ifp)897 ifnet_link_quality(ifnet_t ifp)
898 {
899 	int lqm;
900 
901 	if (ifp == NULL) {
902 		return IFNET_LQM_THRESH_OFF;
903 	}
904 
905 	ifnet_lock_shared(ifp);
906 	lqm = ifp->if_interface_state.lqm_state;
907 	ifnet_lock_done(ifp);
908 
909 	return lqm;
910 }
911 
912 errno_t
ifnet_set_interface_state(ifnet_t ifp,struct if_interface_state * if_interface_state)913 ifnet_set_interface_state(ifnet_t ifp,
914     struct if_interface_state *if_interface_state)
915 {
916 	errno_t err = 0;
917 
918 	if (ifp == NULL || if_interface_state == NULL) {
919 		err = EINVAL;
920 		goto done;
921 	}
922 
923 	if (!ifnet_is_fully_attached(ifp)) {
924 		err = ENXIO;
925 		goto done;
926 	}
927 
928 	if_state_update(ifp, if_interface_state);
929 
930 done:
931 	return err;
932 }
933 
934 errno_t
ifnet_get_interface_state(ifnet_t ifp,struct if_interface_state * if_interface_state)935 ifnet_get_interface_state(ifnet_t ifp,
936     struct if_interface_state *if_interface_state)
937 {
938 	errno_t err = 0;
939 
940 	if (ifp == NULL || if_interface_state == NULL) {
941 		err = EINVAL;
942 		goto done;
943 	}
944 
945 	if (!ifnet_is_fully_attached(ifp)) {
946 		err = ENXIO;
947 		goto done;
948 	}
949 
950 	if_get_state(ifp, if_interface_state);
951 
952 done:
953 	return err;
954 }
955 
956 
/*
 * Common helper for the AF_INET/AF_INET6 default-router link-layer
 * reachability queries below.  af must be AF_INET or AF_INET6 (enforced
 * by VERIFY since both callers are in this file).
 */
static errno_t
ifnet_defrouter_llreachinfo(ifnet_t ifp, sa_family_t af,
    struct ifnet_llreach_info *iflri)
{
	if (ifp == NULL || iflri == NULL) {
		return EINVAL;
	}

	VERIFY(af == AF_INET || af == AF_INET6);

	return ifnet_llreach_get_defrouter(ifp, af, iflri);
}
969 
/* IPv4 default-router link-layer reachability info (see helper above). */
errno_t
ifnet_inet_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
{
	return ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri);
}
975 
/* IPv6 default-router link-layer reachability info (see helper above). */
errno_t
ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
{
	return ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri);
}
981 
/*
 * Update the set of supported capabilities (if_capabilities).
 * Only the bits selected by mask are modified; the remaining bits
 * are preserved.  The resulting set must not contain bits outside
 * IFCAP_VALID, otherwise EINVAL is returned and nothing changes.
 */
errno_t
ifnet_set_capabilities_supported(ifnet_t ifp, u_int32_t new_caps,
    u_int32_t mask)
{
	errno_t error = 0;
	int tmp;

	if (ifp == NULL) {
		return EINVAL;
	}

	/* Read-modify-write of if_capabilities under the exclusive lock */
	ifnet_lock_exclusive(ifp);
	tmp = (new_caps & mask) | (ifp->if_capabilities & ~mask);
	if ((tmp & ~IFCAP_VALID)) {
		error = EINVAL;
	} else {
		ifp->if_capabilities = tmp;
	}
	ifnet_lock_done(ifp);

	return error;
}
1004 
1005 u_int32_t
ifnet_capabilities_supported(ifnet_t ifp)1006 ifnet_capabilities_supported(ifnet_t ifp)
1007 {
1008 	return (ifp == NULL) ? 0 : ifp->if_capabilities;
1009 }
1010 
1011 
1012 errno_t
ifnet_set_capabilities_enabled(ifnet_t ifp,u_int32_t new_caps,u_int32_t mask)1013 ifnet_set_capabilities_enabled(ifnet_t ifp, u_int32_t new_caps,
1014     u_int32_t mask)
1015 {
1016 	errno_t error = 0;
1017 	int tmp;
1018 	struct kev_msg ev_msg;
1019 	struct net_event_data ev_data;
1020 
1021 	if (ifp == NULL) {
1022 		return EINVAL;
1023 	}
1024 
1025 	ifnet_lock_exclusive(ifp);
1026 	tmp = (new_caps & mask) | (ifp->if_capenable & ~mask);
1027 	if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities)) {
1028 		error = EINVAL;
1029 	} else {
1030 		ifp->if_capenable = tmp;
1031 	}
1032 	ifnet_lock_done(ifp);
1033 
1034 	/* Notify application of the change */
1035 	bzero(&ev_data, sizeof(struct net_event_data));
1036 	bzero(&ev_msg, sizeof(struct kev_msg));
1037 	ev_msg.vendor_code      = KEV_VENDOR_APPLE;
1038 	ev_msg.kev_class        = KEV_NETWORK_CLASS;
1039 	ev_msg.kev_subclass     = KEV_DL_SUBCLASS;
1040 
1041 	ev_msg.event_code       = KEV_DL_IFCAP_CHANGED;
1042 	strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
1043 	ev_data.if_family       = ifp->if_family;
1044 	ev_data.if_unit         = (u_int32_t)ifp->if_unit;
1045 	ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1046 	ev_msg.dv[0].data_ptr = &ev_data;
1047 	ev_msg.dv[1].data_length = 0;
1048 	dlil_post_complete_msg(ifp, &ev_msg);
1049 
1050 	return error;
1051 }
1052 
1053 u_int32_t
ifnet_capabilities_enabled(ifnet_t ifp)1054 ifnet_capabilities_enabled(ifnet_t ifp)
1055 {
1056 	return (ifp == NULL) ? 0 : ifp->if_capenable;
1057 }
1058 
/*
 * Offload bits that may legitimately be stored in if_hwassist;
 * anything outside this mask is silently dropped by
 * ifnet_set_offload_common().
 */
static const ifnet_offload_t offload_mask =
    (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
    IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 |
    IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_CSUM_ZERO_INVERT |
    IFNET_VLAN_TAGGING | IFNET_VLAN_MTU | IFNET_MULTIPAGES |
    IFNET_TSO_IPV4 | IFNET_TSO_IPV6 | IFNET_TX_STATUS | IFNET_HW_TIMESTAMP |
    IFNET_SW_TIMESTAMP | IFNET_LRO | IFNET_LRO_NUM_SEG);

/* Any checksum-offload flag; used to derive IFCAP_HWCSUM */
static const ifnet_offload_t any_offload_csum = IFNET_CHECKSUMF;
1068 
1069 static errno_t
ifnet_set_offload_common(ifnet_t interface,ifnet_offload_t offload,boolean_t set_both)1070 ifnet_set_offload_common(ifnet_t interface, ifnet_offload_t offload, boolean_t set_both)
1071 {
1072 	u_int32_t ifcaps = 0;
1073 
1074 	if (interface == NULL) {
1075 		return EINVAL;
1076 	}
1077 
1078 	ifnet_lock_exclusive(interface);
1079 	interface->if_hwassist = (offload & offload_mask);
1080 
1081 #if SKYWALK
1082 	/* preserve skywalk capability */
1083 	if ((interface->if_capabilities & IFCAP_SKYWALK) != 0) {
1084 		ifcaps |= IFCAP_SKYWALK;
1085 	}
1086 #endif /* SKYWALK */
1087 	if (dlil_verbose) {
1088 		log(LOG_DEBUG, "%s: set offload flags=0x%x\n",
1089 		    if_name(interface),
1090 		    interface->if_hwassist);
1091 	}
1092 	ifnet_lock_done(interface);
1093 
1094 	if ((offload & any_offload_csum)) {
1095 		ifcaps |= IFCAP_HWCSUM;
1096 	}
1097 	if ((offload & IFNET_TSO_IPV4)) {
1098 		ifcaps |= IFCAP_TSO4;
1099 	}
1100 	if ((offload & IFNET_TSO_IPV6)) {
1101 		ifcaps |= IFCAP_TSO6;
1102 	}
1103 	if ((offload & IFNET_LRO)) {
1104 		ifcaps |= IFCAP_LRO;
1105 	}
1106 	if ((offload & IFNET_LRO_NUM_SEG)) {
1107 		ifcaps |= IFCAP_LRO_NUM_SEG;
1108 	}
1109 	if ((offload & IFNET_VLAN_MTU)) {
1110 		ifcaps |= IFCAP_VLAN_MTU;
1111 	}
1112 	if ((offload & IFNET_VLAN_TAGGING)) {
1113 		ifcaps |= IFCAP_VLAN_HWTAGGING;
1114 	}
1115 	if ((offload & IFNET_TX_STATUS)) {
1116 		ifcaps |= IFCAP_TXSTATUS;
1117 	}
1118 	if ((offload & IFNET_HW_TIMESTAMP)) {
1119 		ifcaps |= IFCAP_HW_TIMESTAMP;
1120 	}
1121 	if ((offload & IFNET_SW_TIMESTAMP)) {
1122 		ifcaps |= IFCAP_SW_TIMESTAMP;
1123 	}
1124 	if ((offload & IFNET_CSUM_PARTIAL)) {
1125 		ifcaps |= IFCAP_CSUM_PARTIAL;
1126 	}
1127 	if ((offload & IFNET_CSUM_ZERO_INVERT)) {
1128 		ifcaps |= IFCAP_CSUM_ZERO_INVERT;
1129 	}
1130 	if (ifcaps != 0) {
1131 		if (set_both) {
1132 			(void) ifnet_set_capabilities_supported(interface,
1133 			    ifcaps, IFCAP_VALID);
1134 		}
1135 		(void) ifnet_set_capabilities_enabled(interface, ifcaps,
1136 		    IFCAP_VALID);
1137 	}
1138 
1139 	return 0;
1140 }
1141 
/* Set offload flags, updating both supported and enabled capabilities */
errno_t
ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload)
{
	return ifnet_set_offload_common(interface, offload, TRUE);
}
1147 
/* Set offload flags, updating only the enabled capabilities */
errno_t
ifnet_set_offload_enabled(ifnet_t interface, ifnet_offload_t offload)
{
	return ifnet_set_offload_common(interface, offload, FALSE);
}
1153 
1154 ifnet_offload_t
ifnet_offload(ifnet_t interface)1155 ifnet_offload(ifnet_t interface)
1156 {
1157 	return (interface == NULL) ?
1158 	       0 : (interface->if_hwassist & offload_mask);
1159 }
1160 
/*
 * Set the TSO MTU for the given address family.  The value must lie
 * in [if_mtu, IP_MAXPACKET] and TSO must be enabled in if_hwassist
 * for that family; otherwise EINVAL.  Families other than
 * AF_INET/AF_INET6 yield EPROTONOSUPPORT.  On success the transmit
 * queues are notified of the new link MTU.
 */
errno_t
ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen)
{
	errno_t error = 0;

	/* TSO MTU may not be smaller than the interface MTU */
	if (interface == NULL || mtuLen < interface->if_mtu) {
		return EINVAL;
	}
	if (mtuLen > IP_MAXPACKET) {
		return EINVAL;
	}

	switch (family) {
	case AF_INET:
		if (interface->if_hwassist & IFNET_TSO_IPV4) {
			interface->if_tso_v4_mtu = mtuLen;
		} else {
			error = EINVAL;
		}
		break;

	case AF_INET6:
		if (interface->if_hwassist & IFNET_TSO_IPV6) {
			interface->if_tso_v6_mtu = mtuLen;
		} else {
			error = EINVAL;
		}
		break;

	default:
		error = EPROTONOSUPPORT;
		break;
	}

	if (error == 0) {
		struct ifclassq *ifq = interface->if_snd;
		ASSERT(ifq != NULL);
		/* Inform all transmit queues about the new TSO MTU */
		ifclassq_update(ifq, CLASSQ_EV_LINK_MTU, false);
	}

	return error;
}
1204 
1205 errno_t
ifnet_get_tso_mtu(ifnet_t interface,sa_family_t family,u_int32_t * mtuLen)1206 ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen)
1207 {
1208 	errno_t error = 0;
1209 
1210 	if (interface == NULL || mtuLen == NULL) {
1211 		return EINVAL;
1212 	}
1213 
1214 	switch (family) {
1215 	case AF_INET:
1216 		if (interface->if_hwassist & IFNET_TSO_IPV4) {
1217 			*mtuLen = interface->if_tso_v4_mtu;
1218 		} else {
1219 			error = EINVAL;
1220 		}
1221 		break;
1222 
1223 	case AF_INET6:
1224 		if (interface->if_hwassist & IFNET_TSO_IPV6) {
1225 			*mtuLen = interface->if_tso_v6_mtu;
1226 		} else {
1227 			error = EINVAL;
1228 		}
1229 		break;
1230 
1231 	default:
1232 		error = EPROTONOSUPPORT;
1233 		break;
1234 	}
1235 
1236 	return error;
1237 }
1238 
1239 errno_t
ifnet_set_wake_flags(ifnet_t interface,u_int32_t properties,u_int32_t mask)1240 ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask)
1241 {
1242 	struct kev_msg ev_msg;
1243 	struct net_event_data ev_data;
1244 
1245 	bzero(&ev_data, sizeof(struct net_event_data));
1246 	bzero(&ev_msg, sizeof(struct kev_msg));
1247 
1248 	if (interface == NULL) {
1249 		return EINVAL;
1250 	}
1251 
1252 	/* Do not accept wacky values */
1253 	if ((properties & mask) & ~IF_WAKE_VALID_FLAGS) {
1254 		return EINVAL;
1255 	}
1256 
1257 	if ((mask & IF_WAKE_ON_MAGIC_PACKET) != 0) {
1258 		if ((properties & IF_WAKE_ON_MAGIC_PACKET) != 0) {
1259 			if_set_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
1260 		} else {
1261 			if_clear_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
1262 		}
1263 	}
1264 
1265 	(void) ifnet_touch_lastchange(interface);
1266 
1267 	/* Notify application of the change */
1268 	ev_msg.vendor_code      = KEV_VENDOR_APPLE;
1269 	ev_msg.kev_class        = KEV_NETWORK_CLASS;
1270 	ev_msg.kev_subclass     = KEV_DL_SUBCLASS;
1271 
1272 	ev_msg.event_code       = KEV_DL_WAKEFLAGS_CHANGED;
1273 	strlcpy(&ev_data.if_name[0], interface->if_name, IFNAMSIZ);
1274 	ev_data.if_family       = interface->if_family;
1275 	ev_data.if_unit         = (u_int32_t)interface->if_unit;
1276 	ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1277 	ev_msg.dv[0].data_ptr   = &ev_data;
1278 	ev_msg.dv[1].data_length = 0;
1279 	dlil_post_complete_msg(interface, &ev_msg);
1280 
1281 	return 0;
1282 }
1283 
1284 u_int32_t
ifnet_get_wake_flags(ifnet_t interface)1285 ifnet_get_wake_flags(ifnet_t interface)
1286 {
1287 	u_int32_t flags = 0;
1288 
1289 	if (interface == NULL) {
1290 		return 0;
1291 	}
1292 
1293 	if ((interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET) != 0) {
1294 		flags |= IF_WAKE_ON_MAGIC_PACKET;
1295 	}
1296 
1297 	return flags;
1298 }
1299 
/*
 * Should MIB data store a copy?
 *
 * NOTE(review): as written this stores the caller's pointer directly,
 * so the caller retains ownership and must keep the buffer valid for
 * the lifetime of the interface (or until it is replaced).
 */
errno_t
ifnet_set_link_mib_data(ifnet_t interface, void *__sized_by(mibLen) mibData, uint32_t mibLen)
{
	if (interface == NULL) {
		return EINVAL;
	}

	ifnet_lock_exclusive(interface);
	interface->if_linkmib = (void*)mibData;
	interface->if_linkmiblen = mibLen;
	ifnet_lock_done(interface);
	return 0;
}
1316 
1317 errno_t
ifnet_get_link_mib_data(ifnet_t interface,void * __sized_by (* mibLen)mibData,uint32_t * mibLen)1318 ifnet_get_link_mib_data(ifnet_t interface, void *__sized_by(*mibLen) mibData, uint32_t *mibLen)
1319 {
1320 	errno_t result = 0;
1321 
1322 	if (interface == NULL) {
1323 		return EINVAL;
1324 	}
1325 
1326 	ifnet_lock_shared(interface);
1327 	if (*mibLen < interface->if_linkmiblen) {
1328 		result = EMSGSIZE;
1329 	}
1330 	if (result == 0 && interface->if_linkmib == NULL) {
1331 		result = ENOTSUP;
1332 	}
1333 
1334 	if (result == 0) {
1335 		*mibLen = interface->if_linkmiblen;
1336 		bcopy(interface->if_linkmib, mibData, *mibLen);
1337 	}
1338 	ifnet_lock_done(interface);
1339 
1340 	return result;
1341 }
1342 
1343 uint32_t
ifnet_get_link_mib_data_length(ifnet_t interface)1344 ifnet_get_link_mib_data_length(ifnet_t interface)
1345 {
1346 	return (interface == NULL) ? 0 : interface->if_linkmiblen;
1347 }
1348 
1349 errno_t
ifnet_output(ifnet_t interface,protocol_family_t protocol_family,mbuf_t m,void * route,const struct sockaddr * dest)1350 ifnet_output(ifnet_t interface, protocol_family_t protocol_family,
1351     mbuf_t m, void *route, const struct sockaddr *dest)
1352 {
1353 	if (interface == NULL || protocol_family == 0 || m == NULL) {
1354 		if (m != NULL) {
1355 			mbuf_freem_list(m);
1356 		}
1357 		return EINVAL;
1358 	}
1359 	return dlil_output(interface, protocol_family, m, route, dest,
1360 	           DLIL_OUTPUT_FLAGS_NONE, NULL);
1361 }
1362 
1363 errno_t
ifnet_output_raw(ifnet_t interface,protocol_family_t protocol_family,mbuf_t m)1364 ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m)
1365 {
1366 	if (interface == NULL || m == NULL) {
1367 		if (m != NULL) {
1368 			mbuf_freem_list(m);
1369 		}
1370 		return EINVAL;
1371 	}
1372 	return dlil_output(interface, protocol_family, m, NULL, NULL,
1373 	           DLIL_OUTPUT_FLAGS_RAW, NULL);
1374 }
1375 
1376 errno_t
ifnet_set_mtu(ifnet_t interface,u_int32_t mtu)1377 ifnet_set_mtu(ifnet_t interface, u_int32_t mtu)
1378 {
1379 	if (interface == NULL) {
1380 		return EINVAL;
1381 	}
1382 
1383 	interface->if_mtu = mtu;
1384 	return 0;
1385 }
1386 
1387 u_int32_t
ifnet_mtu(ifnet_t interface)1388 ifnet_mtu(ifnet_t interface)
1389 {
1390 	return (interface == NULL) ? 0 : interface->if_mtu;
1391 }
1392 
1393 u_char
ifnet_type(ifnet_t interface)1394 ifnet_type(ifnet_t interface)
1395 {
1396 	return (interface == NULL) ? 0 : interface->if_data.ifi_type;
1397 }
1398 
1399 errno_t
ifnet_set_addrlen(ifnet_t interface,u_char addrlen)1400 ifnet_set_addrlen(ifnet_t interface, u_char addrlen)
1401 {
1402 	if (interface == NULL) {
1403 		return EINVAL;
1404 	}
1405 
1406 	interface->if_data.ifi_addrlen = addrlen;
1407 	return 0;
1408 }
1409 
1410 u_char
ifnet_addrlen(ifnet_t interface)1411 ifnet_addrlen(ifnet_t interface)
1412 {
1413 	return (interface == NULL) ? 0 : interface->if_data.ifi_addrlen;
1414 }
1415 
1416 errno_t
ifnet_set_hdrlen(ifnet_t interface,u_char hdrlen)1417 ifnet_set_hdrlen(ifnet_t interface, u_char hdrlen)
1418 {
1419 	if (interface == NULL) {
1420 		return EINVAL;
1421 	}
1422 
1423 	interface->if_data.ifi_hdrlen = hdrlen;
1424 	return 0;
1425 }
1426 
1427 u_char
ifnet_hdrlen(ifnet_t interface)1428 ifnet_hdrlen(ifnet_t interface)
1429 {
1430 	return (interface == NULL) ? 0 : interface->if_data.ifi_hdrlen;
1431 }
1432 
1433 errno_t
ifnet_set_metric(ifnet_t interface,u_int32_t metric)1434 ifnet_set_metric(ifnet_t interface, u_int32_t metric)
1435 {
1436 	if (interface == NULL) {
1437 		return EINVAL;
1438 	}
1439 
1440 	interface->if_data.ifi_metric = metric;
1441 	return 0;
1442 }
1443 
1444 u_int32_t
ifnet_metric(ifnet_t interface)1445 ifnet_metric(ifnet_t interface)
1446 {
1447 	return (interface == NULL) ? 0 : interface->if_data.ifi_metric;
1448 }
1449 
/*
 * Set a single baudrate for the interface: both directions' max and
 * effective bandwidths are set to the same value, and the legacy
 * if_baudrate field is updated (saturated to 32 bits).
 */
errno_t
ifnet_set_baudrate(struct ifnet *ifp, uint64_t baudrate)
{
	if (ifp == NULL) {
		return EINVAL;
	}

	ifp->if_output_bw.max_bw = ifp->if_input_bw.max_bw =
	    ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate;

	/* Pin if_baudrate to 32 bits until we can change the storage size */
	ifp->if_baudrate = (baudrate > UINT32_MAX) ? UINT32_MAX : (uint32_t)baudrate;

	return 0;
}
1465 
1466 u_int64_t
ifnet_baudrate(struct ifnet * ifp)1467 ifnet_baudrate(struct ifnet *ifp)
1468 {
1469 	return (ifp == NULL) ? 0 : ifp->if_baudrate;
1470 }
1471 
/*
 * Set either or both directions' bandwidth estimates.  Input is
 * applied first because ifnet_set_output_bandwidths() derives the
 * legacy if_baudrate from the larger of the two directions' max_bw.
 */
errno_t
ifnet_set_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
    struct if_bandwidths *input_bw)
{
	if (ifp == NULL) {
		return EINVAL;
	}

	/* set input values first (if any), as output values depend on them */
	if (input_bw != NULL) {
		(void) ifnet_set_input_bandwidths(ifp, input_bw);
	}

	if (output_bw != NULL) {
		(void) ifnet_set_output_bandwidths(ifp, output_bw);
	}

	return 0;
}
1491 
/*
 * Mirror the uplink bandwidth values into the wifi link status
 * report.  Caller holds if_link_status_lock exclusively and has
 * verified that if_link_status is non-NULL.
 */
static void
ifnet_set_link_status_outbw(struct ifnet *ifp)
{
	struct if_wifi_status_v1 *sr;
	sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
	if (ifp->if_output_bw.eff_bw != 0) {
		sr->valid_bitmask |=
		    IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
		/* Saturate to 32 bits; the report field is uint32_t */
		sr->ul_effective_bandwidth =
		    ifp->if_output_bw.eff_bw > UINT32_MAX ?
		    UINT32_MAX :
		    (uint32_t)ifp->if_output_bw.eff_bw;
	}
	if (ifp->if_output_bw.max_bw != 0) {
		sr->valid_bitmask |=
		    IF_WIFI_UL_MAX_BANDWIDTH_VALID;
		sr->ul_max_bandwidth =
		    ifp->if_output_bw.max_bw > UINT32_MAX ?
		    UINT32_MAX :
		    (uint32_t)ifp->if_output_bw.max_bw;
	}
}
1514 
/*
 * Update the transmit-side bandwidth estimates.  Zero fields in *bw
 * mean "leave unchanged".  Invariant maintained: eff_bw <= max_bw
 * (max is raised to eff if exceeded; eff defaults to max when unset).
 * Also refreshes the legacy if_baudrate and, for Wifi interfaces,
 * the link status report.
 */
errno_t
ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw)
{
	struct if_bandwidths old_bw;
	struct ifclassq *ifq;
	u_int64_t br;

	VERIFY(ifp != NULL && bw != NULL);

	ifq = ifp->if_snd;
	IFCQ_LOCK(ifq);

	old_bw = ifp->if_output_bw;
	if (bw->eff_bw != 0) {
		ifp->if_output_bw.eff_bw = bw->eff_bw;
	}
	if (bw->max_bw != 0) {
		ifp->if_output_bw.max_bw = bw->max_bw;
	}
	if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
		ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
	} else if (ifp->if_output_bw.eff_bw == 0) {
		ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
	}

	/* Pin if_baudrate to 32 bits */
	br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
	if (br != 0) {
		ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
	}

	/* Adjust queue parameters if needed */
	if (old_bw.eff_bw != ifp->if_output_bw.eff_bw ||
	    old_bw.max_bw != ifp->if_output_bw.max_bw) {
		ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH, true);
	}
	IFCQ_UNLOCK(ifq);

	/*
	 * If this is a Wifi interface, update the values in
	 * if_link_status structure also.
	 */
	if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
		lck_rw_lock_exclusive(&ifp->if_link_status_lock);
		ifnet_set_link_status_outbw(ifp);
		lck_rw_done(&ifp->if_link_status_lock);
	}

	return 0;
}
1565 
/*
 * Mirror the downlink bandwidth values into the wifi link status
 * report.  Caller holds if_link_status_lock exclusively and has
 * verified that if_link_status is non-NULL.
 */
static void
ifnet_set_link_status_inbw(struct ifnet *ifp)
{
	struct if_wifi_status_v1 *sr;

	sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
	if (ifp->if_input_bw.eff_bw != 0) {
		sr->valid_bitmask |=
		    IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
		/* Saturate to 32 bits; the report field is uint32_t */
		sr->dl_effective_bandwidth =
		    ifp->if_input_bw.eff_bw > UINT32_MAX ?
		    UINT32_MAX :
		    (uint32_t)ifp->if_input_bw.eff_bw;
	}
	if (ifp->if_input_bw.max_bw != 0) {
		sr->valid_bitmask |=
		    IF_WIFI_DL_MAX_BANDWIDTH_VALID;
		sr->dl_max_bandwidth = ifp->if_input_bw.max_bw > UINT32_MAX ?
		    UINT32_MAX :
		    (uint32_t)ifp->if_input_bw.max_bw;
	}
}
1588 
/*
 * Update the receive-side bandwidth estimates.  Zero fields in *bw
 * mean "leave unchanged".  Maintains the same eff_bw <= max_bw
 * invariant as the output path, updates the Wifi link status report
 * if applicable, and notifies the receive path on any change.
 */
errno_t
ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw)
{
	struct if_bandwidths old_bw;

	VERIFY(ifp != NULL && bw != NULL);

	old_bw = ifp->if_input_bw;
	if (bw->eff_bw != 0) {
		ifp->if_input_bw.eff_bw = bw->eff_bw;
	}
	if (bw->max_bw != 0) {
		ifp->if_input_bw.max_bw = bw->max_bw;
	}
	if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
		ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
	} else if (ifp->if_input_bw.eff_bw == 0) {
		ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
	}

	if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
		lck_rw_lock_exclusive(&ifp->if_link_status_lock);
		ifnet_set_link_status_inbw(ifp);
		lck_rw_done(&ifp->if_link_status_lock);
	}

	if (old_bw.eff_bw != ifp->if_input_bw.eff_bw ||
	    old_bw.max_bw != ifp->if_input_bw.max_bw) {
		ifnet_update_rcv(ifp, CLASSQ_EV_LINK_BANDWIDTH);
	}

	return 0;
}
1622 
1623 u_int64_t
ifnet_output_linkrate(struct ifnet * ifp)1624 ifnet_output_linkrate(struct ifnet *ifp)
1625 {
1626 	struct ifclassq *ifq = ifp->if_snd;
1627 	u_int64_t rate;
1628 
1629 	IFCQ_LOCK_ASSERT_HELD(ifq);
1630 
1631 	rate = ifp->if_output_bw.eff_bw;
1632 	if (IFCQ_TBR_IS_ENABLED(ifq)) {
1633 		u_int64_t tbr_rate = ifq->ifcq_tbr.tbr_rate_raw;
1634 		VERIFY(tbr_rate > 0);
1635 		rate = MIN(rate, ifq->ifcq_tbr.tbr_rate_raw);
1636 	}
1637 
1638 	return rate;
1639 }
1640 
/* Effective receive link rate of the interface */
u_int64_t
ifnet_input_linkrate(struct ifnet *ifp)
{
	return ifp->if_input_bw.eff_bw;
}
1646 
1647 errno_t
ifnet_bandwidths(struct ifnet * ifp,struct if_bandwidths * output_bw,struct if_bandwidths * input_bw)1648 ifnet_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1649     struct if_bandwidths *input_bw)
1650 {
1651 	if (ifp == NULL) {
1652 		return EINVAL;
1653 	}
1654 
1655 	if (output_bw != NULL) {
1656 		*output_bw = ifp->if_output_bw;
1657 	}
1658 	if (input_bw != NULL) {
1659 		*input_bw = ifp->if_input_bw;
1660 	}
1661 
1662 	return 0;
1663 }
1664 
/*
 * Set either or both directions' latency estimates; a NULL pointer
 * skips that direction.
 */
errno_t
ifnet_set_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
    struct if_latencies *input_lt)
{
	if (ifp == NULL) {
		return EINVAL;
	}

	if (output_lt != NULL) {
		(void) ifnet_set_output_latencies(ifp, output_lt);
	}

	if (input_lt != NULL) {
		(void) ifnet_set_input_latencies(ifp, input_lt);
	}

	return 0;
}
1683 
/*
 * Update the transmit-side latency estimates.  Zero fields in *lt
 * mean "leave unchanged".  Invariant maintained: eff_lt <= max_lt
 * (max raised to eff if exceeded; eff defaults to max when unset).
 * Transmit queues are notified when the values actually change.
 */
errno_t
ifnet_set_output_latencies(struct ifnet *ifp, struct if_latencies *lt)
{
	struct if_latencies old_lt;
	struct ifclassq *ifq;

	VERIFY(ifp != NULL && lt != NULL);

	ifq = ifp->if_snd;
	IFCQ_LOCK(ifq);

	old_lt = ifp->if_output_lt;
	if (lt->eff_lt != 0) {
		ifp->if_output_lt.eff_lt = lt->eff_lt;
	}
	if (lt->max_lt != 0) {
		ifp->if_output_lt.max_lt = lt->max_lt;
	}
	if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
		ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
	} else if (ifp->if_output_lt.eff_lt == 0) {
		ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
	}

	/* Adjust queue parameters if needed */
	if (old_lt.eff_lt != ifp->if_output_lt.eff_lt ||
	    old_lt.max_lt != ifp->if_output_lt.max_lt) {
		ifclassq_update(ifq, CLASSQ_EV_LINK_LATENCY, true);
	}
	IFCQ_UNLOCK(ifq);

	return 0;
}
1717 
/*
 * Update the receive-side latency estimates.  Same semantics and
 * eff/max invariant as the output path; the receive path is
 * notified when the values actually change.
 */
errno_t
ifnet_set_input_latencies(struct ifnet *ifp, struct if_latencies *lt)
{
	struct if_latencies old_lt;

	VERIFY(ifp != NULL && lt != NULL);

	old_lt = ifp->if_input_lt;
	if (lt->eff_lt != 0) {
		ifp->if_input_lt.eff_lt = lt->eff_lt;
	}
	if (lt->max_lt != 0) {
		ifp->if_input_lt.max_lt = lt->max_lt;
	}
	if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
		ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
	} else if (ifp->if_input_lt.eff_lt == 0) {
		ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
	}

	if (old_lt.eff_lt != ifp->if_input_lt.eff_lt ||
	    old_lt.max_lt != ifp->if_input_lt.max_lt) {
		ifnet_update_rcv(ifp, CLASSQ_EV_LINK_LATENCY);
	}

	return 0;
}
1745 
1746 errno_t
ifnet_latencies(struct ifnet * ifp,struct if_latencies * output_lt,struct if_latencies * input_lt)1747 ifnet_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1748     struct if_latencies *input_lt)
1749 {
1750 	if (ifp == NULL) {
1751 		return EINVAL;
1752 	}
1753 
1754 	if (output_lt != NULL) {
1755 		*output_lt = ifp->if_output_lt;
1756 	}
1757 	if (input_lt != NULL) {
1758 		*input_lt = ifp->if_input_lt;
1759 	}
1760 
1761 	return 0;
1762 }
1763 
/*
 * Configure input-poll parameters for the interface.  An I/O
 * reference is held across the call and released on every path.
 * Skywalk-capable interfaces are handled by the netif variant.
 */
errno_t
ifnet_set_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
{
	errno_t err;

	if (ifp == NULL) {
		return EINVAL;
	} else if (!ifnet_get_ioref(ifp)) {
		/* could not obtain an I/O ref (interface likely going away) */
		return ENXIO;
	}

#if SKYWALK
	if (SKYWALK_CAPABLE(ifp)) {
		err = netif_rxpoll_set_params(ifp, p, FALSE);
		ifnet_decr_iorefcnt(ifp);
		return err;
	}
#endif /* SKYWALK */
	err = dlil_rxpoll_set_params(ifp, p, FALSE);

	/* Release the io ref count */
	ifnet_decr_iorefcnt(ifp);

	return err;
}
1789 
/*
 * Read back the input-poll parameters.  An I/O reference is held
 * across the dlil call and released before returning.
 */
errno_t
ifnet_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
{
	errno_t err;

	if (ifp == NULL || p == NULL) {
		return EINVAL;
	} else if (!ifnet_get_ioref(ifp)) {
		/* could not obtain an I/O ref (interface likely going away) */
		return ENXIO;
	}

	err = dlil_rxpoll_get_params(ifp, p);

	/* Release the io ref count */
	ifnet_decr_iorefcnt(ifp);

	return err;
}
1808 
1809 errno_t
ifnet_stat_increment(struct ifnet * ifp,const struct ifnet_stat_increment_param * s)1810 ifnet_stat_increment(struct ifnet *ifp,
1811     const struct ifnet_stat_increment_param *s)
1812 {
1813 	if (ifp == NULL) {
1814 		return EINVAL;
1815 	}
1816 
1817 	if (s->packets_in != 0) {
1818 		os_atomic_add(&ifp->if_data.ifi_ipackets, s->packets_in, relaxed);
1819 	}
1820 	if (s->bytes_in != 0) {
1821 		os_atomic_add(&ifp->if_data.ifi_ibytes, s->bytes_in, relaxed);
1822 	}
1823 	if (s->errors_in != 0) {
1824 		os_atomic_add(&ifp->if_data.ifi_ierrors, s->errors_in, relaxed);
1825 	}
1826 
1827 	if (s->packets_out != 0) {
1828 		os_atomic_add(&ifp->if_data.ifi_opackets, s->packets_out, relaxed);
1829 	}
1830 	if (s->bytes_out != 0) {
1831 		os_atomic_add(&ifp->if_data.ifi_obytes, s->bytes_out, relaxed);
1832 	}
1833 	if (s->errors_out != 0) {
1834 		os_atomic_add(&ifp->if_data.ifi_oerrors, s->errors_out, relaxed);
1835 	}
1836 
1837 	if (s->collisions != 0) {
1838 		os_atomic_add(&ifp->if_data.ifi_collisions, s->collisions, relaxed);
1839 	}
1840 	if (s->dropped != 0) {
1841 		os_atomic_add(&ifp->if_data.ifi_iqdrops, s->dropped, relaxed);
1842 	}
1843 
1844 	/* Touch the last change time. */
1845 	TOUCHLASTCHANGE(&ifp->if_lastchange);
1846 
1847 	if (ifp->if_data_threshold != 0) {
1848 		ifnet_notify_data_threshold(ifp);
1849 	}
1850 
1851 	return 0;
1852 }
1853 
/*
 * Atomically add receive-side deltas to the interface statistics and
 * touch the last-change timestamp.  Zero deltas are skipped to avoid
 * needless atomic traffic.
 */
errno_t
ifnet_stat_increment_in(struct ifnet *ifp, u_int32_t packets_in,
    u_int32_t bytes_in, u_int32_t errors_in)
{
	if (ifp == NULL) {
		return EINVAL;
	}

	if (packets_in != 0) {
		os_atomic_add(&ifp->if_data.ifi_ipackets, packets_in, relaxed);
	}
	if (bytes_in != 0) {
		os_atomic_add(&ifp->if_data.ifi_ibytes, bytes_in, relaxed);
	}
	if (errors_in != 0) {
		os_atomic_add(&ifp->if_data.ifi_ierrors, errors_in, relaxed);
	}

	TOUCHLASTCHANGE(&ifp->if_lastchange);

	if (ifp->if_data_threshold != 0) {
		ifnet_notify_data_threshold(ifp);
	}

	return 0;
}
1880 
/*
 * Atomically add transmit-side deltas to the interface statistics and
 * touch the last-change timestamp.  Zero deltas are skipped to avoid
 * needless atomic traffic.
 */
errno_t
ifnet_stat_increment_out(struct ifnet *ifp, u_int32_t packets_out,
    u_int32_t bytes_out, u_int32_t errors_out)
{
	if (ifp == NULL) {
		return EINVAL;
	}

	if (packets_out != 0) {
		os_atomic_add(&ifp->if_data.ifi_opackets, packets_out, relaxed);
	}
	if (bytes_out != 0) {
		os_atomic_add(&ifp->if_data.ifi_obytes, bytes_out, relaxed);
	}
	if (errors_out != 0) {
		os_atomic_add(&ifp->if_data.ifi_oerrors, errors_out, relaxed);
	}

	TOUCHLASTCHANGE(&ifp->if_lastchange);

	if (ifp->if_data_threshold != 0) {
		ifnet_notify_data_threshold(ifp);
	}

	return 0;
}
1907 
1908 errno_t
ifnet_set_stat(struct ifnet * ifp,const struct ifnet_stats_param * s)1909 ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s)
1910 {
1911 	if (ifp == NULL) {
1912 		return EINVAL;
1913 	}
1914 
1915 	os_atomic_store(&ifp->if_data.ifi_ipackets, s->packets_in, release);
1916 	os_atomic_store(&ifp->if_data.ifi_ibytes, s->bytes_in, release);
1917 	os_atomic_store(&ifp->if_data.ifi_imcasts, s->multicasts_in, release);
1918 	os_atomic_store(&ifp->if_data.ifi_ierrors, s->errors_in, release);
1919 
1920 	os_atomic_store(&ifp->if_data.ifi_opackets, s->packets_out, release);
1921 	os_atomic_store(&ifp->if_data.ifi_obytes, s->bytes_out, release);
1922 	os_atomic_store(&ifp->if_data.ifi_omcasts, s->multicasts_out, release);
1923 	os_atomic_store(&ifp->if_data.ifi_oerrors, s->errors_out, release);
1924 
1925 	os_atomic_store(&ifp->if_data.ifi_collisions, s->collisions, release);
1926 	os_atomic_store(&ifp->if_data.ifi_iqdrops, s->dropped, release);
1927 	os_atomic_store(&ifp->if_data.ifi_noproto, s->no_protocol, release);
1928 
1929 	/* Touch the last change time. */
1930 	TOUCHLASTCHANGE(&ifp->if_lastchange);
1931 
1932 	if (ifp->if_data_threshold != 0) {
1933 		ifnet_notify_data_threshold(ifp);
1934 	}
1935 
1936 	return 0;
1937 }
1938 
1939 errno_t
ifnet_stat(struct ifnet * ifp,struct ifnet_stats_param * s)1940 ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s)
1941 {
1942 	if (ifp == NULL) {
1943 		return EINVAL;
1944 	}
1945 
1946 	s->packets_in = os_atomic_load(&ifp->if_data.ifi_ipackets, relaxed);
1947 	s->bytes_in = os_atomic_load(&ifp->if_data.ifi_ibytes, relaxed);
1948 	s->multicasts_in = os_atomic_load(&ifp->if_data.ifi_imcasts, relaxed);
1949 	s->errors_in = os_atomic_load(&ifp->if_data.ifi_ierrors, relaxed);
1950 
1951 	s->packets_out = os_atomic_load(&ifp->if_data.ifi_opackets, relaxed);
1952 	s->bytes_out = os_atomic_load(&ifp->if_data.ifi_obytes, relaxed);
1953 	s->multicasts_out = os_atomic_load(&ifp->if_data.ifi_omcasts, relaxed);
1954 	s->errors_out = os_atomic_load(&ifp->if_data.ifi_oerrors, relaxed);
1955 
1956 	s->collisions = os_atomic_load(&ifp->if_data.ifi_collisions, relaxed);
1957 	s->dropped = os_atomic_load(&ifp->if_data.ifi_iqdrops, relaxed);
1958 	s->no_protocol = os_atomic_load(&ifp->if_data.ifi_noproto, relaxed);
1959 
1960 	if (ifp->if_data_threshold != 0) {
1961 		ifnet_notify_data_threshold(ifp);
1962 	}
1963 
1964 	return 0;
1965 }
1966 
1967 errno_t
ifnet_touch_lastchange(ifnet_t interface)1968 ifnet_touch_lastchange(ifnet_t interface)
1969 {
1970 	if (interface == NULL) {
1971 		return EINVAL;
1972 	}
1973 
1974 	TOUCHLASTCHANGE(&interface->if_lastchange);
1975 
1976 	return 0;
1977 }
1978 
1979 errno_t
ifnet_lastchange(ifnet_t interface,struct timeval * last_change)1980 ifnet_lastchange(ifnet_t interface, struct timeval *last_change)
1981 {
1982 	if (interface == NULL) {
1983 		return EINVAL;
1984 	}
1985 
1986 	*last_change = interface->if_data.ifi_lastchange;
1987 	/* Crude conversion from uptime to calendar time */
1988 	last_change->tv_sec += boottime_sec();
1989 
1990 	return 0;
1991 }
1992 
1993 errno_t
ifnet_touch_lastupdown(ifnet_t interface)1994 ifnet_touch_lastupdown(ifnet_t interface)
1995 {
1996 	if (interface == NULL) {
1997 		return EINVAL;
1998 	}
1999 
2000 	TOUCHLASTCHANGE(&interface->if_lastupdown);
2001 
2002 	return 0;
2003 }
2004 
2005 errno_t
ifnet_updown_delta(ifnet_t interface,struct timeval * updown_delta)2006 ifnet_updown_delta(ifnet_t interface, struct timeval *updown_delta)
2007 {
2008 	if (interface == NULL) {
2009 		return EINVAL;
2010 	}
2011 
2012 	/* Calculate the delta */
2013 	updown_delta->tv_sec = (time_t)net_uptime();
2014 	if (updown_delta->tv_sec > interface->if_data.ifi_lastupdown.tv_sec) {
2015 		updown_delta->tv_sec -= interface->if_data.ifi_lastupdown.tv_sec;
2016 	} else {
2017 		updown_delta->tv_sec = 0;
2018 	}
2019 	updown_delta->tv_usec = 0;
2020 
2021 	return 0;
2022 }
2023 
2024 errno_t
ifnet_get_address_list(ifnet_t interface,ifaddr_t * __null_terminated * addresses)2025 ifnet_get_address_list(ifnet_t interface, ifaddr_t *__null_terminated *addresses)
2026 {
2027 	return addresses == NULL ? EINVAL :
2028 	       ifnet_get_address_list_family(interface, addresses, 0);
2029 }
2030 
2031 errno_t
ifnet_get_address_list_with_count(ifnet_t interface,ifaddr_t * __counted_by (* addresses_count)* addresses,uint16_t * addresses_count)2032 ifnet_get_address_list_with_count(ifnet_t interface,
2033     ifaddr_t *__counted_by(*addresses_count) * addresses,
2034     uint16_t *addresses_count)
2035 {
2036 	return ifnet_get_address_list_family_internal(interface, addresses,
2037 	           addresses_count, 0, 0, Z_WAITOK, 0);
2038 }
2039 
/*
 * Temporary singly-linked list node used by
 * ifnet_get_address_list_family_internal() to collect referenced
 * interface addresses while the relevant locks are held, before the
 * output array can be sized and allocated.
 */
struct ifnet_addr_list {
	SLIST_ENTRY(ifnet_addr_list)    ifal_le;        /* list linkage */
	struct ifaddr                   *ifal_ifa;      /* address (holds a ref) */
};
2044 
/*
 * Return a NULL-terminated array of referenced addresses on `interface'
 * (or on all interfaces when `interface' is NULL) matching `family';
 * family 0 matches every family.  Caller frees the array with
 * ifnet_free_address_list().
 */
errno_t
ifnet_get_address_list_family(ifnet_t interface, ifaddr_t *__null_terminated *ret_addresses,
    sa_family_t family)
{
	uint16_t addresses_count = 0;
	ifaddr_t *__counted_by(addresses_count) addresses = NULL;
	errno_t error;

	error = ifnet_get_address_list_family_internal(interface, &addresses,
	    &addresses_count, family, 0, Z_WAITOK, 0);
	if (addresses_count > 0) {
		/*
		 * The internal routine allocates count + 1 zeroed slots and
		 * fills only the first count of them, so the last element is
		 * the NULL terminator.
		 */
		*ret_addresses = __unsafe_null_terminated_from_indexable(addresses,
		    &addresses[addresses_count - 1]);
	} else {
		*ret_addresses = NULL;
	}

	return error;
}
2064 
2065 errno_t
ifnet_get_address_list_family_with_count(ifnet_t interface,ifaddr_t * __counted_by (* addresses_count)* addresses,uint16_t * addresses_count,sa_family_t family)2066 ifnet_get_address_list_family_with_count(ifnet_t interface,
2067     ifaddr_t *__counted_by(*addresses_count) *addresses,
2068     uint16_t *addresses_count, sa_family_t family)
2069 {
2070 	return ifnet_get_address_list_family_internal(interface, addresses,
2071 	           addresses_count, family, 0, Z_WAITOK, 0);
2072 }
2073 
/*
 * Like ifnet_get_address_list(), but (via return_inuse_addrs == 1)
 * only addresses with at least one TCP or UDP PCB bound to them are
 * returned.  Caller frees the array with ifnet_free_address_list().
 */
errno_t
ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t *__null_terminated *ret_addresses)
{
	uint16_t addresses_count = 0;
	ifaddr_t *__counted_by(addresses_count) addresses = NULL;
	errno_t error;

	error = ifnet_get_address_list_family_internal(interface, &addresses,
	    &addresses_count, 0, 0, Z_WAITOK, 1);
	if (addresses_count > 0) {
		/* Last of the count + 1 zeroed slots is the NULL terminator. */
		*ret_addresses = __unsafe_null_terminated_from_indexable(addresses,
		    &addresses[addresses_count - 1]);
	} else {
		*ret_addresses = NULL;
	}

	return error;
}
2092 
2093 extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa);
2094 extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *ifa);
2095 
/*
 * Workhorse behind all of the address-list getters.
 *
 * Collects referenced addresses matching `family' (0 == any) from
 * `interface', or from every attached interface when `interface' is
 * NULL.  `detached' skips the ifnet_head lookup for an interface that
 * is no longer on the list.  `how' is the zalloc flag (Z_WAITOK /
 * Z_NOWAIT).  With `return_inuse_addrs', only addresses that have a
 * TCP or UDP PCB bound to them are returned.
 *
 * On success *addresses is a kalloc_type'd array of *addresses_count
 * (== matches + 1) entries whose unused tail slots are NULL; each
 * non-NULL entry holds an ifaddr reference.  On failure *addresses is
 * NULL and all transient references have been dropped.
 */
__private_extern__ errno_t
ifnet_get_address_list_family_internal(ifnet_t interface,
    ifaddr_t *__counted_by(*addresses_count) *addresses,
    uint16_t *addresses_count, sa_family_t family, int detached, int how,
    int return_inuse_addrs)
{
	SLIST_HEAD(, ifnet_addr_list) ifal_head;
	struct ifnet_addr_list *ifal, *ifal_tmp;
	struct ifnet *ifp;
	uint16_t count = 0;
	errno_t err = 0;
	int usecount = 0;
	int index = 0;

	SLIST_INIT(&ifal_head);

	if (addresses == NULL || addresses_count == NULL) {
		err = EINVAL;
		goto done;
	}
	*addresses = NULL;
	*addresses_count = 0;

	if (detached) {
		/*
		 * Interface has been detached, so skip the lookup
		 * at ifnet_head and go directly to inner loop.
		 */
		ifp = interface;
		if (ifp == NULL) {
			err = EINVAL;
			goto done;
		}
		goto one;
	}

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (interface != NULL && ifp != interface) {
			continue;
		}
one:
		/* Pass 1: stash matching addresses on a temporary list. */
		ifnet_lock_shared(ifp);
		if (interface == NULL || interface == ifp) {
			struct ifaddr *ifa;
			TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
				IFA_LOCK(ifa);
				if (family != 0 &&
				    ifa->ifa_addr->sa_family != family) {
					IFA_UNLOCK(ifa);
					continue;
				}
				ifal = kalloc_type(struct ifnet_addr_list, how);
				if (ifal == NULL) {
					/* Unwind locks; `done' drops the refs taken so far. */
					IFA_UNLOCK(ifa);
					ifnet_lock_done(ifp);
					if (!detached) {
						ifnet_head_done();
					}
					err = ENOMEM;
					goto done;
				}
				ifal->ifal_ifa = ifa;
				ifa_addref(ifa);
				SLIST_INSERT_HEAD(&ifal_head, ifal, ifal_le);
				IFA_UNLOCK(ifa);
				/* Refuse lists that would overflow the uint16_t count. */
				if (__improbable(os_inc_overflow(&count))) {
					ifnet_lock_done(ifp);
					if (!detached) {
						ifnet_head_done();
					}
					err = EINVAL;
					goto done;
				}
			}
		}
		ifnet_lock_done(ifp);
		if (detached) {
			break;
		}
	}
	if (!detached) {
		ifnet_head_done();
	}

	if (count == 0) {
		err = ENXIO;
		goto done;
	}

	/* One extra zeroed slot serves as the NULL terminator. */
	uint16_t allocation_size = 0;
	if (__improbable(os_add_overflow(count, 1, &allocation_size))) {
		err = EINVAL;
		goto done;
	}
	ifaddr_t *allocation = kalloc_type(ifaddr_t, allocation_size, how | Z_ZERO);
	if (allocation == NULL) {
		err = ENOMEM;
		goto done;
	}
	*addresses = allocation;
	*addresses_count = allocation_size;

done:
	/*
	 * Pass 2: drain the temporary list.  On success, move each ref
	 * into the output array (filtering by PCB usage if requested);
	 * on error, drop every ref.
	 */
	SLIST_FOREACH_SAFE(ifal, &ifal_head, ifal_le, ifal_tmp) {
		SLIST_REMOVE(&ifal_head, ifal, ifnet_addr_list, ifal_le);
		if (err == 0) {
			if (return_inuse_addrs) {
				usecount = tcp_find_anypcb_byaddr(ifal->ifal_ifa);
				usecount += udp_find_anypcb_byaddr(ifal->ifal_ifa);
				if (usecount) {
					(*addresses)[index] = ifal->ifal_ifa;
					index++;
				} else {
					ifa_remref(ifal->ifal_ifa);
				}
			} else {
				/* SLIST reversed insertion order; fill from the back. */
				(*addresses)[--count] = ifal->ifal_ifa;
			}
		} else {
			ifa_remref(ifal->ifal_ifa);
		}
		kfree_type(struct ifnet_addr_list, ifal);
	}

	VERIFY(err == 0 || *addresses == NULL);
	/* In-use filtering may have produced an empty array; treat as ENXIO. */
	if ((err == 0) && (count) && ((*addresses)[0] == NULL)) {
		VERIFY(return_inuse_addrs == 1);
		kfree_type_counted_by(ifaddr_t, *addresses_count, *addresses);
		err = ENXIO;
	}
	return err;
}
2229 
/*
 * Release a NULL-terminated address array obtained from one of the
 * ifnet_get_*address_list() getters: drop the reference held on each
 * entry, then free the array (including its terminator slot).
 */
void
ifnet_free_address_list(ifaddr_t *__null_terminated addresses)
{
	int i = 0;

	if (addresses == NULL) {
		return;
	}

	for (ifaddr_t *__null_terminated ptr = addresses; *ptr != NULL; ++ptr, i++) {
		ifa_remref(*ptr);
	}

	/* i entries were released; the allocation also holds the NULL slot. */
	ifaddr_t *free_addresses = __unsafe_null_terminated_to_indexable(addresses);
	kfree_type(ifaddr_t, i + 1, free_addresses);
}
2246 
/*
 * Counted-array variant of ifnet_free_address_list(): drop the
 * reference on every non-NULL entry (in-use lists may contain NULL
 * tail slots) and free the whole allocation.
 */
void
ifnet_address_list_free_counted_by_internal(ifaddr_t *__counted_by(addresses_count) addresses,
    uint16_t addresses_count)
{
	if (addresses == NULL) {
		return;
	}
	for (int i = 0; i < addresses_count; i++) {
		if (addresses[i] != NULL) {
			ifa_remref(addresses[i]);
		}
	}
	kfree_type_counted_by(ifaddr_t, addresses_count, addresses);
}
2261 
/*
 * Return a pointer to the raw link-layer address bytes of `interface',
 * or NULL when no interface is given.  The returned pointer aliases
 * storage owned by the interface; it is not a copy.
 */
void *
ifnet_lladdr(ifnet_t interface)
{
	struct ifaddr *ifa;
	void *lladdr;

	if (interface == NULL) {
		return NULL;
	}

	/*
	 * if_lladdr points to the permanent link address of
	 * the interface and it never gets deallocated; internal
	 * code should simply use IF_LLADDR() for performance.
	 */
	ifa = interface->if_lladdr;
	IFA_LOCK_SPIN(ifa);
	struct sockaddr_dl *sdl = SDL(ifa->ifa_addr);
	lladdr = LLADDR(sdl);
	IFA_UNLOCK(ifa);

	return lladdr;
}
2285 
2286 errno_t
ifnet_llbroadcast_copy_bytes(ifnet_t interface,void * __sized_by (buffer_len)addr,size_t buffer_len,size_t * out_len)2287 ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *__sized_by(buffer_len) addr,
2288     size_t buffer_len, size_t *out_len)
2289 {
2290 	if (interface == NULL || addr == NULL || out_len == NULL) {
2291 		return EINVAL;
2292 	}
2293 
2294 	*out_len = interface->if_broadcast.length;
2295 
2296 	if (buffer_len < interface->if_broadcast.length) {
2297 		return EMSGSIZE;
2298 	}
2299 
2300 	if (interface->if_broadcast.length == 0) {
2301 		return ENXIO;
2302 	}
2303 
2304 	bcopy(interface->if_broadcast.ptr, addr,
2305 	    interface->if_broadcast.length);
2306 
2307 	return 0;
2308 }
2309 
/*
 * Copy the link-layer address of `interface' into `lladdr'.  The
 * caller must supply a buffer whose length exactly matches the
 * address length, otherwise the buffer is zeroed and EMSGSIZE is
 * returned.  A non-NULL `credp' lets dlil_ifaddr_bytes_indexable()
 * apply credential-based access control to the bytes returned.
 */
static errno_t
ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *__sized_by(lladdr_len) lladdr,
    size_t lladdr_len, kauth_cred_t *credp)
{
	size_t bytes_len;
	const u_int8_t *bytes;
	struct ifaddr *ifa;
	uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
	errno_t error = 0;

	/*
	 * Make sure to accommodate the largest possible
	 * size of SA(if_lladdr)->sa_len.
	 */
	static_assert(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1));

	if (interface == NULL || lladdr == NULL) {
		return EINVAL;
	}

	/* Snapshot the sockaddr_dl under the lock, then work on the copy. */
	ifa = interface->if_lladdr;
	IFA_LOCK_SPIN(ifa);
	const struct sockaddr_dl *sdl = SDL(sdlbuf);
	SOCKADDR_COPY(ifa->ifa_addr, sdl, SA(ifa->ifa_addr)->sa_len);
	IFA_UNLOCK(ifa);

	bytes = dlil_ifaddr_bytes_indexable(SDL(sdlbuf), &bytes_len, credp);
	if (bytes_len != lladdr_len) {
		/* Wrong-size buffer: scrub it so no partial address leaks out. */
		bzero(lladdr, lladdr_len);
		error = EMSGSIZE;
	} else {
		bcopy(bytes, lladdr, bytes_len);
	}

	return error;
}
2346 
2347 errno_t
ifnet_lladdr_copy_bytes(ifnet_t interface,void * __sized_by (length)lladdr,size_t length)2348 ifnet_lladdr_copy_bytes(ifnet_t interface, void *__sized_by(length) lladdr, size_t length)
2349 {
2350 	return ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
2351 	           NULL);
2352 }
2353 
/*
 * Credential-guarded variant of ifnet_lladdr_copy_bytes().  With MAC
 * enabled, the current process credential is passed down (inside a
 * NET_THREAD_CKREQ_LLADDR thread mark) so policy can veto or redact
 * the link-layer address; otherwise behaves like the plain copy.
 */
errno_t
ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *__sized_by(length) lladdr, size_t length)
{
#if CONFIG_MACF
	kauth_cred_t __single cred;
	net_thread_marks_t __single marks;
#endif
	kauth_cred_t *__single credp;
	errno_t error;

#if CONFIG_MACF
	marks = net_thread_marks_push(NET_THREAD_CKREQ_LLADDR);
	cred  = current_cached_proc_cred(PROC_NULL);
	credp = &cred;
#else
	credp = NULL;
#endif

	error = ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
	    credp);

#if CONFIG_MACF
	net_thread_marks_pop(marks);
#endif

	return error;
}
2381 
/*
 * Common worker for ifnet_set_lladdr() and ifnet_set_lladdr_and_type():
 * rewrite the link-level address stored in the interface's AF_LINK
 * sockaddr_dl (a zero lladdr_len clears it), optionally update
 * sdl_type, and post a KEV_DL_LINK_ADDRESS_CHANGED event on success.
 */
static errno_t
ifnet_set_lladdr_internal(ifnet_t interface, const void *__sized_by(lladdr_len) lladdr,
    size_t lladdr_len, u_char new_type, int apply_type)
{
	struct ifaddr *ifa;
	errno_t error = 0;

	if (interface == NULL) {
		return EINVAL;
	}

	ifnet_head_lock_shared();
	ifnet_lock_exclusive(interface);
	/*
	 * A non-zero length must match the interface's configured
	 * address length exactly and come with a non-NULL buffer.
	 */
	if (lladdr_len != 0 &&
	    (lladdr_len != interface->if_addrlen || lladdr == 0)) {
		ifnet_lock_done(interface);
		ifnet_head_done();
		return EINVAL;
	}
	/* The interface needs to be attached to add an address */
	if (interface->if_refflags & IFRF_EMBRYONIC) {
		ifnet_lock_done(interface);
		ifnet_head_done();
		return ENXIO;
	}

	ifa = ifnet_addrs[interface->if_index - 1];
	if (ifa != NULL) {
		struct sockaddr_dl *sdl;

		IFA_LOCK_SPIN(ifa);
		sdl = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
		if (lladdr_len != 0) {
			bcopy(lladdr, LLADDR(sdl), lladdr_len);
		} else {
			/* Clearing: zero the old address bytes. */
			bzero(LLADDR(sdl), interface->if_addrlen);
		}
		/* lladdr_len-check with if_addrlen makes sure it fits in u_char */
		sdl->sdl_alen = (u_char)lladdr_len;

		if (apply_type) {
			sdl->sdl_type = new_type;
		}
		IFA_UNLOCK(ifa);
	} else {
		error = ENXIO;
	}
	ifnet_lock_done(interface);
	ifnet_head_done();

	/* Generate a kernel event */
	if (error == 0) {
		intf_event_enqueue_nwk_wq_entry(interface, NULL,
		    INTF_EVENT_CODE_LLADDR_UPDATE);
		dlil_post_msg(interface, KEV_DL_SUBCLASS,
		    KEV_DL_LINK_ADDRESS_CHANGED, NULL, 0, FALSE);
	}

	return error;
}
2442 
2443 errno_t
ifnet_set_lladdr(ifnet_t interface,const void * __sized_by (lladdr_len)lladdr,size_t lladdr_len)2444 ifnet_set_lladdr(ifnet_t interface, const void *__sized_by(lladdr_len) lladdr, size_t lladdr_len)
2445 {
2446 	return ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, 0, 0);
2447 }
2448 
2449 errno_t
ifnet_set_lladdr_and_type(ifnet_t interface,const void * __sized_by (lladdr_len)lladdr,size_t lladdr_len,u_char type)2450 ifnet_set_lladdr_and_type(ifnet_t interface, const void *__sized_by(lladdr_len) lladdr,
2451     size_t lladdr_len, u_char type)
2452 {
2453 	return ifnet_set_lladdr_internal(interface, lladdr,
2454 	           lladdr_len, type, 1);
2455 }
2456 
/*
 * Join a link-layer multicast group on `interface'.  Only AF_LINK and
 * AF_UNSPEC sockaddrs are accepted (protocol-owned entries cannot be
 * manipulated through this KPI), and the sockaddr's claimed lengths
 * are validated before if_addmulti_anon() is called.
 */
errno_t
ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr,
    ifmultiaddr_t *ifmap)
{
	if (interface == NULL || maddr == NULL) {
		return EINVAL;
	}

	/* Don't let users screw up protocols' entries. */
	switch (maddr->sa_family) {
	case AF_LINK: {
		/* The component lengths must all fit inside sdl_len. */
		const struct sockaddr_dl *sdl = SDL(maddr);
		if (sdl->sdl_len < sizeof(struct sockaddr_dl) ||
		    (sdl->sdl_nlen + sdl->sdl_alen + sdl->sdl_slen +
		    offsetof(struct sockaddr_dl, sdl_data) > sdl->sdl_len)) {
			return EINVAL;
		}
		break;
	}
	case AF_UNSPEC:
		/* Raw form: sa_data must hold at least an Ethernet address. */
		if (maddr->sa_len < ETHER_ADDR_LEN +
		    offsetof(struct sockaddr, sa_data)) {
			return EINVAL;
		}
		break;
	default:
		return EINVAL;
	}

	return if_addmulti_anon(interface, maddr, ifmap);
}
2488 
2489 errno_t
ifnet_remove_multicast(ifmultiaddr_t ifma)2490 ifnet_remove_multicast(ifmultiaddr_t ifma)
2491 {
2492 	struct sockaddr *maddr;
2493 
2494 	if (ifma == NULL) {
2495 		return EINVAL;
2496 	}
2497 
2498 	maddr = ifma->ifma_addr;
2499 	/* Don't let users screw up protocols' entries. */
2500 	if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK) {
2501 		return EINVAL;
2502 	}
2503 
2504 	return if_delmulti_anon(ifma->ifma_ifp, maddr);
2505 }
2506 
/*
 * Return a NULL-terminated array of referenced multicast memberships
 * on `ifp'.  The list is sized and filled under a single hold of the
 * interface lock.  Caller frees with ifnet_free_multicast_list().
 */
errno_t
ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t *__null_terminated *ret_addresses)
{
	int count = 0;
	int cmax = 0;
	struct ifmultiaddr *addr;

	if (ifp == NULL || ret_addresses == NULL) {
		return EINVAL;
	}
	*ret_addresses = NULL;

	/* First pass: size the list. */
	ifnet_lock_shared(ifp);
	LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
		cmax++;
	}

	/* One extra slot for the NULL terminator. */
	ifmultiaddr_t *addresses = kalloc_type(ifmultiaddr_t, cmax + 1, Z_WAITOK);
	if (addresses == NULL) {
		ifnet_lock_done(ifp);
		return ENOMEM;
	}

	/* Second pass: fill, taking a reference on each entry. */
	LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
		if (count + 1 > cmax) {
			break;
		}
		addresses[count] = (ifmultiaddr_t)addr;
		ifmaddr_reference(addresses[count]);
		count++;
	}
	addresses[cmax] = NULL;
	ifnet_lock_done(ifp);

	*ret_addresses = __unsafe_null_terminated_from_indexable(addresses, &addresses[cmax]);

	return 0;
}
2545 
/*
 * Release a NULL-terminated multicast list obtained from
 * ifnet_get_multicast_list(): drop the reference on each entry, then
 * free the array (including its terminator slot).
 */
void
ifnet_free_multicast_list(ifmultiaddr_t *__null_terminated addresses)
{
	int i = 0;

	if (addresses == NULL) {
		return;
	}

	for (ifmultiaddr_t *__null_terminated ptr = addresses; *ptr != NULL; ptr++, i++) {
		ifmaddr_release(*ptr);
	}

	/* i entries were released; the allocation also holds the NULL slot. */
	ifmultiaddr_t *free_addresses = __unsafe_null_terminated_to_indexable(addresses);
	kfree_type(ifmultiaddr_t, i + 1, free_addresses);
}
2562 
/*
 * Look up an attached interface by its full name (e.g. "en0") via the
 * name stored in each interface's AF_LINK address.  On success *ifpp
 * holds a referenced ifnet; the caller must ifnet_release() it.
 * Returns ENXIO when no interface matches.
 */
errno_t
ifnet_find_by_name(const char *ifname, ifnet_t *ifpp)
{
	struct ifnet *ifp;
	size_t namelen;

	if (ifname == NULL) {
		return EINVAL;
	}

	namelen = strlen(ifname);

	*ifpp = NULL;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		struct ifaddr *ifa;
		struct sockaddr_dl *ll_addr;

		ifa = ifnet_addrs[ifp->if_index - 1];
		if (ifa == NULL) {
			continue;
		}

		IFA_LOCK(ifa);
		ll_addr = SDL(ifa->ifa_addr);

		/* sdl_data holds the (not NUL-terminated) interface name. */
		if (namelen == ll_addr->sdl_nlen &&
		    strlcmp(ll_addr->sdl_data, ifname, namelen) == 0) {
			IFA_UNLOCK(ifa);
			*ifpp = ifp;
			ifnet_reference(*ifpp);
			break;
		}
		IFA_UNLOCK(ifa);
	}
	ifnet_head_done();

	/* ifp is NULL iff the loop ran to completion without a match. */
	return (ifp == NULL) ? ENXIO : 0;
}
2603 
2604 errno_t
ifnet_list_get(ifnet_family_t family,ifnet_t * __counted_by (* count)* list,u_int32_t * count)2605 ifnet_list_get(ifnet_family_t family, ifnet_t *__counted_by(*count) *list,
2606     u_int32_t *count)
2607 {
2608 	return ifnet_list_get_common(family, FALSE, list, count);
2609 }
2610 
2611 __private_extern__ errno_t
ifnet_list_get_all(ifnet_family_t family,ifnet_t * __counted_by (* count)* list,u_int32_t * count)2612 ifnet_list_get_all(ifnet_family_t family, ifnet_t *__counted_by(*count) *list,
2613     u_int32_t *count)
2614 {
2615 	return ifnet_list_get_common(family, TRUE, list, count);
2616 }
2617 
/*
 * Temporary SLIST node used by ifnet_list_get_common() to collect
 * referenced interfaces before the output array can be sized.
 */
struct ifnet_list {
	SLIST_ENTRY(ifnet_list) ifl_le;         /* list linkage */
	struct ifnet            *ifl_ifp;       /* interface (holds a ref) */
};
2622 
/*
 * Shared implementation of ifnet_list_get() / ifnet_list_get_all().
 * Collects referenced interfaces matching `family' (IFNET_FAMILY_ANY
 * matches everything) into a freshly allocated array of *count
 * entries plus one zeroed terminator slot.  On error all transient
 * references are dropped and *list stays NULL.
 *
 * NOTE(review): `get_all' is currently unused (#pragma unused) — both
 * entry points receive identical behavior here.
 */
static errno_t
ifnet_list_get_common(ifnet_family_t family, boolean_t get_all,
    ifnet_t *__counted_by(*count) *list, u_int32_t *count)
{
#pragma unused(get_all)
	SLIST_HEAD(, ifnet_list) ifl_head;
	struct ifnet_list *ifl, *ifl_tmp;
	struct ifnet *ifp;
	ifnet_t *tmp_list = NULL;
	int cnt = 0;
	errno_t err = 0;

	SLIST_INIT(&ifl_head);

	if (list == NULL || count == NULL) {
		err = EINVAL;
		goto done;
	}
	*list = NULL;
	*count = 0;

	/* Pass 1: stash matching interfaces, holding a ref on each. */
	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (family == IFNET_FAMILY_ANY || ifp->if_family == family) {
			ifl = kalloc_type(struct ifnet_list, Z_WAITOK | Z_ZERO);
			if (ifl == NULL) {
				ifnet_head_done();
				err = ENOMEM;
				goto done;
			}
			ifl->ifl_ifp = ifp;
			ifnet_reference(ifp);
			SLIST_INSERT_HEAD(&ifl_head, ifl, ifl_le);
			++cnt;
		}
	}
	ifnet_head_done();

	if (cnt == 0) {
		err = ENXIO;
		goto done;
	}

	/* One extra zeroed slot acts as a NULL terminator. */
	tmp_list = kalloc_type(ifnet_t, cnt + 1, Z_WAITOK | Z_ZERO);
	if (tmp_list == NULL) {
		err = ENOMEM;
		goto done;
	}
	*list = tmp_list;
	*count = cnt;

done:
	/*
	 * Pass 2: drain the temporary list.  On success move the refs
	 * into the array (filled from the back, since SLIST insertion
	 * reversed the order); on error drop every ref.
	 */
	SLIST_FOREACH_SAFE(ifl, &ifl_head, ifl_le, ifl_tmp) {
		SLIST_REMOVE(&ifl_head, ifl, ifnet_list, ifl_le);
		if (err == 0) {
			(*list)[--cnt] = ifl->ifl_ifp;
		} else {
			ifnet_release(ifl->ifl_ifp);
		}
		kfree_type(struct ifnet_list, ifl);
	}

	return err;
}
2687 
/*
 * Release a NULL-terminated interface list obtained from
 * ifnet_list_get(): drop the reference on each entry, then free the
 * array (including its terminator slot).
 */
void
ifnet_list_free(ifnet_t *__null_terminated interfaces)
{
	int i = 0;

	if (interfaces == NULL) {
		return;
	}

	for (ifnet_t *__null_terminated ptr = interfaces; *ptr != NULL; ptr++, i++) {
		ifnet_release(*ptr);
	}

	/* i entries were released; the allocation also holds the NULL slot. */
	ifnet_t *free_interfaces = __unsafe_null_terminated_to_indexable(interfaces);
	kfree_type(ifnet_t, i + 1, free_interfaces);
}
2704 
2705 void
ifnet_list_free_counted_by_internal(ifnet_t * __counted_by (count)interfaces,uint32_t count)2706 ifnet_list_free_counted_by_internal(ifnet_t *__counted_by(count) interfaces, uint32_t count)
2707 {
2708 	if (interfaces == NULL) {
2709 		return;
2710 	}
2711 	for (int i = 0; i < count; i++) {
2712 		ifnet_release(interfaces[i]);
2713 	}
2714 
2715 	/*
2716 	 * When we allocated the ifnet_list, we returned only the number
2717 	 * of ifnet_t pointers without the null terminator in the `count'
2718 	 * variable, so we cheat here by freeing everything.
2719 	 */
2720 	ifnet_t *free_interfaces = interfaces;
2721 	kfree_type(ifnet_t, count + 1, free_interfaces);
2722 	interfaces = NULL;
2723 	count = 0;
2724 }
2725 
2726 /*************************************************************************/
2727 /* ifaddr_t accessors						*/
2728 /*************************************************************************/
2729 
2730 errno_t
ifaddr_reference(ifaddr_t ifa)2731 ifaddr_reference(ifaddr_t ifa)
2732 {
2733 	if (ifa == NULL) {
2734 		return EINVAL;
2735 	}
2736 
2737 	ifa_addref(ifa);
2738 	return 0;
2739 }
2740 
2741 errno_t
ifaddr_release(ifaddr_t ifa)2742 ifaddr_release(ifaddr_t ifa)
2743 {
2744 	if (ifa == NULL) {
2745 		return EINVAL;
2746 	}
2747 
2748 	ifa_remref(ifa);
2749 	return 0;
2750 }
2751 
2752 sa_family_t
ifaddr_address_family(ifaddr_t ifa)2753 ifaddr_address_family(ifaddr_t ifa)
2754 {
2755 	sa_family_t family = 0;
2756 
2757 	if (ifa != NULL) {
2758 		IFA_LOCK_SPIN(ifa);
2759 		if (ifa->ifa_addr != NULL) {
2760 			family = ifa->ifa_addr->sa_family;
2761 		}
2762 		IFA_UNLOCK(ifa);
2763 	}
2764 	return family;
2765 }
2766 
2767 errno_t
ifaddr_address(ifaddr_t ifa,struct sockaddr * out_addr,u_int32_t addr_size)2768 ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2769 {
2770 	u_int32_t copylen;
2771 
2772 	if (ifa == NULL || out_addr == NULL) {
2773 		return EINVAL;
2774 	}
2775 
2776 	IFA_LOCK_SPIN(ifa);
2777 	if (ifa->ifa_addr == NULL) {
2778 		IFA_UNLOCK(ifa);
2779 		return ENOTSUP;
2780 	}
2781 
2782 	copylen = (addr_size >= ifa->ifa_addr->sa_len) ?
2783 	    ifa->ifa_addr->sa_len : addr_size;
2784 	SOCKADDR_COPY(ifa->ifa_addr, out_addr, copylen);
2785 
2786 	if (ifa->ifa_addr->sa_len > addr_size) {
2787 		IFA_UNLOCK(ifa);
2788 		return EMSGSIZE;
2789 	}
2790 
2791 	IFA_UNLOCK(ifa);
2792 	return 0;
2793 }
2794 
2795 errno_t
ifaddr_dstaddress(ifaddr_t ifa,struct sockaddr * out_addr,u_int32_t addr_size)2796 ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2797 {
2798 	u_int32_t copylen;
2799 
2800 	if (ifa == NULL || out_addr == NULL) {
2801 		return EINVAL;
2802 	}
2803 
2804 	IFA_LOCK_SPIN(ifa);
2805 	if (ifa->ifa_dstaddr == NULL) {
2806 		IFA_UNLOCK(ifa);
2807 		return ENOTSUP;
2808 	}
2809 
2810 	copylen = (addr_size >= ifa->ifa_dstaddr->sa_len) ?
2811 	    ifa->ifa_dstaddr->sa_len : addr_size;
2812 	SOCKADDR_COPY(ifa->ifa_dstaddr, out_addr, copylen);
2813 
2814 	if (ifa->ifa_dstaddr->sa_len > addr_size) {
2815 		IFA_UNLOCK(ifa);
2816 		return EMSGSIZE;
2817 	}
2818 
2819 	IFA_UNLOCK(ifa);
2820 	return 0;
2821 }
2822 
2823 errno_t
ifaddr_netmask(ifaddr_t ifa,struct sockaddr * out_addr,u_int32_t addr_size)2824 ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2825 {
2826 	u_int32_t copylen;
2827 
2828 	if (ifa == NULL || out_addr == NULL) {
2829 		return EINVAL;
2830 	}
2831 
2832 	IFA_LOCK_SPIN(ifa);
2833 	if (ifa->ifa_netmask == NULL) {
2834 		IFA_UNLOCK(ifa);
2835 		return ENOTSUP;
2836 	}
2837 
2838 	copylen = addr_size >= ifa->ifa_netmask->sa_len ?
2839 	    ifa->ifa_netmask->sa_len : addr_size;
2840 	SOCKADDR_COPY(ifa->ifa_netmask, out_addr, copylen);
2841 
2842 	if (ifa->ifa_netmask->sa_len > addr_size) {
2843 		IFA_UNLOCK(ifa);
2844 		return EMSGSIZE;
2845 	}
2846 
2847 	IFA_UNLOCK(ifa);
2848 	return 0;
2849 }
2850 
2851 ifnet_t
ifaddr_ifnet(ifaddr_t ifa)2852 ifaddr_ifnet(ifaddr_t ifa)
2853 {
2854 	struct ifnet *ifp;
2855 
2856 	if (ifa == NULL) {
2857 		return NULL;
2858 	}
2859 
2860 	/* ifa_ifp is set once at creation time; it is never changed */
2861 	ifp = ifa->ifa_ifp;
2862 
2863 	return ifp;
2864 }
2865 
2866 ifaddr_t
ifaddr_withaddr(const struct sockaddr * address)2867 ifaddr_withaddr(const struct sockaddr *address)
2868 {
2869 	if (address == NULL) {
2870 		return NULL;
2871 	}
2872 
2873 	return ifa_ifwithaddr(address);
2874 }
2875 
2876 ifaddr_t
ifaddr_withdstaddr(const struct sockaddr * address)2877 ifaddr_withdstaddr(const struct sockaddr *address)
2878 {
2879 	if (address == NULL) {
2880 		return NULL;
2881 	}
2882 
2883 	return ifa_ifwithdstaddr(address);
2884 }
2885 
2886 ifaddr_t
ifaddr_withnet(const struct sockaddr * net)2887 ifaddr_withnet(const struct sockaddr *net)
2888 {
2889 	if (net == NULL) {
2890 		return NULL;
2891 	}
2892 
2893 	return ifa_ifwithnet(net);
2894 }
2895 
2896 ifaddr_t
ifaddr_withroute(int flags,const struct sockaddr * destination,const struct sockaddr * gateway)2897 ifaddr_withroute(int flags, const struct sockaddr *destination,
2898     const struct sockaddr *gateway)
2899 {
2900 	if (destination == NULL || gateway == NULL) {
2901 		return NULL;
2902 	}
2903 
2904 	return ifa_ifwithroute(flags, destination, gateway);
2905 }
2906 
2907 ifaddr_t
ifaddr_findbestforaddr(const struct sockaddr * addr,ifnet_t interface)2908 ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface)
2909 {
2910 	if (addr == NULL || interface == NULL) {
2911 		return NULL;
2912 	}
2913 
2914 	return ifaof_ifpforaddr_select(addr, interface);
2915 }
2916 
2917 errno_t
ifaddr_get_ia6_flags(ifaddr_t ifa,u_int32_t * out_flags)2918 ifaddr_get_ia6_flags(ifaddr_t ifa, u_int32_t *out_flags)
2919 {
2920 	sa_family_t family = 0;
2921 
2922 	if (ifa == NULL || out_flags == NULL) {
2923 		return EINVAL;
2924 	}
2925 
2926 	IFA_LOCK_SPIN(ifa);
2927 	if (ifa->ifa_addr != NULL) {
2928 		family = ifa->ifa_addr->sa_family;
2929 	}
2930 	IFA_UNLOCK(ifa);
2931 
2932 	if (family != AF_INET6) {
2933 		return EINVAL;
2934 	}
2935 
2936 	*out_flags = ifatoia6(ifa)->ia6_flags;
2937 	return 0;
2938 }
2939 
2940 errno_t
ifmaddr_reference(ifmultiaddr_t ifmaddr)2941 ifmaddr_reference(ifmultiaddr_t ifmaddr)
2942 {
2943 	if (ifmaddr == NULL) {
2944 		return EINVAL;
2945 	}
2946 
2947 	IFMA_ADDREF(ifmaddr);
2948 	return 0;
2949 }
2950 
2951 errno_t
ifmaddr_release(ifmultiaddr_t ifmaddr)2952 ifmaddr_release(ifmultiaddr_t ifmaddr)
2953 {
2954 	if (ifmaddr == NULL) {
2955 		return EINVAL;
2956 	}
2957 
2958 	IFMA_REMREF(ifmaddr);
2959 	return 0;
2960 }
2961 
2962 errno_t
ifmaddr_address(ifmultiaddr_t ifma,struct sockaddr * out_addr,u_int32_t addr_size)2963 ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2964     u_int32_t addr_size)
2965 {
2966 	u_int32_t copylen;
2967 
2968 	if (ifma == NULL || out_addr == NULL) {
2969 		return EINVAL;
2970 	}
2971 
2972 	IFMA_LOCK(ifma);
2973 	if (ifma->ifma_addr == NULL) {
2974 		IFMA_UNLOCK(ifma);
2975 		return ENOTSUP;
2976 	}
2977 
2978 	copylen = (addr_size >= ifma->ifma_addr->sa_len ?
2979 	    ifma->ifma_addr->sa_len : addr_size);
2980 	SOCKADDR_COPY(ifma->ifma_addr, out_addr, copylen);
2981 
2982 	if (ifma->ifma_addr->sa_len > addr_size) {
2983 		IFMA_UNLOCK(ifma);
2984 		return EMSGSIZE;
2985 	}
2986 	IFMA_UNLOCK(ifma);
2987 	return 0;
2988 }
2989 
2990 errno_t
ifmaddr_lladdress(ifmultiaddr_t ifma,struct sockaddr * out_addr,u_int32_t addr_size)2991 ifmaddr_lladdress(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2992     u_int32_t addr_size)
2993 {
2994 	struct ifmultiaddr *ifma_ll;
2995 
2996 	if (ifma == NULL || out_addr == NULL) {
2997 		return EINVAL;
2998 	}
2999 	if ((ifma_ll = ifma->ifma_ll) == NULL) {
3000 		return ENOTSUP;
3001 	}
3002 
3003 	return ifmaddr_address(ifma_ll, out_addr, addr_size);
3004 }
3005 
3006 ifnet_t
ifmaddr_ifnet(ifmultiaddr_t ifma)3007 ifmaddr_ifnet(ifmultiaddr_t ifma)
3008 {
3009 	return (ifma == NULL) ? NULL : ifma->ifma_ifp;
3010 }
3011 
3012 /**************************************************************************/
3013 /* interface cloner						*/
3014 /**************************************************************************/
3015 
/*
 * Register an interface cloner.  Validates the supplied parameters,
 * rejects duplicate cloner names, then allocates and attaches an
 * if_clone record.  On success *ifcloner holds the new cloner handle
 * for a later ifnet_clone_detach().
 */
errno_t
ifnet_clone_attach(struct ifnet_clone_params *cloner_params,
    if_clone_t *ifcloner)
{
	errno_t error = 0;
	struct if_clone *ifc = NULL;
	size_t namelen;

	/* Name, create and destroy callbacks are all mandatory. */
	if (cloner_params == NULL || ifcloner == NULL ||
	    cloner_params->ifc_name == NULL ||
	    cloner_params->ifc_create == NULL ||
	    cloner_params->ifc_destroy == NULL ||
	    (namelen = strlen(cloner_params->ifc_name)) >= IFNAMSIZ) {
		error = EINVAL;
		goto fail;
	}

	if (if_clone_lookup(__terminated_by_to_indexable(cloner_params->ifc_name),
	    namelen, NULL) != NULL) {
		printf("%s: already a cloner for %s\n", __func__,
		    cloner_params->ifc_name);
		error = EEXIST;
		goto fail;
	}

	ifc = kalloc_type(struct if_clone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	strlcpy(ifc->ifc_name, cloner_params->ifc_name, IFNAMSIZ + 1);
	/* namelen < IFNAMSIZ was checked above, so the cast is safe. */
	ifc->ifc_namelen = (uint8_t)namelen;
	ifc->ifc_maxunit = IF_MAXUNIT;
	ifc->ifc_create = cloner_params->ifc_create;
	ifc->ifc_destroy = cloner_params->ifc_destroy;

	error = if_clone_attach(ifc);
	if (error != 0) {
		printf("%s: if_clone_attach failed %d\n", __func__, error);
		goto fail;
	}
	*ifcloner = ifc;

	return 0;
fail:
	if (ifc != NULL) {
		kfree_type(struct if_clone, ifc);
	}
	return error;
}
3062 
3063 errno_t
ifnet_clone_detach(if_clone_t ifcloner)3064 ifnet_clone_detach(if_clone_t ifcloner)
3065 {
3066 	errno_t error = 0;
3067 	struct if_clone *ifc = ifcloner;
3068 
3069 	if (ifc == NULL) {
3070 		return EINVAL;
3071 	}
3072 
3073 	if ((if_clone_lookup(ifc->ifc_name, ifc->ifc_namelen, NULL)) == NULL) {
3074 		printf("%s: no cloner for %s\n", __func__, ifc->ifc_name);
3075 		error = EINVAL;
3076 		goto fail;
3077 	}
3078 
3079 	if_clone_detach(ifc);
3080 
3081 	kfree_type(struct if_clone, ifc);
3082 
3083 fail:
3084 	return error;
3085 }
3086 
3087 /**************************************************************************/
3088 /* misc							*/
3089 /**************************************************************************/
3090 
/*
 * Merge into `bitfield' the local ports in use on `ifp' (NULL == all
 * interfaces) for the given protocol family, honoring the
 * IFNET_GET_LOCAL_PORTS_* restriction flags.  The caller is expected
 * to have zeroed the bitfield; this routine only sets bits.
 */
static errno_t
ifnet_get_local_ports_extended_inner(ifnet_t ifp, protocol_family_t protocol,
    u_int32_t flags, u_int8_t bitfield[bitstr_size(IP_PORTRANGE_SIZE)])
{
	u_int32_t ifindex;

	/* no point in continuing if no address is assigned */
	if (ifp != NULL && TAILQ_EMPTY(&ifp->if_addrhead)) {
		return 0;
	}

	if_ports_used_update_wakeuuid(ifp);

#if SKYWALK
	/* Skywalk channels register their ports in the netns, not PCBs. */
	if (netns_is_enabled()) {
		netns_get_local_ports(ifp, protocol, flags, bitfield);
	}
#endif /* SKYWALK */

	/* NOTE(review): ifindex is computed but not used below — confirm. */
	ifindex = (ifp != NULL) ? ifp->if_index : 0;

	if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY)) {
		udp_get_ports_used(ifp, protocol, flags,
		    bitfield);
	}

	if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY)) {
		tcp_get_ports_used(ifp, protocol, flags,
		    bitfield);
	}

	return 0;
}
3124 
3125 errno_t
ifnet_get_local_ports_extended(ifnet_t ifp,protocol_family_t protocol,u_int32_t flags,u_int8_t bitfield[IP_PORTRANGE_BITFIELD_LEN])3126 ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol,
3127     u_int32_t flags, u_int8_t bitfield[IP_PORTRANGE_BITFIELD_LEN])
3128 {
3129 	ifnet_ref_t parent_ifp = NULL;
3130 
3131 	if (bitfield == NULL) {
3132 		return EINVAL;
3133 	}
3134 
3135 	switch (protocol) {
3136 	case PF_UNSPEC:
3137 	case PF_INET:
3138 	case PF_INET6:
3139 		break;
3140 	default:
3141 		return EINVAL;
3142 	}
3143 
3144 	/* bit string is long enough to hold 16-bit port values */
3145 	bzero(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
3146 
3147 	ifnet_get_local_ports_extended_inner(ifp, protocol, flags, bitfield);
3148 
3149 	/* get local ports for parent interface */
3150 	if (ifp != NULL && ifnet_get_delegate_parent(ifp, &parent_ifp) == 0) {
3151 		ifnet_get_local_ports_extended_inner(parent_ifp, protocol,
3152 		    flags, bitfield);
3153 		ifnet_release_delegate_parent(ifp);
3154 	}
3155 
3156 	return 0;
3157 }
3158 
3159 errno_t
ifnet_get_local_ports(ifnet_t ifp,u_int8_t bitfield[IP_PORTRANGE_BITFIELD_LEN])3160 ifnet_get_local_ports(ifnet_t ifp, u_int8_t bitfield[IP_PORTRANGE_BITFIELD_LEN])
3161 {
3162 	u_int32_t flags = IFNET_GET_LOCAL_PORTS_WILDCARDOK;
3163 
3164 	return ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags,
3165 	           bitfield);
3166 }
3167 
3168 errno_t
ifnet_notice_node_presence(ifnet_t ifp,struct sockaddr * sa,int32_t rssi,int lqm,int npm,u_int8_t srvinfo[48])3169 ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi,
3170     int lqm, int npm, u_int8_t srvinfo[48])
3171 {
3172 	if (ifp == NULL || sa == NULL || srvinfo == NULL) {
3173 		return EINVAL;
3174 	}
3175 	if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3176 		return EINVAL;
3177 	}
3178 	if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
3179 		return EINVAL;
3180 	}
3181 
3182 	return dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo);
3183 }
3184 
3185 errno_t
ifnet_notice_node_presence_v2(ifnet_t ifp,struct sockaddr * sa,struct sockaddr_dl * sdl,int32_t rssi,int lqm,int npm,u_int8_t srvinfo[48])3186 ifnet_notice_node_presence_v2(ifnet_t ifp, struct sockaddr *sa, struct sockaddr_dl *sdl,
3187     int32_t rssi, int lqm, int npm, u_int8_t srvinfo[48])
3188 {
3189 	/* Support older version if sdl is NULL */
3190 	if (sdl == NULL) {
3191 		return ifnet_notice_node_presence(ifp, sa, rssi, lqm, npm, srvinfo);
3192 	}
3193 
3194 	if (ifp == NULL || sa == NULL || srvinfo == NULL) {
3195 		return EINVAL;
3196 	}
3197 	if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3198 		return EINVAL;
3199 	}
3200 
3201 	if (sa->sa_family != AF_INET6) {
3202 		return EINVAL;
3203 	}
3204 
3205 	if (sdl->sdl_family != AF_LINK) {
3206 		return EINVAL;
3207 	}
3208 
3209 	return dlil_node_present_v2(ifp, sa, sdl, rssi, lqm, npm, srvinfo);
3210 }
3211 
3212 errno_t
ifnet_notice_node_absence(ifnet_t ifp,struct sockaddr * sa)3213 ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa)
3214 {
3215 	if (ifp == NULL || sa == NULL) {
3216 		return EINVAL;
3217 	}
3218 	if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3219 		return EINVAL;
3220 	}
3221 	if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
3222 		return EINVAL;
3223 	}
3224 
3225 	dlil_node_absent(ifp, sa);
3226 	return 0;
3227 }
3228 
3229 errno_t
ifnet_notice_primary_elected(ifnet_t ifp)3230 ifnet_notice_primary_elected(ifnet_t ifp)
3231 {
3232 	if (ifp == NULL) {
3233 		return EINVAL;
3234 	}
3235 
3236 	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PRIMARY_ELECTED, NULL, 0, FALSE);
3237 	return 0;
3238 }
3239 
3240 errno_t
ifnet_tx_compl_status(ifnet_t ifp,mbuf_t m,tx_compl_val_t val)3241 ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val)
3242 {
3243 #pragma unused(val)
3244 
3245 	m_do_tx_compl_callback(m, ifp);
3246 
3247 	return 0;
3248 }
3249 
3250 errno_t
ifnet_tx_compl(ifnet_t ifp,mbuf_t m)3251 ifnet_tx_compl(ifnet_t ifp, mbuf_t m)
3252 {
3253 	m_do_tx_compl_callback(m, ifp);
3254 
3255 	return 0;
3256 }
3257 
3258 errno_t
ifnet_report_issues(ifnet_t ifp,u_int8_t modid[IFNET_MODIDLEN],u_int8_t info[IFNET_MODARGLEN])3259 ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN],
3260     u_int8_t info[IFNET_MODARGLEN])
3261 {
3262 	if (ifp == NULL || modid == NULL) {
3263 		return EINVAL;
3264 	}
3265 
3266 	dlil_report_issues(ifp, modid, info);
3267 	return 0;
3268 }
3269 
/*
 * Set (or clear) the delegate interface of 'ifp'.  The delegate's type,
 * family and cost attributes are cached in ifp->if_delegated so readers
 * do not have to chase the delegate pointer.  A NULL 'delegated_ifp'
 * clears any existing delegation.
 *
 * Returns EINVAL for a NULL 'ifp', ENXIO when an io reference cannot be
 * taken, and 0 otherwise.  NOTE(review): a request that would create a
 * delegation loop also returns 0 after only logging — confirm callers
 * do not expect an error in that case.
 */
errno_t
ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp)
{
	ifnet_t odifp = NULL;

	if (ifp == NULL) {
		return EINVAL;
	} else if (!ifnet_get_ioref(ifp)) {
		return ENXIO;
	}

	ifnet_lock_exclusive(ifp);
	/* Snapshot the old delegate; its reference is dropped after unlock */
	odifp = ifp->if_delegated.ifp;
	if (odifp != NULL && odifp == delegated_ifp) {
		/* delegate info is unchanged; nothing more to do */
		ifnet_lock_done(ifp);
		goto done;
	}
	// Test if this delegate interface would cause a loop
	ifnet_t delegate_check_ifp = delegated_ifp;
	while (delegate_check_ifp != NULL) {
		if (delegate_check_ifp == ifp) {
			printf("%s: delegating to %s would cause a loop\n",
			    ifp->if_xname, delegated_ifp->if_xname);
			ifnet_lock_done(ifp);
			goto done;
		}
		delegate_check_ifp = delegate_check_ifp->if_delegated.ifp;
	}
	/* Wipe the cached info for the old delegate before installing the new */
	bzero(&ifp->if_delegated, sizeof(ifp->if_delegated));
	if (delegated_ifp != NULL && ifp != delegated_ifp) {
		ifp->if_delegated.ifp = delegated_ifp;
		ifnet_reference(delegated_ifp);
		ifp->if_delegated.type = delegated_ifp->if_type;
		ifp->if_delegated.family = delegated_ifp->if_family;
		ifp->if_delegated.subfamily = delegated_ifp->if_subfamily;
		ifp->if_delegated.expensive =
		    delegated_ifp->if_eflags & IFEF_EXPENSIVE ? 1 : 0;
		ifp->if_delegated.constrained =
		    delegated_ifp->if_xflags & IFXF_CONSTRAINED ? 1 : 0;
		ifp->if_delegated.ultra_constrained =
		    delegated_ifp->if_xflags & IFXF_ULTRA_CONSTRAINED ? 1 : 0;

		printf("%s: is now delegating %s (type 0x%x, family %u, "
		    "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname,
		    delegated_ifp->if_type, delegated_ifp->if_family,
		    delegated_ifp->if_subfamily);
	}

	ifnet_lock_done(ifp);

	/* Drop the reference held on the previous delegate, if any */
	if (odifp != NULL) {
		if (odifp != delegated_ifp) {
			printf("%s: is no longer delegating %s\n",
			    ifp->if_xname, odifp->if_xname);
		}
		ifnet_release(odifp);
	}

	/* Generate a kernel event */
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IFDELEGATE_CHANGED, NULL, 0, FALSE);

done:
	/* Release the io ref count */
	ifnet_decr_iorefcnt(ifp);

	return 0;
}
3338 
3339 errno_t
ifnet_get_delegate(ifnet_t ifp,ifnet_t * pdelegated_ifp)3340 ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp)
3341 {
3342 	if (ifp == NULL || pdelegated_ifp == NULL) {
3343 		return EINVAL;
3344 	} else if (!ifnet_get_ioref(ifp)) {
3345 		return ENXIO;
3346 	}
3347 
3348 	ifnet_lock_shared(ifp);
3349 	if (ifp->if_delegated.ifp != NULL) {
3350 		ifnet_reference(ifp->if_delegated.ifp);
3351 	}
3352 	*pdelegated_ifp = ifp->if_delegated.ifp;
3353 	ifnet_lock_done(ifp);
3354 
3355 	/* Release the io ref count */
3356 	ifnet_decr_iorefcnt(ifp);
3357 
3358 	return 0;
3359 }
3360 
3361 errno_t
ifnet_get_keepalive_offload_frames(ifnet_t ifp,struct ifnet_keepalive_offload_frame * __counted_by (frames_array_count)frames_array,u_int32_t frames_array_count,size_t frame_data_offset,u_int32_t * used_frames_count)3362 ifnet_get_keepalive_offload_frames(ifnet_t ifp,
3363     struct ifnet_keepalive_offload_frame *__counted_by(frames_array_count) frames_array,
3364     u_int32_t frames_array_count, size_t frame_data_offset,
3365     u_int32_t *used_frames_count)
3366 {
3367 	u_int32_t i;
3368 
3369 	if (frames_array == NULL || used_frames_count == NULL ||
3370 	    frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
3371 		return EINVAL;
3372 	}
3373 
3374 	/* frame_data_offset should be 32-bit aligned */
3375 	if (P2ROUNDUP(frame_data_offset, sizeof(u_int32_t)) !=
3376 	    frame_data_offset) {
3377 		return EINVAL;
3378 	}
3379 
3380 	*used_frames_count = 0;
3381 	if (frames_array_count == 0) {
3382 		return 0;
3383 	}
3384 
3385 
3386 	for (i = 0; i < frames_array_count; i++) {
3387 		struct ifnet_keepalive_offload_frame *frame = frames_array + i;
3388 		bzero(frame, sizeof(struct ifnet_keepalive_offload_frame));
3389 	}
3390 
3391 	/* First collect IPsec related keep-alive frames */
3392 	*used_frames_count = key_fill_offload_frames_for_savs(ifp,
3393 	    frames_array, frames_array_count, frame_data_offset);
3394 
3395 	/* Keep-alive offload not required for TCP/UDP on CLAT interface */
3396 	if (IS_INTF_CLAT46(ifp)) {
3397 		return 0;
3398 	}
3399 
3400 	/* If there is more room, collect other UDP keep-alive frames */
3401 	if (*used_frames_count < frames_array_count) {
3402 		udp_fill_keepalive_offload_frames(ifp, frames_array,
3403 		    frames_array_count, frame_data_offset,
3404 		    used_frames_count);
3405 	}
3406 
3407 	/* If there is more room, collect other TCP keep-alive frames */
3408 	if (*used_frames_count < frames_array_count) {
3409 		tcp_fill_keepalive_offload_frames(ifp, frames_array,
3410 		    frames_array_count, frame_data_offset,
3411 		    used_frames_count);
3412 	}
3413 
3414 	VERIFY(*used_frames_count <= frames_array_count);
3415 
3416 	return 0;
3417 }
3418 
3419 errno_t
ifnet_notify_tcp_keepalive_offload_timeout(ifnet_t ifp,struct ifnet_keepalive_offload_frame * frame)3420 ifnet_notify_tcp_keepalive_offload_timeout(ifnet_t ifp,
3421     struct ifnet_keepalive_offload_frame *frame)
3422 {
3423 	errno_t error = 0;
3424 
3425 	if (ifp == NULL || frame == NULL) {
3426 		return EINVAL;
3427 	}
3428 
3429 	if (frame->type != IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP) {
3430 		return EINVAL;
3431 	}
3432 	if (frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 &&
3433 	    frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6) {
3434 		return EINVAL;
3435 	}
3436 	if (frame->local_port == 0 || frame->remote_port == 0) {
3437 		return EINVAL;
3438 	}
3439 
3440 	error = tcp_notify_kao_timeout(ifp, frame);
3441 
3442 	return error;
3443 }
3444 
/*
 * Driver KPI to report cellular or Wi-Fi link status for 'ifp'.  The
 * report in 'buffer' is copied into ifp->if_link_status (allocated on
 * first use).  For cellular interfaces, a change in the recommended
 * uplink MSS schedules the fast inpcb timer so TCP connections can
 * adopt it.  For Wi-Fi interfaces, bandwidth fields the driver left
 * unset are backfilled from values previously stored in
 * ifp->if_output_bw / ifp->if_input_bw.
 *
 * Returns 0 on success, EINVAL on bad arguments or a report length
 * mismatch, ENXIO if the interface is not fully attached, ENOTSUP on a
 * report version mismatch, and ENOMEM if the status buffer cannot be
 * allocated.
 */
errno_t
ifnet_link_status_report(ifnet_t ifp, const void *__sized_by(buffer_len) buffer,
    size_t buffer_len)
{
	struct if_link_status ifsr = {};
	errno_t err = 0;

	if (ifp == NULL || buffer == NULL || buffer_len == 0) {
		return EINVAL;
	}

	ifnet_lock_shared(ifp);

	/*
	 * Make sure that the interface is attached but there is no need
	 * to take a reference because this call is coming from the driver.
	 */
	if (!ifnet_is_fully_attached(ifp)) {
		ifnet_lock_done(ifp);
		return ENXIO;
	}

	lck_rw_lock_exclusive(&ifp->if_link_status_lock);

	/*
	 * If this is the first status report then allocate memory
	 * to store it.
	 */
	if (ifp->if_link_status == NULL) {
		ifp->if_link_status = kalloc_type(struct if_link_status, Z_ZERO);
		if (ifp->if_link_status == NULL) {
			err = ENOMEM;
			goto done;
		}
	}

	/* Copy at most sizeof(ifsr); a shorter buffer leaves the tail zeroed */
	memcpy(&ifsr, buffer, MIN(sizeof(ifsr), buffer_len));
	if (ifp->if_type == IFT_CELLULAR) {
		struct if_cellular_status_v1 *if_cell_sr, *new_cell_sr;
		/*
		 * Currently we have a single version -- if it does
		 * not match, just return.
		 */
		if (ifsr.ifsr_version !=
		    IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION) {
			err = ENOTSUP;
			goto done;
		}

		if (ifsr.ifsr_len != sizeof(*if_cell_sr)) {
			err = EINVAL;
			goto done;
		}

		if_cell_sr =
		    &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
		new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
		/* Check if we need to act on any new notifications */
		if ((new_cell_sr->valid_bitmask &
		    IF_CELL_UL_MSS_RECOMMENDED_VALID) &&
		    new_cell_sr->mss_recommended !=
		    if_cell_sr->mss_recommended) {
			/* MSS recommendation changed: kick the fast inpcb timer */
			os_atomic_or(&tcbinfo.ipi_flags, INPCBINFO_UPDATE_MSS, relaxed);
			inpcb_timer_sched(&tcbinfo, INPCB_TIMER_FAST);
#if NECP
			necp_update_all_clients();
#endif
		}

		/* Finally copy the new information */
		ifp->if_link_status->ifsr_version = ifsr.ifsr_version;
		ifp->if_link_status->ifsr_len = ifsr.ifsr_len;
		if_cell_sr->valid_bitmask = 0;
		bcopy(new_cell_sr, if_cell_sr, sizeof(*if_cell_sr));
	} else if (IFNET_IS_WIFI(ifp)) {
		struct if_wifi_status_v1 *if_wifi_sr, *new_wifi_sr;

		/* Check version */
		if (ifsr.ifsr_version !=
		    IF_WIFI_STATUS_REPORT_CURRENT_VERSION) {
			err = ENOTSUP;
			goto done;
		}

		if (ifsr.ifsr_len != sizeof(*if_wifi_sr)) {
			err = EINVAL;
			goto done;
		}

		if_wifi_sr =
		    &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
		new_wifi_sr =
		    &ifsr.ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
		ifp->if_link_status->ifsr_version = ifsr.ifsr_version;
		ifp->if_link_status->ifsr_len = ifsr.ifsr_len;
		if_wifi_sr->valid_bitmask = 0;
		bcopy(new_wifi_sr, if_wifi_sr, sizeof(*if_wifi_sr));

		/*
		 * Update the bandwidth values if we got recent values
		 * reported through the other KPI.
		 */
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_UL_MAX_BANDWIDTH_VALID) &&
		    ifp->if_output_bw.max_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_UL_MAX_BANDWIDTH_VALID;
			/* clamp 64-bit bw into the 32-bit report field */
			if_wifi_sr->ul_max_bandwidth =
			    ifp->if_output_bw.max_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_output_bw.max_bw;
		}
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) &&
		    ifp->if_output_bw.eff_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
			if_wifi_sr->ul_effective_bandwidth =
			    ifp->if_output_bw.eff_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_output_bw.eff_bw;
		}
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_DL_MAX_BANDWIDTH_VALID) &&
		    ifp->if_input_bw.max_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_DL_MAX_BANDWIDTH_VALID;
			if_wifi_sr->dl_max_bandwidth =
			    ifp->if_input_bw.max_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_input_bw.max_bw;
		}
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) &&
		    ifp->if_input_bw.eff_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
			if_wifi_sr->dl_effective_bandwidth =
			    ifp->if_input_bw.eff_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_input_bw.eff_bw;
		}
	}

done:
	lck_rw_done(&ifp->if_link_status_lock);
	ifnet_lock_done(ifp);
	return err;
}
3594 
3595 /*************************************************************************/
/* Fastlane QoS Capable					*/
3597 /*************************************************************************/
3598 
3599 errno_t
ifnet_set_fastlane_capable(ifnet_t interface,boolean_t capable)3600 ifnet_set_fastlane_capable(ifnet_t interface, boolean_t capable)
3601 {
3602 	if (interface == NULL) {
3603 		return EINVAL;
3604 	}
3605 
3606 	if_set_qosmarking_mode(interface,
3607 	    capable ? IFRTYPE_QOSMARKING_FASTLANE : IFRTYPE_QOSMARKING_MODE_NONE);
3608 
3609 	return 0;
3610 }
3611 
3612 errno_t
ifnet_get_fastlane_capable(ifnet_t interface,boolean_t * capable)3613 ifnet_get_fastlane_capable(ifnet_t interface, boolean_t *capable)
3614 {
3615 	if (interface == NULL || capable == NULL) {
3616 		return EINVAL;
3617 	}
3618 	if (interface->if_qosmarking_mode == IFRTYPE_QOSMARKING_FASTLANE) {
3619 		*capable = true;
3620 	} else {
3621 		*capable = false;
3622 	}
3623 	return 0;
3624 }
3625 
3626 errno_t
ifnet_get_unsent_bytes(ifnet_t interface,int64_t * unsent_bytes)3627 ifnet_get_unsent_bytes(ifnet_t interface, int64_t *unsent_bytes)
3628 {
3629 	int64_t bytes;
3630 
3631 	if (interface == NULL || unsent_bytes == NULL) {
3632 		return EINVAL;
3633 	}
3634 
3635 	bytes = *unsent_bytes = 0;
3636 
3637 	if (!ifnet_is_fully_attached(interface)) {
3638 		return ENXIO;
3639 	}
3640 
3641 	bytes = interface->if_sndbyte_unsent;
3642 
3643 	if (interface->if_eflags & IFEF_TXSTART) {
3644 		bytes += IFCQ_BYTES(interface->if_snd);
3645 	}
3646 	*unsent_bytes = bytes;
3647 
3648 	return 0;
3649 }
3650 
3651 errno_t
ifnet_get_buffer_status(const ifnet_t ifp,ifnet_buffer_status_t * buf_status)3652 ifnet_get_buffer_status(const ifnet_t ifp, ifnet_buffer_status_t *buf_status)
3653 {
3654 	if (ifp == NULL || buf_status == NULL) {
3655 		return EINVAL;
3656 	}
3657 
3658 	bzero(buf_status, sizeof(*buf_status));
3659 
3660 	if (!ifnet_is_fully_attached(ifp)) {
3661 		return ENXIO;
3662 	}
3663 
3664 	if (ifp->if_eflags & IFEF_TXSTART) {
3665 		buf_status->buf_interface = IFCQ_BYTES(ifp->if_snd);
3666 	}
3667 
3668 	buf_status->buf_sndbuf = ((buf_status->buf_interface != 0) ||
3669 	    (ifp->if_sndbyte_unsent != 0)) ? 1 : 0;
3670 
3671 	return 0;
3672 }
3673 
3674 void
ifnet_normalise_unsent_data(void)3675 ifnet_normalise_unsent_data(void)
3676 {
3677 	struct ifnet *ifp;
3678 
3679 	ifnet_head_lock_shared();
3680 	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
3681 		ifnet_lock_exclusive(ifp);
3682 		if (!ifnet_is_fully_attached(ifp)) {
3683 			ifnet_lock_done(ifp);
3684 			continue;
3685 		}
3686 		if (!(ifp->if_eflags & IFEF_TXSTART)) {
3687 			ifnet_lock_done(ifp);
3688 			continue;
3689 		}
3690 
3691 		if (ifp->if_sndbyte_total > 0 ||
3692 		    IFCQ_BYTES(ifp->if_snd) > 0) {
3693 			ifp->if_unsent_data_cnt++;
3694 		}
3695 
3696 		ifnet_lock_done(ifp);
3697 	}
3698 	ifnet_head_done();
3699 }
3700 
3701 errno_t
ifnet_set_low_power_mode(ifnet_t ifp,boolean_t on)3702 ifnet_set_low_power_mode(ifnet_t ifp, boolean_t on)
3703 {
3704 	errno_t error;
3705 
3706 	error = if_set_low_power(ifp, on);
3707 
3708 	return error;
3709 }
3710 
3711 errno_t
ifnet_get_low_power_mode(ifnet_t ifp,boolean_t * on)3712 ifnet_get_low_power_mode(ifnet_t ifp, boolean_t *on)
3713 {
3714 	if (ifp == NULL || on == NULL) {
3715 		return EINVAL;
3716 	}
3717 
3718 	*on = ((ifp->if_xflags & IFXF_LOW_POWER) != 0);
3719 	return 0;
3720 }
3721 
3722 errno_t
ifnet_set_rx_flow_steering(ifnet_t ifp,boolean_t on)3723 ifnet_set_rx_flow_steering(ifnet_t ifp, boolean_t on)
3724 {
3725 	errno_t error = 0;
3726 
3727 	if (ifp == NULL) {
3728 		return EINVAL;
3729 	}
3730 
3731 	if (on) {
3732 		error = if_set_xflags(ifp, IFXF_RX_FLOW_STEERING);
3733 	} else {
3734 		if_clear_xflags(ifp, IFXF_RX_FLOW_STEERING);
3735 	}
3736 
3737 	return error;
3738 }
3739 
3740 errno_t
ifnet_get_rx_flow_steering(ifnet_t ifp,boolean_t * on)3741 ifnet_get_rx_flow_steering(ifnet_t ifp, boolean_t *on)
3742 {
3743 	if (ifp == NULL || on == NULL) {
3744 		return EINVAL;
3745 	}
3746 
3747 	*on = ((ifp->if_xflags & IFXF_RX_FLOW_STEERING) != 0);
3748 	return 0;
3749 }
3750 
3751 void
ifnet_enable_cellular_thread_group(ifnet_t ifp)3752 ifnet_enable_cellular_thread_group(ifnet_t ifp)
3753 {
3754 	VERIFY(ifp != NULL);
3755 
3756 	/* This function can only be called when the ifp is just created and
3757 	 * not yet attached.
3758 	 */
3759 	VERIFY(ifp->if_inp == NULL);
3760 	VERIFY(ifp->if_refflags & IFRF_EMBRYONIC);
3761 
3762 	if_set_xflags(ifp, IFXF_REQUIRE_CELL_THREAD_GROUP);
3763 }
3764