xref: /xnu-11417.121.6/bsd/net/kpi_interface.c (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1 /*
2  * Copyright (c) 2004-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include "kpi_interface.h"
30 
31 #include <sys/queue.h>
32 #include <sys/param.h>  /* for definition of NULL */
33 #include <kern/debug.h> /* for panic */
34 #include <sys/errno.h>
35 #include <sys/socket.h>
36 #include <sys/kern_event.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/kpi_mbuf.h>
40 #include <sys/mcache.h>
41 #include <sys/protosw.h>
42 #include <sys/syslog.h>
43 #include <net/if_var.h>
44 #include <net/if_dl.h>
45 #include <net/dlil.h>
46 #include <net/if_types.h>
47 #include <net/if_dl.h>
48 #include <net/if_arp.h>
49 #include <net/if_llreach.h>
50 #include <net/if_ether.h>
51 #include <net/net_api_stats.h>
52 #include <net/route.h>
53 #include <net/if_ports_used.h>
54 #include <libkern/libkern.h>
55 #include <libkern/OSAtomic.h>
56 #include <kern/locks.h>
57 #include <kern/clock.h>
58 #include <sys/sockio.h>
59 #include <sys/proc.h>
60 #include <sys/sysctl.h>
61 #include <sys/mbuf.h>
62 #include <netinet/ip_var.h>
63 #include <netinet/udp.h>
64 #include <netinet/udp_var.h>
65 #include <netinet/tcp.h>
66 #include <netinet/tcp_var.h>
67 #include <netinet/in_pcb.h>
68 #ifdef INET
69 #include <netinet/igmp_var.h>
70 #endif
71 #include <netinet6/mld6_var.h>
72 #include <netkey/key.h>
73 #include <stdbool.h>
74 
75 #include "net/net_str_id.h"
76 #include <net/sockaddr_utils.h>
77 
78 #if CONFIG_MACF
79 #include <sys/kauth.h>
80 #include <security/mac_framework.h>
81 #endif
82 
83 #if SKYWALK
84 #include <skywalk/os_skywalk_private.h>
85 #include <skywalk/nexus/netif/nx_netif.h>
86 #endif /* SKYWALK */
87 
88 extern uint64_t if_creation_generation_count;
89 
90 #undef ifnet_allocate
91 errno_t ifnet_allocate(const struct ifnet_init_params *init,
92     ifnet_t *ifp);
93 
94 static errno_t ifnet_allocate_common(const struct ifnet_init_params *init,
95     ifnet_t *ifp, bool is_internal);
96 
97 
/*
 * Stamp an if_lastchange timeval with the current net uptime; the
 * microseconds field is always zeroed (one-second granularity).
 */
#define TOUCHLASTCHANGE(__if_lastchange) {                              \
	(__if_lastchange)->tv_sec = (time_t)net_uptime();               \
	(__if_lastchange)->tv_usec = 0;                                 \
}
102 
103 static errno_t ifnet_defrouter_llreachinfo(ifnet_t, sa_family_t,
104     struct ifnet_llreach_info *);
105 static void ifnet_kpi_free(ifnet_t);
106 static errno_t ifnet_list_get_common(ifnet_family_t, boolean_t, ifnet_t *__counted_by(*count) *list,
107     u_int32_t *count);
108 static errno_t ifnet_set_lladdr_internal(ifnet_t,
109     const void *__sized_by(lladdr_len) lladdr, size_t lladdr_len,
110     u_char, int);
111 static errno_t ifnet_awdl_check_eflags(ifnet_t, u_int32_t *, u_int32_t *);
112 
113 
114 /*
115  * Temporary work around until we have real reference counting
116  *
117  * We keep the bits about calling dlil_if_release (which should be
118  * called recycle) transparent by calling it from our if_free function
119  * pointer. We have to keep the client's original detach function
120  * somewhere so we can call it.
121  */
122 static void
ifnet_kpi_free(ifnet_t ifp)123 ifnet_kpi_free(ifnet_t ifp)
124 {
125 	if ((ifp->if_refflags & IFRF_EMBRYONIC) == 0) {
126 		ifnet_detached_func detach_func;
127 
128 		detach_func = ifp->if_detach;
129 		if (detach_func != NULL) {
130 			(*detach_func)(ifp);
131 		}
132 	}
133 
134 	ifnet_dispose(ifp);
135 }
136 
137 errno_t
ifnet_allocate_common(const struct ifnet_init_params * init,ifnet_t * ifp,bool is_internal)138 ifnet_allocate_common(const struct ifnet_init_params *init,
139     ifnet_t *ifp, bool is_internal)
140 {
141 	struct ifnet_init_eparams einit;
142 
143 	bzero(&einit, sizeof(einit));
144 
145 	einit.ver               = IFNET_INIT_CURRENT_VERSION;
146 	einit.len               = sizeof(einit);
147 	einit.flags             = IFNET_INIT_LEGACY | IFNET_INIT_NX_NOAUTO;
148 	if (!is_internal) {
149 		einit.flags |= IFNET_INIT_ALLOC_KPI;
150 	}
151 	einit.uniqueid          = init->uniqueid;
152 	einit.uniqueid_len      = init->uniqueid_len;
153 	einit.name              = init->name;
154 	einit.unit              = init->unit;
155 	einit.family            = init->family;
156 	einit.type              = init->type;
157 	einit.output            = init->output;
158 	einit.demux             = init->demux;
159 	einit.add_proto         = init->add_proto;
160 	einit.del_proto         = init->del_proto;
161 	einit.check_multi       = init->check_multi;
162 	einit.framer            = init->framer;
163 	einit.softc             = init->softc;
164 	einit.ioctl             = init->ioctl;
165 	einit.set_bpf_tap       = init->set_bpf_tap;
166 	einit.detach            = init->detach;
167 	einit.event             = init->event;
168 	einit.broadcast_addr    = init->broadcast_addr;
169 	einit.broadcast_len     = init->broadcast_len;
170 
171 	return ifnet_allocate_extended(&einit, ifp);
172 }
173 
174 errno_t
ifnet_allocate_internal(const struct ifnet_init_params * init,ifnet_t * ifp)175 ifnet_allocate_internal(const struct ifnet_init_params *init, ifnet_t *ifp)
176 {
177 	return ifnet_allocate_common(init, ifp, true);
178 }
179 
180 errno_t
ifnet_allocate(const struct ifnet_init_params * init,ifnet_t * ifp)181 ifnet_allocate(const struct ifnet_init_params *init, ifnet_t *ifp)
182 {
183 	return ifnet_allocate_common(init, ifp, false);
184 }
185 
/*
 * Replace the interface's broadcast address with a copy of
 * broadcast_addr (broadcast_len bytes).  Passing a zero length or a
 * NULL pointer leaves the interface with no broadcast address.
 * Note: assignment order of ptr/length matters for the __counted_by
 * bounds-safety annotations on if_broadcast.
 */
static void
ifnet_set_broadcast_addr(ifnet_t ifp,
    const void *__sized_by(broadcast_len) broadcast_addr,
    u_int32_t broadcast_len)
{
	/* Release any previously installed broadcast address. */
	if (ifp->if_broadcast.length != 0) {
		kfree_data_counted_by(ifp->if_broadcast.ptr,
		    ifp->if_broadcast.length);
	}
	if (broadcast_len != 0 && broadcast_addr != NULL) {
		/* Z_WAITOK | Z_NOFAIL: allocation cannot fail here. */
		ifp->if_broadcast.ptr = kalloc_data(broadcast_len,
		    Z_WAITOK | Z_NOFAIL);
		ifp->if_broadcast.length = broadcast_len;
		bcopy(broadcast_addr, ifp->if_broadcast.ptr,
		    broadcast_len);
	}
}
203 
/*
 * Allocate an embryonic ifnet from the extended parameter set.
 *
 * Validates einit0 (version/length, name/family/type ranges, and the
 * legacy-output vs. start/enqueue call conventions), acquires an ifnet
 * via dlil_if_acquire(), then initializes the callbacks, bandwidth and
 * latency figures, framer selection, Skywalk state and the send class
 * queue.  On success the interface is returned with IFRF_EMBRYONIC
 * set; the flag is cleared later when the interface is fully attached.
 */
errno_t
ifnet_allocate_extended(const struct ifnet_init_eparams *einit0,
    ifnet_t *interface)
{
#if SKYWALK
	ifnet_start_func ostart = NULL;
#endif /* SKYWALK */
	struct ifnet_init_eparams einit;
	ifnet_ref_t ifp = NULL;
	char if_xname[IFXNAMSIZ] = {0};
	int error;

	einit = *einit0;

	if (einit.ver != IFNET_INIT_CURRENT_VERSION ||
	    einit.len < sizeof(einit)) {
		return EINVAL;
	}

	if (einit.family == 0 || einit.name == NULL ||
	    strlen(einit.name) >= IFNAMSIZ ||
	    (einit.type & 0xFFFFFF00) != 0 || einit.type == 0) {
		return EINVAL;
	}

#if SKYWALK
	/* headroom must be a multiple of 8 bytes */
	if ((einit.tx_headroom & 0x7) != 0) {
		return EINVAL;
	}
	if ((einit.flags & IFNET_INIT_SKYWALK_NATIVE) == 0) {
		/*
		 * Currently Interface advisory reporting is supported only
		 * for skywalk interface.
		 */
		if ((einit.flags & IFNET_INIT_IF_ADV) != 0) {
			return EINVAL;
		}
	}
#endif /* SKYWALK */

	if (einit.flags & IFNET_INIT_LEGACY) {
		/* Legacy model: if_output() only; no start/poll callbacks. */
#if SKYWALK
		if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
			return EINVAL;
		}
#endif /* SKYWALK */
		if (einit.output == NULL ||
		    (einit.flags & IFNET_INIT_INPUT_POLL)) {
			return EINVAL;
		}
		einit.pre_enqueue = NULL;
		einit.start = NULL;
		einit.output_ctl = NULL;
		einit.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
		einit.input_poll = NULL;
		einit.input_ctl = NULL;
	} else {
#if SKYWALK
		/*
		 * For native Skywalk drivers, steer all start requests
		 * to ifp_if_start() until the netif device adapter is
		 * fully activated, at which point we will point it to
		 * nx_netif_doorbell().
		 */
		if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
			if (einit.start != NULL) {
				return EINVAL;
			}
			/* override output start callback */
			ostart = einit.start = ifp_if_start;
		} else {
			ostart = einit.start;
		}
#endif /* SKYWALK */
		if (einit.start == NULL) {
			return EINVAL;
		}

		einit.output = NULL;
		if (einit.output_sched_model >= IFNET_SCHED_MODEL_MAX) {
			return EINVAL;
		}

		if (einit.flags & IFNET_INIT_INPUT_POLL) {
			if (einit.input_poll == NULL || einit.input_ctl == NULL) {
				return EINVAL;
			}
		} else {
			einit.input_poll = NULL;
			einit.input_ctl = NULL;
		}
	}

	/* if_type is a u_char and if_unit a short; reject wider values */
	if (einit.type > UCHAR_MAX) {
		return EINVAL;
	}

	if (einit.unit > SHRT_MAX) {
		return EINVAL;
	}

	/* Initialize external name (name + unit) */
	snprintf(if_xname, sizeof(if_xname), "%s%d",
	    einit.name, einit.unit);

	/* Fall back to the external name as uniqueid when none is given */
	if (einit.uniqueid == NULL) {
		einit.uniqueid_len = (uint32_t)strbuflen(if_xname);
		einit.uniqueid = if_xname;
	}

	error = dlil_if_acquire(einit.family, einit.uniqueid,
	    einit.uniqueid_len,
	    __unsafe_null_terminated_from_indexable(if_xname), &ifp);

	if (error == 0) {
		uint64_t br;

		/*
		 * Cast ifp->if_name as non const. dlil_if_acquire sets it up
		 * to point to storage of at least IFNAMSIZ bytes. It is safe
		 * to write to this.
		 */
		char *ifname = __unsafe_forge_bidi_indexable(char *, __DECONST(char *, ifp->if_name), IFNAMSIZ);
		const char *einit_name = __unsafe_forge_bidi_indexable(const char *, einit.name, IFNAMSIZ);
		strbufcpy(ifname, IFNAMSIZ, einit_name, IFNAMSIZ);
		/* Copy caller-supplied callbacks and parameters wholesale. */
		ifp->if_type            = (u_char)einit.type;
		ifp->if_family          = einit.family;
		ifp->if_subfamily       = einit.subfamily;
		ifp->if_unit            = (short)einit.unit;
		ifp->if_output          = einit.output;
		ifp->if_pre_enqueue     = einit.pre_enqueue;
		ifp->if_start           = einit.start;
		ifp->if_output_ctl      = einit.output_ctl;
		ifp->if_output_sched_model = einit.output_sched_model;
		ifp->if_output_bw.eff_bw = einit.output_bw;
		ifp->if_output_bw.max_bw = einit.output_bw_max;
		ifp->if_output_lt.eff_lt = einit.output_lt;
		ifp->if_output_lt.max_lt = einit.output_lt_max;
		ifp->if_input_poll      = einit.input_poll;
		ifp->if_input_ctl       = einit.input_ctl;
		ifp->if_input_bw.eff_bw = einit.input_bw;
		ifp->if_input_bw.max_bw = einit.input_bw_max;
		ifp->if_input_lt.eff_lt = einit.input_lt;
		ifp->if_input_lt.max_lt = einit.input_lt_max;
		ifp->if_demux           = einit.demux;
		ifp->if_add_proto       = einit.add_proto;
		ifp->if_del_proto       = einit.del_proto;
		ifp->if_check_multi     = einit.check_multi;
		ifp->if_framer_legacy   = einit.framer;
		ifp->if_framer          = einit.framer_extended;
		ifp->if_softc           = einit.softc;
		ifp->if_ioctl           = einit.ioctl;
		ifp->if_set_bpf_tap     = einit.set_bpf_tap;
		ifp->if_free            = (einit.free != NULL) ? einit.free : ifnet_kpi_free;
		ifp->if_event           = einit.event;
		ifp->if_detach          = einit.detach;

		/* Initialize Network ID */
		ifp->network_id_len     = 0;
		bzero(&ifp->network_id, sizeof(ifp->network_id));

		/* Initialize external name (name + unit) */
		char *ifxname = __unsafe_forge_bidi_indexable(char *, __DECONST(char *, ifp->if_xname), IFXNAMSIZ);
		snprintf(ifxname, IFXNAMSIZ, "%s", if_xname);

		/*
		 * On embedded, framer() is already in the extended form;
		 * we simply use it as is, unless the caller specifies
		 * framer_extended() which will then override it.
		 *
		 * On non-embedded, framer() has long been exposed as part
		 * of the public KPI, and therefore its signature must
		 * remain the same (without the pre- and postpend length
		 * parameters.)  We special case ether_frameout, such that
		 * it gets mapped to its extended variant.  All other cases
		 * utilize the stub routine which will simply return zeroes
		 * for those new parameters.
		 *
		 * Internally, DLIL will only use the extended callback
		 * variant which is represented by if_framer.
		 */
#if !XNU_TARGET_OS_OSX
		if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
			ifp->if_framer = ifp->if_framer_legacy;
		}
#else /* XNU_TARGET_OS_OSX */
		if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
			if (ifp->if_framer_legacy == ether_frameout) {
				ifp->if_framer = ether_frameout_extended;
			} else {
				ifp->if_framer = ifnet_framer_stub;
			}
		}
#endif /* XNU_TARGET_OS_OSX */

		/* Normalize effective vs. maximum output bandwidth. */
		if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
			ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
		} else if (ifp->if_output_bw.eff_bw == 0) {
			ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
		}

		/* Likewise for input bandwidth. */
		if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
			ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
		} else if (ifp->if_input_bw.eff_bw == 0) {
			ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
		}

		/* If only one direction was specified, mirror it. */
		if (ifp->if_output_bw.max_bw == 0) {
			ifp->if_output_bw = ifp->if_input_bw;
		} else if (ifp->if_input_bw.max_bw == 0) {
			ifp->if_input_bw = ifp->if_output_bw;
		}

		/* Pin if_baudrate to 32 bits */
		br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
		if (br != 0) {
			ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
		}

		/* Normalize latency figures the same way as bandwidth. */
		if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
			ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
		} else if (ifp->if_output_lt.eff_lt == 0) {
			ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
		}

		if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
			ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
		} else if (ifp->if_input_lt.eff_lt == 0) {
			ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
		}

		if (ifp->if_output_lt.max_lt == 0) {
			ifp->if_output_lt = ifp->if_input_lt;
		} else if (ifp->if_input_lt.max_lt == 0) {
			ifp->if_input_lt = ifp->if_output_lt;
		}

		if (ifp->if_ioctl == NULL) {
			ifp->if_ioctl = ifp_if_ioctl;
		}

		/* Start from a clean eflags slate, then derive TXSTART/RXPOLL. */
		if_clear_eflags(ifp, -1);
		if (ifp->if_start != NULL) {
			if_set_eflags(ifp, IFEF_TXSTART);
			if (ifp->if_pre_enqueue == NULL) {
				ifp->if_pre_enqueue = ifnet_enqueue;
			}
			ifp->if_output = ifp->if_pre_enqueue;
		}

		if (ifp->if_input_poll != NULL) {
			if_set_eflags(ifp, IFEF_RXPOLL);
		}

		ifp->if_output_dlil = dlil_output_handler;
		ifp->if_input_dlil = dlil_input_handler;

		/* Sanity-check the invariants implied by the init flags. */
		VERIFY(!(einit.flags & IFNET_INIT_LEGACY) ||
		    (ifp->if_pre_enqueue == NULL && ifp->if_start == NULL &&
		    ifp->if_output_ctl == NULL && ifp->if_input_poll == NULL &&
		    ifp->if_input_ctl == NULL));
		VERIFY(!(einit.flags & IFNET_INIT_INPUT_POLL) ||
		    (ifp->if_input_poll != NULL && ifp->if_input_ctl != NULL));

		ifnet_set_broadcast_addr(ifp, einit.broadcast_addr,
		    einit.broadcast_len);

		if_clear_xflags(ifp, -1);
#if SKYWALK
		ifp->if_tx_headroom = 0;
		ifp->if_tx_trailer = 0;
		ifp->if_rx_mit_ival = 0;
		ifp->if_save_start = ostart;
		if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
			VERIFY(ifp->if_eflags & IFEF_TXSTART);
			VERIFY(!(einit.flags & IFNET_INIT_LEGACY));
			if_set_eflags(ifp, IFEF_SKYWALK_NATIVE);
			ifp->if_tx_headroom = einit.tx_headroom;
			ifp->if_tx_trailer = einit.tx_trailer;
			ifp->if_rx_mit_ival = einit.rx_mit_ival;
			/*
			 * For native Skywalk drivers, make sure packets
			 * emitted by the BSD stack get dropped until the
			 * interface is in service.  When the netif host
			 * adapter is fully activated, we'll point it to
			 * nx_netif_output().
			 */
			ifp->if_output = ifp_if_output;
			/*
			 * Override driver-supplied parameters
			 * and force IFEF_ENQUEUE_MULTI?
			 */
			if (sk_netif_native_txmodel ==
			    NETIF_NATIVE_TXMODEL_ENQUEUE_MULTI) {
				einit.start_delay_qlen = sk_tx_delay_qlen;
				einit.start_delay_timeout = sk_tx_delay_timeout;
			}
			/* netif comes with native interfaces */
			VERIFY((ifp->if_xflags & IFXF_LEGACY) == 0);
		} else if (!ifnet_needs_compat(ifp)) {
			/*
			 * If we're told not to plumb in netif compat
			 * for this interface, set IFXF_NX_NOAUTO to
			 * prevent DLIL from auto-attaching the nexus.
			 */
			einit.flags |= IFNET_INIT_NX_NOAUTO;
			/* legacy (non-netif) interface */
			if_set_xflags(ifp, IFXF_LEGACY);
		}

		ifp->if_save_output = ifp->if_output;
		if ((einit.flags & IFNET_INIT_NX_NOAUTO) != 0) {
			if_set_xflags(ifp, IFXF_NX_NOAUTO);
		}
		if ((einit.flags & IFNET_INIT_IF_ADV) != 0) {
			if_set_eflags(ifp, IFEF_ADV_REPORT);
		}
#else /* !SKYWALK */
		/* legacy interface */
		if_set_xflags(ifp, IFXF_LEGACY);
#endif /* !SKYWALK */

		if ((ifp->if_snd = ifclassq_alloc()) == NULL) {
			panic_plain("%s: ifp=%p couldn't allocate class queues",
			    __func__, ifp);
			/* NOTREACHED */
		}

		/*
		 * output target queue delay is specified in millisecond
		 * convert it to nanoseconds
		 */
		IFCQ_TARGET_QDELAY(ifp->if_snd) =
		    einit.output_target_qdelay * 1000 * 1000;
		IFCQ_MAXLEN(ifp->if_snd) = einit.sndq_maxlen;

		ifnet_enqueue_multi_setup(ifp, einit.start_delay_qlen,
		    einit.start_delay_timeout);

		IFCQ_PKT_DROP_LIMIT(ifp->if_snd) = IFCQ_DEFAULT_PKT_DROP_LIMIT;

		/*
		 * Set embryonic flag; this will be cleared
		 * later when it is fully attached.
		 */
		ifp->if_refflags = IFRF_EMBRYONIC;

		/*
		 * Count the newly allocated ifnet
		 */
		OSIncrementAtomic64(&net_api_stats.nas_ifnet_alloc_count);
		INC_ATOMIC_INT64_LIM(net_api_stats.nas_ifnet_alloc_total);
		if ((einit.flags & IFNET_INIT_ALLOC_KPI) != 0) {
			if_set_xflags(ifp, IFXF_ALLOC_KPI);
		} else {
			OSIncrementAtomic64(
				&net_api_stats.nas_ifnet_alloc_os_count);
			INC_ATOMIC_INT64_LIM(
				net_api_stats.nas_ifnet_alloc_os_total);
		}

		if (ifp->if_subfamily == IFNET_SUBFAMILY_MANAGEMENT) {
			if_set_xflags(ifp, IFXF_MANAGEMENT);
			if_management_interface_check_needed = true;
		}

		/*
		 * Increment the generation count on interface creation
		 */
		ifp->if_creation_generation_id = os_atomic_inc(&if_creation_generation_count, relaxed);

		*interface = ifp;
	}
	return error;
}
580 
/*
 * Take an additional reference on the interface; thin wrapper around
 * dlil_if_ref().
 */
errno_t
ifnet_reference(ifnet_t ifp)
{
	return dlil_if_ref(ifp);
}
586 
/*
 * Return the ifnet storage to DLIL for recycling; thin wrapper around
 * dlil_if_release().
 */
void
ifnet_dispose(ifnet_t ifp)
{
	dlil_if_release(ifp);
}
592 
/*
 * Drop a reference taken with ifnet_reference(); thin wrapper around
 * dlil_if_free().
 */
errno_t
ifnet_release(ifnet_t ifp)
{
	return dlil_if_free(ifp);
}
598 
599 errno_t
ifnet_interface_family_find(const char * module_string,ifnet_family_t * family_id)600 ifnet_interface_family_find(const char *module_string,
601     ifnet_family_t *family_id)
602 {
603 	if (module_string == NULL || family_id == NULL) {
604 		return EINVAL;
605 	}
606 
607 	return net_str_id_find_internal(module_string, family_id,
608 	           NSI_IF_FAM_ID, 1);
609 }
610 
611 void *
ifnet_softc(ifnet_t interface)612 ifnet_softc(ifnet_t interface)
613 {
614 	return (interface == NULL) ? NULL : interface->if_softc;
615 }
616 
617 const char *
ifnet_name(ifnet_t interface)618 ifnet_name(ifnet_t interface)
619 {
620 	return (interface == NULL) ? NULL : interface->if_name;
621 }
622 
623 ifnet_family_t
ifnet_family(ifnet_t interface)624 ifnet_family(ifnet_t interface)
625 {
626 	return (interface == NULL) ? 0 : interface->if_family;
627 }
628 
629 ifnet_subfamily_t
ifnet_subfamily(ifnet_t interface)630 ifnet_subfamily(ifnet_t interface)
631 {
632 	return (interface == NULL) ? 0 : interface->if_subfamily;
633 }
634 
635 u_int32_t
ifnet_unit(ifnet_t interface)636 ifnet_unit(ifnet_t interface)
637 {
638 	return (interface == NULL) ? (u_int32_t)0xffffffff :
639 	       (u_int32_t)interface->if_unit;
640 }
641 
642 u_int32_t
ifnet_index(ifnet_t interface)643 ifnet_index(ifnet_t interface)
644 {
645 	return (interface == NULL) ? (u_int32_t)0xffffffff :
646 	       interface->if_index;
647 }
648 
/*
 * Update the interface flags (IFF_*) under the bits selected by mask.
 * Handles the side effects of toggling IFF_UP (via if_updown) and of
 * changing IFF_MULTICAST (IGMP/MLD silent-mode reinitialization).
 */
errno_t
ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask)
{
	bool set_IFF_UP;
	bool change_IFF_UP;
	uint16_t old_flags;

	if (interface == NULL) {
		return EINVAL;
	}
	set_IFF_UP = (new_flags & IFF_UP) != 0;
	change_IFF_UP = (mask & IFF_UP) != 0;
#if SKYWALK
	if (set_IFF_UP && change_IFF_UP) {
		/*
		 * When a native skywalk interface is marked IFF_UP, ensure
		 * the flowswitch is attached.
		 */
		ifnet_attach_native_flowswitch(interface);
	}
#endif /* SKYWALK */

	ifnet_lock_exclusive(interface);

	/* If we are modifying the up/down state, call if_updown */
	if (change_IFF_UP) {
		if_updown(interface, set_IFF_UP);
	}

	old_flags = interface->if_flags;
	interface->if_flags = (new_flags & mask) | (interface->if_flags & ~mask);
	/* If we are modifying the multicast flag, set/unset the silent flag */
	if ((old_flags & IFF_MULTICAST) !=
	    (interface->if_flags & IFF_MULTICAST)) {
#if INET
		if (IGMP_IFINFO(interface) != NULL) {
			igmp_initsilent(interface, IGMP_IFINFO(interface));
		}
#endif /* INET */
		if (MLD_IFINFO(interface) != NULL) {
			mld6_initsilent(interface, MLD_IFINFO(interface));
		}
	}

	ifnet_lock_done(interface);

	return 0;
}
697 
698 u_int16_t
ifnet_flags(ifnet_t interface)699 ifnet_flags(ifnet_t interface)
700 {
701 	return (interface == NULL) ? 0 : interface->if_flags;
702 }
703 
704 /*
705  * This routine ensures the following:
706  *
707  * If IFEF_AWDL is set by the caller, also set the rest of flags as
708  * defined in IFEF_AWDL_MASK.
709  *
710  * If IFEF_AWDL has been set on the interface and the caller attempts
711  * to clear one or more of the associated flags in IFEF_AWDL_MASK,
712  * return failure.
713  *
714  * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
715  * on the interface.
716  *
717  * All other flags not associated with AWDL are not affected.
718  *
719  * See <net/if.h> for current definition of IFEF_AWDL_MASK.
720  */
721 static errno_t
ifnet_awdl_check_eflags(ifnet_t ifp,u_int32_t * new_eflags,u_int32_t * mask)722 ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask)
723 {
724 	u_int32_t eflags;
725 
726 	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
727 
728 	eflags = (*new_eflags & *mask) | (ifp->if_eflags & ~(*mask));
729 
730 	if (ifp->if_eflags & IFEF_AWDL) {
731 		if (eflags & IFEF_AWDL) {
732 			if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK) {
733 				return EINVAL;
734 			}
735 		} else {
736 			*new_eflags &= ~IFEF_AWDL_MASK;
737 			*mask |= IFEF_AWDL_MASK;
738 		}
739 	} else if (eflags & IFEF_AWDL) {
740 		*new_eflags |= IFEF_AWDL_MASK;
741 		*mask |= IFEF_AWDL_MASK;
742 	} else if (eflags & IFEF_AWDL_RESTRICTED &&
743 	    !(ifp->if_eflags & IFEF_AWDL)) {
744 		return EINVAL;
745 	}
746 
747 	return 0;
748 }
749 
/*
 * Update the extended interface flags (IFEF_*) under the bits selected
 * by mask, enforcing the AWDL invariants and refusing IFEF_ADV_REPORT
 * on non-Skywalk-native interfaces.  Posts a kernel event when the
 * AWDL-restricted state transitions, so configd can react.
 */
errno_t
ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask)
{
	uint32_t oeflags;
	struct kev_msg ev_msg;
	struct net_event_data ev_data;

	if (interface == NULL) {
		return EINVAL;
	}

	bzero(&ev_msg, sizeof(ev_msg));
	ifnet_lock_exclusive(interface);
	/*
	 * Sanity checks for IFEF_AWDL and its related flags.
	 */
	if (ifnet_awdl_check_eflags(interface, &new_flags, &mask) != 0) {
		ifnet_lock_done(interface);
		return EINVAL;
	}
	/*
	 * Currently Interface advisory reporting is supported only for
	 * skywalk interface.
	 */
	if ((((new_flags & mask) & IFEF_ADV_REPORT) != 0) &&
	    ((interface->if_eflags & IFEF_SKYWALK_NATIVE) == 0)) {
		ifnet_lock_done(interface);
		return EINVAL;
	}
	oeflags = interface->if_eflags;
	if_clear_eflags(interface, mask);
	if (new_flags != 0) {
		if_set_eflags(interface, (new_flags & mask));
	}
	ifnet_lock_done(interface);
	/*
	 * NOTE(review): if_eflags is re-read below after the lock has been
	 * dropped; a concurrent update could in principle be observed here.
	 */
	if (interface->if_eflags & IFEF_AWDL_RESTRICTED &&
	    !(oeflags & IFEF_AWDL_RESTRICTED)) {
		ev_msg.event_code = KEV_DL_AWDL_RESTRICTED;
		/*
		 * The interface is now restricted to applications that have
		 * the entitlement.
		 * The check for the entitlement will be done in the data
		 * path, so we don't have to do anything here.
		 */
	} else if (oeflags & IFEF_AWDL_RESTRICTED &&
	    !(interface->if_eflags & IFEF_AWDL_RESTRICTED)) {
		ev_msg.event_code = KEV_DL_AWDL_UNRESTRICTED;
	}
	/*
	 * Notify configd so that it has a chance to perform better
	 * reachability detection.
	 */
	if (ev_msg.event_code) {
		bzero(&ev_data, sizeof(ev_data));
		ev_msg.vendor_code = KEV_VENDOR_APPLE;
		ev_msg.kev_class = KEV_NETWORK_CLASS;
		ev_msg.kev_subclass = KEV_DL_SUBCLASS;
		strlcpy(ev_data.if_name, interface->if_name, IFNAMSIZ);
		ev_data.if_family = interface->if_family;
		ev_data.if_unit = interface->if_unit;
		ev_msg.dv[0].data_length = sizeof(struct net_event_data);
		ev_msg.dv[0].data_ptr = &ev_data;
		ev_msg.dv[1].data_length = 0;
		dlil_post_complete_msg(interface, &ev_msg);
	}

	return 0;
}
818 
819 u_int32_t
ifnet_eflags(ifnet_t interface)820 ifnet_eflags(ifnet_t interface)
821 {
822 	return (interface == NULL) ? 0 : interface->if_eflags;
823 }
824 
/*
 * Update if_idle_flags under the bits selected by mask.  The caller
 * must hold the ifnet lock exclusively.  When called before the
 * interface attaches, the request is parked in if_idle_new_flags and
 * if_idle_new_flags_mask and applied at attach time; after detach the
 * call is effectively a no-op on the live flags.
 */
errno_t
ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
{
	if (ifp == NULL) {
		return EINVAL;
	}
	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);

	/*
	 * If this is called prior to ifnet attach, the actual work will
	 * be done at attach time.  Otherwise, if it is called after
	 * ifnet detach, then it is a no-op.
	 */
	if (!ifnet_is_attached(ifp, 0)) {
		ifp->if_idle_new_flags = new_flags;
		ifp->if_idle_new_flags_mask = mask;
		return 0;
	} else {
		/* Attached: clear any previously deferred request. */
		ifp->if_idle_new_flags = ifp->if_idle_new_flags_mask = 0;
	}

	ifp->if_idle_flags = (new_flags & mask) | (ifp->if_idle_flags & ~mask);
	return 0;
}
849 
850 errno_t
ifnet_set_idle_flags(ifnet_t ifp,u_int32_t new_flags,u_int32_t mask)851 ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
852 {
853 	errno_t err;
854 
855 	ifnet_lock_exclusive(ifp);
856 	err = ifnet_set_idle_flags_locked(ifp, new_flags, mask);
857 	ifnet_lock_done(ifp);
858 
859 	return err;
860 }
861 
862 u_int32_t
ifnet_idle_flags(ifnet_t ifp)863 ifnet_idle_flags(ifnet_t ifp)
864 {
865 	return (ifp == NULL) ? 0 : ifp->if_idle_flags;
866 }
867 
868 errno_t
ifnet_set_link_quality(ifnet_t ifp,int quality)869 ifnet_set_link_quality(ifnet_t ifp, int quality)
870 {
871 	errno_t err = 0;
872 
873 	if (ifp == NULL || quality < IFNET_LQM_MIN || quality > IFNET_LQM_MAX) {
874 		err = EINVAL;
875 		goto done;
876 	}
877 
878 	if (!ifnet_is_attached(ifp, 0)) {
879 		err = ENXIO;
880 		goto done;
881 	}
882 
883 	if_lqm_update(ifp, quality, 0);
884 
885 done:
886 	return err;
887 }
888 
889 int
ifnet_link_quality(ifnet_t ifp)890 ifnet_link_quality(ifnet_t ifp)
891 {
892 	int lqm;
893 
894 	if (ifp == NULL) {
895 		return IFNET_LQM_THRESH_OFF;
896 	}
897 
898 	ifnet_lock_shared(ifp);
899 	lqm = ifp->if_interface_state.lqm_state;
900 	ifnet_lock_done(ifp);
901 
902 	return lqm;
903 }
904 
905 errno_t
ifnet_set_interface_state(ifnet_t ifp,struct if_interface_state * if_interface_state)906 ifnet_set_interface_state(ifnet_t ifp,
907     struct if_interface_state *if_interface_state)
908 {
909 	errno_t err = 0;
910 
911 	if (ifp == NULL || if_interface_state == NULL) {
912 		err = EINVAL;
913 		goto done;
914 	}
915 
916 	if (!ifnet_is_attached(ifp, 0)) {
917 		err = ENXIO;
918 		goto done;
919 	}
920 
921 	if_state_update(ifp, if_interface_state);
922 
923 done:
924 	return err;
925 }
926 
927 errno_t
ifnet_get_interface_state(ifnet_t ifp,struct if_interface_state * if_interface_state)928 ifnet_get_interface_state(ifnet_t ifp,
929     struct if_interface_state *if_interface_state)
930 {
931 	errno_t err = 0;
932 
933 	if (ifp == NULL || if_interface_state == NULL) {
934 		err = EINVAL;
935 		goto done;
936 	}
937 
938 	if (!ifnet_is_attached(ifp, 0)) {
939 		err = ENXIO;
940 		goto done;
941 	}
942 
943 	if_get_state(ifp, if_interface_state);
944 
945 done:
946 	return err;
947 }
948 
949 
/*
 * Common helper for the per-family default-router link-layer
 * reachability queries below.  af must be AF_INET or AF_INET6;
 * the NULL checks intentionally precede the VERIFY so that bad
 * pointers return EINVAL rather than asserting.
 */
static errno_t
ifnet_defrouter_llreachinfo(ifnet_t ifp, sa_family_t af,
    struct ifnet_llreach_info *iflri)
{
	if (ifp == NULL || iflri == NULL) {
		return EINVAL;
	}

	VERIFY(af == AF_INET || af == AF_INET6);

	return ifnet_llreach_get_defrouter(ifp, af, iflri);
}
962 
963 errno_t
ifnet_inet_defrouter_llreachinfo(ifnet_t ifp,struct ifnet_llreach_info * iflri)964 ifnet_inet_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
965 {
966 	return ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri);
967 }
968 
969 errno_t
ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp,struct ifnet_llreach_info * iflri)970 ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
971 {
972 	return ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri);
973 }
974 
975 errno_t
ifnet_set_capabilities_supported(ifnet_t ifp,u_int32_t new_caps,u_int32_t mask)976 ifnet_set_capabilities_supported(ifnet_t ifp, u_int32_t new_caps,
977     u_int32_t mask)
978 {
979 	errno_t error = 0;
980 	int tmp;
981 
982 	if (ifp == NULL) {
983 		return EINVAL;
984 	}
985 
986 	ifnet_lock_exclusive(ifp);
987 	tmp = (new_caps & mask) | (ifp->if_capabilities & ~mask);
988 	if ((tmp & ~IFCAP_VALID)) {
989 		error = EINVAL;
990 	} else {
991 		ifp->if_capabilities = tmp;
992 	}
993 	ifnet_lock_done(ifp);
994 
995 	return error;
996 }
997 
998 u_int32_t
ifnet_capabilities_supported(ifnet_t ifp)999 ifnet_capabilities_supported(ifnet_t ifp)
1000 {
1001 	return (ifp == NULL) ? 0 : ifp->if_capabilities;
1002 }
1003 
1004 
1005 errno_t
ifnet_set_capabilities_enabled(ifnet_t ifp,u_int32_t new_caps,u_int32_t mask)1006 ifnet_set_capabilities_enabled(ifnet_t ifp, u_int32_t new_caps,
1007     u_int32_t mask)
1008 {
1009 	errno_t error = 0;
1010 	int tmp;
1011 	struct kev_msg ev_msg;
1012 	struct net_event_data ev_data;
1013 
1014 	if (ifp == NULL) {
1015 		return EINVAL;
1016 	}
1017 
1018 	ifnet_lock_exclusive(ifp);
1019 	tmp = (new_caps & mask) | (ifp->if_capenable & ~mask);
1020 	if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities)) {
1021 		error = EINVAL;
1022 	} else {
1023 		ifp->if_capenable = tmp;
1024 	}
1025 	ifnet_lock_done(ifp);
1026 
1027 	/* Notify application of the change */
1028 	bzero(&ev_data, sizeof(struct net_event_data));
1029 	bzero(&ev_msg, sizeof(struct kev_msg));
1030 	ev_msg.vendor_code      = KEV_VENDOR_APPLE;
1031 	ev_msg.kev_class        = KEV_NETWORK_CLASS;
1032 	ev_msg.kev_subclass     = KEV_DL_SUBCLASS;
1033 
1034 	ev_msg.event_code       = KEV_DL_IFCAP_CHANGED;
1035 	strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
1036 	ev_data.if_family       = ifp->if_family;
1037 	ev_data.if_unit         = (u_int32_t)ifp->if_unit;
1038 	ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1039 	ev_msg.dv[0].data_ptr = &ev_data;
1040 	ev_msg.dv[1].data_length = 0;
1041 	dlil_post_complete_msg(ifp, &ev_msg);
1042 
1043 	return error;
1044 }
1045 
1046 u_int32_t
ifnet_capabilities_enabled(ifnet_t ifp)1047 ifnet_capabilities_enabled(ifnet_t ifp)
1048 {
1049 	return (ifp == NULL) ? 0 : ifp->if_capenable;
1050 }
1051 
1052 static const ifnet_offload_t offload_mask =
1053     (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
1054     IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 |
1055     IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_CSUM_ZERO_INVERT |
1056     IFNET_VLAN_TAGGING | IFNET_VLAN_MTU | IFNET_MULTIPAGES |
1057     IFNET_TSO_IPV4 | IFNET_TSO_IPV6 | IFNET_TX_STATUS | IFNET_HW_TIMESTAMP |
1058     IFNET_SW_TIMESTAMP | IFNET_LRO | IFNET_LRO_NUM_SEG);
1059 
1060 static const ifnet_offload_t any_offload_csum = IFNET_CHECKSUMF;
1061 
1062 static errno_t
ifnet_set_offload_common(ifnet_t interface,ifnet_offload_t offload,boolean_t set_both)1063 ifnet_set_offload_common(ifnet_t interface, ifnet_offload_t offload, boolean_t set_both)
1064 {
1065 	u_int32_t ifcaps = 0;
1066 
1067 	if (interface == NULL) {
1068 		return EINVAL;
1069 	}
1070 
1071 	ifnet_lock_exclusive(interface);
1072 	interface->if_hwassist = (offload & offload_mask);
1073 
1074 #if SKYWALK
1075 	/* preserve skywalk capability */
1076 	if ((interface->if_capabilities & IFCAP_SKYWALK) != 0) {
1077 		ifcaps |= IFCAP_SKYWALK;
1078 	}
1079 #endif /* SKYWALK */
1080 	if (dlil_verbose) {
1081 		log(LOG_DEBUG, "%s: set offload flags=0x%x\n",
1082 		    if_name(interface),
1083 		    interface->if_hwassist);
1084 	}
1085 	ifnet_lock_done(interface);
1086 
1087 	if ((offload & any_offload_csum)) {
1088 		ifcaps |= IFCAP_HWCSUM;
1089 	}
1090 	if ((offload & IFNET_TSO_IPV4)) {
1091 		ifcaps |= IFCAP_TSO4;
1092 	}
1093 	if ((offload & IFNET_TSO_IPV6)) {
1094 		ifcaps |= IFCAP_TSO6;
1095 	}
1096 	if ((offload & IFNET_LRO)) {
1097 		ifcaps |= IFCAP_LRO;
1098 	}
1099 	if ((offload & IFNET_LRO_NUM_SEG)) {
1100 		ifcaps |= IFCAP_LRO_NUM_SEG;
1101 	}
1102 	if ((offload & IFNET_VLAN_MTU)) {
1103 		ifcaps |= IFCAP_VLAN_MTU;
1104 	}
1105 	if ((offload & IFNET_VLAN_TAGGING)) {
1106 		ifcaps |= IFCAP_VLAN_HWTAGGING;
1107 	}
1108 	if ((offload & IFNET_TX_STATUS)) {
1109 		ifcaps |= IFCAP_TXSTATUS;
1110 	}
1111 	if ((offload & IFNET_HW_TIMESTAMP)) {
1112 		ifcaps |= IFCAP_HW_TIMESTAMP;
1113 	}
1114 	if ((offload & IFNET_SW_TIMESTAMP)) {
1115 		ifcaps |= IFCAP_SW_TIMESTAMP;
1116 	}
1117 	if ((offload & IFNET_CSUM_PARTIAL)) {
1118 		ifcaps |= IFCAP_CSUM_PARTIAL;
1119 	}
1120 	if ((offload & IFNET_CSUM_ZERO_INVERT)) {
1121 		ifcaps |= IFCAP_CSUM_ZERO_INVERT;
1122 	}
1123 	if (ifcaps != 0) {
1124 		if (set_both) {
1125 			(void) ifnet_set_capabilities_supported(interface,
1126 			    ifcaps, IFCAP_VALID);
1127 		}
1128 		(void) ifnet_set_capabilities_enabled(interface, ifcaps,
1129 		    IFCAP_VALID);
1130 	}
1131 
1132 	return 0;
1133 }
1134 
1135 errno_t
ifnet_set_offload(ifnet_t interface,ifnet_offload_t offload)1136 ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload)
1137 {
1138 	return ifnet_set_offload_common(interface, offload, TRUE);
1139 }
1140 
1141 errno_t
ifnet_set_offload_enabled(ifnet_t interface,ifnet_offload_t offload)1142 ifnet_set_offload_enabled(ifnet_t interface, ifnet_offload_t offload)
1143 {
1144 	return ifnet_set_offload_common(interface, offload, FALSE);
1145 }
1146 
1147 ifnet_offload_t
ifnet_offload(ifnet_t interface)1148 ifnet_offload(ifnet_t interface)
1149 {
1150 	return (interface == NULL) ?
1151 	       0 : (interface->if_hwassist & offload_mask);
1152 }
1153 
1154 errno_t
ifnet_set_tso_mtu(ifnet_t interface,sa_family_t family,u_int32_t mtuLen)1155 ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen)
1156 {
1157 	errno_t error = 0;
1158 
1159 	if (interface == NULL || mtuLen < interface->if_mtu) {
1160 		return EINVAL;
1161 	}
1162 	if (mtuLen > IP_MAXPACKET) {
1163 		return EINVAL;
1164 	}
1165 
1166 	switch (family) {
1167 	case AF_INET:
1168 		if (interface->if_hwassist & IFNET_TSO_IPV4) {
1169 			interface->if_tso_v4_mtu = mtuLen;
1170 		} else {
1171 			error = EINVAL;
1172 		}
1173 		break;
1174 
1175 	case AF_INET6:
1176 		if (interface->if_hwassist & IFNET_TSO_IPV6) {
1177 			interface->if_tso_v6_mtu = mtuLen;
1178 		} else {
1179 			error = EINVAL;
1180 		}
1181 		break;
1182 
1183 	default:
1184 		error = EPROTONOSUPPORT;
1185 		break;
1186 	}
1187 
1188 	if (error == 0) {
1189 		struct ifclassq *ifq = interface->if_snd;
1190 		ASSERT(ifq != NULL);
1191 		/* Inform all transmit queues about the new TSO MTU */
1192 		IFCQ_LOCK(ifq);
1193 		ifnet_update_sndq(ifq, CLASSQ_EV_LINK_MTU);
1194 		IFCQ_UNLOCK(ifq);
1195 	}
1196 
1197 	return error;
1198 }
1199 
1200 errno_t
ifnet_get_tso_mtu(ifnet_t interface,sa_family_t family,u_int32_t * mtuLen)1201 ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen)
1202 {
1203 	errno_t error = 0;
1204 
1205 	if (interface == NULL || mtuLen == NULL) {
1206 		return EINVAL;
1207 	}
1208 
1209 	switch (family) {
1210 	case AF_INET:
1211 		if (interface->if_hwassist & IFNET_TSO_IPV4) {
1212 			*mtuLen = interface->if_tso_v4_mtu;
1213 		} else {
1214 			error = EINVAL;
1215 		}
1216 		break;
1217 
1218 	case AF_INET6:
1219 		if (interface->if_hwassist & IFNET_TSO_IPV6) {
1220 			*mtuLen = interface->if_tso_v6_mtu;
1221 		} else {
1222 			error = EINVAL;
1223 		}
1224 		break;
1225 
1226 	default:
1227 		error = EPROTONOSUPPORT;
1228 		break;
1229 	}
1230 
1231 	return error;
1232 }
1233 
1234 errno_t
ifnet_set_wake_flags(ifnet_t interface,u_int32_t properties,u_int32_t mask)1235 ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask)
1236 {
1237 	struct kev_msg ev_msg;
1238 	struct net_event_data ev_data;
1239 
1240 	bzero(&ev_data, sizeof(struct net_event_data));
1241 	bzero(&ev_msg, sizeof(struct kev_msg));
1242 
1243 	if (interface == NULL) {
1244 		return EINVAL;
1245 	}
1246 
1247 	/* Do not accept wacky values */
1248 	if ((properties & mask) & ~IF_WAKE_VALID_FLAGS) {
1249 		return EINVAL;
1250 	}
1251 
1252 	if ((mask & IF_WAKE_ON_MAGIC_PACKET) != 0) {
1253 		if ((properties & IF_WAKE_ON_MAGIC_PACKET) != 0) {
1254 			if_set_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
1255 		} else {
1256 			if_clear_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
1257 		}
1258 	}
1259 
1260 	(void) ifnet_touch_lastchange(interface);
1261 
1262 	/* Notify application of the change */
1263 	ev_msg.vendor_code      = KEV_VENDOR_APPLE;
1264 	ev_msg.kev_class        = KEV_NETWORK_CLASS;
1265 	ev_msg.kev_subclass     = KEV_DL_SUBCLASS;
1266 
1267 	ev_msg.event_code       = KEV_DL_WAKEFLAGS_CHANGED;
1268 	strlcpy(&ev_data.if_name[0], interface->if_name, IFNAMSIZ);
1269 	ev_data.if_family       = interface->if_family;
1270 	ev_data.if_unit         = (u_int32_t)interface->if_unit;
1271 	ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1272 	ev_msg.dv[0].data_ptr   = &ev_data;
1273 	ev_msg.dv[1].data_length = 0;
1274 	dlil_post_complete_msg(interface, &ev_msg);
1275 
1276 	return 0;
1277 }
1278 
1279 u_int32_t
ifnet_get_wake_flags(ifnet_t interface)1280 ifnet_get_wake_flags(ifnet_t interface)
1281 {
1282 	u_int32_t flags = 0;
1283 
1284 	if (interface == NULL) {
1285 		return 0;
1286 	}
1287 
1288 	if ((interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET) != 0) {
1289 		flags |= IF_WAKE_ON_MAGIC_PACKET;
1290 	}
1291 
1292 	return flags;
1293 }
1294 
/*
 * Should MIB data store a copy?
 */
1298 errno_t
ifnet_set_link_mib_data(ifnet_t interface,void * __sized_by (mibLen)mibData,uint32_t mibLen)1299 ifnet_set_link_mib_data(ifnet_t interface, void *__sized_by(mibLen) mibData, uint32_t mibLen)
1300 {
1301 	if (interface == NULL) {
1302 		return EINVAL;
1303 	}
1304 
1305 	ifnet_lock_exclusive(interface);
1306 	interface->if_linkmib = (void*)mibData;
1307 	interface->if_linkmiblen = mibLen;
1308 	ifnet_lock_done(interface);
1309 	return 0;
1310 }
1311 
1312 errno_t
ifnet_get_link_mib_data(ifnet_t interface,void * __sized_by (* mibLen)mibData,uint32_t * mibLen)1313 ifnet_get_link_mib_data(ifnet_t interface, void *__sized_by(*mibLen) mibData, uint32_t *mibLen)
1314 {
1315 	errno_t result = 0;
1316 
1317 	if (interface == NULL) {
1318 		return EINVAL;
1319 	}
1320 
1321 	ifnet_lock_shared(interface);
1322 	if (*mibLen < interface->if_linkmiblen) {
1323 		result = EMSGSIZE;
1324 	}
1325 	if (result == 0 && interface->if_linkmib == NULL) {
1326 		result = ENOTSUP;
1327 	}
1328 
1329 	if (result == 0) {
1330 		*mibLen = interface->if_linkmiblen;
1331 		bcopy(interface->if_linkmib, mibData, *mibLen);
1332 	}
1333 	ifnet_lock_done(interface);
1334 
1335 	return result;
1336 }
1337 
1338 uint32_t
ifnet_get_link_mib_data_length(ifnet_t interface)1339 ifnet_get_link_mib_data_length(ifnet_t interface)
1340 {
1341 	return (interface == NULL) ? 0 : interface->if_linkmiblen;
1342 }
1343 
1344 errno_t
ifnet_output(ifnet_t interface,protocol_family_t protocol_family,mbuf_t m,void * route,const struct sockaddr * dest)1345 ifnet_output(ifnet_t interface, protocol_family_t protocol_family,
1346     mbuf_t m, void *route, const struct sockaddr *dest)
1347 {
1348 	if (interface == NULL || protocol_family == 0 || m == NULL) {
1349 		if (m != NULL) {
1350 			mbuf_freem_list(m);
1351 		}
1352 		return EINVAL;
1353 	}
1354 	return dlil_output(interface, protocol_family, m, route, dest,
1355 	           DLIL_OUTPUT_FLAGS_NONE, NULL);
1356 }
1357 
1358 errno_t
ifnet_output_raw(ifnet_t interface,protocol_family_t protocol_family,mbuf_t m)1359 ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m)
1360 {
1361 	if (interface == NULL || m == NULL) {
1362 		if (m != NULL) {
1363 			mbuf_freem_list(m);
1364 		}
1365 		return EINVAL;
1366 	}
1367 	return dlil_output(interface, protocol_family, m, NULL, NULL,
1368 	           DLIL_OUTPUT_FLAGS_RAW, NULL);
1369 }
1370 
1371 errno_t
ifnet_set_mtu(ifnet_t interface,u_int32_t mtu)1372 ifnet_set_mtu(ifnet_t interface, u_int32_t mtu)
1373 {
1374 	if (interface == NULL) {
1375 		return EINVAL;
1376 	}
1377 
1378 	interface->if_mtu = mtu;
1379 	return 0;
1380 }
1381 
1382 u_int32_t
ifnet_mtu(ifnet_t interface)1383 ifnet_mtu(ifnet_t interface)
1384 {
1385 	return (interface == NULL) ? 0 : interface->if_mtu;
1386 }
1387 
1388 u_char
ifnet_type(ifnet_t interface)1389 ifnet_type(ifnet_t interface)
1390 {
1391 	return (interface == NULL) ? 0 : interface->if_data.ifi_type;
1392 }
1393 
1394 errno_t
ifnet_set_addrlen(ifnet_t interface,u_char addrlen)1395 ifnet_set_addrlen(ifnet_t interface, u_char addrlen)
1396 {
1397 	if (interface == NULL) {
1398 		return EINVAL;
1399 	}
1400 
1401 	interface->if_data.ifi_addrlen = addrlen;
1402 	return 0;
1403 }
1404 
1405 u_char
ifnet_addrlen(ifnet_t interface)1406 ifnet_addrlen(ifnet_t interface)
1407 {
1408 	return (interface == NULL) ? 0 : interface->if_data.ifi_addrlen;
1409 }
1410 
1411 errno_t
ifnet_set_hdrlen(ifnet_t interface,u_char hdrlen)1412 ifnet_set_hdrlen(ifnet_t interface, u_char hdrlen)
1413 {
1414 	if (interface == NULL) {
1415 		return EINVAL;
1416 	}
1417 
1418 	interface->if_data.ifi_hdrlen = hdrlen;
1419 	return 0;
1420 }
1421 
1422 u_char
ifnet_hdrlen(ifnet_t interface)1423 ifnet_hdrlen(ifnet_t interface)
1424 {
1425 	return (interface == NULL) ? 0 : interface->if_data.ifi_hdrlen;
1426 }
1427 
1428 errno_t
ifnet_set_metric(ifnet_t interface,u_int32_t metric)1429 ifnet_set_metric(ifnet_t interface, u_int32_t metric)
1430 {
1431 	if (interface == NULL) {
1432 		return EINVAL;
1433 	}
1434 
1435 	interface->if_data.ifi_metric = metric;
1436 	return 0;
1437 }
1438 
1439 u_int32_t
ifnet_metric(ifnet_t interface)1440 ifnet_metric(ifnet_t interface)
1441 {
1442 	return (interface == NULL) ? 0 : interface->if_data.ifi_metric;
1443 }
1444 
1445 errno_t
ifnet_set_baudrate(struct ifnet * ifp,uint64_t baudrate)1446 ifnet_set_baudrate(struct ifnet *ifp, uint64_t baudrate)
1447 {
1448 	if (ifp == NULL) {
1449 		return EINVAL;
1450 	}
1451 
1452 	ifp->if_output_bw.max_bw = ifp->if_input_bw.max_bw =
1453 	    ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate;
1454 
1455 	/* Pin if_baudrate to 32 bits until we can change the storage size */
1456 	ifp->if_baudrate = (baudrate > UINT32_MAX) ? UINT32_MAX : (uint32_t)baudrate;
1457 
1458 	return 0;
1459 }
1460 
1461 u_int64_t
ifnet_baudrate(struct ifnet * ifp)1462 ifnet_baudrate(struct ifnet *ifp)
1463 {
1464 	return (ifp == NULL) ? 0 : ifp->if_baudrate;
1465 }
1466 
1467 errno_t
ifnet_set_bandwidths(struct ifnet * ifp,struct if_bandwidths * output_bw,struct if_bandwidths * input_bw)1468 ifnet_set_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1469     struct if_bandwidths *input_bw)
1470 {
1471 	if (ifp == NULL) {
1472 		return EINVAL;
1473 	}
1474 
1475 	/* set input values first (if any), as output values depend on them */
1476 	if (input_bw != NULL) {
1477 		(void) ifnet_set_input_bandwidths(ifp, input_bw);
1478 	}
1479 
1480 	if (output_bw != NULL) {
1481 		(void) ifnet_set_output_bandwidths(ifp, output_bw, FALSE);
1482 	}
1483 
1484 	return 0;
1485 }
1486 
1487 static void
ifnet_set_link_status_outbw(struct ifnet * ifp)1488 ifnet_set_link_status_outbw(struct ifnet *ifp)
1489 {
1490 	struct if_wifi_status_v1 *sr;
1491 	sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1492 	if (ifp->if_output_bw.eff_bw != 0) {
1493 		sr->valid_bitmask |=
1494 		    IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
1495 		sr->ul_effective_bandwidth =
1496 		    ifp->if_output_bw.eff_bw > UINT32_MAX ?
1497 		    UINT32_MAX :
1498 		    (uint32_t)ifp->if_output_bw.eff_bw;
1499 	}
1500 	if (ifp->if_output_bw.max_bw != 0) {
1501 		sr->valid_bitmask |=
1502 		    IF_WIFI_UL_MAX_BANDWIDTH_VALID;
1503 		sr->ul_max_bandwidth =
1504 		    ifp->if_output_bw.max_bw > UINT32_MAX ?
1505 		    UINT32_MAX :
1506 		    (uint32_t)ifp->if_output_bw.max_bw;
1507 	}
1508 }
1509 
1510 errno_t
ifnet_set_output_bandwidths(struct ifnet * ifp,struct if_bandwidths * bw,boolean_t locked)1511 ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw,
1512     boolean_t locked)
1513 {
1514 	struct if_bandwidths old_bw;
1515 	struct ifclassq *ifq;
1516 	u_int64_t br;
1517 
1518 	VERIFY(ifp != NULL && bw != NULL);
1519 
1520 	ifq = ifp->if_snd;
1521 	if (!locked) {
1522 		IFCQ_LOCK(ifq);
1523 	}
1524 	IFCQ_LOCK_ASSERT_HELD(ifq);
1525 
1526 	old_bw = ifp->if_output_bw;
1527 	if (bw->eff_bw != 0) {
1528 		ifp->if_output_bw.eff_bw = bw->eff_bw;
1529 	}
1530 	if (bw->max_bw != 0) {
1531 		ifp->if_output_bw.max_bw = bw->max_bw;
1532 	}
1533 	if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
1534 		ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
1535 	} else if (ifp->if_output_bw.eff_bw == 0) {
1536 		ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
1537 	}
1538 
1539 	/* Pin if_baudrate to 32 bits */
1540 	br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
1541 	if (br != 0) {
1542 		ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
1543 	}
1544 
1545 	/* Adjust queue parameters if needed */
1546 	if (old_bw.eff_bw != ifp->if_output_bw.eff_bw ||
1547 	    old_bw.max_bw != ifp->if_output_bw.max_bw) {
1548 		ifnet_update_sndq(ifq, CLASSQ_EV_LINK_BANDWIDTH);
1549 	}
1550 
1551 	if (!locked) {
1552 		IFCQ_UNLOCK(ifq);
1553 	}
1554 
1555 	/*
1556 	 * If this is a Wifi interface, update the values in
1557 	 * if_link_status structure also.
1558 	 */
1559 	if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1560 		lck_rw_lock_exclusive(&ifp->if_link_status_lock);
1561 		ifnet_set_link_status_outbw(ifp);
1562 		lck_rw_done(&ifp->if_link_status_lock);
1563 	}
1564 
1565 	return 0;
1566 }
1567 
1568 static void
ifnet_set_link_status_inbw(struct ifnet * ifp)1569 ifnet_set_link_status_inbw(struct ifnet *ifp)
1570 {
1571 	struct if_wifi_status_v1 *sr;
1572 
1573 	sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1574 	if (ifp->if_input_bw.eff_bw != 0) {
1575 		sr->valid_bitmask |=
1576 		    IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
1577 		sr->dl_effective_bandwidth =
1578 		    ifp->if_input_bw.eff_bw > UINT32_MAX ?
1579 		    UINT32_MAX :
1580 		    (uint32_t)ifp->if_input_bw.eff_bw;
1581 	}
1582 	if (ifp->if_input_bw.max_bw != 0) {
1583 		sr->valid_bitmask |=
1584 		    IF_WIFI_DL_MAX_BANDWIDTH_VALID;
1585 		sr->dl_max_bandwidth = ifp->if_input_bw.max_bw > UINT32_MAX ?
1586 		    UINT32_MAX :
1587 		    (uint32_t)ifp->if_input_bw.max_bw;
1588 	}
1589 }
1590 
1591 errno_t
ifnet_set_input_bandwidths(struct ifnet * ifp,struct if_bandwidths * bw)1592 ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw)
1593 {
1594 	struct if_bandwidths old_bw;
1595 
1596 	VERIFY(ifp != NULL && bw != NULL);
1597 
1598 	old_bw = ifp->if_input_bw;
1599 	if (bw->eff_bw != 0) {
1600 		ifp->if_input_bw.eff_bw = bw->eff_bw;
1601 	}
1602 	if (bw->max_bw != 0) {
1603 		ifp->if_input_bw.max_bw = bw->max_bw;
1604 	}
1605 	if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
1606 		ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
1607 	} else if (ifp->if_input_bw.eff_bw == 0) {
1608 		ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
1609 	}
1610 
1611 	if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1612 		lck_rw_lock_exclusive(&ifp->if_link_status_lock);
1613 		ifnet_set_link_status_inbw(ifp);
1614 		lck_rw_done(&ifp->if_link_status_lock);
1615 	}
1616 
1617 	if (old_bw.eff_bw != ifp->if_input_bw.eff_bw ||
1618 	    old_bw.max_bw != ifp->if_input_bw.max_bw) {
1619 		ifnet_update_rcv(ifp, CLASSQ_EV_LINK_BANDWIDTH);
1620 	}
1621 
1622 	return 0;
1623 }
1624 
1625 u_int64_t
ifnet_output_linkrate(struct ifnet * ifp)1626 ifnet_output_linkrate(struct ifnet *ifp)
1627 {
1628 	struct ifclassq *ifq = ifp->if_snd;
1629 	u_int64_t rate;
1630 
1631 	IFCQ_LOCK_ASSERT_HELD(ifq);
1632 
1633 	rate = ifp->if_output_bw.eff_bw;
1634 	if (IFCQ_TBR_IS_ENABLED(ifq)) {
1635 		u_int64_t tbr_rate = ifq->ifcq_tbr.tbr_rate_raw;
1636 		VERIFY(tbr_rate > 0);
1637 		rate = MIN(rate, ifq->ifcq_tbr.tbr_rate_raw);
1638 	}
1639 
1640 	return rate;
1641 }
1642 
1643 u_int64_t
ifnet_input_linkrate(struct ifnet * ifp)1644 ifnet_input_linkrate(struct ifnet *ifp)
1645 {
1646 	return ifp->if_input_bw.eff_bw;
1647 }
1648 
1649 errno_t
ifnet_bandwidths(struct ifnet * ifp,struct if_bandwidths * output_bw,struct if_bandwidths * input_bw)1650 ifnet_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1651     struct if_bandwidths *input_bw)
1652 {
1653 	if (ifp == NULL) {
1654 		return EINVAL;
1655 	}
1656 
1657 	if (output_bw != NULL) {
1658 		*output_bw = ifp->if_output_bw;
1659 	}
1660 	if (input_bw != NULL) {
1661 		*input_bw = ifp->if_input_bw;
1662 	}
1663 
1664 	return 0;
1665 }
1666 
1667 errno_t
ifnet_set_latencies(struct ifnet * ifp,struct if_latencies * output_lt,struct if_latencies * input_lt)1668 ifnet_set_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1669     struct if_latencies *input_lt)
1670 {
1671 	if (ifp == NULL) {
1672 		return EINVAL;
1673 	}
1674 
1675 	if (output_lt != NULL) {
1676 		(void) ifnet_set_output_latencies(ifp, output_lt, FALSE);
1677 	}
1678 
1679 	if (input_lt != NULL) {
1680 		(void) ifnet_set_input_latencies(ifp, input_lt);
1681 	}
1682 
1683 	return 0;
1684 }
1685 
1686 errno_t
ifnet_set_output_latencies(struct ifnet * ifp,struct if_latencies * lt,boolean_t locked)1687 ifnet_set_output_latencies(struct ifnet *ifp, struct if_latencies *lt,
1688     boolean_t locked)
1689 {
1690 	struct if_latencies old_lt;
1691 	struct ifclassq *ifq;
1692 
1693 	VERIFY(ifp != NULL && lt != NULL);
1694 
1695 	ifq = ifp->if_snd;
1696 	if (!locked) {
1697 		IFCQ_LOCK(ifq);
1698 	}
1699 	IFCQ_LOCK_ASSERT_HELD(ifq);
1700 
1701 	old_lt = ifp->if_output_lt;
1702 	if (lt->eff_lt != 0) {
1703 		ifp->if_output_lt.eff_lt = lt->eff_lt;
1704 	}
1705 	if (lt->max_lt != 0) {
1706 		ifp->if_output_lt.max_lt = lt->max_lt;
1707 	}
1708 	if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
1709 		ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
1710 	} else if (ifp->if_output_lt.eff_lt == 0) {
1711 		ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
1712 	}
1713 
1714 	/* Adjust queue parameters if needed */
1715 	if (old_lt.eff_lt != ifp->if_output_lt.eff_lt ||
1716 	    old_lt.max_lt != ifp->if_output_lt.max_lt) {
1717 		ifnet_update_sndq(ifq, CLASSQ_EV_LINK_LATENCY);
1718 	}
1719 
1720 	if (!locked) {
1721 		IFCQ_UNLOCK(ifq);
1722 	}
1723 
1724 	return 0;
1725 }
1726 
1727 errno_t
ifnet_set_input_latencies(struct ifnet * ifp,struct if_latencies * lt)1728 ifnet_set_input_latencies(struct ifnet *ifp, struct if_latencies *lt)
1729 {
1730 	struct if_latencies old_lt;
1731 
1732 	VERIFY(ifp != NULL && lt != NULL);
1733 
1734 	old_lt = ifp->if_input_lt;
1735 	if (lt->eff_lt != 0) {
1736 		ifp->if_input_lt.eff_lt = lt->eff_lt;
1737 	}
1738 	if (lt->max_lt != 0) {
1739 		ifp->if_input_lt.max_lt = lt->max_lt;
1740 	}
1741 	if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
1742 		ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
1743 	} else if (ifp->if_input_lt.eff_lt == 0) {
1744 		ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
1745 	}
1746 
1747 	if (old_lt.eff_lt != ifp->if_input_lt.eff_lt ||
1748 	    old_lt.max_lt != ifp->if_input_lt.max_lt) {
1749 		ifnet_update_rcv(ifp, CLASSQ_EV_LINK_LATENCY);
1750 	}
1751 
1752 	return 0;
1753 }
1754 
1755 errno_t
ifnet_latencies(struct ifnet * ifp,struct if_latencies * output_lt,struct if_latencies * input_lt)1756 ifnet_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1757     struct if_latencies *input_lt)
1758 {
1759 	if (ifp == NULL) {
1760 		return EINVAL;
1761 	}
1762 
1763 	if (output_lt != NULL) {
1764 		*output_lt = ifp->if_output_lt;
1765 	}
1766 	if (input_lt != NULL) {
1767 		*input_lt = ifp->if_input_lt;
1768 	}
1769 
1770 	return 0;
1771 }
1772 
1773 errno_t
ifnet_set_poll_params(struct ifnet * ifp,struct ifnet_poll_params * p)1774 ifnet_set_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1775 {
1776 	errno_t err;
1777 
1778 	if (ifp == NULL) {
1779 		return EINVAL;
1780 	} else if (!ifnet_is_attached(ifp, 1)) {
1781 		return ENXIO;
1782 	}
1783 
1784 #if SKYWALK
1785 	if (SKYWALK_CAPABLE(ifp)) {
1786 		err = netif_rxpoll_set_params(ifp, p, FALSE);
1787 		ifnet_decr_iorefcnt(ifp);
1788 		return err;
1789 	}
1790 #endif /* SKYWALK */
1791 	err = dlil_rxpoll_set_params(ifp, p, FALSE);
1792 
1793 	/* Release the io ref count */
1794 	ifnet_decr_iorefcnt(ifp);
1795 
1796 	return err;
1797 }
1798 
1799 errno_t
ifnet_poll_params(struct ifnet * ifp,struct ifnet_poll_params * p)1800 ifnet_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1801 {
1802 	errno_t err;
1803 
1804 	if (ifp == NULL || p == NULL) {
1805 		return EINVAL;
1806 	} else if (!ifnet_is_attached(ifp, 1)) {
1807 		return ENXIO;
1808 	}
1809 
1810 	err = dlil_rxpoll_get_params(ifp, p);
1811 
1812 	/* Release the io ref count */
1813 	ifnet_decr_iorefcnt(ifp);
1814 
1815 	return err;
1816 }
1817 
1818 errno_t
ifnet_stat_increment(struct ifnet * ifp,const struct ifnet_stat_increment_param * s)1819 ifnet_stat_increment(struct ifnet *ifp,
1820     const struct ifnet_stat_increment_param *s)
1821 {
1822 	if (ifp == NULL) {
1823 		return EINVAL;
1824 	}
1825 
1826 	if (s->packets_in != 0) {
1827 		os_atomic_add(&ifp->if_data.ifi_ipackets, s->packets_in, relaxed);
1828 	}
1829 	if (s->bytes_in != 0) {
1830 		os_atomic_add(&ifp->if_data.ifi_ibytes, s->bytes_in, relaxed);
1831 	}
1832 	if (s->errors_in != 0) {
1833 		os_atomic_add(&ifp->if_data.ifi_ierrors, s->errors_in, relaxed);
1834 	}
1835 
1836 	if (s->packets_out != 0) {
1837 		os_atomic_add(&ifp->if_data.ifi_opackets, s->packets_out, relaxed);
1838 	}
1839 	if (s->bytes_out != 0) {
1840 		os_atomic_add(&ifp->if_data.ifi_obytes, s->bytes_out, relaxed);
1841 	}
1842 	if (s->errors_out != 0) {
1843 		os_atomic_add(&ifp->if_data.ifi_oerrors, s->errors_out, relaxed);
1844 	}
1845 
1846 	if (s->collisions != 0) {
1847 		os_atomic_add(&ifp->if_data.ifi_collisions, s->collisions, relaxed);
1848 	}
1849 	if (s->dropped != 0) {
1850 		os_atomic_add(&ifp->if_data.ifi_iqdrops, s->dropped, relaxed);
1851 	}
1852 
1853 	/* Touch the last change time. */
1854 	TOUCHLASTCHANGE(&ifp->if_lastchange);
1855 
1856 	if (ifp->if_data_threshold != 0) {
1857 		ifnet_notify_data_threshold(ifp);
1858 	}
1859 
1860 	return 0;
1861 }
1862 
1863 errno_t
ifnet_stat_increment_in(struct ifnet * ifp,u_int32_t packets_in,u_int32_t bytes_in,u_int32_t errors_in)1864 ifnet_stat_increment_in(struct ifnet *ifp, u_int32_t packets_in,
1865     u_int32_t bytes_in, u_int32_t errors_in)
1866 {
1867 	if (ifp == NULL) {
1868 		return EINVAL;
1869 	}
1870 
1871 	if (packets_in != 0) {
1872 		os_atomic_add(&ifp->if_data.ifi_ipackets, packets_in, relaxed);
1873 	}
1874 	if (bytes_in != 0) {
1875 		os_atomic_add(&ifp->if_data.ifi_ibytes, bytes_in, relaxed);
1876 	}
1877 	if (errors_in != 0) {
1878 		os_atomic_add(&ifp->if_data.ifi_ierrors, errors_in, relaxed);
1879 	}
1880 
1881 	TOUCHLASTCHANGE(&ifp->if_lastchange);
1882 
1883 	if (ifp->if_data_threshold != 0) {
1884 		ifnet_notify_data_threshold(ifp);
1885 	}
1886 
1887 	return 0;
1888 }
1889 
1890 errno_t
ifnet_stat_increment_out(struct ifnet * ifp,u_int32_t packets_out,u_int32_t bytes_out,u_int32_t errors_out)1891 ifnet_stat_increment_out(struct ifnet *ifp, u_int32_t packets_out,
1892     u_int32_t bytes_out, u_int32_t errors_out)
1893 {
1894 	if (ifp == NULL) {
1895 		return EINVAL;
1896 	}
1897 
1898 	if (packets_out != 0) {
1899 		os_atomic_add(&ifp->if_data.ifi_opackets, packets_out, relaxed);
1900 	}
1901 	if (bytes_out != 0) {
1902 		os_atomic_add(&ifp->if_data.ifi_obytes, bytes_out, relaxed);
1903 	}
1904 	if (errors_out != 0) {
1905 		os_atomic_add(&ifp->if_data.ifi_oerrors, errors_out, relaxed);
1906 	}
1907 
1908 	TOUCHLASTCHANGE(&ifp->if_lastchange);
1909 
1910 	if (ifp->if_data_threshold != 0) {
1911 		ifnet_notify_data_threshold(ifp);
1912 	}
1913 
1914 	return 0;
1915 }
1916 
1917 errno_t
ifnet_set_stat(struct ifnet * ifp,const struct ifnet_stats_param * s)1918 ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s)
1919 {
1920 	if (ifp == NULL) {
1921 		return EINVAL;
1922 	}
1923 
1924 	os_atomic_store(&ifp->if_data.ifi_ipackets, s->packets_in, release);
1925 	os_atomic_store(&ifp->if_data.ifi_ibytes, s->bytes_in, release);
1926 	os_atomic_store(&ifp->if_data.ifi_imcasts, s->multicasts_in, release);
1927 	os_atomic_store(&ifp->if_data.ifi_ierrors, s->errors_in, release);
1928 
1929 	os_atomic_store(&ifp->if_data.ifi_opackets, s->packets_out, release);
1930 	os_atomic_store(&ifp->if_data.ifi_obytes, s->bytes_out, release);
1931 	os_atomic_store(&ifp->if_data.ifi_omcasts, s->multicasts_out, release);
1932 	os_atomic_store(&ifp->if_data.ifi_oerrors, s->errors_out, release);
1933 
1934 	os_atomic_store(&ifp->if_data.ifi_collisions, s->collisions, release);
1935 	os_atomic_store(&ifp->if_data.ifi_iqdrops, s->dropped, release);
1936 	os_atomic_store(&ifp->if_data.ifi_noproto, s->no_protocol, release);
1937 
1938 	/* Touch the last change time. */
1939 	TOUCHLASTCHANGE(&ifp->if_lastchange);
1940 
1941 	if (ifp->if_data_threshold != 0) {
1942 		ifnet_notify_data_threshold(ifp);
1943 	}
1944 
1945 	return 0;
1946 }
1947 
1948 errno_t
ifnet_stat(struct ifnet * ifp,struct ifnet_stats_param * s)1949 ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s)
1950 {
1951 	if (ifp == NULL) {
1952 		return EINVAL;
1953 	}
1954 
1955 	s->packets_in = os_atomic_load(&ifp->if_data.ifi_ipackets, relaxed);
1956 	s->bytes_in = os_atomic_load(&ifp->if_data.ifi_ibytes, relaxed);
1957 	s->multicasts_in = os_atomic_load(&ifp->if_data.ifi_imcasts, relaxed);
1958 	s->errors_in = os_atomic_load(&ifp->if_data.ifi_ierrors, relaxed);
1959 
1960 	s->packets_out = os_atomic_load(&ifp->if_data.ifi_opackets, relaxed);
1961 	s->bytes_out = os_atomic_load(&ifp->if_data.ifi_obytes, relaxed);
1962 	s->multicasts_out = os_atomic_load(&ifp->if_data.ifi_omcasts, relaxed);
1963 	s->errors_out = os_atomic_load(&ifp->if_data.ifi_oerrors, relaxed);
1964 
1965 	s->collisions = os_atomic_load(&ifp->if_data.ifi_collisions, relaxed);
1966 	s->dropped = os_atomic_load(&ifp->if_data.ifi_iqdrops, relaxed);
1967 	s->no_protocol = os_atomic_load(&ifp->if_data.ifi_noproto, relaxed);
1968 
1969 	if (ifp->if_data_threshold != 0) {
1970 		ifnet_notify_data_threshold(ifp);
1971 	}
1972 
1973 	return 0;
1974 }
1975 
1976 errno_t
ifnet_touch_lastchange(ifnet_t interface)1977 ifnet_touch_lastchange(ifnet_t interface)
1978 {
1979 	if (interface == NULL) {
1980 		return EINVAL;
1981 	}
1982 
1983 	TOUCHLASTCHANGE(&interface->if_lastchange);
1984 
1985 	return 0;
1986 }
1987 
1988 errno_t
ifnet_lastchange(ifnet_t interface,struct timeval * last_change)1989 ifnet_lastchange(ifnet_t interface, struct timeval *last_change)
1990 {
1991 	if (interface == NULL) {
1992 		return EINVAL;
1993 	}
1994 
1995 	*last_change = interface->if_data.ifi_lastchange;
1996 	/* Crude conversion from uptime to calendar time */
1997 	last_change->tv_sec += boottime_sec();
1998 
1999 	return 0;
2000 }
2001 
2002 errno_t
ifnet_touch_lastupdown(ifnet_t interface)2003 ifnet_touch_lastupdown(ifnet_t interface)
2004 {
2005 	if (interface == NULL) {
2006 		return EINVAL;
2007 	}
2008 
2009 	TOUCHLASTCHANGE(&interface->if_lastupdown);
2010 
2011 	return 0;
2012 }
2013 
2014 errno_t
ifnet_updown_delta(ifnet_t interface,struct timeval * updown_delta)2015 ifnet_updown_delta(ifnet_t interface, struct timeval *updown_delta)
2016 {
2017 	if (interface == NULL) {
2018 		return EINVAL;
2019 	}
2020 
2021 	/* Calculate the delta */
2022 	updown_delta->tv_sec = (time_t)net_uptime();
2023 	if (updown_delta->tv_sec > interface->if_data.ifi_lastupdown.tv_sec) {
2024 		updown_delta->tv_sec -= interface->if_data.ifi_lastupdown.tv_sec;
2025 	} else {
2026 		updown_delta->tv_sec = 0;
2027 	}
2028 	updown_delta->tv_usec = 0;
2029 
2030 	return 0;
2031 }
2032 
2033 errno_t
ifnet_get_address_list(ifnet_t interface,ifaddr_t * __null_terminated * addresses)2034 ifnet_get_address_list(ifnet_t interface, ifaddr_t *__null_terminated *addresses)
2035 {
2036 	return addresses == NULL ? EINVAL :
2037 	       ifnet_get_address_list_family(interface, addresses, 0);
2038 }
2039 
/*
 * Returns the addresses of `interface' (or of all interfaces when
 * `interface' is NULL) as a counted array; every entry holds a reference
 * the caller must release.  Family 0 = any, not detached, blocking
 * allocation, no in-use filtering.
 */
errno_t
ifnet_get_address_list_with_count(ifnet_t interface,
    ifaddr_t *__counted_by(*addresses_count) *addresses,
    uint16_t *addresses_count)
{
	return ifnet_get_address_list_family_internal(interface, addresses,
	           addresses_count, 0, 0, Z_WAITOK, 0);
}
2048 
/* Temporary singly-linked list node used while collecting interface addresses. */
struct ifnet_addr_list {
	SLIST_ENTRY(ifnet_addr_list)    ifal_le;        /* list linkage */
	struct ifaddr                   *ifal_ifa;      /* referenced address */
};
2053 
2054 errno_t
ifnet_get_address_list_family(ifnet_t interface,ifaddr_t * __null_terminated * ret_addresses,sa_family_t family)2055 ifnet_get_address_list_family(ifnet_t interface, ifaddr_t *__null_terminated *ret_addresses,
2056     sa_family_t family)
2057 {
2058 	uint16_t addresses_count = 0;
2059 	ifaddr_t *__counted_by(addresses_count) addresses = NULL;
2060 	errno_t error;
2061 
2062 	error = ifnet_get_address_list_family_internal(interface, &addresses,
2063 	    &addresses_count, family, 0, Z_WAITOK, 0);
2064 	if (addresses_count > 0) {
2065 		*ret_addresses = __unsafe_null_terminated_from_indexable(addresses,
2066 		    &addresses[addresses_count - 1]);
2067 	} else {
2068 		*ret_addresses = NULL;
2069 	}
2070 
2071 	return error;
2072 }
2073 
/*
 * Same as ifnet_get_address_list_with_count(), restricted to a single
 * address family (0 = any).  Entries hold references the caller drops.
 */
errno_t
ifnet_get_address_list_family_with_count(ifnet_t interface,
    ifaddr_t *__counted_by(*addresses_count) *addresses,
    uint16_t *addresses_count, sa_family_t family)
{
	return ifnet_get_address_list_family_internal(interface, addresses,
	           addresses_count, family, 0, Z_WAITOK, 0);
}
2082 
2083 errno_t
ifnet_get_inuse_address_list(ifnet_t interface,ifaddr_t * __null_terminated * ret_addresses)2084 ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t *__null_terminated *ret_addresses)
2085 {
2086 	uint16_t addresses_count = 0;
2087 	ifaddr_t *__counted_by(addresses_count) addresses = NULL;
2088 	errno_t error;
2089 
2090 	error = ifnet_get_address_list_family_internal(interface, &addresses,
2091 	    &addresses_count, 0, 0, Z_WAITOK, 1);
2092 	if (addresses_count > 0) {
2093 		*ret_addresses = __unsafe_null_terminated_from_indexable(addresses,
2094 		    &addresses[addresses_count - 1]);
2095 	} else {
2096 		*ret_addresses = NULL;
2097 	}
2098 
2099 	return error;
2100 }
2101 
2102 extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa);
2103 extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *ifa);
2104 
/*
 * Workhorse for all ifnet_get_*address_list* KPIs.
 *
 * Collects the addresses of `interface' (or of every attached interface
 * when `interface' is NULL) into a newly allocated array.  Each returned
 * ifaddr_t carries a reference the caller must release.
 *
 * addresses / addresses_count: out parameters; the count includes the
 *     trailing NULL terminator slot.
 * family:  restrict to this address family; 0 means any.
 * detached: non-zero when `interface' has already been detached, in which
 *     case ifnet_head is neither consulted nor locked.
 * how:  zalloc flags (e.g. Z_WAITOK) for all allocations.
 * return_inuse_addrs: non-zero keeps only addresses with at least one
 *     TCP or UDP PCB bound to them.
 */
__private_extern__ errno_t
ifnet_get_address_list_family_internal(ifnet_t interface,
    ifaddr_t *__counted_by(*addresses_count) *addresses,
    uint16_t *addresses_count, sa_family_t family, int detached, int how,
    int return_inuse_addrs)
{
	SLIST_HEAD(, ifnet_addr_list) ifal_head;
	struct ifnet_addr_list *ifal, *ifal_tmp;
	struct ifnet *ifp;
	uint16_t count = 0;
	errno_t err = 0;
	int usecount = 0;
	int index = 0;

	SLIST_INIT(&ifal_head);

	if (addresses == NULL || addresses_count == NULL) {
		err = EINVAL;
		goto done;
	}
	*addresses = NULL;
	*addresses_count = 0;

	if (detached) {
		/*
		 * Interface has been detached, so skip the lookup
		 * at ifnet_head and go directly to inner loop.
		 */
		ifp = interface;
		if (ifp == NULL) {
			err = EINVAL;
			goto done;
		}
		goto one;
	}

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (interface != NULL && ifp != interface) {
			continue;
		}
one:
		ifnet_lock_shared(ifp);
		if (interface == NULL || interface == ifp) {
			struct ifaddr *ifa;
			TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
				IFA_LOCK(ifa);
				if (family != 0 &&
				    ifa->ifa_addr->sa_family != family) {
					IFA_UNLOCK(ifa);
					continue;
				}
				/* stage the address (with a reference) on a temp list */
				ifal = kalloc_type(struct ifnet_addr_list, how);
				if (ifal == NULL) {
					IFA_UNLOCK(ifa);
					ifnet_lock_done(ifp);
					if (!detached) {
						ifnet_head_done();
					}
					err = ENOMEM;
					goto done;
				}
				ifal->ifal_ifa = ifa;
				ifa_addref(ifa);
				SLIST_INSERT_HEAD(&ifal_head, ifal, ifal_le);
				IFA_UNLOCK(ifa);
				/* count is uint16_t; bail out rather than wrap around */
				if (__improbable(os_inc_overflow(&count))) {
					ifnet_lock_done(ifp);
					if (!detached) {
						ifnet_head_done();
					}
					err = EINVAL;
					goto done;
				}
			}
		}
		ifnet_lock_done(ifp);
		if (detached) {
			break;
		}
	}
	if (!detached) {
		ifnet_head_done();
	}

	if (count == 0) {
		err = ENXIO;
		goto done;
	}

	/* one extra slot for the NULL terminator */
	uint16_t allocation_size = 0;
	if (__improbable(os_add_overflow(count, 1, &allocation_size))) {
		err = EINVAL;
		goto done;
	}
	ifaddr_t *allocation = kalloc_type(ifaddr_t, allocation_size, how | Z_ZERO);
	if (allocation == NULL) {
		err = ENOMEM;
		goto done;
	}
	*addresses = allocation;
	*addresses_count = allocation_size;

done:
	/*
	 * Drain the temporary list.  On success the staged references are
	 * transferred into *addresses (filling from the end restores the
	 * original traversal order, since SLIST insertion reversed it);
	 * on failure every staged reference is dropped.
	 */
	SLIST_FOREACH_SAFE(ifal, &ifal_head, ifal_le, ifal_tmp) {
		SLIST_REMOVE(&ifal_head, ifal, ifnet_addr_list, ifal_le);
		if (err == 0) {
			if (return_inuse_addrs) {
				usecount = tcp_find_anypcb_byaddr(ifal->ifal_ifa);
				usecount += udp_find_anypcb_byaddr(ifal->ifal_ifa);
				if (usecount) {
					(*addresses)[index] = ifal->ifal_ifa;
					index++;
				} else {
					ifa_remref(ifal->ifal_ifa);
				}
			} else {
				(*addresses)[--count] = ifal->ifal_ifa;
			}
		} else {
			ifa_remref(ifal->ifal_ifa);
		}
		kfree_type(struct ifnet_addr_list, ifal);
	}

	/* NOTE(review): assumes addresses != NULL whenever err != 0 can reach
	 * here with staged entries; holds because the NULL check runs first. */
	VERIFY(err == 0 || *addresses == NULL);
	/* in-use filtering may have rejected every address; report ENXIO */
	if ((err == 0) && (count) && ((*addresses)[0] == NULL)) {
		VERIFY(return_inuse_addrs == 1);
		kfree_type_counted_by(ifaddr_t, *addresses_count, *addresses);
		err = ENXIO;
	}
	return err;
}
2238 
2239 void
ifnet_free_address_list(ifaddr_t * __null_terminated addresses)2240 ifnet_free_address_list(ifaddr_t *__null_terminated addresses)
2241 {
2242 	int i = 0;
2243 
2244 	if (addresses == NULL) {
2245 		return;
2246 	}
2247 
2248 	for (ifaddr_t *__null_terminated ptr = addresses; *ptr != NULL; ++ptr, i++) {
2249 		ifa_remref(*ptr);
2250 	}
2251 
2252 	ifaddr_t *free_addresses = __unsafe_null_terminated_to_indexable(addresses);
2253 	kfree_type(ifaddr_t, i + 1, free_addresses);
2254 }
2255 
2256 void
ifnet_address_list_free_counted_by_internal(ifaddr_t * __counted_by (addresses_count)addresses,uint16_t addresses_count)2257 ifnet_address_list_free_counted_by_internal(ifaddr_t *__counted_by(addresses_count) addresses,
2258     uint16_t addresses_count)
2259 {
2260 	if (addresses == NULL) {
2261 		return;
2262 	}
2263 	for (int i = 0; i < addresses_count; i++) {
2264 		if (addresses[i] != NULL) {
2265 			ifa_remref(addresses[i]);
2266 		}
2267 	}
2268 	kfree_type_counted_by(ifaddr_t, addresses_count, addresses);
2269 }
2270 
2271 void *
ifnet_lladdr(ifnet_t interface)2272 ifnet_lladdr(ifnet_t interface)
2273 {
2274 	struct ifaddr *ifa;
2275 	void *lladdr;
2276 
2277 	if (interface == NULL) {
2278 		return NULL;
2279 	}
2280 
2281 	/*
2282 	 * if_lladdr points to the permanent link address of
2283 	 * the interface and it never gets deallocated; internal
2284 	 * code should simply use IF_LLADDR() for performance.
2285 	 */
2286 	ifa = interface->if_lladdr;
2287 	IFA_LOCK_SPIN(ifa);
2288 	struct sockaddr_dl *sdl = SDL(ifa->ifa_addr);
2289 	lladdr = LLADDR(sdl);
2290 	IFA_UNLOCK(ifa);
2291 
2292 	return lladdr;
2293 }
2294 
2295 errno_t
ifnet_llbroadcast_copy_bytes(ifnet_t interface,void * __sized_by (buffer_len)addr,size_t buffer_len,size_t * out_len)2296 ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *__sized_by(buffer_len) addr,
2297     size_t buffer_len, size_t *out_len)
2298 {
2299 	if (interface == NULL || addr == NULL || out_len == NULL) {
2300 		return EINVAL;
2301 	}
2302 
2303 	*out_len = interface->if_broadcast.length;
2304 
2305 	if (buffer_len < interface->if_broadcast.length) {
2306 		return EMSGSIZE;
2307 	}
2308 
2309 	if (interface->if_broadcast.length == 0) {
2310 		return ENXIO;
2311 	}
2312 
2313 	bcopy(interface->if_broadcast.ptr, addr,
2314 	    interface->if_broadcast.length);
2315 
2316 	return 0;
2317 }
2318 
/*
 * Copies the link-layer address bytes of `interface' into `lladdr'.
 * `lladdr_len' must match the address length exactly; on mismatch the
 * buffer is zeroed and EMSGSIZE is returned.  When `credp' is non-NULL
 * it is passed to dlil_ifaddr_bytes_indexable(), which may substitute
 * the bytes based on the caller's credentials (MAC policy).
 */
static errno_t
ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *__sized_by(lladdr_len) lladdr,
    size_t lladdr_len, kauth_cred_t *credp)
{
	size_t bytes_len;
	const u_int8_t *bytes;
	struct ifaddr *ifa;
	uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
	errno_t error = 0;

	/*
	 * Make sure to accommodate the largest possible
	 * size of SA(if_lladdr)->sa_len.
	 */
	_CASSERT(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1));

	if (interface == NULL || lladdr == NULL) {
		return EINVAL;
	}

	/* Snapshot the sockaddr_dl into a local buffer under the lock. */
	ifa = interface->if_lladdr;
	IFA_LOCK_SPIN(ifa);
	const struct sockaddr_dl *sdl = SDL(sdlbuf);
	SOCKADDR_COPY(ifa->ifa_addr, sdl, SA(ifa->ifa_addr)->sa_len);
	IFA_UNLOCK(ifa);

	bytes = dlil_ifaddr_bytes_indexable(SDL(sdlbuf), &bytes_len, credp);
	if (bytes_len != lladdr_len) {
		/* Length mismatch: never leak partial bytes to the caller. */
		bzero(lladdr, lladdr_len);
		error = EMSGSIZE;
	} else {
		bcopy(bytes, lladdr, bytes_len);
	}

	return error;
}
2355 
/*
 * Copies the interface's link-layer address without any credential-based
 * filtering (credp == NULL).
 */
errno_t
ifnet_lladdr_copy_bytes(ifnet_t interface, void *__sized_by(length) lladdr, size_t length)
{
	return ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
	           NULL);
}
2362 
/*
 * Same as ifnet_lladdr_copy_bytes(), but when MACF is configured the
 * current process credential is supplied so MAC policy can decide
 * whether the caller may see the real link-layer address.
 */
errno_t
ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *__sized_by(length) lladdr, size_t length)
{
#if CONFIG_MACF
	kauth_cred_t __single cred;
	net_thread_marks_t __single marks;
#endif
	kauth_cred_t *__single credp;
	errno_t error;

#if CONFIG_MACF
	/* Mark this thread so the MAC check knows an LLADDR request is active. */
	marks = net_thread_marks_push(NET_THREAD_CKREQ_LLADDR);
	cred  = current_cached_proc_cred(PROC_NULL);
	credp = &cred;
#else
	credp = NULL;
#endif

	error = ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
	    credp);

#if CONFIG_MACF
	net_thread_marks_pop(marks);
#endif

	return error;
}
2390 
/*
 * Sets (or clears, when lladdr_len == 0) the link-layer address of
 * `interface', optionally updating the sdl_type when `apply_type' is set.
 * A non-zero `lladdr_len' must equal the interface's if_addrlen.
 * On success a KEV_DL_LINK_ADDRESS_CHANGED event is posted.
 */
static errno_t
ifnet_set_lladdr_internal(ifnet_t interface, const void *__sized_by(lladdr_len) lladdr,
    size_t lladdr_len, u_char new_type, int apply_type)
{
	struct ifaddr *ifa;
	errno_t error = 0;

	if (interface == NULL) {
		return EINVAL;
	}

	/* Hold the head lock across the update so the ifnet cannot detach. */
	ifnet_head_lock_shared();
	ifnet_lock_exclusive(interface);
	if (lladdr_len != 0 &&
	    (lladdr_len != interface->if_addrlen || lladdr == 0)) {
		ifnet_lock_done(interface);
		ifnet_head_done();
		return EINVAL;
	}
	/* The interface needs to be attached to add an address */
	if (interface->if_refflags & IFRF_EMBRYONIC) {
		ifnet_lock_done(interface);
		ifnet_head_done();
		return ENXIO;
	}

	ifa = ifnet_addrs[interface->if_index - 1];
	if (ifa != NULL) {
		struct sockaddr_dl *sdl;

		IFA_LOCK_SPIN(ifa);
		sdl = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
		if (lladdr_len != 0) {
			bcopy(lladdr, LLADDR(sdl), lladdr_len);
		} else {
			/* Clearing: zero out the old address bytes. */
			bzero(LLADDR(sdl), interface->if_addrlen);
		}
		/* lladdr_len-check with if_addrlen makes sure it fits in u_char */
		sdl->sdl_alen = (u_char)lladdr_len;

		if (apply_type) {
			sdl->sdl_type = new_type;
		}
		IFA_UNLOCK(ifa);
	} else {
		error = ENXIO;
	}
	ifnet_lock_done(interface);
	ifnet_head_done();

	/* Generate a kernel event */
	if (error == 0) {
		intf_event_enqueue_nwk_wq_entry(interface, NULL,
		    INTF_EVENT_CODE_LLADDR_UPDATE);
		dlil_post_msg(interface, KEV_DL_SUBCLASS,
		    KEV_DL_LINK_ADDRESS_CHANGED, NULL, 0, FALSE);
	}

	return error;
}
2451 
/* Sets the link-layer address of `interface' without changing sdl_type. */
errno_t
ifnet_set_lladdr(ifnet_t interface, const void *__sized_by(lladdr_len) lladdr, size_t lladdr_len)
{
	return ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, 0, 0);
}
2457 
/* Sets the link-layer address of `interface' and updates sdl_type to `type'. */
errno_t
ifnet_set_lladdr_and_type(ifnet_t interface, const void *__sized_by(lladdr_len) lladdr,
    size_t lladdr_len, u_char type)
{
	return ifnet_set_lladdr_internal(interface, lladdr,
	           lladdr_len, type, 1);
}
2465 
2466 errno_t
ifnet_add_multicast(ifnet_t interface,const struct sockaddr * maddr,ifmultiaddr_t * ifmap)2467 ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr,
2468     ifmultiaddr_t *ifmap)
2469 {
2470 	if (interface == NULL || maddr == NULL) {
2471 		return EINVAL;
2472 	}
2473 
2474 	/* Don't let users screw up protocols' entries. */
2475 	switch (maddr->sa_family) {
2476 	case AF_LINK: {
2477 		const struct sockaddr_dl *sdl = SDL(maddr);
2478 		if (sdl->sdl_len < sizeof(struct sockaddr_dl) ||
2479 		    (sdl->sdl_nlen + sdl->sdl_alen + sdl->sdl_slen +
2480 		    offsetof(struct sockaddr_dl, sdl_data) > sdl->sdl_len)) {
2481 			return EINVAL;
2482 		}
2483 		break;
2484 	}
2485 	case AF_UNSPEC:
2486 		if (maddr->sa_len < ETHER_ADDR_LEN +
2487 		    offsetof(struct sockaddr, sa_data)) {
2488 			return EINVAL;
2489 		}
2490 		break;
2491 	default:
2492 		return EINVAL;
2493 	}
2494 
2495 	return if_addmulti_anon(interface, maddr, ifmap);
2496 }
2497 
2498 errno_t
ifnet_remove_multicast(ifmultiaddr_t ifma)2499 ifnet_remove_multicast(ifmultiaddr_t ifma)
2500 {
2501 	struct sockaddr *maddr;
2502 
2503 	if (ifma == NULL) {
2504 		return EINVAL;
2505 	}
2506 
2507 	maddr = ifma->ifma_addr;
2508 	/* Don't let users screw up protocols' entries. */
2509 	if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK) {
2510 		return EINVAL;
2511 	}
2512 
2513 	return if_delmulti_anon(ifma->ifma_ifp, maddr);
2514 }
2515 
/*
 * Returns a NULL-terminated snapshot of the multicast memberships of
 * `ifp'.  Every entry holds a reference; release the list with
 * ifnet_free_multicast_list().
 */
errno_t
ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t *__null_terminated *ret_addresses)
{
	int count = 0;
	int cmax = 0;
	struct ifmultiaddr *addr;

	if (ifp == NULL || ret_addresses == NULL) {
		return EINVAL;
	}
	*ret_addresses = NULL;

	/* First pass: size the array while holding the interface lock. */
	ifnet_lock_shared(ifp);
	LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
		cmax++;
	}

	/*
	 * NOTE(review): blocking allocation while holding the shared ifnet
	 * lock — matches existing practice in this file; the lock stays held
	 * so the list cannot change between the two passes.
	 */
	ifmultiaddr_t *addresses = kalloc_type(ifmultiaddr_t, cmax + 1, Z_WAITOK);
	if (addresses == NULL) {
		ifnet_lock_done(ifp);
		return ENOMEM;
	}

	/* Second pass: record and reference each membership. */
	LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
		if (count + 1 > cmax) {
			break;
		}
		addresses[count] = (ifmultiaddr_t)addr;
		ifmaddr_reference(addresses[count]);
		count++;
	}
	addresses[cmax] = NULL;
	ifnet_lock_done(ifp);

	*ret_addresses = __unsafe_null_terminated_from_indexable(addresses, &addresses[cmax]);

	return 0;
}
2554 
2555 void
ifnet_free_multicast_list(ifmultiaddr_t * __null_terminated addresses)2556 ifnet_free_multicast_list(ifmultiaddr_t *__null_terminated addresses)
2557 {
2558 	int i = 0;
2559 
2560 	if (addresses == NULL) {
2561 		return;
2562 	}
2563 
2564 	for (ifmultiaddr_t *__null_terminated ptr = addresses; *ptr != NULL; ptr++, i++) {
2565 		ifmaddr_release(*ptr);
2566 	}
2567 
2568 	ifmultiaddr_t *free_addresses = __unsafe_null_terminated_to_indexable(addresses);
2569 	kfree_type(ifmultiaddr_t, i + 1, free_addresses);
2570 }
2571 
/*
 * Looks up an attached interface by its full name (e.g. "en0") and
 * returns it in *ifpp with a reference the caller must release via
 * ifnet_release().  Returns ENXIO when no interface matches.
 */
errno_t
ifnet_find_by_name(const char *ifname, ifnet_t *ifpp)
{
	struct ifnet *ifp;
	size_t namelen;

	if (ifname == NULL) {
		return EINVAL;
	}

	namelen = strlen(ifname);

	*ifpp = NULL;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		struct ifaddr *ifa;
		struct sockaddr_dl *ll_addr;

		/* The link-level address stores the interface name in sdl_data. */
		ifa = ifnet_addrs[ifp->if_index - 1];
		if (ifa == NULL) {
			continue;
		}

		IFA_LOCK(ifa);
		ll_addr = SDL(ifa->ifa_addr);

		/* Exact match: same name length and identical bytes. */
		if (namelen == ll_addr->sdl_nlen &&
		    strlcmp(ll_addr->sdl_data, ifname, namelen) == 0) {
			IFA_UNLOCK(ifa);
			*ifpp = ifp;
			ifnet_reference(*ifpp);
			break;
		}
		IFA_UNLOCK(ifa);
	}
	ifnet_head_done();

	/* ifp is NULL iff the loop ran to completion without a match. */
	return (ifp == NULL) ? ENXIO : 0;
}
2612 
/*
 * Returns the list of attached interfaces of `family' (IFNET_FAMILY_ANY
 * for all).  Entries hold references; free with ifnet_list_free().
 */
errno_t
ifnet_list_get(ifnet_family_t family, ifnet_t *__counted_by(*count) *list,
    u_int32_t *count)
{
	return ifnet_list_get_common(family, FALSE, list, count);
}
2619 
/*
 * Kernel-internal variant of ifnet_list_get(); the get_all flag is
 * currently unused by ifnet_list_get_common().
 */
__private_extern__ errno_t
ifnet_list_get_all(ifnet_family_t family, ifnet_t *__counted_by(*count) *list,
    u_int32_t *count)
{
	return ifnet_list_get_common(family, TRUE, list, count);
}
2626 
/* Temporary singly-linked list node used while collecting interfaces. */
struct ifnet_list {
	SLIST_ENTRY(ifnet_list) ifl_le;         /* list linkage */
	struct ifnet            *ifl_ifp;       /* referenced interface */
};
2631 
/*
 * Collects all attached interfaces matching `family' (IFNET_FAMILY_ANY
 * for all) into a newly allocated array of `*count' entries plus a
 * zeroed terminator slot.  Each entry holds a reference.  Returns ENXIO
 * when nothing matches; `get_all' is currently unused.
 */
static errno_t
ifnet_list_get_common(ifnet_family_t family, boolean_t get_all,
    ifnet_t *__counted_by(*count) *list, u_int32_t *count)
{
#pragma unused(get_all)
	SLIST_HEAD(, ifnet_list) ifl_head;
	struct ifnet_list *ifl, *ifl_tmp;
	struct ifnet *ifp;
	ifnet_t *tmp_list = NULL;
	int cnt = 0;
	errno_t err = 0;

	SLIST_INIT(&ifl_head);

	if (list == NULL || count == NULL) {
		err = EINVAL;
		goto done;
	}
	*list = NULL;
	*count = 0;

	/* Stage matching interfaces (with references) on a temporary list. */
	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (family == IFNET_FAMILY_ANY || ifp->if_family == family) {
			ifl = kalloc_type(struct ifnet_list, Z_WAITOK | Z_ZERO);
			if (ifl == NULL) {
				ifnet_head_done();
				err = ENOMEM;
				goto done;
			}
			ifl->ifl_ifp = ifp;
			ifnet_reference(ifp);
			SLIST_INSERT_HEAD(&ifl_head, ifl, ifl_le);
			++cnt;
		}
	}
	ifnet_head_done();

	if (cnt == 0) {
		err = ENXIO;
		goto done;
	}

	/* One extra zeroed slot acts as a terminator. */
	tmp_list = kalloc_type(ifnet_t, cnt + 1, Z_WAITOK | Z_ZERO);
	if (tmp_list == NULL) {
		err = ENOMEM;
		goto done;
	}
	*list = tmp_list;
	*count = cnt;

done:
	/*
	 * Drain the temporary list: on success transfer the references into
	 * *list (filling from the end undoes the SLIST reversal); on failure
	 * drop every staged reference.
	 */
	SLIST_FOREACH_SAFE(ifl, &ifl_head, ifl_le, ifl_tmp) {
		SLIST_REMOVE(&ifl_head, ifl, ifnet_list, ifl_le);
		if (err == 0) {
			(*list)[--cnt] = ifl->ifl_ifp;
		} else {
			ifnet_release(ifl->ifl_ifp);
		}
		kfree_type(struct ifnet_list, ifl);
	}

	return err;
}
2696 
2697 void
ifnet_list_free(ifnet_t * __null_terminated interfaces)2698 ifnet_list_free(ifnet_t *__null_terminated interfaces)
2699 {
2700 	int i = 0;
2701 
2702 	if (interfaces == NULL) {
2703 		return;
2704 	}
2705 
2706 	for (ifnet_t *__null_terminated ptr = interfaces; *ptr != NULL; ptr++, i++) {
2707 		ifnet_release(*ptr);
2708 	}
2709 
2710 	ifnet_t *free_interfaces = __unsafe_null_terminated_to_indexable(interfaces);
2711 	kfree_type(ifnet_t, i + 1, free_interfaces);
2712 }
2713 
2714 void
ifnet_list_free_counted_by_internal(ifnet_t * __counted_by (count)interfaces,uint32_t count)2715 ifnet_list_free_counted_by_internal(ifnet_t *__counted_by(count) interfaces, uint32_t count)
2716 {
2717 	if (interfaces == NULL) {
2718 		return;
2719 	}
2720 	for (int i = 0; i < count; i++) {
2721 		ifnet_release(interfaces[i]);
2722 	}
2723 
2724 	/*
2725 	 * When we allocated the ifnet_list, we returned only the number
2726 	 * of ifnet_t pointers without the null terminator in the `count'
2727 	 * variable, so we cheat here by freeing everything.
2728 	 */
2729 	ifnet_t *free_interfaces = interfaces;
2730 	kfree_type(ifnet_t, count + 1, free_interfaces);
2731 	interfaces = NULL;
2732 	count = 0;
2733 }
2734 
2735 /*************************************************************************/
2736 /* ifaddr_t accessors						*/
2737 /*************************************************************************/
2738 
2739 errno_t
ifaddr_reference(ifaddr_t ifa)2740 ifaddr_reference(ifaddr_t ifa)
2741 {
2742 	if (ifa == NULL) {
2743 		return EINVAL;
2744 	}
2745 
2746 	ifa_addref(ifa);
2747 	return 0;
2748 }
2749 
2750 errno_t
ifaddr_release(ifaddr_t ifa)2751 ifaddr_release(ifaddr_t ifa)
2752 {
2753 	if (ifa == NULL) {
2754 		return EINVAL;
2755 	}
2756 
2757 	ifa_remref(ifa);
2758 	return 0;
2759 }
2760 
2761 sa_family_t
ifaddr_address_family(ifaddr_t ifa)2762 ifaddr_address_family(ifaddr_t ifa)
2763 {
2764 	sa_family_t family = 0;
2765 
2766 	if (ifa != NULL) {
2767 		IFA_LOCK_SPIN(ifa);
2768 		if (ifa->ifa_addr != NULL) {
2769 			family = ifa->ifa_addr->sa_family;
2770 		}
2771 		IFA_UNLOCK(ifa);
2772 	}
2773 	return family;
2774 }
2775 
2776 errno_t
ifaddr_address(ifaddr_t ifa,struct sockaddr * out_addr,u_int32_t addr_size)2777 ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2778 {
2779 	u_int32_t copylen;
2780 
2781 	if (ifa == NULL || out_addr == NULL) {
2782 		return EINVAL;
2783 	}
2784 
2785 	IFA_LOCK_SPIN(ifa);
2786 	if (ifa->ifa_addr == NULL) {
2787 		IFA_UNLOCK(ifa);
2788 		return ENOTSUP;
2789 	}
2790 
2791 	copylen = (addr_size >= ifa->ifa_addr->sa_len) ?
2792 	    ifa->ifa_addr->sa_len : addr_size;
2793 	SOCKADDR_COPY(ifa->ifa_addr, out_addr, copylen);
2794 
2795 	if (ifa->ifa_addr->sa_len > addr_size) {
2796 		IFA_UNLOCK(ifa);
2797 		return EMSGSIZE;
2798 	}
2799 
2800 	IFA_UNLOCK(ifa);
2801 	return 0;
2802 }
2803 
2804 errno_t
ifaddr_dstaddress(ifaddr_t ifa,struct sockaddr * out_addr,u_int32_t addr_size)2805 ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2806 {
2807 	u_int32_t copylen;
2808 
2809 	if (ifa == NULL || out_addr == NULL) {
2810 		return EINVAL;
2811 	}
2812 
2813 	IFA_LOCK_SPIN(ifa);
2814 	if (ifa->ifa_dstaddr == NULL) {
2815 		IFA_UNLOCK(ifa);
2816 		return ENOTSUP;
2817 	}
2818 
2819 	copylen = (addr_size >= ifa->ifa_dstaddr->sa_len) ?
2820 	    ifa->ifa_dstaddr->sa_len : addr_size;
2821 	SOCKADDR_COPY(ifa->ifa_dstaddr, out_addr, copylen);
2822 
2823 	if (ifa->ifa_dstaddr->sa_len > addr_size) {
2824 		IFA_UNLOCK(ifa);
2825 		return EMSGSIZE;
2826 	}
2827 
2828 	IFA_UNLOCK(ifa);
2829 	return 0;
2830 }
2831 
2832 errno_t
ifaddr_netmask(ifaddr_t ifa,struct sockaddr * out_addr,u_int32_t addr_size)2833 ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2834 {
2835 	u_int32_t copylen;
2836 
2837 	if (ifa == NULL || out_addr == NULL) {
2838 		return EINVAL;
2839 	}
2840 
2841 	IFA_LOCK_SPIN(ifa);
2842 	if (ifa->ifa_netmask == NULL) {
2843 		IFA_UNLOCK(ifa);
2844 		return ENOTSUP;
2845 	}
2846 
2847 	copylen = addr_size >= ifa->ifa_netmask->sa_len ?
2848 	    ifa->ifa_netmask->sa_len : addr_size;
2849 	SOCKADDR_COPY(ifa->ifa_netmask, out_addr, copylen);
2850 
2851 	if (ifa->ifa_netmask->sa_len > addr_size) {
2852 		IFA_UNLOCK(ifa);
2853 		return EMSGSIZE;
2854 	}
2855 
2856 	IFA_UNLOCK(ifa);
2857 	return 0;
2858 }
2859 
2860 ifnet_t
ifaddr_ifnet(ifaddr_t ifa)2861 ifaddr_ifnet(ifaddr_t ifa)
2862 {
2863 	struct ifnet *ifp;
2864 
2865 	if (ifa == NULL) {
2866 		return NULL;
2867 	}
2868 
2869 	/* ifa_ifp is set once at creation time; it is never changed */
2870 	ifp = ifa->ifa_ifp;
2871 
2872 	return ifp;
2873 }
2874 
2875 ifaddr_t
ifaddr_withaddr(const struct sockaddr * address)2876 ifaddr_withaddr(const struct sockaddr *address)
2877 {
2878 	if (address == NULL) {
2879 		return NULL;
2880 	}
2881 
2882 	return ifa_ifwithaddr(address);
2883 }
2884 
2885 ifaddr_t
ifaddr_withdstaddr(const struct sockaddr * address)2886 ifaddr_withdstaddr(const struct sockaddr *address)
2887 {
2888 	if (address == NULL) {
2889 		return NULL;
2890 	}
2891 
2892 	return ifa_ifwithdstaddr(address);
2893 }
2894 
2895 ifaddr_t
ifaddr_withnet(const struct sockaddr * net)2896 ifaddr_withnet(const struct sockaddr *net)
2897 {
2898 	if (net == NULL) {
2899 		return NULL;
2900 	}
2901 
2902 	return ifa_ifwithnet(net);
2903 }
2904 
2905 ifaddr_t
ifaddr_withroute(int flags,const struct sockaddr * destination,const struct sockaddr * gateway)2906 ifaddr_withroute(int flags, const struct sockaddr *destination,
2907     const struct sockaddr *gateway)
2908 {
2909 	if (destination == NULL || gateway == NULL) {
2910 		return NULL;
2911 	}
2912 
2913 	return ifa_ifwithroute(flags, destination, gateway);
2914 }
2915 
2916 ifaddr_t
ifaddr_findbestforaddr(const struct sockaddr * addr,ifnet_t interface)2917 ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface)
2918 {
2919 	if (addr == NULL || interface == NULL) {
2920 		return NULL;
2921 	}
2922 
2923 	return ifaof_ifpforaddr_select(addr, interface);
2924 }
2925 
2926 errno_t
ifaddr_get_ia6_flags(ifaddr_t ifa,u_int32_t * out_flags)2927 ifaddr_get_ia6_flags(ifaddr_t ifa, u_int32_t *out_flags)
2928 {
2929 	sa_family_t family = 0;
2930 
2931 	if (ifa == NULL || out_flags == NULL) {
2932 		return EINVAL;
2933 	}
2934 
2935 	IFA_LOCK_SPIN(ifa);
2936 	if (ifa->ifa_addr != NULL) {
2937 		family = ifa->ifa_addr->sa_family;
2938 	}
2939 	IFA_UNLOCK(ifa);
2940 
2941 	if (family != AF_INET6) {
2942 		return EINVAL;
2943 	}
2944 
2945 	*out_flags = ifatoia6(ifa)->ia6_flags;
2946 	return 0;
2947 }
2948 
2949 errno_t
ifmaddr_reference(ifmultiaddr_t ifmaddr)2950 ifmaddr_reference(ifmultiaddr_t ifmaddr)
2951 {
2952 	if (ifmaddr == NULL) {
2953 		return EINVAL;
2954 	}
2955 
2956 	IFMA_ADDREF(ifmaddr);
2957 	return 0;
2958 }
2959 
2960 errno_t
ifmaddr_release(ifmultiaddr_t ifmaddr)2961 ifmaddr_release(ifmultiaddr_t ifmaddr)
2962 {
2963 	if (ifmaddr == NULL) {
2964 		return EINVAL;
2965 	}
2966 
2967 	IFMA_REMREF(ifmaddr);
2968 	return 0;
2969 }
2970 
2971 errno_t
ifmaddr_address(ifmultiaddr_t ifma,struct sockaddr * out_addr,u_int32_t addr_size)2972 ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2973     u_int32_t addr_size)
2974 {
2975 	u_int32_t copylen;
2976 
2977 	if (ifma == NULL || out_addr == NULL) {
2978 		return EINVAL;
2979 	}
2980 
2981 	IFMA_LOCK(ifma);
2982 	if (ifma->ifma_addr == NULL) {
2983 		IFMA_UNLOCK(ifma);
2984 		return ENOTSUP;
2985 	}
2986 
2987 	copylen = (addr_size >= ifma->ifma_addr->sa_len ?
2988 	    ifma->ifma_addr->sa_len : addr_size);
2989 	SOCKADDR_COPY(ifma->ifma_addr, out_addr, copylen);
2990 
2991 	if (ifma->ifma_addr->sa_len > addr_size) {
2992 		IFMA_UNLOCK(ifma);
2993 		return EMSGSIZE;
2994 	}
2995 	IFMA_UNLOCK(ifma);
2996 	return 0;
2997 }
2998 
2999 errno_t
ifmaddr_lladdress(ifmultiaddr_t ifma,struct sockaddr * out_addr,u_int32_t addr_size)3000 ifmaddr_lladdress(ifmultiaddr_t ifma, struct sockaddr *out_addr,
3001     u_int32_t addr_size)
3002 {
3003 	struct ifmultiaddr *ifma_ll;
3004 
3005 	if (ifma == NULL || out_addr == NULL) {
3006 		return EINVAL;
3007 	}
3008 	if ((ifma_ll = ifma->ifma_ll) == NULL) {
3009 		return ENOTSUP;
3010 	}
3011 
3012 	return ifmaddr_address(ifma_ll, out_addr, addr_size);
3013 }
3014 
3015 ifnet_t
ifmaddr_ifnet(ifmultiaddr_t ifma)3016 ifmaddr_ifnet(ifmultiaddr_t ifma)
3017 {
3018 	return (ifma == NULL) ? NULL : ifma->ifma_ifp;
3019 }
3020 
3021 /**************************************************************************/
3022 /* interface cloner						*/
3023 /**************************************************************************/
3024 
/*
 * Registers an interface cloner described by `cloner_params' and returns
 * an opaque handle in *ifcloner.  Fails with EEXIST if a cloner with the
 * same name is already registered, and EINVAL on malformed parameters
 * (missing callbacks or a name of length >= IFNAMSIZ).
 */
errno_t
ifnet_clone_attach(struct ifnet_clone_params *cloner_params,
    if_clone_t *ifcloner)
{
	errno_t error = 0;
	struct if_clone *ifc = NULL;
	size_t namelen;

	if (cloner_params == NULL || ifcloner == NULL ||
	    cloner_params->ifc_name == NULL ||
	    cloner_params->ifc_create == NULL ||
	    cloner_params->ifc_destroy == NULL ||
	    (namelen = strlen(cloner_params->ifc_name)) >= IFNAMSIZ) {
		error = EINVAL;
		goto fail;
	}

	if (if_clone_lookup(__terminated_by_to_indexable(cloner_params->ifc_name),
	    namelen, NULL) != NULL) {
		printf("%s: already a cloner for %s\n", __func__,
		    cloner_params->ifc_name);
		error = EEXIST;
		goto fail;
	}

	/* Z_NOFAIL: allocation cannot fail, no NULL check needed here. */
	ifc = kalloc_type(struct if_clone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	/* assumes ifc_name holds IFNAMSIZ + 1 bytes — length was checked above */
	strlcpy(ifc->ifc_name, cloner_params->ifc_name, IFNAMSIZ + 1);
	ifc->ifc_namelen = (uint8_t)namelen;
	ifc->ifc_maxunit = IF_MAXUNIT;
	ifc->ifc_create = cloner_params->ifc_create;
	ifc->ifc_destroy = cloner_params->ifc_destroy;

	error = if_clone_attach(ifc);
	if (error != 0) {
		printf("%s: if_clone_attach failed %d\n", __func__, error);
		goto fail;
	}
	*ifcloner = ifc;

	return 0;
fail:
	/* ifc is still NULL when validation or the lookup failed */
	if (ifc != NULL) {
		kfree_type(struct if_clone, ifc);
	}
	return error;
}
3071 
3072 errno_t
ifnet_clone_detach(if_clone_t ifcloner)3073 ifnet_clone_detach(if_clone_t ifcloner)
3074 {
3075 	errno_t error = 0;
3076 	struct if_clone *ifc = ifcloner;
3077 
3078 	if (ifc == NULL) {
3079 		return EINVAL;
3080 	}
3081 
3082 	if ((if_clone_lookup(ifc->ifc_name, ifc->ifc_namelen, NULL)) == NULL) {
3083 		printf("%s: no cloner for %s\n", __func__, ifc->ifc_name);
3084 		error = EINVAL;
3085 		goto fail;
3086 	}
3087 
3088 	if_clone_detach(ifc);
3089 
3090 	kfree_type(struct if_clone, ifc);
3091 
3092 fail:
3093 	return error;
3094 }
3095 
3096 /**************************************************************************/
3097 /* misc							*/
3098 /**************************************************************************/
3099 
3100 static errno_t
ifnet_get_local_ports_extended_inner(ifnet_t ifp,protocol_family_t protocol,u_int32_t flags,u_int8_t bitfield[bitstr_size (IP_PORTRANGE_SIZE)])3101 ifnet_get_local_ports_extended_inner(ifnet_t ifp, protocol_family_t protocol,
3102     u_int32_t flags, u_int8_t bitfield[bitstr_size(IP_PORTRANGE_SIZE)])
3103 {
3104 	u_int32_t ifindex;
3105 
3106 	/* no point in continuing if no address is assigned */
3107 	if (ifp != NULL && TAILQ_EMPTY(&ifp->if_addrhead)) {
3108 		return 0;
3109 	}
3110 
3111 	if_ports_used_update_wakeuuid(ifp);
3112 
3113 #if SKYWALK
3114 	if (netns_is_enabled()) {
3115 		netns_get_local_ports(ifp, protocol, flags, bitfield);
3116 	}
3117 #endif /* SKYWALK */
3118 
3119 	ifindex = (ifp != NULL) ? ifp->if_index : 0;
3120 
3121 	if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY)) {
3122 		udp_get_ports_used(ifp, protocol, flags,
3123 		    bitfield);
3124 	}
3125 
3126 	if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY)) {
3127 		tcp_get_ports_used(ifp, protocol, flags,
3128 		    bitfield);
3129 	}
3130 
3131 	return 0;
3132 }
3133 
3134 errno_t
ifnet_get_local_ports_extended(ifnet_t ifp,protocol_family_t protocol,u_int32_t flags,u_int8_t bitfield[IP_PORTRANGE_BITFIELD_LEN])3135 ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol,
3136     u_int32_t flags, u_int8_t bitfield[IP_PORTRANGE_BITFIELD_LEN])
3137 {
3138 	ifnet_ref_t parent_ifp = NULL;
3139 
3140 	if (bitfield == NULL) {
3141 		return EINVAL;
3142 	}
3143 
3144 	switch (protocol) {
3145 	case PF_UNSPEC:
3146 	case PF_INET:
3147 	case PF_INET6:
3148 		break;
3149 	default:
3150 		return EINVAL;
3151 	}
3152 
3153 	/* bit string is long enough to hold 16-bit port values */
3154 	bzero(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
3155 
3156 	ifnet_get_local_ports_extended_inner(ifp, protocol, flags, bitfield);
3157 
3158 	/* get local ports for parent interface */
3159 	if (ifp != NULL && ifnet_get_delegate_parent(ifp, &parent_ifp) == 0) {
3160 		ifnet_get_local_ports_extended_inner(parent_ifp, protocol,
3161 		    flags, bitfield);
3162 		ifnet_release_delegate_parent(ifp);
3163 	}
3164 
3165 	return 0;
3166 }
3167 
3168 errno_t
ifnet_get_local_ports(ifnet_t ifp,u_int8_t bitfield[IP_PORTRANGE_BITFIELD_LEN])3169 ifnet_get_local_ports(ifnet_t ifp, u_int8_t bitfield[IP_PORTRANGE_BITFIELD_LEN])
3170 {
3171 	u_int32_t flags = IFNET_GET_LOCAL_PORTS_WILDCARDOK;
3172 
3173 	return ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags,
3174 	           bitfield);
3175 }
3176 
3177 errno_t
ifnet_notice_node_presence(ifnet_t ifp,struct sockaddr * sa,int32_t rssi,int lqm,int npm,u_int8_t srvinfo[48])3178 ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi,
3179     int lqm, int npm, u_int8_t srvinfo[48])
3180 {
3181 	if (ifp == NULL || sa == NULL || srvinfo == NULL) {
3182 		return EINVAL;
3183 	}
3184 	if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3185 		return EINVAL;
3186 	}
3187 	if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
3188 		return EINVAL;
3189 	}
3190 
3191 	return dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo);
3192 }
3193 
3194 errno_t
ifnet_notice_node_presence_v2(ifnet_t ifp,struct sockaddr * sa,struct sockaddr_dl * sdl,int32_t rssi,int lqm,int npm,u_int8_t srvinfo[48])3195 ifnet_notice_node_presence_v2(ifnet_t ifp, struct sockaddr *sa, struct sockaddr_dl *sdl,
3196     int32_t rssi, int lqm, int npm, u_int8_t srvinfo[48])
3197 {
3198 	/* Support older version if sdl is NULL */
3199 	if (sdl == NULL) {
3200 		return ifnet_notice_node_presence(ifp, sa, rssi, lqm, npm, srvinfo);
3201 	}
3202 
3203 	if (ifp == NULL || sa == NULL || srvinfo == NULL) {
3204 		return EINVAL;
3205 	}
3206 	if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3207 		return EINVAL;
3208 	}
3209 
3210 	if (sa->sa_family != AF_INET6) {
3211 		return EINVAL;
3212 	}
3213 
3214 	if (sdl->sdl_family != AF_LINK) {
3215 		return EINVAL;
3216 	}
3217 
3218 	return dlil_node_present_v2(ifp, sa, sdl, rssi, lqm, npm, srvinfo);
3219 }
3220 
3221 errno_t
ifnet_notice_node_absence(ifnet_t ifp,struct sockaddr * sa)3222 ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa)
3223 {
3224 	if (ifp == NULL || sa == NULL) {
3225 		return EINVAL;
3226 	}
3227 	if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3228 		return EINVAL;
3229 	}
3230 	if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
3231 		return EINVAL;
3232 	}
3233 
3234 	dlil_node_absent(ifp, sa);
3235 	return 0;
3236 }
3237 
3238 errno_t
ifnet_notice_primary_elected(ifnet_t ifp)3239 ifnet_notice_primary_elected(ifnet_t ifp)
3240 {
3241 	if (ifp == NULL) {
3242 		return EINVAL;
3243 	}
3244 
3245 	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PRIMARY_ELECTED, NULL, 0, FALSE);
3246 	return 0;
3247 }
3248 
3249 errno_t
ifnet_tx_compl_status(ifnet_t ifp,mbuf_t m,tx_compl_val_t val)3250 ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val)
3251 {
3252 #pragma unused(val)
3253 
3254 	m_do_tx_compl_callback(m, ifp);
3255 
3256 	return 0;
3257 }
3258 
3259 errno_t
ifnet_tx_compl(ifnet_t ifp,mbuf_t m)3260 ifnet_tx_compl(ifnet_t ifp, mbuf_t m)
3261 {
3262 	m_do_tx_compl_callback(m, ifp);
3263 
3264 	return 0;
3265 }
3266 
3267 errno_t
ifnet_report_issues(ifnet_t ifp,u_int8_t modid[IFNET_MODIDLEN],u_int8_t info[IFNET_MODARGLEN])3268 ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN],
3269     u_int8_t info[IFNET_MODARGLEN])
3270 {
3271 	if (ifp == NULL || modid == NULL) {
3272 		return EINVAL;
3273 	}
3274 
3275 	dlil_report_issues(ifp, modid, info);
3276 	return 0;
3277 }
3278 
/*
 * Set (or clear, with delegated_ifp == NULL) the delegate interface of
 * 'ifp'.  The delegate's type/family/subfamily and its expensive/
 * constrained attributes are cached in ifp->if_delegated under the
 * ifnet exclusive lock.  A delegation that would create a cycle in the
 * delegate chain is refused (logged, but still returns 0).  The old
 * delegate's reference is released after the lock is dropped, and a
 * KEV_DL_IFDELEGATE_CHANGED event is posted on any change.
 * Returns EINVAL for a NULL ifp, ENXIO if 'ifp' is not attached.
 */
errno_t
ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp)
{
	ifnet_t odifp = NULL;

	if (ifp == NULL) {
		return EINVAL;
	} else if (!ifnet_is_attached(ifp, 1)) {
		/* ifnet_is_attached(ifp, 1) took an io ref; released at 'done' */
		return ENXIO;
	}

	ifnet_lock_exclusive(ifp);
	odifp = ifp->if_delegated.ifp;
	if (odifp != NULL && odifp == delegated_ifp) {
		/* delegate info is unchanged; nothing more to do */
		ifnet_lock_done(ifp);
		goto done;
	}
	// Test if this delegate interface would cause a loop
	ifnet_t delegate_check_ifp = delegated_ifp;
	while (delegate_check_ifp != NULL) {
		if (delegate_check_ifp == ifp) {
			/* NOTE: a rejected loop still returns 0 to the caller */
			printf("%s: delegating to %s would cause a loop\n",
			    ifp->if_xname, delegated_ifp->if_xname);
			ifnet_lock_done(ifp);
			goto done;
		}
		delegate_check_ifp = delegate_check_ifp->if_delegated.ifp;
	}
	/* wipe the cached info; refilled below if a new delegate is set */
	bzero(&ifp->if_delegated, sizeof(ifp->if_delegated));
	if (delegated_ifp != NULL && ifp != delegated_ifp) {
		uint32_t        set_eflags;

		ifp->if_delegated.ifp = delegated_ifp;
		ifnet_reference(delegated_ifp);
		ifp->if_delegated.type = delegated_ifp->if_type;
		ifp->if_delegated.family = delegated_ifp->if_family;
		ifp->if_delegated.subfamily = delegated_ifp->if_subfamily;
		ifp->if_delegated.expensive =
		    delegated_ifp->if_eflags & IFEF_EXPENSIVE ? 1 : 0;
		ifp->if_delegated.constrained =
		    delegated_ifp->if_xflags & IFXF_CONSTRAINED ? 1 : 0;
		ifp->if_delegated.ultra_constrained =
		    delegated_ifp->if_xflags & IFXF_ULTRA_CONSTRAINED ? 1 : 0;

		/*
		 * Propagate flags related to ECN from delegated interface
		 */
		if_clear_eflags(ifp, IFEF_ECN_ENABLE | IFEF_ECN_DISABLE);
		set_eflags = (delegated_ifp->if_eflags &
		    (IFEF_ECN_ENABLE | IFEF_ECN_DISABLE));
		if_set_eflags(ifp, set_eflags);
		printf("%s: is now delegating %s (type 0x%x, family %u, "
		    "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname,
		    delegated_ifp->if_type, delegated_ifp->if_family,
		    delegated_ifp->if_subfamily);
	}

	ifnet_lock_done(ifp);

	/* drop the reference held on the previous delegate, if any */
	if (odifp != NULL) {
		if (odifp != delegated_ifp) {
			printf("%s: is no longer delegating %s\n",
			    ifp->if_xname, odifp->if_xname);
		}
		ifnet_release(odifp);
	}

	/* Generate a kernel event */
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IFDELEGATE_CHANGED, NULL, 0, FALSE);

done:
	/* Release the io ref count */
	ifnet_decr_iorefcnt(ifp);

	return 0;
}
3356 
3357 errno_t
ifnet_get_delegate(ifnet_t ifp,ifnet_t * pdelegated_ifp)3358 ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp)
3359 {
3360 	if (ifp == NULL || pdelegated_ifp == NULL) {
3361 		return EINVAL;
3362 	} else if (!ifnet_is_attached(ifp, 1)) {
3363 		return ENXIO;
3364 	}
3365 
3366 	ifnet_lock_shared(ifp);
3367 	if (ifp->if_delegated.ifp != NULL) {
3368 		ifnet_reference(ifp->if_delegated.ifp);
3369 	}
3370 	*pdelegated_ifp = ifp->if_delegated.ifp;
3371 	ifnet_lock_done(ifp);
3372 
3373 	/* Release the io ref count */
3374 	ifnet_decr_iorefcnt(ifp);
3375 
3376 	return 0;
3377 }
3378 
/*
 * Collect keep-alive offload frames for 'ifp' into 'frames_array'
 * (presumably so the driver can emit them on the host's behalf --
 * confirm with driver-side usage).  Slots are filled in a fixed order:
 * IPsec SA keep-alives first, then UDP, then TCP keep-alives in the
 * remaining room.  TCP/UDP collection is skipped entirely on CLAT46
 * interfaces.  'frame_data_offset' is where frame payload begins and
 * must be 32-bit aligned and smaller than
 * IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE.  On success
 * '*used_frames_count' holds the number of slots filled.
 */
errno_t
ifnet_get_keepalive_offload_frames(ifnet_t ifp,
    struct ifnet_keepalive_offload_frame *__counted_by(frames_array_count) frames_array,
    u_int32_t frames_array_count, size_t frame_data_offset,
    u_int32_t *used_frames_count)
{
	u_int32_t i;

	if (frames_array == NULL || used_frames_count == NULL ||
	    frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
		return EINVAL;
	}

	/* frame_data_offset should be 32-bit aligned */
	if (P2ROUNDUP(frame_data_offset, sizeof(u_int32_t)) !=
	    frame_data_offset) {
		return EINVAL;
	}

	*used_frames_count = 0;
	if (frames_array_count == 0) {
		return 0;
	}


	/* zero every slot before the fill routines see it */
	for (i = 0; i < frames_array_count; i++) {
		struct ifnet_keepalive_offload_frame *frame = frames_array + i;
		bzero(frame, sizeof(struct ifnet_keepalive_offload_frame));
	}

	/* First collect IPsec related keep-alive frames */
	*used_frames_count = key_fill_offload_frames_for_savs(ifp,
	    frames_array, frames_array_count, frame_data_offset);

	/* Keep-alive offload not required for TCP/UDP on CLAT interface */
	if (IS_INTF_CLAT46(ifp)) {
		return 0;
	}

	/* If there is more room, collect other UDP keep-alive frames */
	if (*used_frames_count < frames_array_count) {
		udp_fill_keepalive_offload_frames(ifp, frames_array,
		    frames_array_count, frame_data_offset,
		    used_frames_count);
	}

	/* If there is more room, collect other TCP keep-alive frames */
	if (*used_frames_count < frames_array_count) {
		tcp_fill_keepalive_offload_frames(ifp, frames_array,
		    frames_array_count, frame_data_offset,
		    used_frames_count);
	}

	VERIFY(*used_frames_count <= frames_array_count);

	return 0;
}
3436 
3437 errno_t
ifnet_notify_tcp_keepalive_offload_timeout(ifnet_t ifp,struct ifnet_keepalive_offload_frame * frame)3438 ifnet_notify_tcp_keepalive_offload_timeout(ifnet_t ifp,
3439     struct ifnet_keepalive_offload_frame *frame)
3440 {
3441 	errno_t error = 0;
3442 
3443 	if (ifp == NULL || frame == NULL) {
3444 		return EINVAL;
3445 	}
3446 
3447 	if (frame->type != IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP) {
3448 		return EINVAL;
3449 	}
3450 	if (frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 &&
3451 	    frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6) {
3452 		return EINVAL;
3453 	}
3454 	if (frame->local_port == 0 || frame->remote_port == 0) {
3455 		return EINVAL;
3456 	}
3457 
3458 	error = tcp_notify_kao_timeout(ifp, frame);
3459 
3460 	return error;
3461 }
3462 
/*
 * Driver-facing KPI: record the latest link status report for 'ifp'.
 * The report is copied into ifp->if_link_status (allocated on first
 * use) while holding the ifnet shared lock and if_link_status_lock
 * exclusively.  Cellular (IFT_CELLULAR) and Wi-Fi reports are parsed
 * separately; any other interface type is accepted without storing
 * anything.  Returns ENOTSUP on a version mismatch, EINVAL on a length
 * mismatch, ENXIO if the interface is detached, ENOMEM on allocation
 * failure.
 */
errno_t
ifnet_link_status_report(ifnet_t ifp, const void *__sized_by(buffer_len) buffer,
    size_t buffer_len)
{
	struct if_link_status ifsr = {};
	errno_t err = 0;

	if (ifp == NULL || buffer == NULL || buffer_len == 0) {
		return EINVAL;
	}

	ifnet_lock_shared(ifp);

	/*
	 * Make sure that the interface is attached but there is no need
	 * to take a reference because this call is coming from the driver.
	 */
	if (!ifnet_is_attached(ifp, 0)) {
		ifnet_lock_done(ifp);
		return ENXIO;
	}

	lck_rw_lock_exclusive(&ifp->if_link_status_lock);

	/*
	 * If this is the first status report then allocate memory
	 * to store it.
	 */
	if (ifp->if_link_status == NULL) {
		ifp->if_link_status = kalloc_type(struct if_link_status, Z_ZERO);
		if (ifp->if_link_status == NULL) {
			err = ENOMEM;
			goto done;
		}
	}

	/* copy at most sizeof(ifsr); 'ifsr' was zero-initialized above */
	memcpy(&ifsr, buffer, MIN(sizeof(ifsr), buffer_len));
	if (ifp->if_type == IFT_CELLULAR) {
		struct if_cellular_status_v1 *if_cell_sr, *new_cell_sr;
		/*
		 * Currently we have a single version -- if it does
		 * not match, just return.
		 */
		if (ifsr.ifsr_version !=
		    IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION) {
			err = ENOTSUP;
			goto done;
		}

		if (ifsr.ifsr_len != sizeof(*if_cell_sr)) {
			err = EINVAL;
			goto done;
		}

		if_cell_sr =
		    &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
		new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
		/* Check if we need to act on any new notifications */
		if ((new_cell_sr->valid_bitmask &
		    IF_CELL_UL_MSS_RECOMMENDED_VALID) &&
		    new_cell_sr->mss_recommended !=
		    if_cell_sr->mss_recommended) {
			/* recommended MSS changed: have TCP re-evaluate soon */
			os_atomic_or(&tcbinfo.ipi_flags, INPCBINFO_UPDATE_MSS, relaxed);
			inpcb_timer_sched(&tcbinfo, INPCB_TIMER_FAST);
#if NECP
			necp_update_all_clients();
#endif
		}

		/* Finally copy the new information */
		ifp->if_link_status->ifsr_version = ifsr.ifsr_version;
		ifp->if_link_status->ifsr_len = ifsr.ifsr_len;
		if_cell_sr->valid_bitmask = 0;
		bcopy(new_cell_sr, if_cell_sr, sizeof(*if_cell_sr));
	} else if (IFNET_IS_WIFI(ifp)) {
		struct if_wifi_status_v1 *if_wifi_sr, *new_wifi_sr;

		/* Check version */
		if (ifsr.ifsr_version !=
		    IF_WIFI_STATUS_REPORT_CURRENT_VERSION) {
			err = ENOTSUP;
			goto done;
		}

		if (ifsr.ifsr_len != sizeof(*if_wifi_sr)) {
			err = EINVAL;
			goto done;
		}

		if_wifi_sr =
		    &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
		new_wifi_sr =
		    &ifsr.ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
		ifp->if_link_status->ifsr_version = ifsr.ifsr_version;
		ifp->if_link_status->ifsr_len = ifsr.ifsr_len;
		if_wifi_sr->valid_bitmask = 0;
		bcopy(new_wifi_sr, if_wifi_sr, sizeof(*if_wifi_sr));

		/*
		 * Update the bandwidth values if we got recent values
		 * reported through the other KPI.
		 */
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_UL_MAX_BANDWIDTH_VALID) &&
		    ifp->if_output_bw.max_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_UL_MAX_BANDWIDTH_VALID;
			/* clamp the 64-bit bw value to the report's 32 bits */
			if_wifi_sr->ul_max_bandwidth =
			    ifp->if_output_bw.max_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_output_bw.max_bw;
		}
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) &&
		    ifp->if_output_bw.eff_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
			if_wifi_sr->ul_effective_bandwidth =
			    ifp->if_output_bw.eff_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_output_bw.eff_bw;
		}
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_DL_MAX_BANDWIDTH_VALID) &&
		    ifp->if_input_bw.max_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_DL_MAX_BANDWIDTH_VALID;
			if_wifi_sr->dl_max_bandwidth =
			    ifp->if_input_bw.max_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_input_bw.max_bw;
		}
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) &&
		    ifp->if_input_bw.eff_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
			if_wifi_sr->dl_effective_bandwidth =
			    ifp->if_input_bw.eff_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_input_bw.eff_bw;
		}
	}

done:
	lck_rw_done(&ifp->if_link_status_lock);
	ifnet_lock_done(ifp);
	return err;
}
3612 
3613 /*************************************************************************/
/* Fastlane QoS Capable					*/
3615 /*************************************************************************/
3616 
3617 errno_t
ifnet_set_fastlane_capable(ifnet_t interface,boolean_t capable)3618 ifnet_set_fastlane_capable(ifnet_t interface, boolean_t capable)
3619 {
3620 	if (interface == NULL) {
3621 		return EINVAL;
3622 	}
3623 
3624 	if_set_qosmarking_mode(interface,
3625 	    capable ? IFRTYPE_QOSMARKING_FASTLANE : IFRTYPE_QOSMARKING_MODE_NONE);
3626 
3627 	return 0;
3628 }
3629 
3630 errno_t
ifnet_get_fastlane_capable(ifnet_t interface,boolean_t * capable)3631 ifnet_get_fastlane_capable(ifnet_t interface, boolean_t *capable)
3632 {
3633 	if (interface == NULL || capable == NULL) {
3634 		return EINVAL;
3635 	}
3636 	if (interface->if_qosmarking_mode == IFRTYPE_QOSMARKING_FASTLANE) {
3637 		*capable = true;
3638 	} else {
3639 		*capable = false;
3640 	}
3641 	return 0;
3642 }
3643 
3644 errno_t
ifnet_get_unsent_bytes(ifnet_t interface,int64_t * unsent_bytes)3645 ifnet_get_unsent_bytes(ifnet_t interface, int64_t *unsent_bytes)
3646 {
3647 	int64_t bytes;
3648 
3649 	if (interface == NULL || unsent_bytes == NULL) {
3650 		return EINVAL;
3651 	}
3652 
3653 	bytes = *unsent_bytes = 0;
3654 
3655 	if (!IF_FULLY_ATTACHED(interface)) {
3656 		return ENXIO;
3657 	}
3658 
3659 	bytes = interface->if_sndbyte_unsent;
3660 
3661 	if (interface->if_eflags & IFEF_TXSTART) {
3662 		bytes += IFCQ_BYTES(interface->if_snd);
3663 	}
3664 	*unsent_bytes = bytes;
3665 
3666 	return 0;
3667 }
3668 
3669 errno_t
ifnet_get_buffer_status(const ifnet_t ifp,ifnet_buffer_status_t * buf_status)3670 ifnet_get_buffer_status(const ifnet_t ifp, ifnet_buffer_status_t *buf_status)
3671 {
3672 	if (ifp == NULL || buf_status == NULL) {
3673 		return EINVAL;
3674 	}
3675 
3676 	bzero(buf_status, sizeof(*buf_status));
3677 
3678 	if (!IF_FULLY_ATTACHED(ifp)) {
3679 		return ENXIO;
3680 	}
3681 
3682 	if (ifp->if_eflags & IFEF_TXSTART) {
3683 		buf_status->buf_interface = IFCQ_BYTES(ifp->if_snd);
3684 	}
3685 
3686 	buf_status->buf_sndbuf = ((buf_status->buf_interface != 0) ||
3687 	    (ifp->if_sndbyte_unsent != 0)) ? 1 : 0;
3688 
3689 	return 0;
3690 }
3691 
3692 void
ifnet_normalise_unsent_data(void)3693 ifnet_normalise_unsent_data(void)
3694 {
3695 	struct ifnet *ifp;
3696 
3697 	ifnet_head_lock_shared();
3698 	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
3699 		ifnet_lock_exclusive(ifp);
3700 		if (!IF_FULLY_ATTACHED(ifp)) {
3701 			ifnet_lock_done(ifp);
3702 			continue;
3703 		}
3704 		if (!(ifp->if_eflags & IFEF_TXSTART)) {
3705 			ifnet_lock_done(ifp);
3706 			continue;
3707 		}
3708 
3709 		if (ifp->if_sndbyte_total > 0 ||
3710 		    IFCQ_BYTES(ifp->if_snd) > 0) {
3711 			ifp->if_unsent_data_cnt++;
3712 		}
3713 
3714 		ifnet_lock_done(ifp);
3715 	}
3716 	ifnet_head_done();
3717 }
3718 
3719 errno_t
ifnet_set_low_power_mode(ifnet_t ifp,boolean_t on)3720 ifnet_set_low_power_mode(ifnet_t ifp, boolean_t on)
3721 {
3722 	errno_t error;
3723 
3724 	error = if_set_low_power(ifp, on);
3725 
3726 	return error;
3727 }
3728 
3729 errno_t
ifnet_get_low_power_mode(ifnet_t ifp,boolean_t * on)3730 ifnet_get_low_power_mode(ifnet_t ifp, boolean_t *on)
3731 {
3732 	if (ifp == NULL || on == NULL) {
3733 		return EINVAL;
3734 	}
3735 
3736 	*on = ((ifp->if_xflags & IFXF_LOW_POWER) != 0);
3737 	return 0;
3738 }
3739