xref: /xnu-11417.140.69/bsd/net/dlil_ctl.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 1999-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <stddef.h>
30 #include <stdint.h>
31 #include <sys/queue.h>
32 #include <sys/mcache.h>
33 #include <libkern/OSAtomic.h>
34 
35 #include <kern/zalloc.h>
36 
37 #include <net/dlil_var_private.h>
38 #include <net/if_var_private.h>
39 
40 /*
41  * DLIL device management
42  */
/*
 * dlil_if_acquire: obtain a dlil_ifnet/ifnet for the given interface family.
 *
 * The global interface list is scanned first: a name or unique-id match
 * against an entry that is currently in use yields EBUSY; a not-in-use
 * entry with a matching unique id is recycled (marked DLIF_INUSE|DLIF_REUSE).
 * If nothing can be recycled, a fresh dlil_ifnet is allocated and its locks,
 * local stats, and delayed thread call are initialized before it is appended
 * to the global list.
 *
 * Parameters:
 *   family       - interface family to match against if_family.
 *   uniqueid     - opaque identity blob of uniqueid_len bytes; a length of
 *                  zero disables unique-id matching and recycling.
 *   uniqueid_len - byte length of uniqueid.
 *   ifxname0     - requested extended interface name (NUL-terminated).
 *   ifp          - output; must point to NULL on entry (VERIFYed).  On
 *                  success receives the ifnet with a dlil_if_ref() held.
 *
 * Returns 0 on success, EBUSY on a name/unique-id collision with an in-use
 * interface, ENOMEM if the unique-id copy cannot be allocated.
 */
int
dlil_if_acquire(uint32_t family, const void *uniqueid __sized_by(uniqueid_len),
    size_t uniqueid_len, const char *ifxname0 __null_terminated, struct ifnet **ifp)
{
	struct ifnet *ifp1 = NULL;
	struct dlil_ifnet *dlifp1 = NULL;
	struct dlil_ifnet *dlifp1_saved = NULL;	/* first recyclable unique-id match */
	int ret = 0;
	size_t ifxname_len = strlen(ifxname0);
	const char *ifxname = __unsafe_forge_bidi_indexable(const char *, ifxname0, ifxname_len);
	size_t ifp_name_len;

	VERIFY(*ifp == NULL);
	dlil_if_lock();
	/*
	 * We absolutely can't have an interface with the same name
	 * in in-use state.
	 * To make sure of that list has to be traversed completely
	 */
	TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
		ifp1 = (struct ifnet *)dlifp1;
		/*
		 * NOTE(review): the length is taken from if_name but the
		 * compare below is against if_xname — confirm intended.
		 */
		ifp_name_len = strlen(ifp1->if_name);
		/* clamp so the length equality check below stays bounded */
		if (IFXNAMSIZ < ifp_name_len) {
			ifp_name_len = IFXNAMSIZ;
		}

		if (ifp1->if_family != family) {
			continue;
		}

		/*
		 * If interface is in use, return EBUSY if either unique id
		 * or interface extended names are the same
		 */
		lck_mtx_lock(&dlifp1->dl_if_lock);
		/*
		 * Note: compare the lengths to avoid least prefix match.
		 */
		if (ifxname_len == ifp_name_len &&
		    strlcmp(ifxname, ifp1->if_xname, ifxname_len) == 0 &&
		    (dlifp1->dl_if_flags & DLIF_INUSE) != 0) {
			lck_mtx_unlock(&dlifp1->dl_if_lock);
			ret = EBUSY;
			goto end;
		}

		if (uniqueid_len != 0 &&
		    uniqueid_len == dlifp1->dl_if_uniqueid_len &&
		    bcmp(uniqueid, dlifp1->dl_if_uniqueid, uniqueid_len) == 0) {
			if ((dlifp1->dl_if_flags & DLIF_INUSE) != 0) {
				lck_mtx_unlock(&dlifp1->dl_if_lock);
				ret = EBUSY;
				goto end;
			}
			if (dlifp1_saved == NULL) {
				/* cache the first match */
				dlifp1_saved = dlifp1;
			}
			/*
			 * Do not break or jump to end as we have to traverse
			 * the whole list to ensure there are no name collisions
			 */
		}
		lck_mtx_unlock(&dlifp1->dl_if_lock);
	}

	/* If there's an interface that can be recycled, use that */
	if (dlifp1_saved != NULL) {
		/* re-check DLIF_INUSE under the lock: it may have been taken */
		lck_mtx_lock(&dlifp1_saved->dl_if_lock);
		if ((dlifp1_saved->dl_if_flags & DLIF_INUSE) != 0) {
			/* some other thread got in ahead of us */
			lck_mtx_unlock(&dlifp1_saved->dl_if_lock);
			ret = EBUSY;
			goto end;
		}
		dlifp1_saved->dl_if_flags |= (DLIF_INUSE | DLIF_REUSE);
		lck_mtx_unlock(&dlifp1_saved->dl_if_lock);
		*ifp = (struct ifnet *)dlifp1_saved;
		dlil_if_ref(*ifp);
		goto end;
	}

	/* no interface found, allocate a new one */
	dlifp1 = dlif_ifnet_alloc();

	if (uniqueid_len) {
		void *new_uniqueid = kalloc_data(uniqueid_len,
		    Z_WAITOK);
		if (new_uniqueid == NULL) {
			dlif_ifnet_free(dlifp1);
			ret = ENOMEM;
			goto end;
		}
		dlifp1->dl_if_uniqueid_len = uniqueid_len;
		dlifp1->dl_if_uniqueid = new_uniqueid;

		bcopy(uniqueid, dlifp1->dl_if_uniqueid, uniqueid_len);
	}

	ifp1 = (struct ifnet *)dlifp1;
	dlifp1->dl_if_flags = DLIF_INUSE;
	if (ifnet_debug) {
		dlifp1->dl_if_flags |= DLIF_DEBUG;
		dlifp1->dl_if_trace = dlil_if_trace;
	}
	/* seed empty names pointing at the embedded storage; presumably the
	 * real names are filled in later at attach time — confirm with caller */
	ifp1->if_name = tsnprintf(dlifp1->dl_if_namestorage, sizeof(dlifp1->dl_if_namestorage), "");
	ifp1->if_xname = tsnprintf(dlifp1->dl_if_xnamestorage, sizeof(dlifp1->dl_if_xnamestorage), "");

	/* initialize interface description */
	ifp1->if_desc.ifd_maxlen = IF_DESCSIZE;
	ifp1->if_desc.ifd_len = 0;
	ifp1->if_desc.ifd_desc = dlifp1->dl_if_descstorage;

#if SKYWALK
	LIST_INIT(&ifp1->if_netns_tokens);
#endif /* SKYWALK */

	if ((ret = dlil_alloc_local_stats(ifp1)) != 0) {
		DLIL_PRINTF("%s: failed to allocate if local stats, "
		    "error: %d\n", __func__, ret);
		/* This probably shouldn't be fatal */
		ret = 0;
	}

	lck_mtx_init(&dlifp1->dl_if_lock, &ifnet_lock_group, &ifnet_lock_attr);
	lck_rw_init(&ifp1->if_lock, &ifnet_lock_group, &ifnet_lock_attr);
	lck_mtx_init(&ifp1->if_ref_lock, &ifnet_lock_group, &ifnet_lock_attr);
	lck_mtx_init(&ifp1->if_flt_lock, &ifnet_lock_group, &ifnet_lock_attr);
	lck_mtx_init(&ifp1->if_addrconfig_lock, &ifnet_lock_group,
	    &ifnet_lock_attr);
	lck_rw_init(&ifp1->if_llreach_lock, &ifnet_lock_group, &ifnet_lock_attr);
#if INET
	lck_rw_init(&ifp1->if_inetdata_lock, &ifnet_lock_group,
	    &ifnet_lock_attr);
	ifp1->if_inetdata = NULL;
#endif
	lck_mtx_init(&ifp1->if_inet6_ioctl_lock, &ifnet_lock_group, &ifnet_lock_attr);
	ifp1->if_inet6_ioctl_busy = FALSE;
	lck_rw_init(&ifp1->if_inet6data_lock, &ifnet_lock_group,
	    &ifnet_lock_attr);
	ifp1->if_inet6data = NULL;
	lck_rw_init(&ifp1->if_link_status_lock, &ifnet_lock_group,
	    &ifnet_lock_attr);
	ifp1->if_link_status = NULL;
	lck_mtx_init(&ifp1->if_delegate_lock, &ifnet_lock_group, &ifnet_lock_attr);

	/* for send data paths */
	lck_mtx_init(&ifp1->if_start_lock, &ifnet_snd_lock_group,
	    &ifnet_lock_attr);
	lck_mtx_init(&ifp1->if_cached_route_lock, &ifnet_snd_lock_group,
	    &ifnet_lock_attr);

	/* for receive data paths */
	lck_mtx_init(&ifp1->if_poll_lock, &ifnet_rcv_lock_group,
	    &ifnet_lock_attr);

	/* thread call allocation is done with sleeping zalloc */
	ifp1->if_dt_tcall = thread_call_allocate_with_options(dlil_dt_tcall_fn,
	    ifp1, THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
	if (ifp1->if_dt_tcall == NULL) {
		panic_plain("%s: couldn't create if_dt_tcall", __func__);
		/* NOTREACHED */
	}

	TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);

	*ifp = ifp1;
	dlil_if_ref(*ifp);

end:
	dlil_if_unlock();

	/* freshly allocated dlil_ifnet must be 64-bit aligned for if_data */
	VERIFY(dlifp1 == NULL || (IS_P2ALIGNED(dlifp1, sizeof(u_int64_t)) &&
	    IS_P2ALIGNED(&ifp1->if_data, sizeof(u_int64_t))));

	return ret;
}
220 
221 void
dlil_if_trace(struct dlil_ifnet * dl_if,int refhold)222 dlil_if_trace(struct dlil_ifnet *dl_if, int refhold)
223 {
224 	struct dlil_ifnet_dbg *dl_if_dbg = (struct dlil_ifnet_dbg *)dl_if;
225 	ctrace_t *tr;
226 	u_int32_t idx;
227 	u_int16_t *cnt;
228 
229 	if (!(dl_if->dl_if_flags & DLIF_DEBUG)) {
230 		panic("%s: dl_if %p has no debug structure", __func__, dl_if);
231 		/* NOTREACHED */
232 	}
233 
234 	if (refhold) {
235 		cnt = &dl_if_dbg->dldbg_if_refhold_cnt;
236 		tr = dl_if_dbg->dldbg_if_refhold;
237 	} else {
238 		cnt = &dl_if_dbg->dldbg_if_refrele_cnt;
239 		tr = dl_if_dbg->dldbg_if_refrele;
240 	}
241 
242 	idx = os_atomic_inc_orig(cnt, relaxed) % IF_REF_TRACE_HIST_SIZE;
243 	ctrace_record(&tr[idx]);
244 }
245 
246 /*
247  * Stats management.
248  */
249 void
dlil_input_stats_add(const struct ifnet_stat_increment_param * s,struct dlil_threading_info * inp,struct ifnet * ifp,boolean_t poll)250 dlil_input_stats_add(const struct ifnet_stat_increment_param *s,
251     struct dlil_threading_info *inp, struct ifnet *ifp, boolean_t poll)
252 {
253 	struct ifnet_stat_increment_param *d = &inp->dlth_stats;
254 
255 	if (s->packets_in != 0) {
256 		d->packets_in += s->packets_in;
257 	}
258 	if (s->bytes_in != 0) {
259 		d->bytes_in += s->bytes_in;
260 	}
261 	if (s->errors_in != 0) {
262 		d->errors_in += s->errors_in;
263 	}
264 
265 	if (s->packets_out != 0) {
266 		d->packets_out += s->packets_out;
267 	}
268 	if (s->bytes_out != 0) {
269 		d->bytes_out += s->bytes_out;
270 	}
271 	if (s->errors_out != 0) {
272 		d->errors_out += s->errors_out;
273 	}
274 
275 	if (s->collisions != 0) {
276 		d->collisions += s->collisions;
277 	}
278 	if (s->dropped != 0) {
279 		d->dropped += s->dropped;
280 	}
281 
282 	if (poll) {
283 		PKTCNTR_ADD(&ifp->if_poll_tstats, s->packets_in, s->bytes_in);
284 	}
285 }
286 
287 boolean_t
dlil_input_stats_sync(struct ifnet * ifp,struct dlil_threading_info * inp)288 dlil_input_stats_sync(struct ifnet *ifp, struct dlil_threading_info *inp)
289 {
290 	struct ifnet_stat_increment_param *s = &inp->dlth_stats;
291 
292 	/*
293 	 * Use of atomic operations is unavoidable here because
294 	 * these stats may also be incremented elsewhere via KPIs.
295 	 */
296 	if (s->packets_in != 0) {
297 		os_atomic_add(&ifp->if_data.ifi_ipackets, s->packets_in, relaxed);
298 		s->packets_in = 0;
299 	}
300 	if (s->bytes_in != 0) {
301 		os_atomic_add(&ifp->if_data.ifi_ibytes, s->bytes_in, relaxed);
302 		s->bytes_in = 0;
303 	}
304 	if (s->errors_in != 0) {
305 		os_atomic_add(&ifp->if_data.ifi_ierrors, s->errors_in, relaxed);
306 		s->errors_in = 0;
307 	}
308 
309 	if (s->packets_out != 0) {
310 		os_atomic_add(&ifp->if_data.ifi_opackets, s->packets_out, relaxed);
311 		s->packets_out = 0;
312 	}
313 	if (s->bytes_out != 0) {
314 		os_atomic_add(&ifp->if_data.ifi_obytes, s->bytes_out, relaxed);
315 		s->bytes_out = 0;
316 	}
317 	if (s->errors_out != 0) {
318 		os_atomic_add(&ifp->if_data.ifi_oerrors, s->errors_out, relaxed);
319 		s->errors_out = 0;
320 	}
321 
322 	if (s->collisions != 0) {
323 		os_atomic_add(&ifp->if_data.ifi_collisions, s->collisions, relaxed);
324 		s->collisions = 0;
325 	}
326 	if (s->dropped != 0) {
327 		os_atomic_add(&ifp->if_data.ifi_iqdrops, s->dropped, relaxed);
328 		s->dropped = 0;
329 	}
330 
331 	/*
332 	 * No need for atomic operations as they are modified here
333 	 * only from within the DLIL input thread context.
334 	 */
335 	if (ifp->if_poll_tstats.packets != 0) {
336 		ifp->if_poll_pstats.ifi_poll_packets += ifp->if_poll_tstats.packets;
337 		ifp->if_poll_tstats.packets = 0;
338 	}
339 	if (ifp->if_poll_tstats.bytes != 0) {
340 		ifp->if_poll_pstats.ifi_poll_bytes += ifp->if_poll_tstats.bytes;
341 		ifp->if_poll_tstats.bytes = 0;
342 	}
343 
344 	return ifp->if_data_threshold != 0;
345 }
346 
347 
348 #if SKYWALK
349 errno_t
dlil_set_input_handler(struct ifnet * ifp,dlil_input_func fn)350 dlil_set_input_handler(struct ifnet *ifp, dlil_input_func fn)
351 {
352 	return os_atomic_cmpxchg(__unsafe_forge_single(void * volatile *, &ifp->if_input_dlil),
353 	           ptrauth_nop_cast(void *, &dlil_input_handler),
354 	           ptrauth_nop_cast(void *, fn), acq_rel) ? 0 : EBUSY;
355 }
356 
357 void
dlil_reset_input_handler(struct ifnet * ifp)358 dlil_reset_input_handler(struct ifnet *ifp)
359 {
360 	while (!os_atomic_cmpxchg(__unsafe_forge_single(void * volatile *, &ifp->if_input_dlil),
361 	    ptrauth_nop_cast(void *, ifp->if_input_dlil),
362 	    ptrauth_nop_cast(void *, &dlil_input_handler), acq_rel)) {
363 		;
364 	}
365 }
366 
367 errno_t
dlil_set_output_handler(struct ifnet * ifp,dlil_output_func fn)368 dlil_set_output_handler(struct ifnet *ifp, dlil_output_func fn)
369 {
370 	return os_atomic_cmpxchg(__unsafe_forge_single(void * volatile *, &ifp->if_output_dlil),
371 	           ptrauth_nop_cast(void *, &dlil_output_handler),
372 	           ptrauth_nop_cast(void *, fn), acq_rel) ? 0 : EBUSY;
373 }
374 
375 void
dlil_reset_output_handler(struct ifnet * ifp)376 dlil_reset_output_handler(struct ifnet *ifp)
377 {
378 	while (!os_atomic_cmpxchg(__unsafe_forge_single(void * volatile *, &ifp->if_output_dlil),
379 	    ptrauth_nop_cast(void *, ifp->if_output_dlil),
380 	    ptrauth_nop_cast(void *, &dlil_output_handler), acq_rel)) {
381 		;
382 	}
383 }
384 #endif /* SKYWALK */
385 
386 errno_t
dlil_output_handler(struct ifnet * ifp,struct mbuf * m)387 dlil_output_handler(struct ifnet *ifp, struct mbuf *m)
388 {
389 	return ifp->if_output(ifp, m);
390 }
391 
392 #define MAX_KNOWN_MBUF_CLASS 8
393 
394 
395 #if SKYWALK
396 errno_t
ifnet_set_output_handler(struct ifnet * ifp,ifnet_output_func fn)397 ifnet_set_output_handler(struct ifnet *ifp, ifnet_output_func fn)
398 {
399 	return os_atomic_cmpxchg(__unsafe_forge_single(void * volatile *, &ifp->if_output),
400 	           ptrauth_nop_cast(void *, ifp->if_save_output),
401 	           ptrauth_nop_cast(void *, fn), acq_rel) ? 0 : EBUSY;
402 }
403 
404 void
ifnet_reset_output_handler(struct ifnet * ifp)405 ifnet_reset_output_handler(struct ifnet *ifp)
406 {
407 	while (!os_atomic_cmpxchg(__unsafe_forge_single(void * volatile *, &ifp->if_output),
408 	    ptrauth_nop_cast(void *, ifp->if_output),
409 	    ptrauth_nop_cast(void *, ifp->if_save_output), acq_rel)) {
410 		;
411 	}
412 }
413 
414 errno_t
ifnet_set_start_handler(struct ifnet * ifp,ifnet_start_func fn)415 ifnet_set_start_handler(struct ifnet *ifp, ifnet_start_func fn)
416 {
417 	return os_atomic_cmpxchg(__unsafe_forge_single(void * volatile *, &ifp->if_start),
418 	           ptrauth_nop_cast(void *, ifp->if_save_start),
419 	           ptrauth_nop_cast(void *, fn), acq_rel) ? 0 : EBUSY;
420 }
421 
422 void
ifnet_reset_start_handler(struct ifnet * ifp)423 ifnet_reset_start_handler(struct ifnet *ifp)
424 {
425 	while (!os_atomic_cmpxchg(__unsafe_forge_single(void * volatile *, &ifp->if_start),
426 	    ptrauth_nop_cast(void *, ifp->if_start),
427 	    ptrauth_nop_cast(void *, ifp->if_save_start), acq_rel)) {
428 		;
429 	}
430 }
431 #endif /* SKYWALK */
432