xref: /xnu-8796.101.5/bsd/net/if_ports_used.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2017-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/types.h>
30 #include <sys/sysctl.h>
31 #include <sys/time.h>
32 #include <sys/mcache.h>
33 #include <sys/malloc.h>
34 #include <sys/kauth.h>
35 #include <sys/kern_event.h>
36 #include <sys/bitstring.h>
37 #include <sys/priv.h>
38 #include <sys/proc.h>
39 #include <sys/protosw.h>
40 #include <sys/socket.h>
41 
42 #include <kern/locks.h>
43 #include <kern/zalloc.h>
44 
45 #include <libkern/libkern.h>
46 
47 #include <net/kpi_interface.h>
48 #include <net/if_var.h>
49 #include <net/if_ports_used.h>
50 
51 #include <netinet/in_pcb.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip6.h>
54 #include <netinet/tcp_var.h>
55 #include <netinet/tcp_fsm.h>
56 #include <netinet/udp.h>
57 
58 #if SKYWALK
59 #include <skywalk/os_skywalk_private.h>
60 #include <skywalk/nexus/flowswitch/flow/flow_var.h>
61 #include <skywalk/namespace/netns.h>
62 #endif /* SKYWALK */
63 
64 #include <stdbool.h>
65 
66 #include <os/log.h>
67 
#define ESP_HDR_SIZE 4
#define PORT_ISAKMP 500
#define PORT_ISAKMP_NATT 4500   /* rfc3948 */

/* Interface name, or the empty string when ifp is NULL */
#define IF_XNAME(ifp) ((ifp) != NULL ? (ifp)->if_xname : "")

/* From IOKit power management: reports (and optionally copies) the sleep/wake UUID */
extern bool IOPMCopySleepWakeUUIDKey(char *buffer, size_t buf_len);

SYSCTL_DECL(_net_link_generic_system);

SYSCTL_NODE(_net_link_generic_system, OID_AUTO, port_used,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "if port used");

/* Counters exported via net.link.generic.system.port_used.stats */
struct if_ports_used_stats if_ports_used_stats = {};
static int sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
    sysctl_if_ports_used_stats, "S,struct if_ports_used_stats", "");

/* UUID of the current wake cycle, read-only from userspace */
static uuid_t current_wakeuuid;
SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, current_wakeuuid,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    current_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");

static int sysctl_net_port_info_list SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, list,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    sysctl_net_port_info_list, "S,xnpigen", "");

/* Test override of the sleep/wake UUID (only settable on DEVELOPMENT/DEBUG) */
static int use_test_wakeuuid = 0;
static uuid_t test_wakeuuid;

#if (DEVELOPMENT || DEBUG)
SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, use_test_wakeuuid,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &use_test_wakeuuid, 0, "");

int sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, new_test_wakeuuid,
    CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
    sysctl_new_test_wakeuuid, "S,uuid_t", "");

int sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, clear_test_wakeuuid,
    CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
    sysctl_clear_test_wakeuuid, "S,uuid_t", "");

SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, test_wakeuuid,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    test_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");
#endif /* (DEVELOPMENT || DEBUG) */

static int sysctl_get_ports_used SYSCTL_HANDLER_ARGS;
SYSCTL_NODE(_net_link_generic_system, OID_AUTO, get_ports_used,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_get_ports_used, "");

/* 0: quiet; higher values enable progressively chattier logging */
static int if_ports_used_verbose = 0;
SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, verbose,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &if_ports_used_verbose, 0, "");

/* Diagnostics for updates attempted while no wake UUID was set */
struct timeval wakeuuid_not_set_last_time;
int sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS;
static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
    wakeuuid_not_set_last_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_wakeuuid_not_set_last_time, "S,timeval", "");

char wakeuuid_not_set_last_if[IFXNAMSIZ];
int sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS;
static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
    wakeuuid_not_set_last_if, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_wakeuuid_not_set_last_if, "A", "");

struct timeval wakeuuid_last_update_time;
int sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS;
static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
    wakeuuid_last_update_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_wakeuuid_last_update_time, "S,timeval", "");

struct net_port_info_wake_event last_attributed_wake_event;
int sysctl_last_attributed_wake_event SYSCTL_HANDLER_ARGS;
static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
    last_attributed_wake_event, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_last_attributed_wake_event, "S,net_port_info_wake_event", "");


/* At most one notification of each kind per wake cycle */
static bool has_notified_wake_pkt = false;
static bool has_notified_unattributed_wake = false;

/* Protects the entry list, the hash table and the wake UUID state */
static LCK_GRP_DECLARE(net_port_entry_head_lock_group, "net port entry lock");
static LCK_MTX_DECLARE(net_port_entry_head_lock, &net_port_entry_head_lock_group);


/* A port entry is linked on both the flat list and a per-port hash bucket */
struct net_port_entry {
	SLIST_ENTRY(net_port_entry)     npe_list_next;
	TAILQ_ENTRY(net_port_entry)     npe_hash_next;
	struct net_port_info            npe_npi;
};

static KALLOC_TYPE_DEFINE(net_port_entry_zone, struct net_port_entry, NET_KT_DEFAULT);

static SLIST_HEAD(net_port_entry_list, net_port_entry) net_port_entry_list =
    SLIST_HEAD_INITIALIZER(&net_port_entry_list);

/* Uptime of the last wake UUID check (note: historical misspelling kept for ABI/name stability) */
struct timeval wakeuiid_last_check;


#if (DEBUG | DEVELOPMENT)
static int64_t npi_search_list_total = 0;
SYSCTL_QUAD(_net_link_generic_system_port_used, OID_AUTO, npi_search_list_total,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &npi_search_list_total, "");

static int64_t npi_search_list_max = 0;
SYSCTL_QUAD(_net_link_generic_system_port_used, OID_AUTO, npi_search_list_max,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    &npi_search_list_max, "");
#endif /* (DEBUG | DEVELOPMENT) */

/*
 * Hashing of the net_port_entry list is based on the local port
 *
 * The hash masks uses the least significant bits so we have to use host byte order
 * when applying the mask because the LSB have more entropy that the MSB (most local ports
 * are in the high dynamic port range)
 */
#define NPE_HASH_BUCKET_COUNT 32
#define NPE_HASH_MASK (NPE_HASH_BUCKET_COUNT - 1)
#define NPE_HASH_VAL(_lport) (ntohs(_lport) & NPE_HASH_MASK)
#define NPE_HASH_HEAD(_lport) (&net_port_entry_hash_table[NPE_HASH_VAL(_lport)])

static TAILQ_HEAD(net_port_entry_hash_table, net_port_entry) * net_port_entry_hash_table = NULL;
201 
/*
 * Initialize the net_port_entry local-port hash table.
 */
205 void
if_ports_used_init(void)206 if_ports_used_init(void)
207 {
208 	if (net_port_entry_hash_table != NULL) {
209 		return;
210 	}
211 
212 	net_port_entry_hash_table = zalloc_permanent(
213 		NPE_HASH_BUCKET_COUNT * sizeof(*net_port_entry_hash_table),
214 		ZALIGN_PTR);
215 }
216 
217 static void
net_port_entry_list_clear(void)218 net_port_entry_list_clear(void)
219 {
220 	struct net_port_entry *npe;
221 
222 	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);
223 
224 	while ((npe = SLIST_FIRST(&net_port_entry_list)) != NULL) {
225 		SLIST_REMOVE_HEAD(&net_port_entry_list, npe_list_next);
226 		TAILQ_REMOVE(NPE_HASH_HEAD(npe->npe_npi.npi_local_port), npe, npe_hash_next);
227 
228 		zfree(net_port_entry_zone, npe);
229 	}
230 
231 	for (int i = 0; i < NPE_HASH_BUCKET_COUNT; i++) {
232 		VERIFY(TAILQ_EMPTY(&net_port_entry_hash_table[i]));
233 	}
234 
235 	if_ports_used_stats.ifpu_npe_count = 0;
236 	if_ports_used_stats.ifpu_wakeuid_gen++;
237 }
238 
239 static bool
get_test_wake_uuid(uuid_string_t wakeuuid_str,size_t len)240 get_test_wake_uuid(uuid_string_t wakeuuid_str, size_t len)
241 {
242 	if (__improbable(use_test_wakeuuid)) {
243 		if (!uuid_is_null(test_wakeuuid)) {
244 			if (wakeuuid_str != NULL && len != 0) {
245 				uuid_unparse(test_wakeuuid, wakeuuid_str);
246 			}
247 			return true;
248 		} else {
249 			return false;
250 		}
251 	} else {
252 		return false;
253 	}
254 }
255 
256 static bool
is_wakeuuid_set(void)257 is_wakeuuid_set(void)
258 {
259 	/*
260 	 * IOPMCopySleepWakeUUIDKey() tells if SleepWakeUUID is currently set
261 	 * That means we are currently in a sleep/wake cycle
262 	 */
263 	return get_test_wake_uuid(NULL, 0) || IOPMCopySleepWakeUUIDKey(NULL, 0);
264 }
265 
266 void
if_ports_used_update_wakeuuid(struct ifnet * ifp)267 if_ports_used_update_wakeuuid(struct ifnet *ifp)
268 {
269 	uuid_t wakeuuid;
270 	bool wakeuuid_is_set = false;
271 	bool updated = false;
272 	uuid_string_t wakeuuid_str;
273 
274 	uuid_clear(wakeuuid);
275 
276 	if (__improbable(use_test_wakeuuid)) {
277 		wakeuuid_is_set = get_test_wake_uuid(wakeuuid_str,
278 		    sizeof(wakeuuid_str));
279 	} else {
280 		wakeuuid_is_set = IOPMCopySleepWakeUUIDKey(wakeuuid_str,
281 		    sizeof(wakeuuid_str));
282 	}
283 
284 	if (wakeuuid_is_set) {
285 		if (uuid_parse(wakeuuid_str, wakeuuid) != 0) {
286 			os_log(OS_LOG_DEFAULT,
287 			    "%s: IOPMCopySleepWakeUUIDKey got bad value %s\n",
288 			    __func__, wakeuuid_str);
289 			wakeuuid_is_set = false;
290 		}
291 	}
292 
293 	if (!wakeuuid_is_set) {
294 		if (ifp != NULL) {
295 			if (if_ports_used_verbose > 0) {
296 				os_log_info(OS_LOG_DEFAULT,
297 				    "%s: SleepWakeUUID not set, "
298 				    "don't update the port list for %s\n",
299 				    __func__, ifp != NULL ? if_name(ifp) : "");
300 			}
301 			if_ports_used_stats.ifpu_wakeuuid_not_set_count += 1;
302 			microtime(&wakeuuid_not_set_last_time);
303 			strlcpy(wakeuuid_not_set_last_if, if_name(ifp),
304 			    sizeof(wakeuuid_not_set_last_if));
305 		}
306 		return;
307 	}
308 
309 	lck_mtx_lock(&net_port_entry_head_lock);
310 	if (uuid_compare(wakeuuid, current_wakeuuid) != 0) {
311 		net_port_entry_list_clear();
312 		uuid_copy(current_wakeuuid, wakeuuid);
313 		microtime(&wakeuuid_last_update_time);
314 		updated = true;
315 
316 		has_notified_wake_pkt = false;
317 		has_notified_unattributed_wake = false;
318 
319 		memset(&last_attributed_wake_event, 0, sizeof(last_attributed_wake_event));
320 	}
321 	/*
322 	 * Record the time last checked
323 	 */
324 	microuptime(&wakeuiid_last_check);
325 	lck_mtx_unlock(&net_port_entry_head_lock);
326 
327 	if (updated && if_ports_used_verbose > 0) {
328 		uuid_string_t uuid_str;
329 
330 		uuid_unparse(current_wakeuuid, uuid_str);
331 		os_log(OS_LOG_DEFAULT, "%s: current wakeuuid %s",
332 		    __func__, uuid_str);
333 	}
334 }
335 
336 static bool
net_port_info_equal(const struct net_port_info * x,const struct net_port_info * y)337 net_port_info_equal(const struct net_port_info *x,
338     const struct net_port_info *y)
339 {
340 	ASSERT(x != NULL && y != NULL);
341 
342 	if (x->npi_if_index == y->npi_if_index &&
343 	    x->npi_local_port == y->npi_local_port &&
344 	    x->npi_foreign_port == y->npi_foreign_port &&
345 	    x->npi_owner_pid == y->npi_owner_pid &&
346 	    x->npi_effective_pid == y->npi_effective_pid &&
347 	    x->npi_flags == y->npi_flags &&
348 	    memcmp(&x->npi_local_addr_, &y->npi_local_addr_,
349 	    sizeof(union in_addr_4_6)) == 0 &&
350 	    memcmp(&x->npi_foreign_addr_, &y->npi_foreign_addr_,
351 	    sizeof(union in_addr_4_6)) == 0) {
352 		return true;
353 	}
354 	return false;
355 }
356 
357 static bool
net_port_info_has_entry(const struct net_port_info * npi)358 net_port_info_has_entry(const struct net_port_info *npi)
359 {
360 	struct net_port_entry *npe;
361 	bool found = false;
362 	int32_t count = 0;
363 
364 	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);
365 
366 	TAILQ_FOREACH(npe, NPE_HASH_HEAD(npi->npi_local_port), npe_hash_next) {
367 		count += 1;
368 		if (net_port_info_equal(&npe->npe_npi, npi)) {
369 			found = true;
370 			break;
371 		}
372 	}
373 	if_ports_used_stats.ifpu_npi_hash_search_total += count;
374 	if (count > if_ports_used_stats.ifpu_npi_hash_search_max) {
375 		if_ports_used_stats.ifpu_npi_hash_search_max = count;
376 	}
377 
378 	return found;
379 }
380 
381 static bool
net_port_info_add_entry(const struct net_port_info * npi)382 net_port_info_add_entry(const struct net_port_info *npi)
383 {
384 	struct net_port_entry   *npe = NULL;
385 	uint32_t num = 0;
386 	bool entry_added = false;
387 
388 	ASSERT(npi != NULL);
389 
390 	if (__improbable(is_wakeuuid_set() == false)) {
391 		if_ports_used_stats.ifpu_npi_not_added_no_wakeuuid++;
392 		if (if_ports_used_verbose > 0) {
393 			os_log(OS_LOG_DEFAULT, "%s: wakeuuid not set not adding "
394 			    "port: %u flags: 0x%xif: %u pid: %u epid %u",
395 			    __func__,
396 			    ntohs(npi->npi_local_port),
397 			    npi->npi_flags,
398 			    npi->npi_if_index,
399 			    npi->npi_owner_pid,
400 			    npi->npi_effective_pid);
401 		}
402 		return false;
403 	}
404 
405 	npe = zalloc_flags(net_port_entry_zone, Z_WAITOK | Z_ZERO);
406 	if (__improbable(npe == NULL)) {
407 		os_log(OS_LOG_DEFAULT, "%s: zalloc() failed for "
408 		    "port: %u flags: 0x%x if: %u pid: %u epid %u",
409 		    __func__,
410 		    ntohs(npi->npi_local_port),
411 		    npi->npi_flags,
412 		    npi->npi_if_index,
413 		    npi->npi_owner_pid,
414 		    npi->npi_effective_pid);
415 		return false;
416 	}
417 
418 	memcpy(&npe->npe_npi, npi, sizeof(npe->npe_npi));
419 
420 	lck_mtx_lock(&net_port_entry_head_lock);
421 
422 	if (net_port_info_has_entry(npi) == false) {
423 		SLIST_INSERT_HEAD(&net_port_entry_list, npe, npe_list_next);
424 		TAILQ_INSERT_HEAD(NPE_HASH_HEAD(npi->npi_local_port), npe, npe_hash_next);
425 		num = (uint32_t)if_ports_used_stats.ifpu_npe_count++; /* rollover OK */
426 		entry_added = true;
427 
428 		if (if_ports_used_stats.ifpu_npe_count > if_ports_used_stats.ifpu_npe_max) {
429 			if_ports_used_stats.ifpu_npe_max = if_ports_used_stats.ifpu_npe_count;
430 		}
431 		if_ports_used_stats.ifpu_npe_total++;
432 
433 		if (if_ports_used_verbose > 1) {
434 			os_log(OS_LOG_DEFAULT, "%s: num %u for "
435 			    "port: %u flags: 0x%x if: %u pid: %u epid %u",
436 			    __func__,
437 			    num,
438 			    ntohs(npi->npi_local_port),
439 			    npi->npi_flags,
440 			    npi->npi_if_index,
441 			    npi->npi_owner_pid,
442 			    npi->npi_effective_pid);
443 		}
444 	} else {
445 		if_ports_used_stats.ifpu_npe_dup++;
446 		if (if_ports_used_verbose > 2) {
447 			os_log(OS_LOG_DEFAULT, "%s: already added "
448 			    "port: %u flags: 0x%x if: %u pid: %u epid %u",
449 			    __func__,
450 			    ntohs(npi->npi_local_port),
451 			    npi->npi_flags,
452 			    npi->npi_if_index,
453 			    npi->npi_owner_pid,
454 			    npi->npi_effective_pid);
455 		}
456 	}
457 
458 	lck_mtx_unlock(&net_port_entry_head_lock);
459 
460 	if (entry_added == false) {
461 		zfree(net_port_entry_zone, npe);
462 	}
463 	return entry_added;
464 }
465 
466 #if (DEVELOPMENT || DEBUG)
467 int
468 sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS
469 {
470 #pragma unused(oidp, arg1, arg2)
471 	int error = 0;
472 
473 	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
474 		return EPERM;
475 	}
476 	if (req->oldptr == USER_ADDR_NULL) {
477 		req->oldidx = sizeof(uuid_t);
478 		return 0;
479 	}
480 	if (req->newptr != USER_ADDR_NULL) {
481 		uuid_generate(test_wakeuuid);
482 		if_ports_used_update_wakeuuid(NULL);
483 	}
484 	error = SYSCTL_OUT(req, test_wakeuuid,
485 	    MIN(sizeof(uuid_t), req->oldlen));
486 
487 	return error;
488 }
489 
490 int
491 sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS
492 {
493 #pragma unused(oidp, arg1, arg2)
494 	int error = 0;
495 
496 	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
497 		return EPERM;
498 	}
499 	if (req->oldptr == USER_ADDR_NULL) {
500 		req->oldidx = sizeof(uuid_t);
501 		return 0;
502 	}
503 	if (req->newptr != USER_ADDR_NULL) {
504 		uuid_clear(test_wakeuuid);
505 		if_ports_used_update_wakeuuid(NULL);
506 	}
507 	error = SYSCTL_OUT(req, test_wakeuuid,
508 	    MIN(sizeof(uuid_t), req->oldlen));
509 
510 	return error;
511 }
512 
513 #endif /* (DEVELOPMENT || DEBUG) */
514 
515 static int
sysctl_timeval(struct sysctl_req * req,const struct timeval * tv)516 sysctl_timeval(struct sysctl_req *req, const struct timeval *tv)
517 {
518 	if (proc_is64bit(req->p)) {
519 		struct user64_timeval tv64 = {};
520 
521 		tv64.tv_sec = tv->tv_sec;
522 		tv64.tv_usec = tv->tv_usec;
523 		return SYSCTL_OUT(req, &tv64, sizeof(tv64));
524 	} else {
525 		struct user32_timeval tv32 = {};
526 
527 		tv32.tv_sec = (user32_time_t)tv->tv_sec;
528 		tv32.tv_usec = tv->tv_usec;
529 		return SYSCTL_OUT(req, &tv32, sizeof(tv32));
530 	}
531 }
532 
/* Export the time the wake UUID was last updated (see if_ports_used_update_wakeuuid()) */
int
sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return sysctl_timeval(req, &wakeuuid_last_update_time);
}
540 
/* Export the time of the last update attempted while no wake UUID was set */
int
sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return sysctl_timeval(req, &wakeuuid_not_set_last_time);
}
548 
/* Export the name of the last interface seen while no wake UUID was set */
int
sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return SYSCTL_OUT(req, &wakeuuid_not_set_last_if, strlen(wakeuuid_not_set_last_if) + 1);
}
556 
557 int
558 sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS
559 {
560 #pragma unused(oidp, arg1, arg2)
561 	size_t len = sizeof(struct if_ports_used_stats);
562 
563 	if (req->oldptr != 0) {
564 		len = MIN(req->oldlen, sizeof(struct if_ports_used_stats));
565 	}
566 	return SYSCTL_OUT(req, &if_ports_used_stats, len);
567 }
568 
/*
 * Copy out a struct xnpigen header followed by one struct net_port_info
 * per recorded entry, all under net_port_entry_head_lock.
 * Requires PRIV_NET_PRIVILEGED_NETWORK_STATISTICS; returns EPERM otherwise.
 */
static int
sysctl_net_port_info_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	struct xnpigen xnpigen;
	struct net_port_entry *npe;

	if ((error = priv_check_cred(kauth_cred_get(),
	    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0)) != 0) {
		return EPERM;
	}
	lck_mtx_lock(&net_port_entry_head_lock);

	if (req->oldptr == USER_ADDR_NULL) {
		/*
		 * Size probe: cushion the estimate (cnt >> 4, ~6%) for
		 * entries that may be added before the actual fetch.
		 */
		size_t cnt = (size_t)if_ports_used_stats.ifpu_npe_count;
		cnt += cnt >> 4;
		req->oldidx = sizeof(struct xnpigen) +
		    cnt * sizeof(struct net_port_info);
		goto done;
	}

	/* Header first: generation, wake UUID, entry count and size */
	memset(&xnpigen, 0, sizeof(struct xnpigen));
	xnpigen.xng_len = sizeof(struct xnpigen);
	xnpigen.xng_gen = (uint32_t)if_ports_used_stats.ifpu_wakeuid_gen;
	uuid_copy(xnpigen.xng_wakeuuid, current_wakeuuid);
	xnpigen.xng_npi_count = (uint32_t)if_ports_used_stats.ifpu_npe_count;
	xnpigen.xng_npi_size = sizeof(struct net_port_info);
	error = SYSCTL_OUT(req, &xnpigen, sizeof(xnpigen));
	if (error != 0) {
		printf("%s: SYSCTL_OUT(xnpigen) error %d\n",
		    __func__, error);
		goto done;
	}

	SLIST_FOREACH(npe, &net_port_entry_list, npe_list_next) {
		error = SYSCTL_OUT(req, &npe->npe_npi,
		    sizeof(struct net_port_info));
		if (error != 0) {
			printf("%s: SYSCTL_OUT(npi) error %d\n",
			    __func__, error);
			goto done;
		}
	}
done:
	lck_mtx_unlock(&net_port_entry_head_lock);

	return error;
}
619 
620 /*
621  * Mirror the arguments of ifnet_get_local_ports_extended()
622  *  ifindex
623  *  protocol
624  *  flags
625  */
static int
sysctl_get_ports_used SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int *name = (int *)arg1;
	int namelen = arg2;
	int error = 0;
	int idx;
	protocol_family_t protocol;
	u_int32_t flags;
	ifnet_t ifp = NULL;
	u_int8_t *bitfield = NULL;

	/* Read-only sysctl */
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	/*
	 * 3 is the required number of parameters: ifindex, protocol and flags
	 */
	if (namelen != 3) {
		error = ENOENT;
		goto done;
	}

	/* Size probe: one bit per possible local port */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = bitstr_size(IP_PORTRANGE_SIZE);
		goto done;
	}
	if (req->oldlen < bitstr_size(IP_PORTRANGE_SIZE)) {
		error = ENOMEM;
		goto done;
	}
	bitfield = (u_int8_t *) kalloc_data(bitstr_size(IP_PORTRANGE_SIZE),
	    Z_WAITOK | Z_ZERO);
	if (bitfield == NULL) {
		error = ENOMEM;
		goto done;
	}

	idx = name[0];
	protocol = name[1];
	flags = name[2];

	/*
	 * NOTE(review): ifp is used after ifnet_head_done() without taking a
	 * reference; presumably ifnet_get_local_ports_extended() tolerates a
	 * detaching interface -- confirm.
	 */
	ifnet_head_lock_shared();
	if (IF_INDEX_IN_RANGE(idx)) {
		ifp = ifindex2ifnet[idx];
	}
	ifnet_head_done();

	error = ifnet_get_local_ports_extended(ifp, protocol, flags, bitfield);
	if (error != 0) {
		printf("%s: ifnet_get_local_ports_extended() error %d\n",
		    __func__, error);
		goto done;
	}
	error = SYSCTL_OUT(req, bitfield, bitstr_size(IP_PORTRANGE_SIZE));
done:
	if (bitfield != NULL) {
		kfree_data(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
	}
	return error;
}
689 
/*
 * Record the local port opened by a TCP or UDP socket (inpcb) on the
 * given interface for the current wake cycle.
 * Returns true when a new entry was actually added.
 */
__private_extern__ bool
if_ports_used_add_inpcb(const uint32_t ifindex, const struct inpcb *inp)
{
	struct net_port_info npi = {};
	struct socket *so = inp->inp_socket;

	/* This is unlikely to happen but better be safe than sorry */
	if (ifindex > UINT16_MAX) {
		os_log(OS_LOG_DEFAULT, "%s: ifindex %u too big", __func__, ifindex);
		return false;
	}

	if (ifindex != 0) {
		npi.npi_if_index = (uint16_t)ifindex;
	} else if (inp->inp_last_outifp != NULL) {
		/* Fall back to the last interface used for output */
		npi.npi_if_index = (uint16_t)inp->inp_last_outifp->if_index;
	}
	if (IF_INDEX_IN_RANGE(npi.npi_if_index)) {
		struct ifnet *ifp = ifindex2ifnet[npi.npi_if_index];
		if (ifp != NULL && IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
	}

	npi.npi_flags |= NPIF_SOCKET;

	/* Timestamp with the uptime of the last wake UUID check */
	npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
	npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

	if (so->so_options & SO_NOWAKEFROMSLEEP) {
		npi.npi_flags |= NPIF_NOWAKE;
	}

	if (SOCK_PROTO(so) == IPPROTO_TCP) {
		struct tcpcb *tp = intotcpcb(inp);

		npi.npi_flags |= NPIF_TCP;
		if (tp != NULL && tp->t_state == TCPS_LISTEN) {
			npi.npi_flags |= NPIF_LISTEN;
		}
	} else if (SOCK_PROTO(so) == IPPROTO_UDP) {
		npi.npi_flags |= NPIF_UDP;
	} else {
		/* Only TCP and UDP sockets are tracked */
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol %u for inp %p", __func__,
		    SOCK_PROTO(inp->inp_socket), inp);
		return false;
	}

	uuid_copy(npi.npi_flow_uuid, inp->necp_client_uuid);

	/* Ports are kept in network byte order (copied verbatim from the inpcb) */
	npi.npi_local_port = inp->inp_lport;
	npi.npi_foreign_port = inp->inp_fport;

	/*
	 * Take in account IPv4 addresses mapped on IPv6
	 */
	if ((inp->inp_vflag & INP_IPV6) != 0 && (inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
	    (inp->inp_vflag & (INP_IPV6 | INP_IPV4)) == (INP_IPV6 | INP_IPV4)) {
		npi.npi_flags |= NPIF_IPV6 | NPIF_IPV4;
		/*
		 * NOTE(review): only the local address is copied here; the
		 * foreign address stays zeroed -- presumably because such a
		 * dual-family socket is an unconnected/wildcard one.  Confirm.
		 */
		memcpy(&npi.npi_local_addr_in6,
		    &inp->in6p_laddr, sizeof(struct in6_addr));
	} else if (inp->inp_vflag & INP_IPV4) {
		npi.npi_flags |= NPIF_IPV4;
		npi.npi_local_addr_in = inp->inp_laddr;
		npi.npi_foreign_addr_in = inp->inp_faddr;
	} else {
		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6,
		    &inp->in6p_laddr, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6,
		    &inp->in6p_faddr, sizeof(struct in6_addr));

		/* Clear the embedded scope ID */
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
			npi.npi_local_addr_in6.s6_addr16[1] = 0;
		}
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
			npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
		}
	}

	npi.npi_owner_pid = so->last_pid;

	if (so->last_pid != 0) {
		proc_name(so->last_pid, npi.npi_owner_pname,
		    sizeof(npi.npi_owner_pname));
		uuid_copy(npi.npi_owner_uuid, so->last_uuid);
	}

	/* For delegated sockets, also record the effective process */
	if (so->so_flags & SOF_DELEGATED) {
		npi.npi_flags |= NPIF_DELEGATED;
		npi.npi_effective_pid = so->e_pid;
		if (so->e_pid != 0) {
			proc_name(so->e_pid, npi.npi_effective_pname,
			    sizeof(npi.npi_effective_pname));
		}
		uuid_copy(npi.npi_effective_uuid, so->e_uuid);
	} else {
		npi.npi_effective_pid = so->last_pid;
		if (so->last_pid != 0) {
			strlcpy(npi.npi_effective_pname, npi.npi_owner_pname,
			    sizeof(npi.npi_effective_pname));
		}
		uuid_copy(npi.npi_effective_uuid, so->last_uuid);
	}

	return net_port_info_add_entry(&npi);
}
798 
799 #if SKYWALK
/*
 * Record the local port opened by a Skywalk flow entry for the current
 * wake cycle.  Returns true when a new entry was actually added.
 */
__private_extern__ bool
if_ports_used_add_flow_entry(const struct flow_entry *fe, const uint32_t ifindex,
    const struct ns_flow_info *nfi, uint32_t ns_flags)
{
	struct net_port_info npi = {};

	/* This is unlikely to happen but better be safe than sorry */
	if (ifindex > UINT16_MAX) {
		os_log(OS_LOG_DEFAULT, "%s: ifindex %u too big", __func__, ifindex);
		return false;
	}
	npi.npi_if_index = (uint16_t)ifindex;
	if (IF_INDEX_IN_RANGE(ifindex)) {
		struct ifnet *ifp = ifindex2ifnet[ifindex];
		if (ifp != NULL && IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
	}

	npi.npi_flags |= NPIF_CHANNEL;

	/* Timestamp with the uptime of the last wake UUID check */
	npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
	npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

	if (ns_flags & NETNS_NOWAKEFROMSLEEP) {
		npi.npi_flags |= NPIF_NOWAKE;
	}
	if ((ns_flags & NETNS_OWNER_MASK) == NETNS_LISTENER) {
		npi.npi_flags |= NPIF_LISTEN;
	}

	uuid_copy(npi.npi_flow_uuid, nfi->nfi_flow_uuid);

	if (nfi->nfi_protocol == IPPROTO_TCP) {
		npi.npi_flags |= NPIF_TCP;
	} else if (nfi->nfi_protocol == IPPROTO_UDP) {
		npi.npi_flags |= NPIF_UDP;
	} else {
		/* Only TCP and UDP flows are tracked */
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol %u for nfi %p",
		    __func__, nfi->nfi_protocol, nfi);
		return false;
	}

	/* NOTE(review): any non-AF_INET local address is treated as IPv6 -- confirm */
	if (nfi->nfi_laddr.sa.sa_family == AF_INET) {
		npi.npi_flags |= NPIF_IPV4;

		npi.npi_local_port = nfi->nfi_laddr.sin.sin_port;
		npi.npi_foreign_port = nfi->nfi_faddr.sin.sin_port;

		npi.npi_local_addr_in = nfi->nfi_laddr.sin.sin_addr;
		npi.npi_foreign_addr_in = nfi->nfi_faddr.sin.sin_addr;
	} else {
		npi.npi_flags |= NPIF_IPV6;

		npi.npi_local_port = nfi->nfi_laddr.sin6.sin6_port;
		npi.npi_foreign_port = nfi->nfi_faddr.sin6.sin6_port;

		memcpy(&npi.npi_local_addr_in6,
		    &nfi->nfi_laddr.sin6.sin6_addr, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6,
		    &nfi->nfi_faddr.sin6.sin6_addr, sizeof(struct in6_addr));

		/* Clear the embedded scope ID */
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
			npi.npi_local_addr_in6.s6_addr16[1] = 0;
		}
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
			npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
		}
	}

	npi.npi_owner_pid = nfi->nfi_owner_pid;
	strlcpy(npi.npi_owner_pname, nfi->nfi_owner_name,
	    sizeof(npi.npi_owner_pname));

	/*
	 * Get the proc UUID from the pid as the proc UUID is not present
	 * in the flow_entry
	 */
	proc_t proc = proc_find(npi.npi_owner_pid);
	if (proc != PROC_NULL) {
		proc_getexecutableuuid(proc, npi.npi_owner_uuid, sizeof(npi.npi_owner_uuid));
		proc_rele(proc);
	}
	/* An effective pid of -1 means the flow is not delegated */
	if (nfi->nfi_effective_pid != -1) {
		npi.npi_effective_pid = nfi->nfi_effective_pid;
		strlcpy(npi.npi_effective_pname, nfi->nfi_effective_name,
		    sizeof(npi.npi_effective_pname));
		uuid_copy(npi.npi_effective_uuid, fe->fe_eproc_uuid);
	} else {
		npi.npi_effective_pid = npi.npi_owner_pid;
		strlcpy(npi.npi_effective_pname, npi.npi_owner_pname,
		    sizeof(npi.npi_effective_pname));
		uuid_copy(npi.npi_effective_uuid, npi.npi_owner_uuid);
	}

	return net_port_info_add_entry(&npi);
}
898 
899 #endif /* SKYWALK */
900 
901 static void
net_port_info_log_npi(const char * s,const struct net_port_info * npi)902 net_port_info_log_npi(const char *s, const struct net_port_info *npi)
903 {
904 	char lbuf[MAX_IPv6_STR_LEN] = {};
905 	char fbuf[MAX_IPv6_STR_LEN] = {};
906 
907 	if (npi->npi_flags & NPIF_IPV4) {
908 		inet_ntop(PF_INET, &npi->npi_local_addr_in.s_addr,
909 		    lbuf, sizeof(lbuf));
910 		inet_ntop(PF_INET, &npi->npi_foreign_addr_in.s_addr,
911 		    fbuf, sizeof(fbuf));
912 	} else if (npi->npi_flags & NPIF_IPV6) {
913 		inet_ntop(PF_INET6, &npi->npi_local_addr_in6,
914 		    lbuf, sizeof(lbuf));
915 		inet_ntop(PF_INET6, &npi->npi_foreign_addr_in6,
916 		    fbuf, sizeof(fbuf));
917 	}
918 	os_log(OS_LOG_DEFAULT, "%s net_port_info if_index %u arch %s family %s proto %s local %s:%u foreign %s:%u pid: %u epid %u",
919 	    s != NULL ? s : "",
920 	    npi->npi_if_index,
921 	    (npi->npi_flags & NPIF_SOCKET) ? "so" : (npi->npi_flags & NPIF_CHANNEL) ? "ch" : "unknown",
922 	    (npi->npi_flags & NPIF_IPV4) ? "ipv4" : (npi->npi_flags & NPIF_IPV6) ? "ipv6" : "unknown",
923 	    npi->npi_flags & NPIF_TCP ? "tcp" : npi->npi_flags & NPIF_UDP ? "udp" :
924 	    npi->npi_flags & NPIF_ESP ? "esp" : "unknown",
925 	    lbuf, ntohs(npi->npi_local_port),
926 	    fbuf, ntohs(npi->npi_foreign_port),
927 	    npi->npi_owner_pid,
928 	    npi->npi_effective_pid);
929 }
930 
931 #define NPI_MATCH_IPV4 (NPIF_IPV4 | NPIF_TCP | NPIF_UDP)
932 #define NPI_MATCH_IPV6 (NPIF_IPV6 | NPIF_TCP | NPIF_UDP)
933 
934 static bool
net_port_info_match_npi(struct net_port_entry * npe,const struct net_port_info * in_npi,struct net_port_entry ** best_match)935 net_port_info_match_npi(struct net_port_entry *npe, const struct net_port_info *in_npi,
936     struct net_port_entry **best_match)
937 {
938 	if (__improbable(net_wake_pkt_debug > 1)) {
939 		net_port_info_log_npi("  ", &npe->npe_npi);
940 	}
941 
942 	/*
943 	 * The interfaces must match or be both companion link
944 	 */
945 	if (npe->npe_npi.npi_if_index != in_npi->npi_if_index &&
946 	    !((npe->npe_npi.npi_flags & NPIF_COMPLINK) && (in_npi->npi_flags & NPIF_COMPLINK))) {
947 		return false;
948 	}
949 
950 	/*
951 	 * The local ports and protocols must match
952 	 */
953 	if (npe->npe_npi.npi_local_port != in_npi->npi_local_port ||
954 	    ((npe->npe_npi.npi_flags & NPI_MATCH_IPV4) != (in_npi->npi_flags & NPI_MATCH_IPV4) &&
955 	    (npe->npe_npi.npi_flags & NPI_MATCH_IPV6) != (in_npi->npi_flags & NPI_MATCH_IPV6))) {
956 		return false;
957 	}
958 	/*
959 	 * Search stops on an exact match
960 	 */
961 	if (npe->npe_npi.npi_foreign_port == in_npi->npi_foreign_port) {
962 		if ((npe->npe_npi.npi_flags & NPIF_IPV4) && (npe->npe_npi.npi_flags & NPIF_IPV4)) {
963 			if (in_npi->npi_local_addr_in.s_addr == npe->npe_npi.npi_local_addr_in.s_addr &&
964 			    in_npi->npi_foreign_addr_in.s_addr == npe->npe_npi.npi_foreign_addr_in.s_addr) {
965 				*best_match = npe;
966 				return true;
967 			}
968 		}
969 		if ((npe->npe_npi.npi_flags & NPIF_IPV6) && (npe->npe_npi.npi_flags & NPIF_IPV6)) {
970 			if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
971 			    sizeof(union in_addr_4_6)) == 0 &&
972 			    memcmp(&npe->npe_npi.npi_foreign_addr_, &in_npi->npi_foreign_addr_,
973 			    sizeof(union in_addr_4_6)) == 0) {
974 				*best_match = npe;
975 				return true;
976 			}
977 		}
978 	}
979 	/*
980 	 * Skip connected entries as we are looking for a wildcard match
981 	 * on the local address and port
982 	 */
983 	if (npe->npe_npi.npi_foreign_port != 0) {
984 		return false;
985 	}
986 	/*
987 	 * The local address matches: this is our 2nd best match
988 	 */
989 	if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
990 	    sizeof(union in_addr_4_6)) == 0) {
991 		*best_match = npe;
992 		return false;
993 	}
994 	/*
995 	 * Only the local port matches, do not override a match
996 	 * on the local address
997 	 */
998 	if (*best_match == NULL) {
999 		*best_match = npe;
1000 	}
1001 	return false;
1002 }
1003 
1004 /*
1005  *
1006  */
1007 static bool
net_port_info_find_match(struct net_port_info * in_npi)1008 net_port_info_find_match(struct net_port_info *in_npi)
1009 {
1010 	struct net_port_entry *npe;
1011 	struct net_port_entry *best_match = NULL;
1012 
1013 	lck_mtx_lock(&net_port_entry_head_lock);
1014 
1015 	uint32_t count = 0;
1016 	TAILQ_FOREACH(npe, NPE_HASH_HEAD(in_npi->npi_local_port), npe_hash_next) {
1017 		count += 1;
1018 		if (net_port_info_match_npi(npe, in_npi, &best_match)) {
1019 			break;
1020 		}
1021 	}
1022 
1023 	if (best_match != NULL) {
1024 		best_match->npe_npi.npi_flags |= NPIF_WAKEPKT;
1025 		in_npi->npi_owner_pid = best_match->npe_npi.npi_owner_pid;
1026 		in_npi->npi_effective_pid = best_match->npe_npi.npi_effective_pid;
1027 		strlcpy(in_npi->npi_owner_pname, best_match->npe_npi.npi_owner_pname,
1028 		    sizeof(in_npi->npi_owner_pname));
1029 		strlcpy(in_npi->npi_effective_pname, best_match->npe_npi.npi_effective_pname,
1030 		    sizeof(in_npi->npi_effective_pname));
1031 		uuid_copy(in_npi->npi_owner_uuid, best_match->npe_npi.npi_owner_uuid);
1032 		uuid_copy(in_npi->npi_effective_uuid, best_match->npe_npi.npi_effective_uuid);
1033 	}
1034 	lck_mtx_unlock(&net_port_entry_head_lock);
1035 
1036 	if (__improbable(net_wake_pkt_debug > 0)) {
1037 		if (best_match != NULL) {
1038 			net_port_info_log_npi("wake packet match", in_npi);
1039 		} else {
1040 			net_port_info_log_npi("wake packet no match", in_npi);
1041 		}
1042 	}
1043 
1044 	return best_match != NULL ? true : false;
1045 }
1046 
1047 #if (DEBUG || DEVELOPMENT)
1048 static void
net_port_info_log_una_wake_event(const char * s,struct net_port_info_una_wake_event * ev)1049 net_port_info_log_una_wake_event(const char *s, struct net_port_info_una_wake_event *ev)
1050 {
1051 	char lbuf[MAX_IPv6_STR_LEN] = {};
1052 	char fbuf[MAX_IPv6_STR_LEN] = {};
1053 
1054 	if (ev->una_wake_pkt_flags & NPIF_IPV4) {
1055 		inet_ntop(PF_INET, &ev->una_wake_pkt_local_addr_._in_a_4.s_addr,
1056 		    lbuf, sizeof(lbuf));
1057 		inet_ntop(PF_INET, &ev->una_wake_pkt_foreign_addr_._in_a_4.s_addr,
1058 		    fbuf, sizeof(fbuf));
1059 	} else if (ev->una_wake_pkt_flags & NPIF_IPV6) {
1060 		inet_ntop(PF_INET6, &ev->una_wake_pkt_local_addr_._in_a_6.s6_addr,
1061 		    lbuf, sizeof(lbuf));
1062 		inet_ntop(PF_INET6, &ev->una_wake_pkt_foreign_addr_._in_a_6.s6_addr,
1063 		    fbuf, sizeof(fbuf));
1064 	}
1065 	os_log(OS_LOG_DEFAULT, "%s if %s (%u) proto %s local %s:%u foreign %s:%u len: %u datalen: %u cflags: 0x%x proto: %u",
1066 	    s != NULL ? s : "",
1067 	    ev->una_wake_pkt_ifname, ev->una_wake_pkt_if_index,
1068 	    ev->una_wake_pkt_flags & NPIF_TCP ? "tcp" : ev->una_wake_pkt_flags ? "udp" :
1069 	    ev->una_wake_pkt_flags & NPIF_ESP ? "esp" : "unknown",
1070 	    lbuf, ntohs(ev->una_wake_pkt_local_port),
1071 	    fbuf, ntohs(ev->una_wake_pkt_foreign_port),
1072 	    ev->una_wake_pkt_total_len, ev->una_wake_pkt_data_len,
1073 	    ev->una_wake_pkt_control_flags, ev->una_wake_pkt_proto);
1074 }
1075 
1076 static void
net_port_info_log_wake_event(const char * s,struct net_port_info_wake_event * ev)1077 net_port_info_log_wake_event(const char *s, struct net_port_info_wake_event *ev)
1078 {
1079 	char lbuf[MAX_IPv6_STR_LEN] = {};
1080 	char fbuf[MAX_IPv6_STR_LEN] = {};
1081 
1082 	if (ev->wake_pkt_flags & NPIF_IPV4) {
1083 		inet_ntop(PF_INET, &ev->wake_pkt_local_addr_._in_a_4.s_addr,
1084 		    lbuf, sizeof(lbuf));
1085 		inet_ntop(PF_INET, &ev->wake_pkt_foreign_addr_._in_a_4.s_addr,
1086 		    fbuf, sizeof(fbuf));
1087 	} else if (ev->wake_pkt_flags & NPIF_IPV6) {
1088 		inet_ntop(PF_INET6, &ev->wake_pkt_local_addr_._in_a_6.s6_addr,
1089 		    lbuf, sizeof(lbuf));
1090 		inet_ntop(PF_INET6, &ev->wake_pkt_foreign_addr_._in_a_6.s6_addr,
1091 		    fbuf, sizeof(fbuf));
1092 	}
1093 	os_log(OS_LOG_DEFAULT, "%s if %s (%u) proto %s local %s:%u foreign %s:%u len: %u datalen: %u cflags: 0x%x proc %s eproc %s",
1094 	    s != NULL ? s : "",
1095 	    ev->wake_pkt_ifname, ev->wake_pkt_if_index,
1096 	    ev->wake_pkt_flags & NPIF_TCP ? "tcp" : ev->wake_pkt_flags ? "udp" :
1097 	    ev->wake_pkt_flags & NPIF_ESP ? "esp" : "unknown",
1098 	    lbuf, ntohs(ev->wake_pkt_port),
1099 	    fbuf, ntohs(ev->wake_pkt_foreign_port),
1100 	    ev->wake_pkt_total_len, ev->wake_pkt_data_len, ev->wake_pkt_control_flags,
1101 	    ev->wake_pkt_owner_pname, ev->wake_pkt_effective_pname);
1102 }
1103 
1104 #endif /* (DEBUG || DEVELOPMENT) */
1105 
/*
 * Post a KEV_POWER_UNATTRIBUTED_WAKE kernel event for a wake packet
 * received as an mbuf that could not be attributed to a process.
 *
 * Only one unattributed wake event is posted per wake (guarded by
 * has_notified_unattributed_wake under net_port_entry_head_lock);
 * duplicates are counted and optionally logged.
 *
 * The event carries the 5-tuple from <npi>, the packet lengths and
 * control flags supplied by the caller, and up to
 * NPI_MAX_UNA_WAKE_PKT_LEN bytes copied from the start of <m>.
 */
static void
if_notify_unattributed_wake_mbuf(struct ifnet *ifp, struct mbuf *m,
    struct net_port_info *npi, uint32_t pkt_total_len, uint32_t pkt_data_len,
    uint16_t pkt_control_flags, uint16_t proto)
{
	struct kev_msg ev_msg = {};

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);

	/* Take the lock only to test-and-set the once-per-wake flag */
	lck_mtx_lock(&net_port_entry_head_lock);
	if (has_notified_unattributed_wake) {
		lck_mtx_unlock(&net_port_entry_head_lock);
		if_ports_used_stats.ifpu_dup_unattributed_wake_event += 1;

		if (__improbable(net_wake_pkt_debug > 0)) {
			net_port_info_log_npi("already notified unattributed wake packet", npi);
		}
		return;
	}
	has_notified_unattributed_wake = true;
	lck_mtx_unlock(&net_port_entry_head_lock);

	if_ports_used_stats.ifpu_unattributed_wake_event += 1;

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
	ev_msg.event_code  = KEV_POWER_UNATTRIBUTED_WAKE;

	struct net_port_info_una_wake_event event_data = {};
	uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
	/* The receive interface may be NULL; report if_index 0 in that case */
	event_data.una_wake_pkt_if_index = ifp != NULL ? ifp->if_index : 0;
	event_data.una_wake_pkt_flags = npi->npi_flags;

	event_data.una_wake_pkt_local_port = npi->npi_local_port;
	event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
	event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
	event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;

	event_data.una_wake_pkt_total_len = pkt_total_len;
	event_data.una_wake_pkt_data_len = pkt_data_len;
	event_data.una_wake_pkt_control_flags = pkt_control_flags;
	event_data.una_wake_pkt_proto = proto;

	if (ifp != NULL) {
		strlcpy(event_data.una_wake_pkt_ifname, IF_XNAME(ifp),
		    sizeof(event_data.una_wake_pkt_ifname));
	} else {
		if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
	}

	/* Copy a truncated snapshot of the packet into the event */
	event_data.una_wake_ptk_len = m->m_pkthdr.len > NPI_MAX_UNA_WAKE_PKT_LEN ?
	    NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t)m->m_pkthdr.len;

	errno_t error = mbuf_copydata(m, 0, event_data.una_wake_ptk_len,
	    (void *)event_data.una_wake_pkt);
	if (error != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: mbuf_copydata() failed with error %d for wake uuid %s",
		    __func__, error, wake_uuid_str);

		/* NOTE(review): the flag stays set, so no event is posted for this wake */
		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
		return;
	}

	ev_msg.dv[0].data_ptr = &event_data;
	ev_msg.dv[0].data_length = sizeof(event_data);

	int result = kev_post_msg(&ev_msg);
	if (result != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
		    __func__, result, wake_uuid_str);

		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
	}

#if (DEBUG || DEVELOPMENT)
	net_port_info_log_una_wake_event("unattributed wake packet event", &event_data);
#endif /* (DEBUG || DEVELOPMENT) */
}
1193 
/*
 * Post a KEV_POWER_WAKE_PACKET kernel event for a wake packet that was
 * attributed to a process (see net_port_info_find_match()).
 *
 * Only one attributed wake event is posted per wake (guarded by
 * has_notified_wake_pkt under net_port_entry_head_lock); duplicates are
 * counted and optionally logged.  A copy of the event is kept in
 * last_attributed_wake_event for the sysctl handler.
 *
 * <ifp> must not be NULL (dereferenced unconditionally below).
 */
static void
if_notify_wake_packet(struct ifnet *ifp, struct net_port_info *npi,
    uint32_t pkt_total_len, uint32_t pkt_data_len, uint16_t pkt_control_flags)
{
	struct kev_msg ev_msg = {};

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
	ev_msg.event_code  = KEV_POWER_WAKE_PACKET;

	/* Build the event payload before taking the lock */
	struct net_port_info_wake_event event_data = {};

	uuid_copy(event_data.wake_uuid, current_wakeuuid);
	event_data.wake_pkt_if_index = ifp->if_index;
	event_data.wake_pkt_port = npi->npi_local_port;
	event_data.wake_pkt_flags = npi->npi_flags;
	event_data.wake_pkt_owner_pid = npi->npi_owner_pid;
	event_data.wake_pkt_effective_pid = npi->npi_effective_pid;
	strlcpy(event_data.wake_pkt_owner_pname, npi->npi_owner_pname,
	    sizeof(event_data.wake_pkt_owner_pname));
	strlcpy(event_data.wake_pkt_effective_pname, npi->npi_effective_pname,
	    sizeof(event_data.wake_pkt_effective_pname));
	uuid_copy(event_data.wake_pkt_owner_uuid, npi->npi_owner_uuid);
	uuid_copy(event_data.wake_pkt_effective_uuid, npi->npi_effective_uuid);

	event_data.wake_pkt_foreign_port = npi->npi_foreign_port;
	event_data.wake_pkt_local_addr_ = npi->npi_local_addr_;
	event_data.wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
	strlcpy(event_data.wake_pkt_ifname, IF_XNAME(ifp), sizeof(event_data.wake_pkt_ifname));

	event_data.wake_pkt_total_len = pkt_total_len;
	event_data.wake_pkt_data_len = pkt_data_len;
	event_data.wake_pkt_control_flags = pkt_control_flags;

	ev_msg.dv[0].data_ptr = &event_data;
	ev_msg.dv[0].data_length = sizeof(event_data);

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);

	lck_mtx_lock(&net_port_entry_head_lock);

	/* Test-and-set the once-per-wake flag under the lock */
	if (has_notified_wake_pkt) {
		lck_mtx_unlock(&net_port_entry_head_lock);
		if_ports_used_stats.ifpu_dup_wake_pkt_event += 1;

		if (__improbable(net_wake_pkt_debug > 0)) {
			net_port_info_log_npi("already notified wake packet", npi);
		}
		return;
	}
	has_notified_wake_pkt = true;

	/* Snapshot for sysctl_last_attributed_wake_event */
	memcpy(&last_attributed_wake_event, &event_data, sizeof(last_attributed_wake_event));

	lck_mtx_unlock(&net_port_entry_head_lock);

	if_ports_used_stats.ifpu_wake_pkt_event += 1;


	int result = kev_post_msg(&ev_msg);
	if (result != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
		    __func__, result, wake_uuid_str);

		if_ports_used_stats.ifpu_wake_pkt_event_error += 1;
	}
#if (DEBUG || DEVELOPMENT)
	net_port_info_log_wake_event("attributed wake packet event", &event_data);
#endif /* (DEBUG || DEVELOPMENT) */
}
1269 
1270 static bool
is_encapsulated_esp(struct mbuf * m,size_t data_offset)1271 is_encapsulated_esp(struct mbuf *m, size_t data_offset)
1272 {
1273 	/*
1274 	 * They are three cases:
1275 	 * - Keep alive: 1 byte payload
1276 	 * - IKE: payload start with 4 bytes header set to zero before ISAKMP header
1277 	 * - otherwise it's ESP
1278 	 */
1279 	ASSERT(m->m_pkthdr.len >= data_offset);
1280 
1281 	size_t data_len = m->m_pkthdr.len - data_offset;
1282 	if (data_len == 1) {
1283 		return false;
1284 	} else if (data_len > ESP_HDR_SIZE) {
1285 		uint8_t payload[ESP_HDR_SIZE];
1286 
1287 		errno_t error = mbuf_copydata(m, data_offset, ESP_HDR_SIZE, &payload);
1288 		if (error != 0) {
1289 			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ESP_HDR_SIZE) error %d",
1290 			    __func__, error);
1291 		} else if (payload[0] == 0 && payload[1] == 0 &&
1292 		    payload[2] == 0 && payload[3] == 0) {
1293 			return false;
1294 		}
1295 	}
1296 	return true;
1297 }
1298 
/*
 * Attribute a wake packet received as an mbuf (socket data path).
 *
 * Parses the IPv4/IPv6 and TCP/UDP headers out of <m> to build a
 * net_port_info 5-tuple, then either posts an attributed wake event
 * (when a registered port entry matches) or an unattributed wake event.
 *
 * Matching of ESP and of UDP-encapsulated ESP (NAT-T) is deferred to the
 * ESP layer: the function returns without posting any event in that case.
 *
 * <m> must have PKTF_WAKE_PKT set; <ifp> may be NULL (treated as failure
 * to attribute).
 */
void
if_ports_used_match_mbuf(struct ifnet *ifp, protocol_family_t proto_family, struct mbuf *m)
{
	errno_t error;
	struct net_port_info npi = {};
	bool found = false;
	uint32_t pkt_total_len = 0;
	uint32_t pkt_data_len = 0;
	uint16_t pkt_control_flags = 0;
	uint16_t pkt_proto = 0;	/* IP protocol number (e.g. IPPROTO_TCP) */

	if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == 0) {
		if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
		os_log_error(OS_LOG_DEFAULT, "%s: called PKTF_WAKE_PKT not set from %s",
		    __func__, ifp != NULL ? IF_XNAME(ifp) : "");
		return;
	}

	if_ports_used_stats.ifpu_so_match_wake_pkt += 1;
	npi.npi_flags |= NPIF_SOCKET; /* For logging */
	pkt_total_len = m->m_pkthdr.len;
	/* pkt_data_len is whittled down to the L4 payload length below */
	pkt_data_len = pkt_total_len;

	if (ifp != NULL) {
		npi.npi_if_index = ifp->if_index;
		if (IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
	}

	if (proto_family == PF_INET) {
		struct ip iphdr = {};

		if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;

		error = mbuf_copydata(m, 0, sizeof(struct ip), &iphdr);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip) error %d",
			    __func__, error);
			goto failed;
		}
		npi.npi_flags |= NPIF_IPV4;
		/* Local is the destination of an inbound packet */
		npi.npi_local_addr_in = iphdr.ip_dst;
		npi.npi_foreign_addr_in = iphdr.ip_src;

		/*
		 * Check if this is a fragment that is not the first fragment
		 */
		if ((ntohs(iphdr.ip_off) & ~(IP_DF | IP_RF)) &&
		    (ntohs(iphdr.ip_off) & IP_OFFMASK) != 0) {
			npi.npi_flags |= NPIF_FRAG;
			if_ports_used_stats.ifpu_frag_wake_pkt += 1;
		}

		/* Strip the IPv4 header length (ip_hl is in 32-bit words) */
		if ((iphdr.ip_hl << 2) < pkt_data_len) {
			pkt_data_len -= iphdr.ip_hl << 2;
		} else {
			pkt_data_len = 0;
		}

		pkt_proto = iphdr.ip_p;

		switch (iphdr.ip_p) {
		case IPPROTO_TCP: {
			if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
			npi.npi_flags |= NPIF_TCP;

			/* A non-first fragment does not carry the TCP header */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct tcphdr th = {};
			error = mbuf_copydata(m, iphdr.ip_hl << 2, sizeof(struct tcphdr), &th);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(tcphdr) error %d",
				    __func__, error);
				goto failed;
			}
			npi.npi_local_port = th.th_dport;
			npi.npi_foreign_port = th.th_sport;

			/* Strip the TCP header (th_off is in 32-bit words) */
			if (pkt_data_len < sizeof(struct tcphdr) ||
			    pkt_data_len < (th.th_off << 2)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= th.th_off << 2;
			}
			pkt_control_flags = th.th_flags;
			break;
		}
		case IPPROTO_UDP: {
			if_ports_used_stats.ifpu_udp_wake_pkt += 1;
			npi.npi_flags |= NPIF_UDP;

			/* A non-first fragment does not carry the UDP header */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}
			struct udphdr uh = {};
			size_t udp_offset = iphdr.ip_hl << 2;

			error = mbuf_copydata(m, udp_offset, sizeof(struct udphdr), &uh);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(udphdr) error %d",
				    __func__, error);
				goto failed;
			}
			npi.npi_local_port = uh.uh_dport;
			npi.npi_foreign_port = uh.uh_sport;
			/*
			 * Let the ESP layer handle wake packets
			 */
			if (ntohs(uh.uh_dport) == PORT_ISAKMP_NATT ||
			    ntohs(uh.uh_sport) == PORT_ISAKMP_NATT) {
				if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
				if (is_encapsulated_esp(m, udp_offset + sizeof(struct udphdr))) {
					if (net_wake_pkt_debug > 0) {
						net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
					}
					return;
				}
			}

			if (pkt_data_len < sizeof(struct udphdr)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= sizeof(struct udphdr);
			}
			break;
		}
		case IPPROTO_ESP: {
			/*
			 * Let the ESP layer handle wake packets
			 */
			if_ports_used_stats.ifpu_esp_wake_pkt += 1;
			npi.npi_flags |= NPIF_ESP;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ESP matching", &npi);
			}
			return;
		}
		default:
			if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;
			os_log(OS_LOG_DEFAULT, "%s: unexpected IPv4 protocol %u from %s",
			    __func__, iphdr.ip_p, IF_XNAME(ifp));
			goto failed;
		}
	} else if (proto_family == PF_INET6) {
		struct ip6_hdr ip6_hdr = {};

		if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;

		error = mbuf_copydata(m, 0, sizeof(struct ip6_hdr), &ip6_hdr);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip6_hdr) error %d",
			    __func__, error);
			goto failed;
		}
		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6, &ip6_hdr.ip6_dst, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6, &ip6_hdr.ip6_src, sizeof(struct in6_addr));

		size_t l3_len = sizeof(struct ip6_hdr);
		uint8_t l4_proto = ip6_hdr.ip6_nxt;

		/*
		 * NOTE(review): only a fragment extension header is handled
		 * below; other IPv6 extension headers fall to the bad-proto
		 * default case
		 */
		pkt_proto = l4_proto;

		if (pkt_data_len < l3_len) {
			pkt_data_len = 0;
		} else {
			pkt_data_len -= l3_len;
		}

		/*
		 * Check if this is a fragment that is not the first fragment
		 */
		if (l4_proto == IPPROTO_FRAGMENT) {
			struct ip6_frag ip6_frag;

			error = mbuf_copydata(m, sizeof(struct ip6_hdr), sizeof(struct ip6_frag), &ip6_frag);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip6_frag) error %d",
				    __func__, error);
				goto failed;
			}

			l3_len += sizeof(struct ip6_frag);
			l4_proto = ip6_frag.ip6f_nxt;

			if ((ip6_frag.ip6f_offlg & IP6F_OFF_MASK) != 0) {
				npi.npi_flags |= NPIF_FRAG;
				if_ports_used_stats.ifpu_frag_wake_pkt += 1;
			}
		}


		switch (l4_proto) {
		case IPPROTO_TCP: {
			if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
			npi.npi_flags |= NPIF_TCP;

			/*
			 * Cannot attribute a fragment that is not the first fragment as it
			 * does not have the TCP header
			 */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct tcphdr th = {};

			error = mbuf_copydata(m, l3_len, sizeof(struct tcphdr), &th);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(tcphdr) error %d",
				    __func__, error);
				if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
				goto failed;
			}
			npi.npi_local_port = th.th_dport;
			npi.npi_foreign_port = th.th_sport;

			if (pkt_data_len < sizeof(struct tcphdr) ||
			    pkt_data_len < (th.th_off << 2)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= th.th_off << 2;
			}
			pkt_control_flags = th.th_flags;
			break;
		}
		case IPPROTO_UDP: {
			if_ports_used_stats.ifpu_udp_wake_pkt += 1;
			npi.npi_flags |= NPIF_UDP;

			/*
			 * Cannot attribute a fragment that is not the first fragment as it
			 * does not have the UDP header
			 */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct udphdr uh = {};

			error = mbuf_copydata(m, l3_len, sizeof(struct udphdr), &uh);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(udphdr) error %d",
				    __func__, error);
				if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
				goto failed;
			}
			npi.npi_local_port = uh.uh_dport;
			npi.npi_foreign_port = uh.uh_sport;
			/*
			 * Let the ESP layer handle wake packets
			 */
			if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
			    ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
				if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
				if (is_encapsulated_esp(m, l3_len + sizeof(struct udphdr))) {
					if (net_wake_pkt_debug > 0) {
						net_port_info_log_npi("defer encapsulated ESP matching", &npi);
					}
					return;
				}
			}

			if (pkt_data_len < sizeof(struct udphdr)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= sizeof(struct udphdr);
			}
			break;
		}
		case IPPROTO_ESP: {
			/*
			 * Let the ESP layer handle the wake packet
			 */
			if_ports_used_stats.ifpu_esp_wake_pkt += 1;
			npi.npi_flags |= NPIF_ESP;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ESP matching", &npi);
			}
			return;
		}
		default:
			if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;

			os_log(OS_LOG_DEFAULT, "%s: unexpected IPv6 protocol %u from %s",
			    __func__, ip6_hdr.ip6_nxt, IF_XNAME(ifp));
			goto failed;
		}
	} else {
		if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol family %d from %s",
		    __func__, proto_family, IF_XNAME(ifp));
		goto failed;
	}
	/* Cannot post an attributed event without a receive interface */
	if (ifp == NULL) {
		goto failed;
	}

	found = net_port_info_find_match(&npi);
	if (found) {
		if_notify_wake_packet(ifp, &npi,
		    pkt_total_len, pkt_data_len, pkt_control_flags);
	} else {
		if_notify_unattributed_wake_mbuf(ifp, m, &npi,
		    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
	}
	return;
failed:
	if_notify_unattributed_wake_mbuf(ifp, m, &npi,
	    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
}
1613 
1614 #if SKYWALK
1615 
1616 static void
if_notify_unattributed_wake_pkt(struct ifnet * ifp,struct __kern_packet * pkt,struct net_port_info * npi,uint32_t pkt_total_len,uint32_t pkt_data_len,uint16_t pkt_control_flags,uint16_t proto)1617 if_notify_unattributed_wake_pkt(struct ifnet *ifp, struct __kern_packet *pkt,
1618     struct net_port_info *npi, uint32_t pkt_total_len, uint32_t pkt_data_len,
1619     uint16_t pkt_control_flags, uint16_t proto)
1620 {
1621 	struct kev_msg ev_msg = {};
1622 
1623 	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);
1624 
1625 	lck_mtx_lock(&net_port_entry_head_lock);
1626 	if (has_notified_unattributed_wake) {
1627 		lck_mtx_unlock(&net_port_entry_head_lock);
1628 		if_ports_used_stats.ifpu_dup_unattributed_wake_event += 1;
1629 
1630 		if (__improbable(net_wake_pkt_debug > 0)) {
1631 			net_port_info_log_npi("already notified unattributed wake packet", npi);
1632 		}
1633 		return;
1634 	}
1635 	has_notified_unattributed_wake = true;
1636 	lck_mtx_unlock(&net_port_entry_head_lock);
1637 
1638 	if_ports_used_stats.ifpu_unattributed_wake_event += 1;
1639 
1640 	if (ifp == NULL) {
1641 		os_log(OS_LOG_DEFAULT, "%s: receive interface is NULL",
1642 		    __func__);
1643 		if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
1644 	}
1645 
1646 	ev_msg.vendor_code = KEV_VENDOR_APPLE;
1647 	ev_msg.kev_class = KEV_NETWORK_CLASS;
1648 	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
1649 	ev_msg.event_code  = KEV_POWER_UNATTRIBUTED_WAKE;
1650 
1651 	struct net_port_info_una_wake_event event_data = {};
1652 	uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
1653 	event_data.una_wake_pkt_if_index = ifp != NULL ? ifp->if_index : 0;
1654 	event_data.una_wake_pkt_flags = npi->npi_flags;
1655 
1656 	uint16_t offset = kern_packet_get_network_header_offset(SK_PKT2PH(pkt));
1657 	event_data.una_wake_ptk_len =
1658 	    pkt->pkt_length - offset > NPI_MAX_UNA_WAKE_PKT_LEN ?
1659 	    NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t) pkt->pkt_length - offset;
1660 
1661 	kern_packet_copy_bytes(SK_PKT2PH(pkt), offset, event_data.una_wake_ptk_len,
1662 	    event_data.una_wake_pkt);
1663 
1664 	event_data.una_wake_pkt_local_port = npi->npi_local_port;
1665 	event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
1666 	event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
1667 	event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
1668 	if (ifp != NULL) {
1669 		strlcpy(event_data.una_wake_pkt_ifname, IF_XNAME(ifp),
1670 		    sizeof(event_data.una_wake_pkt_ifname));
1671 	}
1672 
1673 	event_data.una_wake_pkt_total_len = pkt_total_len;
1674 	event_data.una_wake_pkt_data_len = pkt_data_len;
1675 	event_data.una_wake_pkt_control_flags = pkt_control_flags;
1676 	event_data.una_wake_pkt_proto = proto;
1677 
1678 	ev_msg.dv[0].data_ptr = &event_data;
1679 	ev_msg.dv[0].data_length = sizeof(event_data);
1680 
1681 	int result = kev_post_msg(&ev_msg);
1682 	if (result != 0) {
1683 		uuid_string_t wake_uuid_str;
1684 
1685 		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
1686 		os_log_error(OS_LOG_DEFAULT,
1687 		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
1688 		    __func__, result, wake_uuid_str);
1689 
1690 		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
1691 	}
1692 #if (DEBUG || DEVELOPMENT)
1693 	net_port_info_log_una_wake_event("unattributed wake packet event", &event_data);
1694 #endif /* (DEBUG || DEVELOPMENT) */
1695 }
1696 
1697 void
if_ports_used_match_pkt(struct ifnet * ifp,struct __kern_packet * pkt)1698 if_ports_used_match_pkt(struct ifnet *ifp, struct __kern_packet *pkt)
1699 {
1700 	struct net_port_info npi = {};
1701 	bool found = false;
1702 	uint32_t pkt_total_len = 0;
1703 	uint32_t pkt_data_len = 0;
1704 	uint16_t pkt_control_flags = 0;
1705 	uint16_t pkt_proto = 0;
1706 
1707 	if ((pkt->pkt_pflags & PKT_F_WAKE_PKT) == 0) {
1708 		if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
1709 		os_log_error(OS_LOG_DEFAULT, "%s: called PKT_F_WAKE_PKT not set from %s",
1710 		    __func__, IF_XNAME(ifp));
1711 		return;
1712 	}
1713 
1714 	if_ports_used_stats.ifpu_ch_match_wake_pkt += 1;
1715 	npi.npi_flags |= NPIF_CHANNEL; /* For logging */
1716 	pkt_total_len = pkt->pkt_flow_ip_hlen +
1717 	    pkt->pkt_flow_tcp_hlen + pkt->pkt_flow_ulen;
1718 	pkt_data_len = pkt->pkt_flow_ulen;
1719 
1720 	if (ifp != NULL) {
1721 		npi.npi_if_index = ifp->if_index;
1722 		if (IFNET_IS_COMPANION_LINK(ifp)) {
1723 			npi.npi_flags |= NPIF_COMPLINK;
1724 		}
1725 	}
1726 
1727 	switch (pkt->pkt_flow_ip_ver) {
1728 	case IPVERSION:
1729 		if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;
1730 
1731 		npi.npi_flags |= NPIF_IPV4;
1732 		npi.npi_local_addr_in = pkt->pkt_flow_ipv4_dst;
1733 		npi.npi_foreign_addr_in = pkt->pkt_flow_ipv4_src;
1734 		break;
1735 	case IPV6_VERSION:
1736 		if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;
1737 
1738 		npi.npi_flags |= NPIF_IPV6;
1739 		memcpy(&npi.npi_local_addr_in6, &pkt->pkt_flow_ipv6_dst,
1740 		    sizeof(struct in6_addr));
1741 		memcpy(&npi.npi_foreign_addr_in6, &pkt->pkt_flow_ipv6_src,
1742 		    sizeof(struct in6_addr));
1743 		break;
1744 	default:
1745 		if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;
1746 
1747 		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol family %u from %s",
1748 		    __func__, pkt->pkt_flow_ip_ver, IF_XNAME(ifp));
1749 		goto failed;
1750 	}
1751 	pkt_proto = pkt->pkt_flow_ip_ver;
1752 
1753 	/*
1754 	 * Check if this is a fragment that is not the first fragment
1755 	 */
1756 	if (pkt->pkt_flow_ip_is_frag && !pkt->pkt_flow_ip_is_first_frag) {
1757 		os_log(OS_LOG_DEFAULT, "%s: unexpected wake fragment from %s",
1758 		    __func__, IF_XNAME(ifp));
1759 		npi.npi_flags |= NPIF_FRAG;
1760 		if_ports_used_stats.ifpu_frag_wake_pkt += 1;
1761 	}
1762 
1763 	switch (pkt->pkt_flow_ip_proto) {
1764 	case IPPROTO_TCP: {
1765 		if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
1766 		npi.npi_flags |= NPIF_TCP;
1767 
1768 		/*
1769 		 * Cannot attribute a fragment that is not the first fragment as it
1770 		 * not have the TCP header
1771 		 */
1772 		if (npi.npi_flags & NPIF_FRAG) {
1773 			goto failed;
1774 		}
1775 		struct tcphdr *tcp = (struct tcphdr *)pkt->pkt_flow_tcp_hdr;
1776 		if (tcp == NULL) {
1777 			os_log(OS_LOG_DEFAULT, "%s: pkt with unassigned TCP header from %s",
1778 			    __func__, IF_XNAME(ifp));
1779 			if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
1780 			goto failed;
1781 		}
1782 		npi.npi_local_port = tcp->th_dport;
1783 		npi.npi_foreign_port = tcp->th_sport;
1784 		pkt_control_flags = tcp->th_flags;
1785 		break;
1786 	}
1787 	case IPPROTO_UDP: {
1788 		if_ports_used_stats.ifpu_udp_wake_pkt += 1;
1789 		npi.npi_flags |= NPIF_UDP;
1790 
1791 		/*
1792 		 * Cannot attribute a fragment that is not the first fragment as it
1793 		 * not have the UDP header
1794 		 */
1795 		if (npi.npi_flags & NPIF_FRAG) {
1796 			goto failed;
1797 		}
1798 		struct udphdr *uh = (struct udphdr *)pkt->pkt_flow_udp_hdr;
1799 		if (uh == NULL) {
1800 			os_log(OS_LOG_DEFAULT, "%s: pkt with unassigned UDP header from %s",
1801 			    __func__, IF_XNAME(ifp));
1802 			if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
1803 			goto failed;
1804 		}
1805 		npi.npi_local_port = uh->uh_dport;
1806 		npi.npi_foreign_port = uh->uh_sport;
1807 
1808 		/*
1809 		 * Defer matching of UDP NAT traversal to ip_input
1810 		 * (assumes IKE uses sockets)
1811 		 */
1812 		if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
1813 		    ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
1814 			if_ports_used_stats.ifpu_deferred_isakmp_natt_wake_pkt += 1;
1815 			if (net_wake_pkt_debug > 0) {
1816 				net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
1817 			}
1818 			return;
1819 		}
1820 		break;
1821 	}
1822 	case IPPROTO_ESP: {
1823 		/*
1824 		 * Let the ESP layer handle the wake packet
1825 		 */
1826 		if_ports_used_stats.ifpu_esp_wake_pkt += 1;
1827 		npi.npi_flags |= NPIF_ESP;
1828 		if (net_wake_pkt_debug > 0) {
1829 			net_port_info_log_npi("defer ESP matching", &npi);
1830 		}
1831 		return;
1832 	}
1833 	default:
1834 		if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;
1835 
1836 		os_log(OS_LOG_DEFAULT, "%s: unexpected IP protocol %u from %s",
1837 		    __func__, pkt->pkt_flow_ip_proto, IF_XNAME(ifp));
1838 		goto failed;
1839 	}
1840 
1841 	if (ifp == NULL) {
1842 		goto failed;
1843 	}
1844 
1845 	found = net_port_info_find_match(&npi);
1846 	if (found) {
1847 		if_notify_wake_packet(ifp, &npi,
1848 		    pkt_total_len, pkt_data_len, pkt_control_flags);
1849 	} else {
1850 		if_notify_unattributed_wake_pkt(ifp, pkt, &npi,
1851 		    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
1852 	}
1853 	return;
1854 failed:
1855 	if_notify_unattributed_wake_pkt(ifp, pkt, &npi,
1856 	    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
1857 }
1858 #endif /* SKYWALK */
1859 
1860 int
1861 sysctl_last_attributed_wake_event SYSCTL_HANDLER_ARGS
1862 {
1863 #pragma unused(oidp, arg1, arg2)
1864 	size_t len = sizeof(struct net_port_info_wake_event);
1865 
1866 	if (req->oldptr != 0) {
1867 		len = MIN(req->oldlen, len);
1868 	}
1869 	lck_mtx_lock(&net_port_entry_head_lock);
1870 	int error = SYSCTL_OUT(req, &last_attributed_wake_event, len);
1871 	lck_mtx_unlock(&net_port_entry_head_lock);
1872 
1873 	return error;
1874 }
1875