xref: /xnu-8019.80.24/bsd/net/if_ports_used.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2017-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/types.h>
30 #include <sys/sysctl.h>
31 #include <sys/time.h>
32 #include <sys/mcache.h>
33 #include <sys/malloc.h>
34 #include <sys/kauth.h>
35 #include <sys/kern_event.h>
36 #include <sys/bitstring.h>
37 #include <sys/priv.h>
38 #include <sys/proc.h>
39 #include <sys/protosw.h>
40 #include <sys/socket.h>
41 
42 #include <kern/locks.h>
43 #include <kern/zalloc.h>
44 
45 #include <libkern/libkern.h>
46 
47 #include <net/kpi_interface.h>
48 #include <net/if_var.h>
49 #include <net/if_ports_used.h>
50 
51 #include <netinet/in_pcb.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip6.h>
54 #include <netinet/tcp_var.h>
55 #include <netinet/tcp_fsm.h>
56 #include <netinet/udp.h>
57 
58 #if SKYWALK
59 #include <skywalk/os_skywalk_private.h>
60 #include <skywalk/nexus/flowswitch/flow/flow_var.h>
61 #include <skywalk/namespace/netns.h>
62 #endif /* SKYWALK */
63 
64 #include <stdbool.h>
65 
66 #include <os/log.h>
67 
68 #define ESP_HDR_SIZE 4
69 #define PORT_ISAKMP 500
70 #define PORT_ISAKMP_NATT 4500   /* rfc3948 */
71 
72 extern bool IOPMCopySleepWakeUUIDKey(char *buffer, size_t buf_len);
73 
74 SYSCTL_DECL(_net_link_generic_system);
75 
76 SYSCTL_NODE(_net_link_generic_system, OID_AUTO, port_used,
77     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "if port used");
78 
79 struct if_ports_used_stats if_ports_used_stats = {};
80 static int sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS;
81 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, stats,
82     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
83     sysctl_if_ports_used_stats, "S,struct if_ports_used_stats", "");
84 
85 static uuid_t current_wakeuuid;
86 SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, current_wakeuuid,
87     CTLFLAG_RD | CTLFLAG_LOCKED,
88     current_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");
89 
90 static int sysctl_net_port_info_list SYSCTL_HANDLER_ARGS;
91 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, list,
92     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
93     sysctl_net_port_info_list, "S,xnpigen", "");
94 
95 static int use_test_wakeuuid = 0;
96 static uuid_t test_wakeuuid;
97 
98 #if (DEVELOPMENT || DEBUG)
99 SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, use_test_wakeuuid,
100     CTLFLAG_RW | CTLFLAG_LOCKED,
101     &use_test_wakeuuid, 0, "");
102 
103 int sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS;
104 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, new_test_wakeuuid,
105     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
106     sysctl_new_test_wakeuuid, "S,uuid_t", "");
107 
108 int sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS;
109 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, clear_test_wakeuuid,
110     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
111     sysctl_clear_test_wakeuuid, "S,uuid_t", "");
112 
113 SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, test_wakeuuid,
114     CTLFLAG_RD | CTLFLAG_LOCKED,
115     test_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");
116 #endif /* (DEVELOPMENT || DEBUG) */
117 
118 static int sysctl_get_ports_used SYSCTL_HANDLER_ARGS;
119 SYSCTL_NODE(_net_link_generic_system, OID_AUTO, get_ports_used,
120     CTLFLAG_RD | CTLFLAG_LOCKED,
121     sysctl_get_ports_used, "");
122 
123 static int if_ports_used_verbose = 0;
124 SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, verbose,
125     CTLFLAG_RW | CTLFLAG_LOCKED,
126     &if_ports_used_verbose, 0, "");
127 
128 struct timeval wakeuuid_not_set_last_time;
129 int sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS;
130 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
131     wakeuuid_not_set_last_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
132     0, 0, sysctl_wakeuuid_not_set_last_time, "S,timeval", "");
133 
134 char wakeuuid_not_set_last_if[IFXNAMSIZ];
135 int sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS;
136 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
137     wakeuuid_not_set_last_if, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
138     0, 0, sysctl_wakeuuid_not_set_last_if, "A", "");
139 
140 struct timeval wakeuuid_last_update_time;
141 int sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS;
142 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
143     wakeuuid_last_update_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
144     0, 0, sysctl_wakeuuid_last_update_time, "S,timeval", "");
145 
146 static bool has_notified_wake_pkt = false;
147 static bool has_notified_unattributed_wake = false;
148 
149 static LCK_GRP_DECLARE(net_port_entry_head_lock_group, "net port entry lock");
150 static LCK_MTX_DECLARE(net_port_entry_head_lock, &net_port_entry_head_lock_group);
151 
152 
153 struct net_port_entry {
154 	SLIST_ENTRY(net_port_entry)     npe_list_next;
155 	TAILQ_ENTRY(net_port_entry)     npe_hash_next;
156 	struct net_port_info            npe_npi;
157 };
158 
159 static ZONE_DECLARE(net_port_entry_zone, "net_port_entry",
160     sizeof(struct net_port_entry), ZC_NONE);
161 
162 static SLIST_HEAD(net_port_entry_list, net_port_entry) net_port_entry_list =
163     SLIST_HEAD_INITIALIZER(&net_port_entry_list);
164 
165 struct timeval wakeuiid_last_check;
166 
167 
168 #if (DEBUG | DEVELOPMENT)
169 static int64_t npi_search_list_total = 0;
170 SYSCTL_QUAD(_net_link_generic_system_port_used, OID_AUTO, npi_search_list_total,
171     CTLFLAG_RD | CTLFLAG_LOCKED,
172     &npi_search_list_total, "");
173 
174 static int64_t npi_search_list_max = 0;
175 SYSCTL_QUAD(_net_link_generic_system_port_used, OID_AUTO, npi_search_list_max,
176     CTLFLAG_RD | CTLFLAG_LOCKED,
177     &npi_search_list_max, "");
178 #endif /* (DEBUG | DEVELOPMENT) */
179 
/*
 * Hashing of the net_port_entry list is based on the local port.
 *
 * The hash mask uses the least significant bits, so we must apply the mask in
 * host byte order: the LSBs have more entropy than the MSBs (most local ports
 * are in the high dynamic port range).
 */
187 #define NPE_HASH_BUCKET_COUNT 32
188 #define NPE_HASH_MASK (NPE_HASH_BUCKET_COUNT - 1)
189 #define NPE_HASH_VAL(_lport) (ntohs(_lport) & NPE_HASH_MASK)
190 #define NPE_HASH_HEAD(_lport) (&net_port_entry_hash_table[NPE_HASH_VAL(_lport)])
191 
192 static TAILQ_HEAD(net_port_entry_hash_table, net_port_entry) * net_port_entry_hash_table = NULL;
193 
194 /*
195  * Initialize IPv4 source address hash table.
196  */
197 void
if_ports_used_init(void)198 if_ports_used_init(void)
199 {
200 	if (net_port_entry_hash_table != NULL) {
201 		return;
202 	}
203 
204 	MALLOC(net_port_entry_hash_table, struct net_port_entry_hash_table *,
205 	    NPE_HASH_BUCKET_COUNT * sizeof(*net_port_entry_hash_table),
206 	    M_IFADDR, M_WAITOK | M_ZERO);
207 	if (net_port_entry_hash_table == NULL) {
208 		panic("net_port_entry_hash_table allocation failed");
209 	}
210 }
211 
/*
 * Drain every net_port_entry: unlink each entry from both the flat list and
 * its local-port hash bucket, then free it. Resets the entry count and bumps
 * the wake UUID generation so stale userland snapshots can be detected.
 *
 * Caller must hold net_port_entry_head_lock.
 */
static void
net_port_entry_list_clear(void)
{
	struct net_port_entry *npe;

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);

	while ((npe = SLIST_FIRST(&net_port_entry_list)) != NULL) {
		SLIST_REMOVE_HEAD(&net_port_entry_list, npe_list_next);
		/* Each entry is on exactly one bucket, keyed by local port */
		TAILQ_REMOVE(NPE_HASH_HEAD(npe->npe_npi.npi_local_port), npe, npe_hash_next);

		zfree(net_port_entry_zone, npe);
	}

	/* Every bucket must now be empty, or the two structures diverged */
	for (int i = 0; i < NPE_HASH_BUCKET_COUNT; i++) {
		VERIFY(TAILQ_EMPTY(&net_port_entry_hash_table[i]));
	}

	if_ports_used_stats.ifpu_npe_count = 0;
	if_ports_used_stats.ifpu_wakeuid_gen++;
}
233 
234 static bool
get_test_wake_uuid(uuid_string_t wakeuuid_str,size_t len)235 get_test_wake_uuid(uuid_string_t wakeuuid_str, size_t len)
236 {
237 	if (__improbable(use_test_wakeuuid)) {
238 		if (!uuid_is_null(test_wakeuuid)) {
239 			if (wakeuuid_str != NULL && len != 0) {
240 				uuid_unparse(test_wakeuuid, wakeuuid_str);
241 			}
242 			return true;
243 		} else {
244 			return false;
245 		}
246 	} else {
247 		return false;
248 	}
249 }
250 
251 static bool
is_wakeuuid_set(void)252 is_wakeuuid_set(void)
253 {
254 	/*
255 	 * IOPMCopySleepWakeUUIDKey() tells if SleepWakeUUID is currently set
256 	 * That means we are currently in a sleep/wake cycle
257 	 */
258 	return get_test_wake_uuid(NULL, 0) || IOPMCopySleepWakeUUIDKey(NULL, 0);
259 }
260 
/*
 * Refresh the cached wake UUID from IOKit (or the test UUID). When the UUID
 * changed — a new sleep/wake cycle started — the port list from the previous
 * cycle is cleared and the per-cycle notification latches are reset.
 *
 * ifp may be NULL (e.g. when called from the test sysctls); it is only used
 * to attribute the "wake UUID not set" diagnostics.
 */
void
if_ports_used_update_wakeuuid(struct ifnet *ifp)
{
	uuid_t wakeuuid;
	bool wakeuuid_is_set = false;
	bool updated = false;
	uuid_string_t wakeuuid_str;

	uuid_clear(wakeuuid);

	/* The test wake UUID takes precedence when enabled */
	if (__improbable(use_test_wakeuuid)) {
		wakeuuid_is_set = get_test_wake_uuid(wakeuuid_str,
		    sizeof(wakeuuid_str));
	} else {
		wakeuuid_is_set = IOPMCopySleepWakeUUIDKey(wakeuuid_str,
		    sizeof(wakeuuid_str));
	}

	if (wakeuuid_is_set) {
		if (uuid_parse(wakeuuid_str, wakeuuid) != 0) {
			os_log(OS_LOG_DEFAULT,
			    "%s: IOPMCopySleepWakeUUIDKey got bad value %s\n",
			    __func__, wakeuuid_str);
			wakeuuid_is_set = false;
		}
	}

	if (!wakeuuid_is_set) {
		/* Record the most recent attribution failure for diagnostics */
		if (ifp != NULL) {
			if (if_ports_used_verbose > 0) {
				os_log_info(OS_LOG_DEFAULT,
				    "%s: SleepWakeUUID not set, "
				    "don't update the port list for %s\n",
				    __func__, ifp != NULL ? if_name(ifp) : "");
			}
			if_ports_used_stats.ifpu_wakeuuid_not_set_count += 1;
			microtime(&wakeuuid_not_set_last_time);
			strlcpy(wakeuuid_not_set_last_if, if_name(ifp),
			    sizeof(wakeuuid_not_set_last_if));
		}
		return;
	}

	lck_mtx_lock(&net_port_entry_head_lock);
	if (uuid_compare(wakeuuid, current_wakeuuid) != 0) {
		/* New cycle: entries recorded for the old UUID are stale */
		net_port_entry_list_clear();
		uuid_copy(current_wakeuuid, wakeuuid);
		microtime(&wakeuuid_last_update_time);
		updated = true;

		/* Re-arm the one-shot wake packet notifications */
		has_notified_wake_pkt = false;
		has_notified_unattributed_wake = false;
	}
	/*
	 * Record the time last checked
	 */
	microuptime(&wakeuiid_last_check);
	lck_mtx_unlock(&net_port_entry_head_lock);

	if (updated && if_ports_used_verbose > 0) {
		uuid_string_t uuid_str;

		uuid_unparse(current_wakeuuid, uuid_str);
		os_log(OS_LOG_DEFAULT, "%s: current wakeuuid %s",
		    __func__, uuid_str);
	}
}
328 
329 static bool
net_port_info_equal(const struct net_port_info * x,const struct net_port_info * y)330 net_port_info_equal(const struct net_port_info *x,
331     const struct net_port_info *y)
332 {
333 	ASSERT(x != NULL && y != NULL);
334 
335 	if (x->npi_if_index == y->npi_if_index &&
336 	    x->npi_local_port == y->npi_local_port &&
337 	    x->npi_foreign_port == y->npi_foreign_port &&
338 	    x->npi_owner_pid == y->npi_owner_pid &&
339 	    x->npi_effective_pid == y->npi_effective_pid &&
340 	    x->npi_flags == y->npi_flags &&
341 	    memcmp(&x->npi_local_addr_, &y->npi_local_addr_,
342 	    sizeof(union in_addr_4_6)) == 0 &&
343 	    memcmp(&x->npi_foreign_addr_, &y->npi_foreign_addr_,
344 	    sizeof(union in_addr_4_6)) == 0) {
345 		return true;
346 	}
347 	return false;
348 }
349 
/*
 * Check whether an identical net_port_info is already recorded, walking only
 * the hash bucket for its local port. Also maintains search-length stats so
 * hash distribution can be observed via sysctl.
 *
 * Caller must hold net_port_entry_head_lock.
 */
static bool
net_port_info_has_entry(const struct net_port_info *npi)
{
	struct net_port_entry *npe;
	bool found = false;
	int32_t count = 0;

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);

	TAILQ_FOREACH(npe, NPE_HASH_HEAD(npi->npi_local_port), npe_hash_next) {
		count += 1;
		if (net_port_info_equal(&npe->npe_npi, npi)) {
			found = true;
			break;
		}
	}
	/* Track total and worst-case bucket walk lengths */
	if_ports_used_stats.ifpu_npi_hash_search_total += count;
	if (count > if_ports_used_stats.ifpu_npi_hash_search_max) {
		if_ports_used_stats.ifpu_npi_hash_search_max = count;
	}

	return found;
}
373 
374 static bool
net_port_info_add_entry(const struct net_port_info * npi)375 net_port_info_add_entry(const struct net_port_info *npi)
376 {
377 	struct net_port_entry   *npe = NULL;
378 	uint32_t num = 0;
379 	bool entry_added = false;
380 
381 	ASSERT(npi != NULL);
382 
383 	if (__improbable(is_wakeuuid_set() == false)) {
384 		if_ports_used_stats.ifpu_npi_not_added_no_wakeuuid++;
385 		if (if_ports_used_verbose > 0) {
386 			os_log(OS_LOG_DEFAULT, "%s: wakeuuid not set not adding "
387 			    "port: %u flags: 0x%xif: %u pid: %u epid %u",
388 			    __func__,
389 			    ntohs(npi->npi_local_port),
390 			    npi->npi_flags,
391 			    npi->npi_if_index,
392 			    npi->npi_owner_pid,
393 			    npi->npi_effective_pid);
394 		}
395 		return false;
396 	}
397 
398 	npe = zalloc_flags(net_port_entry_zone, Z_WAITOK | Z_ZERO);
399 	if (__improbable(npe == NULL)) {
400 		os_log(OS_LOG_DEFAULT, "%s: zalloc() failed for "
401 		    "port: %u flags: 0x%x if: %u pid: %u epid %u",
402 		    __func__,
403 		    ntohs(npi->npi_local_port),
404 		    npi->npi_flags,
405 		    npi->npi_if_index,
406 		    npi->npi_owner_pid,
407 		    npi->npi_effective_pid);
408 		return false;
409 	}
410 
411 	memcpy(&npe->npe_npi, npi, sizeof(npe->npe_npi));
412 
413 	lck_mtx_lock(&net_port_entry_head_lock);
414 
415 	if (net_port_info_has_entry(npi) == false) {
416 		SLIST_INSERT_HEAD(&net_port_entry_list, npe, npe_list_next);
417 		TAILQ_INSERT_HEAD(NPE_HASH_HEAD(npi->npi_local_port), npe, npe_hash_next);
418 		num = (uint32_t)if_ports_used_stats.ifpu_npe_count++; /* rollover OK */
419 		entry_added = true;
420 
421 		if (if_ports_used_stats.ifpu_npe_count > if_ports_used_stats.ifpu_npe_max) {
422 			if_ports_used_stats.ifpu_npe_max = if_ports_used_stats.ifpu_npe_count;
423 		}
424 		if_ports_used_stats.ifpu_npe_total++;
425 
426 		if (if_ports_used_verbose > 1) {
427 			os_log(OS_LOG_DEFAULT, "%s: num %u for "
428 			    "port: %u flags: 0x%x if: %u pid: %u epid %u",
429 			    __func__,
430 			    num,
431 			    ntohs(npi->npi_local_port),
432 			    npi->npi_flags,
433 			    npi->npi_if_index,
434 			    npi->npi_owner_pid,
435 			    npi->npi_effective_pid);
436 		}
437 	} else {
438 		if_ports_used_stats.ifpu_npe_dup++;
439 		if (if_ports_used_verbose > 2) {
440 			os_log(OS_LOG_DEFAULT, "%s: already added "
441 			    "port: %u flags: 0x%x if: %u pid: %u epid %u",
442 			    __func__,
443 			    ntohs(npi->npi_local_port),
444 			    npi->npi_flags,
445 			    npi->npi_if_index,
446 			    npi->npi_owner_pid,
447 			    npi->npi_effective_pid);
448 		}
449 	}
450 
451 	lck_mtx_unlock(&net_port_entry_head_lock);
452 
453 	if (entry_added == false) {
454 		zfree(net_port_entry_zone, npe);
455 	}
456 	return entry_added;
457 }
458 
459 #if (DEVELOPMENT || DEBUG)
460 int
461 sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS
462 {
463 #pragma unused(oidp, arg1, arg2)
464 	int error = 0;
465 
466 	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
467 		return EPERM;
468 	}
469 	if (req->oldptr == USER_ADDR_NULL) {
470 		req->oldidx = sizeof(uuid_t);
471 		return 0;
472 	}
473 	if (req->newptr != USER_ADDR_NULL) {
474 		uuid_generate(test_wakeuuid);
475 		if_ports_used_update_wakeuuid(NULL);
476 	}
477 	error = SYSCTL_OUT(req, test_wakeuuid,
478 	    MIN(sizeof(uuid_t), req->oldlen));
479 
480 	return error;
481 }
482 
483 int
484 sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS
485 {
486 #pragma unused(oidp, arg1, arg2)
487 	int error = 0;
488 
489 	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
490 		return EPERM;
491 	}
492 	if (req->oldptr == USER_ADDR_NULL) {
493 		req->oldidx = sizeof(uuid_t);
494 		return 0;
495 	}
496 	if (req->newptr != USER_ADDR_NULL) {
497 		uuid_clear(test_wakeuuid);
498 		if_ports_used_update_wakeuuid(NULL);
499 	}
500 	error = SYSCTL_OUT(req, test_wakeuuid,
501 	    MIN(sizeof(uuid_t), req->oldlen));
502 
503 	return error;
504 }
505 
506 #endif /* (DEVELOPMENT || DEBUG) */
507 
508 static int
sysctl_timeval(struct sysctl_req * req,const struct timeval * tv)509 sysctl_timeval(struct sysctl_req *req, const struct timeval *tv)
510 {
511 	if (proc_is64bit(req->p)) {
512 		struct user64_timeval tv64 = {};
513 
514 		tv64.tv_sec = tv->tv_sec;
515 		tv64.tv_usec = tv->tv_usec;
516 		return SYSCTL_OUT(req, &tv64, sizeof(tv64));
517 	} else {
518 		struct user32_timeval tv32 = {};
519 
520 		tv32.tv_sec = (user32_time_t)tv->tv_sec;
521 		tv32.tv_usec = tv->tv_usec;
522 		return SYSCTL_OUT(req, &tv32, sizeof(tv32));
523 	}
524 }
525 
/* Sysctl: time the wake UUID last changed (ABI-aware timeval copy-out). */
int
sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return sysctl_timeval(req, &wakeuuid_last_update_time);
}
533 
/* Sysctl: time of the last update attempt that found no wake UUID set. */
int
sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return sysctl_timeval(req, &wakeuuid_not_set_last_time);
}
541 
/*
 * Sysctl: name of the interface seen when the wake UUID was last found unset.
 * Copies out the string including its NUL terminator.
 */
int
sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return SYSCTL_OUT(req, &wakeuuid_not_set_last_if, strlen(wakeuuid_not_set_last_if) + 1);
}
549 
550 int
551 sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS
552 {
553 #pragma unused(oidp, arg1, arg2)
554 	size_t len = sizeof(struct if_ports_used_stats);
555 
556 	if (req->oldptr != 0) {
557 		len = MIN(req->oldlen, sizeof(struct if_ports_used_stats));
558 	}
559 	return SYSCTL_OUT(req, &if_ports_used_stats, len);
560 }
561 
/*
 * Sysctl: dump the recorded net_port_info entries, preceded by an xnpigen
 * header carrying the generation count, wake UUID and record geometry.
 * Requires the privileged network statistics entitlement.
 */
static int
sysctl_net_port_info_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	struct xnpigen xnpigen;
	struct net_port_entry *npe;

	if ((error = priv_check_cred(kauth_cred_get(),
	    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0)) != 0) {
		return EPERM;
	}
	lck_mtx_lock(&net_port_entry_head_lock);

	if (req->oldptr == USER_ADDR_NULL) {
		/*
		 * Size probe: pad the estimate by cnt/16 (~6%) in case the
		 * list grows before the follow-up read.
		 * NOTE(review): comment used to claim a 25% cushion but the
		 * code shifts by 4 — confirm the intended margin.
		 */
		size_t cnt = (size_t)if_ports_used_stats.ifpu_npe_count;
		cnt += cnt >> 4;
		req->oldidx = sizeof(struct xnpigen) +
		    cnt * sizeof(struct net_port_info);
		goto done;
	}

	memset(&xnpigen, 0, sizeof(struct xnpigen));
	xnpigen.xng_len = sizeof(struct xnpigen);
	xnpigen.xng_gen = (uint32_t)if_ports_used_stats.ifpu_wakeuid_gen;
	uuid_copy(xnpigen.xng_wakeuuid, current_wakeuuid);
	xnpigen.xng_npi_count = (uint32_t)if_ports_used_stats.ifpu_npe_count;
	xnpigen.xng_npi_size = sizeof(struct net_port_info);
	error = SYSCTL_OUT(req, &xnpigen, sizeof(xnpigen));
	if (error != 0) {
		printf("%s: SYSCTL_OUT(xnpigen) error %d\n",
		    __func__, error);
		goto done;
	}

	/* One fixed-size record per entry, in list order */
	SLIST_FOREACH(npe, &net_port_entry_list, npe_list_next) {
		error = SYSCTL_OUT(req, &npe->npe_npi,
		    sizeof(struct net_port_info));
		if (error != 0) {
			printf("%s: SYSCTL_OUT(npi) error %d\n",
			    __func__, error);
			goto done;
		}
	}
done:
	lck_mtx_unlock(&net_port_entry_head_lock);

	return error;
}
612 
613 /*
614  * Mirror the arguments of ifnet_get_local_ports_extended()
615  *  ifindex
616  *  protocol
617  *  flags
618  */
/*
 * Sysctl: return the bitmap of local ports in use, as computed by
 * ifnet_get_local_ports_extended(). Read-only; takes three integer
 * parameters (ifindex, protocol, flags) in the OID name.
 */
static int
sysctl_get_ports_used SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int *name = (int *)arg1;
	int namelen = arg2;
	int error = 0;
	int idx;
	protocol_family_t protocol;
	u_int32_t flags;
	ifnet_t ifp = NULL;
	u_int8_t *bitfield = NULL;

	/* Read-only: reject any attempt to write */
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	/*
	 * 3 is the required number of parameters: ifindex, protocol and flags
	 */
	if (namelen != 3) {
		error = ENOENT;
		goto done;
	}

	/* Size probe: report the bitmap size */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = bitstr_size(IP_PORTRANGE_SIZE);
		goto done;
	}
	if (req->oldlen < bitstr_size(IP_PORTRANGE_SIZE)) {
		error = ENOMEM;
		goto done;
	}
	bitfield = (u_int8_t *) kalloc_data(bitstr_size(IP_PORTRANGE_SIZE),
	    Z_WAITOK | Z_ZERO);
	if (bitfield == NULL) {
		error = ENOMEM;
		goto done;
	}

	idx = name[0];
	protocol = name[1];
	flags = name[2];

	/*
	 * ifp stays NULL for an out-of-range index.
	 * NOTE(review): presumably ifnet_get_local_ports_extended() treats a
	 * NULL ifp as "all interfaces" — confirm against its implementation.
	 */
	ifnet_head_lock_shared();
	if (IF_INDEX_IN_RANGE(idx)) {
		ifp = ifindex2ifnet[idx];
	}
	ifnet_head_done();

	error = ifnet_get_local_ports_extended(ifp, protocol, flags, bitfield);
	if (error != 0) {
		printf("%s: ifnet_get_local_ports_extended() error %d\n",
		    __func__, error);
		goto done;
	}
	error = SYSCTL_OUT(req, bitfield, bitstr_size(IP_PORTRANGE_SIZE));
done:
	if (bitfield != NULL) {
		kfree_data(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
	}
	return error;
}
682 
/*
 * Build a net_port_info record from a socket PCB and record it in the
 * wake-port list. Returns true when a new entry was added; false for
 * oversized ifindex, unsupported protocol, or a duplicate entry.
 */
__private_extern__ bool
if_ports_used_add_inpcb(const uint32_t ifindex, const struct inpcb *inp)
{
	struct net_port_info npi = {};
	struct socket *so = inp->inp_socket;

	/* This is unlikely to happen but better be safe than sorry */
	if (ifindex > UINT16_MAX) {
		os_log(OS_LOG_DEFAULT, "%s: ifindex %u too big", __func__, ifindex);
		return false;
	}

	/* Prefer the caller-supplied index; fall back to the last output ifp */
	if (ifindex != 0) {
		npi.npi_if_index = (uint16_t)ifindex;
	} else if (inp->inp_last_outifp != NULL) {
		npi.npi_if_index = (uint16_t)inp->inp_last_outifp->if_index;
	}
	/* Flag companion-link interfaces so matching can cross them */
	if (IF_INDEX_IN_RANGE(npi.npi_if_index)) {
		struct ifnet *ifp = ifindex2ifnet[npi.npi_if_index];
		if (ifp != NULL && IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
	}

	npi.npi_flags |= NPIF_SOCKET;

	/* Timestamp with the last wake UUID check, not wall-clock now */
	npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
	npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

	if (so->so_options & SO_NOWAKEFROMSLEEP) {
		npi.npi_flags |= NPIF_NOWAKE;
	}

	if (SOCK_PROTO(so) == IPPROTO_TCP) {
		struct tcpcb *tp = intotcpcb(inp);

		npi.npi_flags |= NPIF_TCP;
		if (tp != NULL && tp->t_state == TCPS_LISTEN) {
			npi.npi_flags |= NPIF_LISTEN;
		}
	} else if (SOCK_PROTO(so) == IPPROTO_UDP) {
		npi.npi_flags |= NPIF_UDP;
	} else {
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol %u for inp %p", __func__,
		    SOCK_PROTO(inp->inp_socket), inp);
		return false;
	}

	uuid_copy(npi.npi_flow_uuid, inp->necp_client_uuid);

	npi.npi_local_port = inp->inp_lport;
	npi.npi_foreign_port = inp->inp_fport;

	/*
	 * Take in account IPv4 addresses mapped on IPv6
	 */
	if ((inp->inp_vflag & INP_IPV6) != 0 && (inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
	    (inp->inp_vflag & (INP_IPV6 | INP_IPV4)) == (INP_IPV6 | INP_IPV4)) {
		npi.npi_flags |= NPIF_IPV6 | NPIF_IPV4;
		/*
		 * NOTE(review): only the local address is copied in this
		 * dual-stack branch; the foreign address is left zero —
		 * confirm this is intentional.
		 */
		memcpy(&npi.npi_local_addr_in6,
		    &inp->in6p_laddr, sizeof(struct in6_addr));
	} else if (inp->inp_vflag & INP_IPV4) {
		npi.npi_flags |= NPIF_IPV4;
		npi.npi_local_addr_in = inp->inp_laddr;
		npi.npi_foreign_addr_in = inp->inp_faddr;
	} else {
		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6,
		    &inp->in6p_laddr, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6,
		    &inp->in6p_faddr, sizeof(struct in6_addr));

		/* Clear the embedded scope ID */
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
			npi.npi_local_addr_in6.s6_addr16[1] = 0;
		}
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
			npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
		}
	}

	npi.npi_owner_pid = so->last_pid;

	if (so->last_pid != 0) {
		proc_name(so->last_pid, npi.npi_owner_pname,
		    sizeof(npi.npi_owner_pname));
		uuid_copy(npi.npi_owner_uuid, so->last_uuid);
	}

	/* For delegated sockets the effective identity differs from the owner */
	if (so->so_flags & SOF_DELEGATED) {
		npi.npi_flags |= NPIF_DELEGATED;
		npi.npi_effective_pid = so->e_pid;
		if (so->e_pid != 0) {
			proc_name(so->e_pid, npi.npi_effective_pname,
			    sizeof(npi.npi_effective_pname));
		}
		uuid_copy(npi.npi_effective_uuid, so->e_uuid);
	} else {
		npi.npi_effective_pid = so->last_pid;
		if (so->last_pid != 0) {
			strlcpy(npi.npi_effective_pname, npi.npi_owner_pname,
			    sizeof(npi.npi_effective_pname));
		}
		uuid_copy(npi.npi_effective_uuid, so->last_uuid);
	}

	return net_port_info_add_entry(&npi);
}
791 
792 #if SKYWALK
/*
 * Build a net_port_info record from a Skywalk flow entry and its netns flow
 * info, then record it in the wake-port list. Returns true when a new entry
 * was added; false for oversized ifindex, unsupported protocol, or duplicate.
 */
__private_extern__ bool
if_ports_used_add_flow_entry(const struct flow_entry *fe, const uint32_t ifindex,
    const struct ns_flow_info *nfi, uint32_t ns_flags)
{
	struct net_port_info npi = {};

	/* This is unlikely to happen but better be safe than sorry */
	if (ifindex > UINT16_MAX) {
		os_log(OS_LOG_DEFAULT, "%s: ifindex %u too big", __func__, ifindex);
		return false;
	}
	npi.npi_if_index = (uint16_t)ifindex;
	/* Flag companion-link interfaces so matching can cross them */
	if (IF_INDEX_IN_RANGE(ifindex)) {
		struct ifnet *ifp = ifindex2ifnet[ifindex];
		if (ifp != NULL && IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
	}

	npi.npi_flags |= NPIF_CHANNEL;

	/* Timestamp with the last wake UUID check, not wall-clock now */
	npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
	npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

	if (ns_flags & NETNS_NOWAKEFROMSLEEP) {
		npi.npi_flags |= NPIF_NOWAKE;
	}
	if ((ns_flags & NETNS_OWNER_MASK) == NETNS_LISTENER) {
		npi.npi_flags |= NPIF_LISTEN;
	}

	uuid_copy(npi.npi_flow_uuid, nfi->nfi_flow_uuid);

	if (nfi->nfi_protocol == IPPROTO_TCP) {
		npi.npi_flags |= NPIF_TCP;
	} else if (nfi->nfi_protocol == IPPROTO_UDP) {
		npi.npi_flags |= NPIF_UDP;
	} else {
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol %u for nfi %p",
		    __func__, nfi->nfi_protocol, nfi);
		return false;
	}

	if (nfi->nfi_laddr.sa.sa_family == AF_INET) {
		npi.npi_flags |= NPIF_IPV4;

		npi.npi_local_port = nfi->nfi_laddr.sin.sin_port;
		npi.npi_foreign_port = nfi->nfi_faddr.sin.sin_port;

		npi.npi_local_addr_in = nfi->nfi_laddr.sin.sin_addr;
		npi.npi_foreign_addr_in = nfi->nfi_faddr.sin.sin_addr;
	} else {
		npi.npi_flags |= NPIF_IPV6;

		npi.npi_local_port = nfi->nfi_laddr.sin6.sin6_port;
		npi.npi_foreign_port = nfi->nfi_faddr.sin6.sin6_port;

		memcpy(&npi.npi_local_addr_in6,
		    &nfi->nfi_laddr.sin6.sin6_addr, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6,
		    &nfi->nfi_faddr.sin6.sin6_addr, sizeof(struct in6_addr));

		/* Clear the embedded scope ID */
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
			npi.npi_local_addr_in6.s6_addr16[1] = 0;
		}
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
			npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
		}
	}

	npi.npi_owner_pid = nfi->nfi_owner_pid;
	strlcpy(npi.npi_owner_pname, nfi->nfi_owner_name,
	    sizeof(npi.npi_owner_pname));

	/*
	 * Get the proc UUID from the pid as the proc UUID is not present
	 * in the flow_entry
	 */
	proc_t proc = proc_find(npi.npi_owner_pid);
	if (proc != PROC_NULL) {
		proc_getexecutableuuid(proc, npi.npi_owner_uuid, sizeof(npi.npi_owner_uuid));
		proc_rele(proc);
	}
	/* -1 means no distinct effective pid: fall back to the owner identity */
	if (nfi->nfi_effective_pid != -1) {
		npi.npi_effective_pid = nfi->nfi_effective_pid;
		strlcpy(npi.npi_effective_pname, nfi->nfi_effective_name,
		    sizeof(npi.npi_effective_pname));
		uuid_copy(npi.npi_effective_uuid, fe->fe_eproc_uuid);
	} else {
		npi.npi_effective_pid = npi.npi_owner_pid;
		strlcpy(npi.npi_effective_pname, npi.npi_owner_pname,
		    sizeof(npi.npi_effective_pname));
		uuid_copy(npi.npi_effective_uuid, npi.npi_owner_uuid);
	}

	return net_port_info_add_entry(&npi);
}
891 
892 #endif /* SKYWALK */
893 
894 static void
net_port_info_log_npi(const char * s,const struct net_port_info * npi)895 net_port_info_log_npi(const char *s, const struct net_port_info *npi)
896 {
897 	char lbuf[MAX_IPv6_STR_LEN] = {};
898 	char fbuf[MAX_IPv6_STR_LEN] = {};
899 
900 	if (npi->npi_flags & NPIF_IPV4) {
901 		inet_ntop(PF_INET, &npi->npi_local_addr_in.s_addr,
902 		    lbuf, sizeof(lbuf));
903 		inet_ntop(PF_INET, &npi->npi_foreign_addr_in.s_addr,
904 		    fbuf, sizeof(fbuf));
905 	} else if (npi->npi_flags & NPIF_IPV6) {
906 		inet_ntop(PF_INET6, &npi->npi_local_addr_in6,
907 		    lbuf, sizeof(lbuf));
908 		inet_ntop(PF_INET6, &npi->npi_foreign_addr_in6,
909 		    fbuf, sizeof(fbuf));
910 	}
911 	os_log(OS_LOG_DEFAULT, "%s net_port_info if_index %u arch %s family %s proto %s local %s:%u foreign %s:%u pid: %u epid %u",
912 	    s != NULL ? s : "",
913 	    npi->npi_if_index,
914 	    (npi->npi_flags & NPIF_SOCKET) ? "so" : (npi->npi_flags & NPIF_CHANNEL) ? "ch" : "unknown",
915 	    (npi->npi_flags & NPIF_IPV4) ? "ipv4" : (npi->npi_flags & NPIF_IPV6) ? "ipv6" : "unknown",
916 	    npi->npi_flags & NPIF_TCP ? "tcp" : npi->npi_flags & NPIF_UDP ? "udp" :
917 	    npi->npi_flags & NPIF_ESP ? "esp" : "unknown",
918 	    lbuf, ntohs(npi->npi_local_port),
919 	    fbuf, ntohs(npi->npi_foreign_port),
920 	    npi->npi_owner_pid,
921 	    npi->npi_effective_pid);
922 }
923 
924 #define NPI_MATCH_IPV4 (NPIF_IPV4 | NPIF_TCP | NPIF_UDP)
925 #define NPI_MATCH_IPV6 (NPIF_IPV6 | NPIF_TCP | NPIF_UDP)
926 
927 static bool
net_port_info_match_npi(struct net_port_entry * npe,const struct net_port_info * in_npi,struct net_port_entry ** best_match)928 net_port_info_match_npi(struct net_port_entry *npe, const struct net_port_info *in_npi,
929     struct net_port_entry **best_match)
930 {
931 	if (__improbable(net_wake_pkt_debug > 1)) {
932 		net_port_info_log_npi("  ", &npe->npe_npi);
933 	}
934 
935 	/*
936 	 * The interfaces must match or be both companion link
937 	 */
938 	if (npe->npe_npi.npi_if_index != in_npi->npi_if_index &&
939 	    !((npe->npe_npi.npi_flags & NPIF_COMPLINK) && (in_npi->npi_flags & NPIF_COMPLINK))) {
940 		return false;
941 	}
942 
943 	/*
944 	 * The local ports and protocols must match
945 	 */
946 	if (npe->npe_npi.npi_local_port != in_npi->npi_local_port ||
947 	    ((npe->npe_npi.npi_flags & NPI_MATCH_IPV4) != (in_npi->npi_flags & NPI_MATCH_IPV4) &&
948 	    (npe->npe_npi.npi_flags & NPI_MATCH_IPV6) != (in_npi->npi_flags & NPI_MATCH_IPV6))) {
949 		return false;
950 	}
951 	/*
952 	 * Search stops on an exact match
953 	 */
954 	if (npe->npe_npi.npi_foreign_port == in_npi->npi_foreign_port) {
955 		if ((npe->npe_npi.npi_flags & NPIF_IPV4) && (npe->npe_npi.npi_flags & NPIF_IPV4)) {
956 			if (in_npi->npi_local_addr_in.s_addr == npe->npe_npi.npi_local_addr_in.s_addr &&
957 			    in_npi->npi_foreign_addr_in.s_addr == npe->npe_npi.npi_foreign_addr_in.s_addr) {
958 				*best_match = npe;
959 				return true;
960 			}
961 		}
962 		if ((npe->npe_npi.npi_flags & NPIF_IPV6) && (npe->npe_npi.npi_flags & NPIF_IPV6)) {
963 			if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
964 			    sizeof(union in_addr_4_6)) == 0 &&
965 			    memcmp(&npe->npe_npi.npi_foreign_addr_, &in_npi->npi_foreign_addr_,
966 			    sizeof(union in_addr_4_6)) == 0) {
967 				*best_match = npe;
968 				return true;
969 			}
970 		}
971 	}
972 	/*
973 	 * Skip connected entries as we are looking for a wildcard match
974 	 * on the local address and port
975 	 */
976 	if (npe->npe_npi.npi_foreign_port != 0) {
977 		return false;
978 	}
979 	/*
980 	 * The local address matches: this is our 2nd best match
981 	 */
982 	if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
983 	    sizeof(union in_addr_4_6)) == 0) {
984 		*best_match = npe;
985 		return false;
986 	}
987 	/*
988 	 * Only the local port matches, do not override a match
989 	 * on the local address
990 	 */
991 	if (*best_match == NULL) {
992 		*best_match = npe;
993 	}
994 	return false;
995 }
996 
997 /*
998  *
999  */
1000 static bool
net_port_info_find_match(struct net_port_info * in_npi)1001 net_port_info_find_match(struct net_port_info *in_npi)
1002 {
1003 	struct net_port_entry *npe;
1004 	struct net_port_entry *best_match = NULL;
1005 
1006 	lck_mtx_lock(&net_port_entry_head_lock);
1007 
1008 	uint32_t count = 0;
1009 	TAILQ_FOREACH(npe, NPE_HASH_HEAD(in_npi->npi_local_port), npe_hash_next) {
1010 		count += 1;
1011 		if (net_port_info_match_npi(npe, in_npi, &best_match)) {
1012 			break;
1013 		}
1014 	}
1015 
1016 	if (best_match != NULL) {
1017 		best_match->npe_npi.npi_flags |= NPIF_WAKEPKT;
1018 		in_npi->npi_owner_pid = best_match->npe_npi.npi_owner_pid;
1019 		in_npi->npi_effective_pid = best_match->npe_npi.npi_effective_pid;
1020 		strlcpy(in_npi->npi_owner_pname, best_match->npe_npi.npi_owner_pname,
1021 		    sizeof(in_npi->npi_owner_pname));
1022 		strlcpy(in_npi->npi_effective_pname, best_match->npe_npi.npi_effective_pname,
1023 		    sizeof(in_npi->npi_effective_pname));
1024 		uuid_copy(in_npi->npi_owner_uuid, best_match->npe_npi.npi_owner_uuid);
1025 		uuid_copy(in_npi->npi_effective_uuid, best_match->npe_npi.npi_effective_uuid);
1026 	}
1027 	lck_mtx_unlock(&net_port_entry_head_lock);
1028 
1029 	if (__improbable(net_wake_pkt_debug > 0)) {
1030 		if (best_match != NULL) {
1031 			net_port_info_log_npi("wake packet match", in_npi);
1032 		} else {
1033 			net_port_info_log_npi("wake packet no match", in_npi);
1034 		}
1035 	}
1036 
1037 	return best_match != NULL ? true : false;
1038 }
1039 
/*
 * if_notify_unattributed_wake_mbuf
 *
 * Post a KEV_POWER_UNATTRIBUTED_WAKE kernel event for a wake packet
 * (mbuf path) that could not be attributed to any tracked process.
 * The event carries the wake UUID, interface, the npi tuple and up to
 * NPI_MAX_UNA_WAKE_PKT_LEN bytes of the packet itself.
 *
 * Only the first unattributed wake per wake cycle is reported;
 * duplicates only bump ifpu_dup_unattributed_wake_event.
 */
static void
if_notify_unattributed_wake_mbuf(struct ifnet *ifp, struct mbuf *m,
    struct net_port_info *npi)
{
	struct kev_msg ev_msg = {};

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);

	/* Test-and-set of the once-per-wake flag is done under the lock */
	lck_mtx_lock(&net_port_entry_head_lock);
	if (has_notified_unattributed_wake) {
		lck_mtx_unlock(&net_port_entry_head_lock);
		if_ports_used_stats.ifpu_dup_unattributed_wake_event += 1;

		if (__improbable(net_wake_pkt_debug > 0)) {
			net_port_info_log_npi("already notified unattributed wake packet", npi);
		}
		return;
	}
	has_notified_unattributed_wake = true;
	lck_mtx_unlock(&net_port_entry_head_lock);

	if_ports_used_stats.ifpu_unattributed_wake_event += 1;

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
	ev_msg.event_code  = KEV_POWER_UNATTRIBUTED_WAKE;

	struct net_port_info_una_wake_event event_data = {};
	uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
	/* ifp may be NULL on this path; report if_index 0 in that case */
	event_data.una_wake_pkt_if_index = ifp != NULL ? ifp->if_index : 0;
	event_data.una_wake_pkt_flags = npi->npi_flags;

	event_data.una_wake_pkt_local_port = npi->npi_local_port;
	event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
	event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
	event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;

	if (ifp != NULL) {
		strlcpy(event_data.una_wake_pkt_ifname, ifp->if_xname,
		    sizeof(event_data.una_wake_pkt_ifname));
	} else {
		if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
	}

	/* Copy at most NPI_MAX_UNA_WAKE_PKT_LEN bytes of the packet */
	event_data.una_wake_ptk_len = m->m_pkthdr.len > NPI_MAX_UNA_WAKE_PKT_LEN ?
	    NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t)m->m_pkthdr.len;

	errno_t error = mbuf_copydata(m, 0, event_data.una_wake_ptk_len,
	    (void *)event_data.una_wake_pkt);
	if (error != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: mbuf_copydata() failed with error %d for wake uuid %s",
		    __func__, error, wake_uuid_str);

		/*
		 * NOTE(review): on this error path has_notified_unattributed_wake
		 * stays set, so no event at all is posted for this wake cycle --
		 * confirm this is intentional.
		 */
		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
		return;
	}

	ev_msg.dv[0].data_ptr = &event_data;
	ev_msg.dv[0].data_length = sizeof(event_data);

	int result = kev_post_msg(&ev_msg);
	if (result != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
		    __func__, result, wake_uuid_str);

		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
	}
}
1117 
/*
 * if_notify_wake_packet
 *
 * Post a KEV_POWER_WAKE_PACKET kernel event for a wake packet that was
 * successfully attributed (npi already carries the owner/effective pid,
 * process names and UUIDs filled in by net_port_info_find_match()).
 *
 * Only the first attributed wake per wake cycle is reported;
 * duplicates only bump ifpu_dup_wake_pkt_event.
 */
static void
if_notify_wake_packet(struct ifnet *ifp, struct net_port_info *npi)
{
	struct kev_msg ev_msg = {};

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);

	/* Test-and-set of the once-per-wake flag is done under the lock */
	lck_mtx_lock(&net_port_entry_head_lock);
	if (has_notified_wake_pkt) {
		lck_mtx_unlock(&net_port_entry_head_lock);
		if_ports_used_stats.ifpu_dup_wake_pkt_event += 1;

		if (__improbable(net_wake_pkt_debug > 0)) {
			net_port_info_log_npi("already notified wake packet", npi);
		}
		return;
	}
	has_notified_wake_pkt = true;
	lck_mtx_unlock(&net_port_entry_head_lock);

	if_ports_used_stats.ifpu_wake_pkt_event += 1;

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
	ev_msg.event_code  = KEV_POWER_WAKE_PACKET;

	struct net_port_info_wake_event event_data = {};

	/* Unlike the unattributed path, ifp is never NULL here */
	uuid_copy(event_data.wake_uuid, current_wakeuuid);
	event_data.wake_pkt_if_index = ifp->if_index;
	event_data.wake_pkt_port = npi->npi_local_port;
	event_data.wake_pkt_flags = npi->npi_flags;
	event_data.wake_pkt_owner_pid = npi->npi_owner_pid;
	event_data.wake_pkt_effective_pid = npi->npi_effective_pid;
	strlcpy(event_data.wake_pkt_owner_pname, npi->npi_owner_pname,
	    sizeof(event_data.wake_pkt_owner_pname));
	strlcpy(event_data.wake_pkt_effective_pname, npi->npi_effective_pname,
	    sizeof(event_data.wake_pkt_effective_pname));
	uuid_copy(event_data.wake_pkt_owner_uuid, npi->npi_owner_uuid);
	uuid_copy(event_data.wake_pkt_effective_uuid, npi->npi_effective_uuid);

	event_data.wake_pkt_foreign_port = npi->npi_foreign_port;
	event_data.wake_pkt_local_addr_ = npi->npi_local_addr_;
	event_data.wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
	strlcpy(event_data.wake_pkt_ifname, ifp->if_xname, sizeof(event_data.wake_pkt_ifname));

	ev_msg.dv[0].data_ptr = &event_data;
	ev_msg.dv[0].data_length = sizeof(event_data);

	int result = kev_post_msg(&ev_msg);
	if (result != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
		    __func__, result, wake_uuid_str);

		if_ports_used_stats.ifpu_wake_pkt_event_error += 1;
	}
}
1180 
/*
 * is_encapsulated_esp
 *
 * Classify the UDP payload starting at data_offset of a packet received
 * on the ISAKMP NAT-T port (4500).  Returns true when the payload is
 * UDP-encapsulated ESP, false for NAT-T keep-alives and IKE traffic.
 */
static bool
is_encapsulated_esp(struct mbuf *m, size_t data_offset)
{
	/*
	 * There are three cases:
	 * - Keep alive: 1 byte payload
	 * - IKE: payload starts with 4 bytes set to zero (non-ESP marker)
	 *   before the ISAKMP header
	 * - otherwise it's ESP
	 */
	ASSERT(m->m_pkthdr.len >= data_offset);

	size_t data_len = m->m_pkthdr.len - data_offset;
	if (data_len == 1) {
		/* NAT-T keep-alive */
		return false;
	} else if (data_len > ESP_HDR_SIZE) {
		uint8_t payload[ESP_HDR_SIZE];

		errno_t error = mbuf_copydata(m, data_offset, ESP_HDR_SIZE, &payload);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ESP_HDR_SIZE) error %d",
			    __func__, error);
		} else if (payload[0] == 0 && payload[1] == 0 &&
		    payload[2] == 0 && payload[3] == 0) {
			/* Non-ESP marker: this is IKE */
			return false;
		}
	}
	/* Default (including short payloads and copy errors): treat as ESP */
	return true;
}
1209 
/*
 * if_ports_used_match_mbuf
 *
 * Attribute a wake packet received as an mbuf: parse the IPv4/IPv6 and
 * TCP/UDP headers into a net_port_info, look it up in the tracked port
 * list, and post either an attributed (if_notify_wake_packet) or an
 * unattributed (if_notify_unattributed_wake_mbuf) kernel event.
 *
 * ESP packets and ISAKMP NAT-T packets carrying encapsulated ESP are
 * not matched here: matching is deferred to the ESP layer (plain
 * return, no event).  Non-first fragments cannot be matched because
 * they lack the transport header and are reported as unattributed.
 *
 * The caller must have set PKTF_WAKE_PKT on the mbuf.
 */
void
if_ports_used_match_mbuf(struct ifnet *ifp, protocol_family_t proto_family, struct mbuf *m)
{
	errno_t error;
	struct net_port_info npi = {};
	bool found = false;

	if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == 0) {
		if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
		os_log_error(OS_LOG_DEFAULT, "%s: called PKTF_WAKE_PKT not set from %s",
		    __func__, ifp != NULL ? ifp->if_xname : "");
		return;
	}
	if (ifp == NULL) {
		goto failed;
	}

	if_ports_used_stats.ifpu_so_match_wake_pkt += 1;

	npi.npi_if_index = ifp->if_index;
	if (IFNET_IS_COMPANION_LINK(ifp)) {
		npi.npi_flags |= NPIF_COMPLINK;
	}
	npi.npi_flags |= NPIF_SOCKET; /* For logging */
	if (proto_family == PF_INET) {
		struct ip iphdr = {};

		if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;

		error = mbuf_copydata(m, 0, sizeof(struct ip), &iphdr);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip) error %d",
			    __func__, error);
			goto failed;
		}
		npi.npi_flags |= NPIF_IPV4;
		/* Inbound packet: local = destination, foreign = source */
		npi.npi_local_addr_in = iphdr.ip_dst;
		npi.npi_foreign_addr_in = iphdr.ip_src;

		/*
		 * Check if this is a fragment that is not the first fragment
		 * (non-zero fragment offset)
		 */
		if ((ntohs(iphdr.ip_off) & ~(IP_DF | IP_RF)) &&
		    (ntohs(iphdr.ip_off) & IP_OFFMASK) != 0) {
			npi.npi_flags |= NPIF_FRAG;
			if_ports_used_stats.ifpu_frag_wake_pkt += 1;
		}

		switch (iphdr.ip_p) {
		case IPPROTO_TCP: {
			if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
			npi.npi_flags |= NPIF_TCP;

			/* A non-first fragment does not carry the TCP header */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct tcphdr th = {};
			/* ip_hl is in 32-bit words, hence the << 2 */
			error = mbuf_copydata(m, iphdr.ip_hl << 2, sizeof(struct tcphdr), &th);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(tcphdr) error %d",
				    __func__, error);
				goto failed;
			}
			npi.npi_local_port = th.th_dport;
			npi.npi_foreign_port = th.th_sport;
			break;
		}
		case IPPROTO_UDP: {
			if_ports_used_stats.ifpu_udp_wake_pkt += 1;
			npi.npi_flags |= NPIF_UDP;

			/* A non-first fragment does not carry the UDP header */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}
			struct udphdr uh = {};
			size_t udp_offset = iphdr.ip_hl << 2;

			error = mbuf_copydata(m, udp_offset, sizeof(struct udphdr), &uh);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(udphdr) error %d",
				    __func__, error);
				goto failed;
			}
			npi.npi_local_port = uh.uh_dport;
			npi.npi_foreign_port = uh.uh_sport;
			/*
			 * Let the ESP layer handle wake packets
			 * (UDP-encapsulated ESP on the NAT-T port)
			 */
			if (ntohs(uh.uh_dport) == PORT_ISAKMP_NATT ||
			    ntohs(uh.uh_sport) == PORT_ISAKMP_NATT) {
				if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
				if (is_encapsulated_esp(m, udp_offset + sizeof(struct udphdr))) {
					if (net_wake_pkt_debug > 0) {
						net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
					}
					return;
				}
			}
			break;
		}
		case IPPROTO_ESP: {
			/*
			 * Let the ESP layer handle wake packets
			 */
			if_ports_used_stats.ifpu_esp_wake_pkt += 1;
			npi.npi_flags |= NPIF_ESP;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ESP matching", &npi);
			}
			return;
		}
		default:
			if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;
			os_log(OS_LOG_DEFAULT, "%s: unexpected IPv4 protocol %u from %s",
			    __func__, iphdr.ip_p, ifp->if_xname);
			goto failed;
		}
	} else if (proto_family == PF_INET6) {
		struct ip6_hdr ip6_hdr = {};

		if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;

		error = mbuf_copydata(m, 0, sizeof(struct ip6_hdr), &ip6_hdr);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip6_hdr) error %d",
			    __func__, error);
			goto failed;
		}
		npi.npi_flags |= NPIF_IPV6;
		/* Inbound packet: local = destination, foreign = source */
		memcpy(&npi.npi_local_addr_in6, &ip6_hdr.ip6_dst, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6, &ip6_hdr.ip6_src, sizeof(struct in6_addr));

		size_t l3_len = sizeof(struct ip6_hdr);
		uint8_t l4_proto = ip6_hdr.ip6_nxt;

		/*
		 * Check if this is a fragment that is not the first fragment.
		 * Only a leading fragment extension header is handled here;
		 * other extension headers are not skipped and fall through to
		 * the "unexpected protocol" case below.
		 */
		if (l4_proto == IPPROTO_FRAGMENT) {
			struct ip6_frag ip6_frag;

			error = mbuf_copydata(m, sizeof(struct ip6_hdr), sizeof(struct ip6_frag), &ip6_frag);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip6_frag) error %d",
				    __func__, error);
				goto failed;
			}

			l3_len += sizeof(struct ip6_frag);
			l4_proto = ip6_frag.ip6f_nxt;

			if ((ip6_frag.ip6f_offlg & IP6F_OFF_MASK) != 0) {
				npi.npi_flags |= NPIF_FRAG;
				if_ports_used_stats.ifpu_frag_wake_pkt += 1;
			}
		}


		switch (l4_proto) {
		case IPPROTO_TCP: {
			if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
			npi.npi_flags |= NPIF_TCP;

			/*
			 * Cannot attribute a fragment that is not the first
			 * fragment as it does not have the TCP header
			 */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct tcphdr th = {};

			error = mbuf_copydata(m, l3_len, sizeof(struct tcphdr), &th);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(tcphdr) error %d",
				    __func__, error);
				if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
				goto failed;
			}
			npi.npi_local_port = th.th_dport;
			npi.npi_foreign_port = th.th_sport;
			break;
		}
		case IPPROTO_UDP: {
			if_ports_used_stats.ifpu_udp_wake_pkt += 1;
			npi.npi_flags |= NPIF_UDP;

			/*
			 * Cannot attribute a fragment that is not the first
			 * fragment as it does not have the UDP header
			 */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct udphdr uh = {};

			error = mbuf_copydata(m, l3_len, sizeof(struct udphdr), &uh);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(udphdr) error %d",
				    __func__, error);
				if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
				goto failed;
			}
			npi.npi_local_port = uh.uh_dport;
			npi.npi_foreign_port = uh.uh_sport;
			/*
			 * Let the ESP layer handle wake packets
			 * (UDP-encapsulated ESP on the NAT-T port)
			 */
			if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
			    ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
				if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
				if (is_encapsulated_esp(m, l3_len + sizeof(struct udphdr))) {
					if (net_wake_pkt_debug > 0) {
						net_port_info_log_npi("defer encapsulated ESP matching", &npi);
					}
					return;
				}
			}
			break;
		}
		case IPPROTO_ESP: {
			/*
			 * Let the ESP layer handle the wake packet
			 */
			if_ports_used_stats.ifpu_esp_wake_pkt += 1;
			npi.npi_flags |= NPIF_ESP;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ESP matching", &npi);
			}
			return;
		}
		default:
			if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;

			os_log(OS_LOG_DEFAULT, "%s: unexpected IPv6 protocol %u from %s",
			    __func__, ip6_hdr.ip6_nxt, ifp->if_xname);
			goto failed;
		}
	} else {
		if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol family %d from %s",
		    __func__, proto_family, ifp->if_xname);
		goto failed;
	}
	found = net_port_info_find_match(&npi);
	if (found) {
		if_notify_wake_packet(ifp, &npi);
	} else {
		if_notify_unattributed_wake_mbuf(ifp, m, &npi);
	}
	return;
failed:
	/* Could not parse or match: report whatever was gathered in npi */
	if_notify_unattributed_wake_mbuf(ifp, m, &npi);
}
1467 
1468 #if SKYWALK
1469 
1470 static void
if_notify_unattributed_wake_pkt(struct ifnet * ifp,struct __kern_packet * pkt,struct net_port_info * npi)1471 if_notify_unattributed_wake_pkt(struct ifnet *ifp, struct __kern_packet *pkt,
1472     struct net_port_info *npi)
1473 {
1474 	struct kev_msg ev_msg = {};
1475 
1476 	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);
1477 
1478 	lck_mtx_lock(&net_port_entry_head_lock);
1479 	if (has_notified_unattributed_wake) {
1480 		lck_mtx_unlock(&net_port_entry_head_lock);
1481 		if_ports_used_stats.ifpu_dup_unattributed_wake_event += 1;
1482 
1483 		if (__improbable(net_wake_pkt_debug > 0)) {
1484 			net_port_info_log_npi("already notified unattributed wake packet", npi);
1485 		}
1486 		return;
1487 	}
1488 	has_notified_unattributed_wake = true;
1489 	lck_mtx_unlock(&net_port_entry_head_lock);
1490 
1491 	if_ports_used_stats.ifpu_unattributed_wake_event += 1;
1492 
1493 	if (ifp == NULL) {
1494 		os_log(OS_LOG_DEFAULT, "%s: receive interface is NULL",
1495 		    __func__);
1496 		if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
1497 	}
1498 
1499 	ev_msg.vendor_code = KEV_VENDOR_APPLE;
1500 	ev_msg.kev_class = KEV_NETWORK_CLASS;
1501 	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
1502 	ev_msg.event_code  = KEV_POWER_UNATTRIBUTED_WAKE;
1503 
1504 	struct net_port_info_una_wake_event event_data = {};
1505 	uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
1506 	event_data.una_wake_pkt_if_index = ifp != NULL ? ifp->if_index : 0;
1507 	event_data.una_wake_pkt_flags = npi->npi_flags;
1508 
1509 	uint16_t offset = kern_packet_get_network_header_offset(SK_PKT2PH(pkt));
1510 	event_data.una_wake_ptk_len =
1511 	    pkt->pkt_length - offset > NPI_MAX_UNA_WAKE_PKT_LEN ?
1512 	    NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t) pkt->pkt_length - offset;
1513 
1514 	kern_packet_copy_bytes(SK_PKT2PH(pkt), offset, event_data.una_wake_ptk_len,
1515 	    event_data.una_wake_pkt);
1516 
1517 	event_data.una_wake_pkt_local_port = npi->npi_local_port;
1518 	event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
1519 	event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
1520 	event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
1521 	if (ifp != NULL) {
1522 		strlcpy(event_data.una_wake_pkt_ifname, ifp->if_xname,
1523 		    sizeof(event_data.una_wake_pkt_ifname));
1524 	}
1525 
1526 	ev_msg.dv[0].data_ptr = &event_data;
1527 	ev_msg.dv[0].data_length = sizeof(event_data);
1528 
1529 	int result = kev_post_msg(&ev_msg);
1530 	if (result != 0) {
1531 		uuid_string_t wake_uuid_str;
1532 
1533 		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
1534 		os_log_error(OS_LOG_DEFAULT,
1535 		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
1536 		    __func__, result, wake_uuid_str);
1537 
1538 		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
1539 	}
1540 }
1541 
/*
 * if_ports_used_match_pkt
 *
 * Skywalk counterpart of if_ports_used_match_mbuf(): attribute a wake
 * packet received as a __kern_packet.  The flow fields of the packet
 * (pkt_flow_*) are assumed to have been parsed already, so the tuple is
 * read from them rather than re-parsed from the packet bytes.
 *
 * ESP and ISAKMP NAT-T packets are not matched here: matching is
 * deferred to the ESP layer / ip_input (plain return, no event).
 * Non-first fragments cannot be matched because they lack the transport
 * header and are reported as unattributed.
 *
 * The caller must have set PKT_F_WAKE_PKT on the packet.
 */
void
if_ports_used_match_pkt(struct ifnet *ifp, struct __kern_packet *pkt)
{
	struct net_port_info npi = {};
	bool found = false;

	if ((pkt->pkt_pflags & PKT_F_WAKE_PKT) == 0) {
		if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
		os_log_error(OS_LOG_DEFAULT, "%s: called PKT_F_WAKE_PKT not set from %s",
		    __func__, ifp != NULL ? ifp->if_xname : "");
		return;
	}
	if (ifp == NULL) {
		goto failed;
	}

	if_ports_used_stats.ifpu_ch_match_wake_pkt += 1;

	npi.npi_if_index = ifp->if_index;
	if (IFNET_IS_COMPANION_LINK(ifp)) {
		npi.npi_flags |= NPIF_COMPLINK;
	}
	npi.npi_flags |= NPIF_CHANNEL; /* For logging */
	switch (pkt->pkt_flow_ip_ver) {
	case IPVERSION:
		if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;

		npi.npi_flags |= NPIF_IPV4;
		/* Inbound packet: local = destination, foreign = source */
		npi.npi_local_addr_in = pkt->pkt_flow_ipv4_dst;
		npi.npi_foreign_addr_in = pkt->pkt_flow_ipv4_src;
		break;
	case IPV6_VERSION:
		if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;

		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6, &pkt->pkt_flow_ipv6_dst,
		    sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6, &pkt->pkt_flow_ipv6_src,
		    sizeof(struct in6_addr));
		break;
	default:
		if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;

		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol family %u from %s",
		    __func__, pkt->pkt_flow_ip_ver, ifp->if_xname);
		goto failed;
	}

	/*
	 * Check if this is a fragment that is not the first fragment
	 */
	if (pkt->pkt_flow_ip_is_frag && !pkt->pkt_flow_ip_is_first_frag) {
		os_log(OS_LOG_DEFAULT, "%s: unexpected wake fragment from %s",
		    __func__, ifp->if_xname);
		npi.npi_flags |= NPIF_FRAG;
		if_ports_used_stats.ifpu_frag_wake_pkt += 1;
	}

	switch (pkt->pkt_flow_ip_proto) {
	case IPPROTO_TCP: {
		if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
		npi.npi_flags |= NPIF_TCP;

		/*
		 * Cannot attribute a fragment that is not the first
		 * fragment as it does not have the TCP header
		 */
		if (npi.npi_flags & NPIF_FRAG) {
			goto failed;
		}
		struct tcphdr *tcp = (struct tcphdr *)pkt->pkt_flow_tcp_hdr;
		if (tcp == NULL) {
			os_log(OS_LOG_DEFAULT, "%s: pkt with unassigned TCP header from %s",
			    __func__, ifp->if_xname);
			if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
			goto failed;
		}
		npi.npi_local_port = tcp->th_dport;
		npi.npi_foreign_port = tcp->th_sport;
		break;
	}
	case IPPROTO_UDP: {
		if_ports_used_stats.ifpu_udp_wake_pkt += 1;
		npi.npi_flags |= NPIF_UDP;

		/*
		 * Cannot attribute a fragment that is not the first
		 * fragment as it does not have the UDP header
		 */
		if (npi.npi_flags & NPIF_FRAG) {
			goto failed;
		}
		struct udphdr *uh = (struct udphdr *)pkt->pkt_flow_udp_hdr;
		if (uh == NULL) {
			os_log(OS_LOG_DEFAULT, "%s: pkt with unassigned UDP header from %s",
			    __func__, ifp->if_xname);
			if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
			goto failed;
		}
		npi.npi_local_port = uh->uh_dport;
		npi.npi_foreign_port = uh->uh_sport;

		/*
		 * Defer matching of UDP NAT traversal to ip_input
		 * (assumes IKE uses sockets)
		 */
		if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
		    ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
			if_ports_used_stats.ifpu_deferred_isakmp_natt_wake_pkt += 1;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
			}
			return;
		}
		break;
	}
	case IPPROTO_ESP: {
		/*
		 * Let the ESP layer handle the wake packet
		 */
		if_ports_used_stats.ifpu_esp_wake_pkt += 1;
		npi.npi_flags |= NPIF_ESP;
		if (net_wake_pkt_debug > 0) {
			net_port_info_log_npi("defer ESP matching", &npi);
		}
		return;
	}
	default:
		if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;

		os_log(OS_LOG_DEFAULT, "%s: unexpected IP protocol %u from %s",
		    __func__, pkt->pkt_flow_ip_proto, ifp->if_xname);
		goto failed;
	}

	found = net_port_info_find_match(&npi);
	if (found) {
		if_notify_wake_packet(ifp, &npi);
	} else {
		if_notify_unattributed_wake_pkt(ifp, pkt, &npi);
	}
	return;
failed:
	/* Could not parse or match: report whatever was gathered in npi */
	if_notify_unattributed_wake_pkt(ifp, pkt, &npi);
}
1687 #endif /* SKYWALK */
1688