xref: /xnu-8020.101.4/bsd/net/if_ports_used.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2017-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/types.h>
30 #include <sys/sysctl.h>
31 #include <sys/time.h>
32 #include <sys/mcache.h>
33 #include <sys/malloc.h>
34 #include <sys/kauth.h>
35 #include <sys/kern_event.h>
36 #include <sys/bitstring.h>
37 #include <sys/priv.h>
38 #include <sys/proc.h>
39 #include <sys/protosw.h>
40 #include <sys/socket.h>
41 
42 #include <kern/locks.h>
43 #include <kern/zalloc.h>
44 
45 #include <libkern/libkern.h>
46 
47 #include <net/kpi_interface.h>
48 #include <net/if_var.h>
49 #include <net/if_ports_used.h>
50 
51 #include <netinet/in_pcb.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip6.h>
54 #include <netinet/tcp_var.h>
55 #include <netinet/tcp_fsm.h>
56 #include <netinet/udp.h>
57 
58 #if SKYWALK
59 #include <skywalk/os_skywalk_private.h>
60 #include <skywalk/nexus/flowswitch/flow/flow_var.h>
61 #include <skywalk/namespace/netns.h>
62 #endif /* SKYWALK */
63 
64 #include <stdbool.h>
65 
66 #include <os/log.h>
67 
68 #define ESP_HDR_SIZE 4
69 #define PORT_ISAKMP 500
70 #define PORT_ISAKMP_NATT 4500   /* rfc3948 */
71 
72 extern bool IOPMCopySleepWakeUUIDKey(char *buffer, size_t buf_len);
73 
74 SYSCTL_DECL(_net_link_generic_system);
75 
76 SYSCTL_NODE(_net_link_generic_system, OID_AUTO, port_used,
77     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "if port used");
78 
79 struct if_ports_used_stats if_ports_used_stats = {};
80 static int sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS;
81 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, stats,
82     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
83     sysctl_if_ports_used_stats, "S,struct if_ports_used_stats", "");
84 
85 static uuid_t current_wakeuuid;
86 SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, current_wakeuuid,
87     CTLFLAG_RD | CTLFLAG_LOCKED,
88     current_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");
89 
90 static int sysctl_net_port_info_list SYSCTL_HANDLER_ARGS;
91 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, list,
92     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
93     sysctl_net_port_info_list, "S,xnpigen", "");
94 
95 static int use_test_wakeuuid = 0;
96 static uuid_t test_wakeuuid;
97 
98 #if (DEVELOPMENT || DEBUG)
99 SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, use_test_wakeuuid,
100     CTLFLAG_RW | CTLFLAG_LOCKED,
101     &use_test_wakeuuid, 0, "");
102 
103 int sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS;
104 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, new_test_wakeuuid,
105     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
106     sysctl_new_test_wakeuuid, "S,uuid_t", "");
107 
108 int sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS;
109 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, clear_test_wakeuuid,
110     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
111     sysctl_clear_test_wakeuuid, "S,uuid_t", "");
112 
113 SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, test_wakeuuid,
114     CTLFLAG_RD | CTLFLAG_LOCKED,
115     test_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");
116 #endif /* (DEVELOPMENT || DEBUG) */
117 
118 static int sysctl_get_ports_used SYSCTL_HANDLER_ARGS;
119 SYSCTL_NODE(_net_link_generic_system, OID_AUTO, get_ports_used,
120     CTLFLAG_RD | CTLFLAG_LOCKED,
121     sysctl_get_ports_used, "");
122 
123 static int if_ports_used_verbose = 0;
124 SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, verbose,
125     CTLFLAG_RW | CTLFLAG_LOCKED,
126     &if_ports_used_verbose, 0, "");
127 
128 struct timeval wakeuuid_not_set_last_time;
129 int sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS;
130 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
131     wakeuuid_not_set_last_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
132     0, 0, sysctl_wakeuuid_not_set_last_time, "S,timeval", "");
133 
134 char wakeuuid_not_set_last_if[IFXNAMSIZ];
135 int sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS;
136 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
137     wakeuuid_not_set_last_if, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
138     0, 0, sysctl_wakeuuid_not_set_last_if, "A", "");
139 
140 struct timeval wakeuuid_last_update_time;
141 int sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS;
142 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
143     wakeuuid_last_update_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
144     0, 0, sysctl_wakeuuid_last_update_time, "S,timeval", "");
145 
146 static bool has_notified_wake_pkt = false;
147 static bool has_notified_unattributed_wake = false;
148 
149 static LCK_GRP_DECLARE(net_port_entry_head_lock_group, "net port entry lock");
150 static LCK_MTX_DECLARE(net_port_entry_head_lock, &net_port_entry_head_lock_group);
151 
152 
153 struct net_port_entry {
154 	SLIST_ENTRY(net_port_entry)     npe_list_next;
155 	TAILQ_ENTRY(net_port_entry)     npe_hash_next;
156 	struct net_port_info            npe_npi;
157 };
158 
159 static ZONE_DEFINE(net_port_entry_zone, "net_port_entry",
160     sizeof(struct net_port_entry), ZC_NONE);
161 
162 static SLIST_HEAD(net_port_entry_list, net_port_entry) net_port_entry_list =
163     SLIST_HEAD_INITIALIZER(&net_port_entry_list);
164 
165 struct timeval wakeuiid_last_check;
166 
167 
168 #if (DEBUG | DEVELOPMENT)
169 static int64_t npi_search_list_total = 0;
170 SYSCTL_QUAD(_net_link_generic_system_port_used, OID_AUTO, npi_search_list_total,
171     CTLFLAG_RD | CTLFLAG_LOCKED,
172     &npi_search_list_total, "");
173 
174 static int64_t npi_search_list_max = 0;
175 SYSCTL_QUAD(_net_link_generic_system_port_used, OID_AUTO, npi_search_list_max,
176     CTLFLAG_RD | CTLFLAG_LOCKED,
177     &npi_search_list_max, "");
178 #endif /* (DEBUG | DEVELOPMENT) */
179 
180 /*
181  * Hashing of the net_port_entry list is based on the local port
182  *
183  * The hash masks uses the least significant bits so we have to use host byte order
 * when applying the mask because the LSB have more entropy than the MSB (most local ports
185  * are in the high dynamic port range)
186  */
187 #define NPE_HASH_BUCKET_COUNT 32
188 #define NPE_HASH_MASK (NPE_HASH_BUCKET_COUNT - 1)
189 #define NPE_HASH_VAL(_lport) (ntohs(_lport) & NPE_HASH_MASK)
190 #define NPE_HASH_HEAD(_lport) (&net_port_entry_hash_table[NPE_HASH_VAL(_lport)])
191 
192 static TAILQ_HEAD(net_port_entry_hash_table, net_port_entry) * net_port_entry_hash_table = NULL;
193 
/*
 * Initialize the hash table used to index net_port_entry records
 * by local port.
 */
197 void
if_ports_used_init(void)198 if_ports_used_init(void)
199 {
200 	if (net_port_entry_hash_table != NULL) {
201 		return;
202 	}
203 
204 	net_port_entry_hash_table = zalloc_permanent(
205 		NPE_HASH_BUCKET_COUNT * sizeof(*net_port_entry_hash_table),
206 		ZALIGN_PTR);
207 }
208 
209 static void
net_port_entry_list_clear(void)210 net_port_entry_list_clear(void)
211 {
212 	struct net_port_entry *npe;
213 
214 	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);
215 
216 	while ((npe = SLIST_FIRST(&net_port_entry_list)) != NULL) {
217 		SLIST_REMOVE_HEAD(&net_port_entry_list, npe_list_next);
218 		TAILQ_REMOVE(NPE_HASH_HEAD(npe->npe_npi.npi_local_port), npe, npe_hash_next);
219 
220 		zfree(net_port_entry_zone, npe);
221 	}
222 
223 	for (int i = 0; i < NPE_HASH_BUCKET_COUNT; i++) {
224 		VERIFY(TAILQ_EMPTY(&net_port_entry_hash_table[i]));
225 	}
226 
227 	if_ports_used_stats.ifpu_npe_count = 0;
228 	if_ports_used_stats.ifpu_wakeuid_gen++;
229 }
230 
231 static bool
get_test_wake_uuid(uuid_string_t wakeuuid_str,size_t len)232 get_test_wake_uuid(uuid_string_t wakeuuid_str, size_t len)
233 {
234 	if (__improbable(use_test_wakeuuid)) {
235 		if (!uuid_is_null(test_wakeuuid)) {
236 			if (wakeuuid_str != NULL && len != 0) {
237 				uuid_unparse(test_wakeuuid, wakeuuid_str);
238 			}
239 			return true;
240 		} else {
241 			return false;
242 		}
243 	} else {
244 		return false;
245 	}
246 }
247 
248 static bool
is_wakeuuid_set(void)249 is_wakeuuid_set(void)
250 {
251 	/*
252 	 * IOPMCopySleepWakeUUIDKey() tells if SleepWakeUUID is currently set
253 	 * That means we are currently in a sleep/wake cycle
254 	 */
255 	return get_test_wake_uuid(NULL, 0) || IOPMCopySleepWakeUUIDKey(NULL, 0);
256 }
257 
258 void
if_ports_used_update_wakeuuid(struct ifnet * ifp)259 if_ports_used_update_wakeuuid(struct ifnet *ifp)
260 {
261 	uuid_t wakeuuid;
262 	bool wakeuuid_is_set = false;
263 	bool updated = false;
264 	uuid_string_t wakeuuid_str;
265 
266 	uuid_clear(wakeuuid);
267 
268 	if (__improbable(use_test_wakeuuid)) {
269 		wakeuuid_is_set = get_test_wake_uuid(wakeuuid_str,
270 		    sizeof(wakeuuid_str));
271 	} else {
272 		wakeuuid_is_set = IOPMCopySleepWakeUUIDKey(wakeuuid_str,
273 		    sizeof(wakeuuid_str));
274 	}
275 
276 	if (wakeuuid_is_set) {
277 		if (uuid_parse(wakeuuid_str, wakeuuid) != 0) {
278 			os_log(OS_LOG_DEFAULT,
279 			    "%s: IOPMCopySleepWakeUUIDKey got bad value %s\n",
280 			    __func__, wakeuuid_str);
281 			wakeuuid_is_set = false;
282 		}
283 	}
284 
285 	if (!wakeuuid_is_set) {
286 		if (ifp != NULL) {
287 			if (if_ports_used_verbose > 0) {
288 				os_log_info(OS_LOG_DEFAULT,
289 				    "%s: SleepWakeUUID not set, "
290 				    "don't update the port list for %s\n",
291 				    __func__, ifp != NULL ? if_name(ifp) : "");
292 			}
293 			if_ports_used_stats.ifpu_wakeuuid_not_set_count += 1;
294 			microtime(&wakeuuid_not_set_last_time);
295 			strlcpy(wakeuuid_not_set_last_if, if_name(ifp),
296 			    sizeof(wakeuuid_not_set_last_if));
297 		}
298 		return;
299 	}
300 
301 	lck_mtx_lock(&net_port_entry_head_lock);
302 	if (uuid_compare(wakeuuid, current_wakeuuid) != 0) {
303 		net_port_entry_list_clear();
304 		uuid_copy(current_wakeuuid, wakeuuid);
305 		microtime(&wakeuuid_last_update_time);
306 		updated = true;
307 
308 		has_notified_wake_pkt = false;
309 		has_notified_unattributed_wake = false;
310 	}
311 	/*
312 	 * Record the time last checked
313 	 */
314 	microuptime(&wakeuiid_last_check);
315 	lck_mtx_unlock(&net_port_entry_head_lock);
316 
317 	if (updated && if_ports_used_verbose > 0) {
318 		uuid_string_t uuid_str;
319 
320 		uuid_unparse(current_wakeuuid, uuid_str);
321 		os_log(OS_LOG_DEFAULT, "%s: current wakeuuid %s",
322 		    __func__, uuid_str);
323 	}
324 }
325 
326 static bool
net_port_info_equal(const struct net_port_info * x,const struct net_port_info * y)327 net_port_info_equal(const struct net_port_info *x,
328     const struct net_port_info *y)
329 {
330 	ASSERT(x != NULL && y != NULL);
331 
332 	if (x->npi_if_index == y->npi_if_index &&
333 	    x->npi_local_port == y->npi_local_port &&
334 	    x->npi_foreign_port == y->npi_foreign_port &&
335 	    x->npi_owner_pid == y->npi_owner_pid &&
336 	    x->npi_effective_pid == y->npi_effective_pid &&
337 	    x->npi_flags == y->npi_flags &&
338 	    memcmp(&x->npi_local_addr_, &y->npi_local_addr_,
339 	    sizeof(union in_addr_4_6)) == 0 &&
340 	    memcmp(&x->npi_foreign_addr_, &y->npi_foreign_addr_,
341 	    sizeof(union in_addr_4_6)) == 0) {
342 		return true;
343 	}
344 	return false;
345 }
346 
347 static bool
net_port_info_has_entry(const struct net_port_info * npi)348 net_port_info_has_entry(const struct net_port_info *npi)
349 {
350 	struct net_port_entry *npe;
351 	bool found = false;
352 	int32_t count = 0;
353 
354 	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);
355 
356 	TAILQ_FOREACH(npe, NPE_HASH_HEAD(npi->npi_local_port), npe_hash_next) {
357 		count += 1;
358 		if (net_port_info_equal(&npe->npe_npi, npi)) {
359 			found = true;
360 			break;
361 		}
362 	}
363 	if_ports_used_stats.ifpu_npi_hash_search_total += count;
364 	if (count > if_ports_used_stats.ifpu_npi_hash_search_max) {
365 		if_ports_used_stats.ifpu_npi_hash_search_max = count;
366 	}
367 
368 	return found;
369 }
370 
371 static bool
net_port_info_add_entry(const struct net_port_info * npi)372 net_port_info_add_entry(const struct net_port_info *npi)
373 {
374 	struct net_port_entry   *npe = NULL;
375 	uint32_t num = 0;
376 	bool entry_added = false;
377 
378 	ASSERT(npi != NULL);
379 
380 	if (__improbable(is_wakeuuid_set() == false)) {
381 		if_ports_used_stats.ifpu_npi_not_added_no_wakeuuid++;
382 		if (if_ports_used_verbose > 0) {
383 			os_log(OS_LOG_DEFAULT, "%s: wakeuuid not set not adding "
384 			    "port: %u flags: 0x%xif: %u pid: %u epid %u",
385 			    __func__,
386 			    ntohs(npi->npi_local_port),
387 			    npi->npi_flags,
388 			    npi->npi_if_index,
389 			    npi->npi_owner_pid,
390 			    npi->npi_effective_pid);
391 		}
392 		return false;
393 	}
394 
395 	npe = zalloc_flags(net_port_entry_zone, Z_WAITOK | Z_ZERO);
396 	if (__improbable(npe == NULL)) {
397 		os_log(OS_LOG_DEFAULT, "%s: zalloc() failed for "
398 		    "port: %u flags: 0x%x if: %u pid: %u epid %u",
399 		    __func__,
400 		    ntohs(npi->npi_local_port),
401 		    npi->npi_flags,
402 		    npi->npi_if_index,
403 		    npi->npi_owner_pid,
404 		    npi->npi_effective_pid);
405 		return false;
406 	}
407 
408 	memcpy(&npe->npe_npi, npi, sizeof(npe->npe_npi));
409 
410 	lck_mtx_lock(&net_port_entry_head_lock);
411 
412 	if (net_port_info_has_entry(npi) == false) {
413 		SLIST_INSERT_HEAD(&net_port_entry_list, npe, npe_list_next);
414 		TAILQ_INSERT_HEAD(NPE_HASH_HEAD(npi->npi_local_port), npe, npe_hash_next);
415 		num = (uint32_t)if_ports_used_stats.ifpu_npe_count++; /* rollover OK */
416 		entry_added = true;
417 
418 		if (if_ports_used_stats.ifpu_npe_count > if_ports_used_stats.ifpu_npe_max) {
419 			if_ports_used_stats.ifpu_npe_max = if_ports_used_stats.ifpu_npe_count;
420 		}
421 		if_ports_used_stats.ifpu_npe_total++;
422 
423 		if (if_ports_used_verbose > 1) {
424 			os_log(OS_LOG_DEFAULT, "%s: num %u for "
425 			    "port: %u flags: 0x%x if: %u pid: %u epid %u",
426 			    __func__,
427 			    num,
428 			    ntohs(npi->npi_local_port),
429 			    npi->npi_flags,
430 			    npi->npi_if_index,
431 			    npi->npi_owner_pid,
432 			    npi->npi_effective_pid);
433 		}
434 	} else {
435 		if_ports_used_stats.ifpu_npe_dup++;
436 		if (if_ports_used_verbose > 2) {
437 			os_log(OS_LOG_DEFAULT, "%s: already added "
438 			    "port: %u flags: 0x%x if: %u pid: %u epid %u",
439 			    __func__,
440 			    ntohs(npi->npi_local_port),
441 			    npi->npi_flags,
442 			    npi->npi_if_index,
443 			    npi->npi_owner_pid,
444 			    npi->npi_effective_pid);
445 		}
446 	}
447 
448 	lck_mtx_unlock(&net_port_entry_head_lock);
449 
450 	if (entry_added == false) {
451 		zfree(net_port_entry_zone, npe);
452 	}
453 	return entry_added;
454 }
455 
456 #if (DEVELOPMENT || DEBUG)
/*
 * DEVELOPMENT/DEBUG only: any write to this sysctl generates a fresh
 * random test wake UUID and refreshes the port list state; reading
 * returns the current test UUID.  Root only.
 */
int
sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}
	/* Size probe: report how many bytes a read would return */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(uuid_t);
		return 0;
	}
	/* The written value is ignored; the write is just a trigger */
	if (req->newptr != USER_ADDR_NULL) {
		uuid_generate(test_wakeuuid);
		if_ports_used_update_wakeuuid(NULL);
	}
	error = SYSCTL_OUT(req, test_wakeuuid,
	    MIN(sizeof(uuid_t), req->oldlen));

	return error;
}
479 
/*
 * DEVELOPMENT/DEBUG only: any write to this sysctl clears the test
 * wake UUID and refreshes the port list state; reading returns the
 * (now null) test UUID.  Root only.
 */
int
sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}
	/* Size probe: report how many bytes a read would return */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(uuid_t);
		return 0;
	}
	/* The written value is ignored; the write is just a trigger */
	if (req->newptr != USER_ADDR_NULL) {
		uuid_clear(test_wakeuuid);
		if_ports_used_update_wakeuuid(NULL);
	}
	error = SYSCTL_OUT(req, test_wakeuuid,
	    MIN(sizeof(uuid_t), req->oldlen));

	return error;
}
502 
503 #endif /* (DEVELOPMENT || DEBUG) */
504 
505 static int
sysctl_timeval(struct sysctl_req * req,const struct timeval * tv)506 sysctl_timeval(struct sysctl_req *req, const struct timeval *tv)
507 {
508 	if (proc_is64bit(req->p)) {
509 		struct user64_timeval tv64 = {};
510 
511 		tv64.tv_sec = tv->tv_sec;
512 		tv64.tv_usec = tv->tv_usec;
513 		return SYSCTL_OUT(req, &tv64, sizeof(tv64));
514 	} else {
515 		struct user32_timeval tv32 = {};
516 
517 		tv32.tv_sec = (user32_time_t)tv->tv_sec;
518 		tv32.tv_usec = tv->tv_usec;
519 		return SYSCTL_OUT(req, &tv32, sizeof(tv32));
520 	}
521 }
522 
/* Read-only sysctl: time the wake UUID was last updated */
int
sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return sysctl_timeval(req, &wakeuuid_last_update_time);
}
530 
/* Read-only sysctl: last time an update was attempted without a wake UUID */
int
sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return sysctl_timeval(req, &wakeuuid_not_set_last_time);
}
538 
/*
 * Read-only sysctl: name of the last interface for which the wake UUID
 * was found unset.  Copies out the string including its NUL terminator.
 */
int
sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return SYSCTL_OUT(req, &wakeuuid_not_set_last_if, strlen(wakeuuid_not_set_last_if) + 1);
}
546 
547 int
548 sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS
549 {
550 #pragma unused(oidp, arg1, arg2)
551 	size_t len = sizeof(struct if_ports_used_stats);
552 
553 	if (req->oldptr != 0) {
554 		len = MIN(req->oldlen, sizeof(struct if_ports_used_stats));
555 	}
556 	return SYSCTL_OUT(req, &if_ports_used_stats, len);
557 }
558 
/*
 * Privileged sysctl: dump the current net_port_info list, prefixed by
 * a struct xnpigen header describing the generation, wake UUID, entry
 * count and entry size.  Requires the network statistics privilege.
 */
static int
sysctl_net_port_info_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	struct xnpigen xnpigen;
	struct net_port_entry *npe;

	if ((error = priv_check_cred(kauth_cred_get(),
	    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0)) != 0) {
		return EPERM;
	}
	lck_mtx_lock(&net_port_entry_head_lock);

	if (req->oldptr == USER_ADDR_NULL) {
		/*
		 * Size probe: add a cushion (cnt >> 4, about 6%) for
		 * entries that may be added before the real read
		 */
		size_t cnt = (size_t)if_ports_used_stats.ifpu_npe_count;
		cnt += cnt >> 4;
		req->oldidx = sizeof(struct xnpigen) +
		    cnt * sizeof(struct net_port_info);
		goto done;
	}

	/* Header first, then one net_port_info per entry */
	memset(&xnpigen, 0, sizeof(struct xnpigen));
	xnpigen.xng_len = sizeof(struct xnpigen);
	xnpigen.xng_gen = (uint32_t)if_ports_used_stats.ifpu_wakeuid_gen;
	uuid_copy(xnpigen.xng_wakeuuid, current_wakeuuid);
	xnpigen.xng_npi_count = (uint32_t)if_ports_used_stats.ifpu_npe_count;
	xnpigen.xng_npi_size = sizeof(struct net_port_info);
	error = SYSCTL_OUT(req, &xnpigen, sizeof(xnpigen));
	if (error != 0) {
		printf("%s: SYSCTL_OUT(xnpigen) error %d\n",
		    __func__, error);
		goto done;
	}

	SLIST_FOREACH(npe, &net_port_entry_list, npe_list_next) {
		error = SYSCTL_OUT(req, &npe->npe_npi,
		    sizeof(struct net_port_info));
		if (error != 0) {
			printf("%s: SYSCTL_OUT(npi) error %d\n",
			    __func__, error);
			goto done;
		}
	}
done:
	lck_mtx_unlock(&net_port_entry_head_lock);

	return error;
}
609 
610 /*
611  * Mirror the arguments of ifnet_get_local_ports_extended()
612  *  ifindex
613  *  protocol
614  *  flags
615  */
static int
sysctl_get_ports_used SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	/* The three OID name components are: ifindex, protocol, flags */
	int *name = (int *)arg1;
	int namelen = arg2;
	int error = 0;
	int idx;
	protocol_family_t protocol;
	u_int32_t flags;
	ifnet_t ifp = NULL;
	u_int8_t *bitfield = NULL;

	/* This sysctl is read-only */
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	/*
	 * 3 is the required number of parameters: ifindex, protocol and flags
	 */
	if (namelen != 3) {
		error = ENOENT;
		goto done;
	}

	/* Size probe: one bit per possible port */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = bitstr_size(IP_PORTRANGE_SIZE);
		goto done;
	}
	if (req->oldlen < bitstr_size(IP_PORTRANGE_SIZE)) {
		error = ENOMEM;
		goto done;
	}
	bitfield = (u_int8_t *) kalloc_data(bitstr_size(IP_PORTRANGE_SIZE),
	    Z_WAITOK | Z_ZERO);
	if (bitfield == NULL) {
		error = ENOMEM;
		goto done;
	}

	idx = name[0];
	protocol = name[1];
	flags = name[2];

	/* Resolve the interface index under the ifnet head lock */
	ifnet_head_lock_shared();
	if (IF_INDEX_IN_RANGE(idx)) {
		ifp = ifindex2ifnet[idx];
	}
	ifnet_head_done();

	/* NOTE: ifp may be NULL here; presumably the callee accepts that — confirm */
	error = ifnet_get_local_ports_extended(ifp, protocol, flags, bitfield);
	if (error != 0) {
		printf("%s: ifnet_get_local_ports_extended() error %d\n",
		    __func__, error);
		goto done;
	}
	error = SYSCTL_OUT(req, bitfield, bitstr_size(IP_PORTRANGE_SIZE));
done:
	if (bitfield != NULL) {
		kfree_data(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
	}
	return error;
}
679 
/*
 * Build a net_port_info record from a TCP or UDP inpcb and record it
 * in the ports-used list.  ifindex 0 means "use the PCB's last
 * outbound interface".  Returns true when a new entry was added.
 */
__private_extern__ bool
if_ports_used_add_inpcb(const uint32_t ifindex, const struct inpcb *inp)
{
	struct net_port_info npi = {};
	struct socket *so = inp->inp_socket;

	/* This is unlikely to happen but better be safe than sorry */
	if (ifindex > UINT16_MAX) {
		os_log(OS_LOG_DEFAULT, "%s: ifindex %u too big", __func__, ifindex);
		return false;
	}

	if (ifindex != 0) {
		npi.npi_if_index = (uint16_t)ifindex;
	} else if (inp->inp_last_outifp != NULL) {
		npi.npi_if_index = (uint16_t)inp->inp_last_outifp->if_index;
	}
	/* Companion-link interfaces get a flag so they can match each other */
	if (IF_INDEX_IN_RANGE(npi.npi_if_index)) {
		struct ifnet *ifp = ifindex2ifnet[npi.npi_if_index];
		if (ifp != NULL && IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
	}

	npi.npi_flags |= NPIF_SOCKET;

	/* Timestamp with the last wake UUID check time */
	npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
	npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

	if (so->so_options & SO_NOWAKEFROMSLEEP) {
		npi.npi_flags |= NPIF_NOWAKE;
	}

	/* Only TCP and UDP sockets are tracked */
	if (SOCK_PROTO(so) == IPPROTO_TCP) {
		struct tcpcb *tp = intotcpcb(inp);

		npi.npi_flags |= NPIF_TCP;
		if (tp != NULL && tp->t_state == TCPS_LISTEN) {
			npi.npi_flags |= NPIF_LISTEN;
		}
	} else if (SOCK_PROTO(so) == IPPROTO_UDP) {
		npi.npi_flags |= NPIF_UDP;
	} else {
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol %u for inp %p", __func__,
		    SOCK_PROTO(inp->inp_socket), inp);
		return false;
	}

	uuid_copy(npi.npi_flow_uuid, inp->necp_client_uuid);

	npi.npi_local_port = inp->inp_lport;
	npi.npi_foreign_port = inp->inp_fport;

	/*
	 * Take in account IPv4 addresses mapped on IPv6
	 */
	if ((inp->inp_vflag & INP_IPV6) != 0 && (inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
	    (inp->inp_vflag & (INP_IPV6 | INP_IPV4)) == (INP_IPV6 | INP_IPV4)) {
		/*
		 * NOTE(review): only the local address is copied in this
		 * branch; the foreign address is left zeroed — confirm
		 * that is intended for dual-stack PCBs.
		 */
		npi.npi_flags |= NPIF_IPV6 | NPIF_IPV4;
		memcpy(&npi.npi_local_addr_in6,
		    &inp->in6p_laddr, sizeof(struct in6_addr));
	} else if (inp->inp_vflag & INP_IPV4) {
		npi.npi_flags |= NPIF_IPV4;
		npi.npi_local_addr_in = inp->inp_laddr;
		npi.npi_foreign_addr_in = inp->inp_faddr;
	} else {
		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6,
		    &inp->in6p_laddr, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6,
		    &inp->in6p_faddr, sizeof(struct in6_addr));

		/* Clear the embedded scope ID */
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
			npi.npi_local_addr_in6.s6_addr16[1] = 0;
		}
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
			npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
		}
	}

	npi.npi_owner_pid = so->last_pid;

	if (so->last_pid != 0) {
		proc_name(so->last_pid, npi.npi_owner_pname,
		    sizeof(npi.npi_owner_pname));
		uuid_copy(npi.npi_owner_uuid, so->last_uuid);
	}

	/* Effective identity: delegated socket info when present, else owner */
	if (so->so_flags & SOF_DELEGATED) {
		npi.npi_flags |= NPIF_DELEGATED;
		npi.npi_effective_pid = so->e_pid;
		if (so->e_pid != 0) {
			proc_name(so->e_pid, npi.npi_effective_pname,
			    sizeof(npi.npi_effective_pname));
		}
		uuid_copy(npi.npi_effective_uuid, so->e_uuid);
	} else {
		npi.npi_effective_pid = so->last_pid;
		if (so->last_pid != 0) {
			strlcpy(npi.npi_effective_pname, npi.npi_owner_pname,
			    sizeof(npi.npi_effective_pname));
		}
		uuid_copy(npi.npi_effective_uuid, so->last_uuid);
	}

	return net_port_info_add_entry(&npi);
}
788 
789 #if SKYWALK
/*
 * Build a net_port_info record from a Skywalk flow entry and its
 * namespace flow info, and record it in the ports-used list.
 * Returns true when a new entry was added.
 */
__private_extern__ bool
if_ports_used_add_flow_entry(const struct flow_entry *fe, const uint32_t ifindex,
    const struct ns_flow_info *nfi, uint32_t ns_flags)
{
	struct net_port_info npi = {};

	/* This is unlikely to happen but better be safe than sorry */
	if (ifindex > UINT16_MAX) {
		os_log(OS_LOG_DEFAULT, "%s: ifindex %u too big", __func__, ifindex);
		return false;
	}
	npi.npi_if_index = (uint16_t)ifindex;
	/* Companion-link interfaces get a flag so they can match each other */
	if (IF_INDEX_IN_RANGE(ifindex)) {
		struct ifnet *ifp = ifindex2ifnet[ifindex];
		if (ifp != NULL && IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
	}

	npi.npi_flags |= NPIF_CHANNEL;

	/* Timestamp with the last wake UUID check time */
	npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
	npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

	if (ns_flags & NETNS_NOWAKEFROMSLEEP) {
		npi.npi_flags |= NPIF_NOWAKE;
	}
	if ((ns_flags & NETNS_OWNER_MASK) == NETNS_LISTENER) {
		npi.npi_flags |= NPIF_LISTEN;
	}

	uuid_copy(npi.npi_flow_uuid, nfi->nfi_flow_uuid);

	/* Only TCP and UDP flows are tracked */
	if (nfi->nfi_protocol == IPPROTO_TCP) {
		npi.npi_flags |= NPIF_TCP;
	} else if (nfi->nfi_protocol == IPPROTO_UDP) {
		npi.npi_flags |= NPIF_UDP;
	} else {
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol %u for nfi %p",
		    __func__, nfi->nfi_protocol, nfi);
		return false;
	}

	if (nfi->nfi_laddr.sa.sa_family == AF_INET) {
		npi.npi_flags |= NPIF_IPV4;

		npi.npi_local_port = nfi->nfi_laddr.sin.sin_port;
		npi.npi_foreign_port = nfi->nfi_faddr.sin.sin_port;

		npi.npi_local_addr_in = nfi->nfi_laddr.sin.sin_addr;
		npi.npi_foreign_addr_in = nfi->nfi_faddr.sin.sin_addr;
	} else {
		npi.npi_flags |= NPIF_IPV6;

		npi.npi_local_port = nfi->nfi_laddr.sin6.sin6_port;
		npi.npi_foreign_port = nfi->nfi_faddr.sin6.sin6_port;

		memcpy(&npi.npi_local_addr_in6,
		    &nfi->nfi_laddr.sin6.sin6_addr, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6,
		    &nfi->nfi_faddr.sin6.sin6_addr, sizeof(struct in6_addr));

		/* Clear the embedded scope ID */
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
			npi.npi_local_addr_in6.s6_addr16[1] = 0;
		}
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
			npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
		}
	}

	npi.npi_owner_pid = nfi->nfi_owner_pid;
	strlcpy(npi.npi_owner_pname, nfi->nfi_owner_name,
	    sizeof(npi.npi_owner_pname));

	/*
	 * Get the proc UUID from the pid as the proc UUID is not present
	 * in the flow_entry
	 */
	proc_t proc = proc_find(npi.npi_owner_pid);
	if (proc != PROC_NULL) {
		proc_getexecutableuuid(proc, npi.npi_owner_uuid, sizeof(npi.npi_owner_uuid));
		proc_rele(proc);
	}
	/* Effective identity: delegated flow info when present, else owner */
	if (nfi->nfi_effective_pid != -1) {
		npi.npi_effective_pid = nfi->nfi_effective_pid;
		strlcpy(npi.npi_effective_pname, nfi->nfi_effective_name,
		    sizeof(npi.npi_effective_pname));
		uuid_copy(npi.npi_effective_uuid, fe->fe_eproc_uuid);
	} else {
		npi.npi_effective_pid = npi.npi_owner_pid;
		strlcpy(npi.npi_effective_pname, npi.npi_owner_pname,
		    sizeof(npi.npi_effective_pname));
		uuid_copy(npi.npi_effective_uuid, npi.npi_owner_uuid);
	}

	return net_port_info_add_entry(&npi);
}
888 
889 #endif /* SKYWALK */
890 
/*
 * Log one net_port_info record with a caller-supplied prefix string
 * (s may be NULL).  Addresses are rendered for whichever family flag
 * is set; otherwise the address strings are left empty.
 */
static void
net_port_info_log_npi(const char *s, const struct net_port_info *npi)
{
	char lbuf[MAX_IPv6_STR_LEN] = {};
	char fbuf[MAX_IPv6_STR_LEN] = {};

	if (npi->npi_flags & NPIF_IPV4) {
		inet_ntop(PF_INET, &npi->npi_local_addr_in.s_addr,
		    lbuf, sizeof(lbuf));
		inet_ntop(PF_INET, &npi->npi_foreign_addr_in.s_addr,
		    fbuf, sizeof(fbuf));
	} else if (npi->npi_flags & NPIF_IPV6) {
		inet_ntop(PF_INET6, &npi->npi_local_addr_in6,
		    lbuf, sizeof(lbuf));
		inet_ntop(PF_INET6, &npi->npi_foreign_addr_in6,
		    fbuf, sizeof(fbuf));
	}
	os_log(OS_LOG_DEFAULT, "%s net_port_info if_index %u arch %s family %s proto %s local %s:%u foreign %s:%u pid: %u epid %u",
	    s != NULL ? s : "",
	    npi->npi_if_index,
	    (npi->npi_flags & NPIF_SOCKET) ? "so" : (npi->npi_flags & NPIF_CHANNEL) ? "ch" : "unknown",
	    (npi->npi_flags & NPIF_IPV4) ? "ipv4" : (npi->npi_flags & NPIF_IPV6) ? "ipv6" : "unknown",
	    npi->npi_flags & NPIF_TCP ? "tcp" : npi->npi_flags & NPIF_UDP ? "udp" :
	    npi->npi_flags & NPIF_ESP ? "esp" : "unknown",
	    lbuf, ntohs(npi->npi_local_port),
	    fbuf, ntohs(npi->npi_foreign_port),
	    npi->npi_owner_pid,
	    npi->npi_effective_pid);
}
920 
921 #define NPI_MATCH_IPV4 (NPIF_IPV4 | NPIF_TCP | NPIF_UDP)
922 #define NPI_MATCH_IPV6 (NPIF_IPV6 | NPIF_TCP | NPIF_UDP)
923 
924 static bool
net_port_info_match_npi(struct net_port_entry * npe,const struct net_port_info * in_npi,struct net_port_entry ** best_match)925 net_port_info_match_npi(struct net_port_entry *npe, const struct net_port_info *in_npi,
926     struct net_port_entry **best_match)
927 {
928 	if (__improbable(net_wake_pkt_debug > 1)) {
929 		net_port_info_log_npi("  ", &npe->npe_npi);
930 	}
931 
932 	/*
933 	 * The interfaces must match or be both companion link
934 	 */
935 	if (npe->npe_npi.npi_if_index != in_npi->npi_if_index &&
936 	    !((npe->npe_npi.npi_flags & NPIF_COMPLINK) && (in_npi->npi_flags & NPIF_COMPLINK))) {
937 		return false;
938 	}
939 
940 	/*
941 	 * The local ports and protocols must match
942 	 */
943 	if (npe->npe_npi.npi_local_port != in_npi->npi_local_port ||
944 	    ((npe->npe_npi.npi_flags & NPI_MATCH_IPV4) != (in_npi->npi_flags & NPI_MATCH_IPV4) &&
945 	    (npe->npe_npi.npi_flags & NPI_MATCH_IPV6) != (in_npi->npi_flags & NPI_MATCH_IPV6))) {
946 		return false;
947 	}
948 	/*
949 	 * Search stops on an exact match
950 	 */
951 	if (npe->npe_npi.npi_foreign_port == in_npi->npi_foreign_port) {
952 		if ((npe->npe_npi.npi_flags & NPIF_IPV4) && (npe->npe_npi.npi_flags & NPIF_IPV4)) {
953 			if (in_npi->npi_local_addr_in.s_addr == npe->npe_npi.npi_local_addr_in.s_addr &&
954 			    in_npi->npi_foreign_addr_in.s_addr == npe->npe_npi.npi_foreign_addr_in.s_addr) {
955 				*best_match = npe;
956 				return true;
957 			}
958 		}
959 		if ((npe->npe_npi.npi_flags & NPIF_IPV6) && (npe->npe_npi.npi_flags & NPIF_IPV6)) {
960 			if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
961 			    sizeof(union in_addr_4_6)) == 0 &&
962 			    memcmp(&npe->npe_npi.npi_foreign_addr_, &in_npi->npi_foreign_addr_,
963 			    sizeof(union in_addr_4_6)) == 0) {
964 				*best_match = npe;
965 				return true;
966 			}
967 		}
968 	}
969 	/*
970 	 * Skip connected entries as we are looking for a wildcard match
971 	 * on the local address and port
972 	 */
973 	if (npe->npe_npi.npi_foreign_port != 0) {
974 		return false;
975 	}
976 	/*
977 	 * The local address matches: this is our 2nd best match
978 	 */
979 	if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
980 	    sizeof(union in_addr_4_6)) == 0) {
981 		*best_match = npe;
982 		return false;
983 	}
984 	/*
985 	 * Only the local port matches, do not override a match
986 	 * on the local address
987 	 */
988 	if (*best_match == NULL) {
989 		*best_match = npe;
990 	}
991 	return false;
992 }
993 
994 /*
995  *
996  */
997 static bool
net_port_info_find_match(struct net_port_info * in_npi)998 net_port_info_find_match(struct net_port_info *in_npi)
999 {
1000 	struct net_port_entry *npe;
1001 	struct net_port_entry *best_match = NULL;
1002 
1003 	lck_mtx_lock(&net_port_entry_head_lock);
1004 
1005 	uint32_t count = 0;
1006 	TAILQ_FOREACH(npe, NPE_HASH_HEAD(in_npi->npi_local_port), npe_hash_next) {
1007 		count += 1;
1008 		if (net_port_info_match_npi(npe, in_npi, &best_match)) {
1009 			break;
1010 		}
1011 	}
1012 
1013 	if (best_match != NULL) {
1014 		best_match->npe_npi.npi_flags |= NPIF_WAKEPKT;
1015 		in_npi->npi_owner_pid = best_match->npe_npi.npi_owner_pid;
1016 		in_npi->npi_effective_pid = best_match->npe_npi.npi_effective_pid;
1017 		strlcpy(in_npi->npi_owner_pname, best_match->npe_npi.npi_owner_pname,
1018 		    sizeof(in_npi->npi_owner_pname));
1019 		strlcpy(in_npi->npi_effective_pname, best_match->npe_npi.npi_effective_pname,
1020 		    sizeof(in_npi->npi_effective_pname));
1021 		uuid_copy(in_npi->npi_owner_uuid, best_match->npe_npi.npi_owner_uuid);
1022 		uuid_copy(in_npi->npi_effective_uuid, best_match->npe_npi.npi_effective_uuid);
1023 	}
1024 	lck_mtx_unlock(&net_port_entry_head_lock);
1025 
1026 	if (__improbable(net_wake_pkt_debug > 0)) {
1027 		if (best_match != NULL) {
1028 			net_port_info_log_npi("wake packet match", in_npi);
1029 		} else {
1030 			net_port_info_log_npi("wake packet no match", in_npi);
1031 		}
1032 	}
1033 
1034 	return best_match != NULL ? true : false;
1035 }
1036 
/*
 * Posts a KEV_POWER_UNATTRIBUTED_WAKE kernel event for a wake packet
 * (received as an mbuf) that could not be attributed to a process.
 * At most one such event is posted per wake cycle; duplicates are counted
 * in ifpu_dup_unattributed_wake_event and dropped.  Up to
 * NPI_MAX_UNA_WAKE_PKT_LEN bytes of the packet are included in the event.
 */
static void
if_notify_unattributed_wake_mbuf(struct ifnet *ifp, struct mbuf *m,
    struct net_port_info *npi)
{
	struct kev_msg ev_msg = {};

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);

	/* Test-and-set of the notify-once flag is done under the lock */
	lck_mtx_lock(&net_port_entry_head_lock);
	if (has_notified_unattributed_wake) {
		lck_mtx_unlock(&net_port_entry_head_lock);
		if_ports_used_stats.ifpu_dup_unattributed_wake_event += 1;

		if (__improbable(net_wake_pkt_debug > 0)) {
			net_port_info_log_npi("already notified unattributed wake packet", npi);
		}
		return;
	}
	has_notified_unattributed_wake = true;
	lck_mtx_unlock(&net_port_entry_head_lock);

	if_ports_used_stats.ifpu_unattributed_wake_event += 1;

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
	ev_msg.event_code  = KEV_POWER_UNATTRIBUTED_WAKE;

	struct net_port_info_una_wake_event event_data = {};
	uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
	event_data.una_wake_pkt_if_index = ifp != NULL ? ifp->if_index : 0;
	event_data.una_wake_pkt_flags = npi->npi_flags;

	event_data.una_wake_pkt_local_port = npi->npi_local_port;
	event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
	event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
	event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;

	/* A NULL receive interface is tolerated but counted */
	if (ifp != NULL) {
		strlcpy(event_data.una_wake_pkt_ifname, ifp->if_xname,
		    sizeof(event_data.una_wake_pkt_ifname));
	} else {
		if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
	}

	/* Clamp the copied packet snippet to the event buffer size */
	event_data.una_wake_ptk_len = m->m_pkthdr.len > NPI_MAX_UNA_WAKE_PKT_LEN ?
	    NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t)m->m_pkthdr.len;

	errno_t error = mbuf_copydata(m, 0, event_data.una_wake_ptk_len,
	    (void *)event_data.una_wake_pkt);
	if (error != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: mbuf_copydata() failed with error %d for wake uuid %s",
		    __func__, error, wake_uuid_str);

		/*
		 * No event is posted on copy failure, but the notify-once
		 * flag has already been set for this wake cycle
		 */
		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
		return;
	}

	ev_msg.dv[0].data_ptr = &event_data;
	ev_msg.dv[0].data_length = sizeof(event_data);

	int result = kev_post_msg(&ev_msg);
	if (result != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
		    __func__, result, wake_uuid_str);

		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
	}
}
1114 
/*
 * Posts a KEV_POWER_WAKE_PACKET kernel event for a wake packet that was
 * attributed to a process: npi carries the attribution filled in by
 * net_port_info_find_match().  At most one such event is posted per wake
 * cycle; duplicates are counted in ifpu_dup_wake_pkt_event and dropped.
 * ifp must not be NULL (its index and name are copied unconditionally).
 */
static void
if_notify_wake_packet(struct ifnet *ifp, struct net_port_info *npi)
{
	struct kev_msg ev_msg = {};

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);

	/* Test-and-set of the notify-once flag is done under the lock */
	lck_mtx_lock(&net_port_entry_head_lock);
	if (has_notified_wake_pkt) {
		lck_mtx_unlock(&net_port_entry_head_lock);
		if_ports_used_stats.ifpu_dup_wake_pkt_event += 1;

		if (__improbable(net_wake_pkt_debug > 0)) {
			net_port_info_log_npi("already notified wake packet", npi);
		}
		return;
	}
	has_notified_wake_pkt = true;
	lck_mtx_unlock(&net_port_entry_head_lock);

	if_ports_used_stats.ifpu_wake_pkt_event += 1;

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
	ev_msg.event_code  = KEV_POWER_WAKE_PACKET;

	struct net_port_info_wake_event event_data = {};

	uuid_copy(event_data.wake_uuid, current_wakeuuid);
	event_data.wake_pkt_if_index = ifp->if_index;
	event_data.wake_pkt_port = npi->npi_local_port;
	event_data.wake_pkt_flags = npi->npi_flags;
	event_data.wake_pkt_owner_pid = npi->npi_owner_pid;
	event_data.wake_pkt_effective_pid = npi->npi_effective_pid;
	strlcpy(event_data.wake_pkt_owner_pname, npi->npi_owner_pname,
	    sizeof(event_data.wake_pkt_owner_pname));
	strlcpy(event_data.wake_pkt_effective_pname, npi->npi_effective_pname,
	    sizeof(event_data.wake_pkt_effective_pname));
	uuid_copy(event_data.wake_pkt_owner_uuid, npi->npi_owner_uuid);
	uuid_copy(event_data.wake_pkt_effective_uuid, npi->npi_effective_uuid);

	event_data.wake_pkt_foreign_port = npi->npi_foreign_port;
	event_data.wake_pkt_local_addr_ = npi->npi_local_addr_;
	event_data.wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
	strlcpy(event_data.wake_pkt_ifname, ifp->if_xname, sizeof(event_data.wake_pkt_ifname));

	ev_msg.dv[0].data_ptr = &event_data;
	ev_msg.dv[0].data_length = sizeof(event_data);

	int result = kev_post_msg(&ev_msg);
	if (result != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
		    __func__, result, wake_uuid_str);

		if_ports_used_stats.ifpu_wake_pkt_event_error += 1;
	}
}
1177 
1178 static bool
is_encapsulated_esp(struct mbuf * m,size_t data_offset)1179 is_encapsulated_esp(struct mbuf *m, size_t data_offset)
1180 {
1181 	/*
1182 	 * They are three cases:
1183 	 * - Keep alive: 1 byte payload
1184 	 * - IKE: payload start with 4 bytes header set to zero before ISAKMP header
1185 	 * - otherwise it's ESP
1186 	 */
1187 	ASSERT(m->m_pkthdr.len >= data_offset);
1188 
1189 	size_t data_len = m->m_pkthdr.len - data_offset;
1190 	if (data_len == 1) {
1191 		return false;
1192 	} else if (data_len > ESP_HDR_SIZE) {
1193 		uint8_t payload[ESP_HDR_SIZE];
1194 
1195 		errno_t error = mbuf_copydata(m, data_offset, ESP_HDR_SIZE, &payload);
1196 		if (error != 0) {
1197 			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ESP_HDR_SIZE) error %d",
1198 			    __func__, error);
1199 		} else if (payload[0] == 0 && payload[1] == 0 &&
1200 		    payload[2] == 0 && payload[3] == 0) {
1201 			return false;
1202 		}
1203 	}
1204 	return true;
1205 }
1206 
/*
 * Attempts to attribute a wake packet received as an mbuf to the process
 * owning the matching local port.  On a match a KEV_POWER_WAKE_PACKET event
 * is posted; otherwise a KEV_POWER_UNATTRIBUTED_WAKE event is posted.
 *
 * ESP packets and UDP packets on the ISAKMP NAT-traversal port that carry
 * encapsulated ESP are deliberately left for the ESP layer to match.
 */
void
if_ports_used_match_mbuf(struct ifnet *ifp, protocol_family_t proto_family, struct mbuf *m)
{
	errno_t error;
	struct net_port_info npi = {};
	bool found = false;

	/* Only packets explicitly marked as the wake packet are considered */
	if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == 0) {
		if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
		os_log_error(OS_LOG_DEFAULT, "%s: called PKTF_WAKE_PKT not set from %s",
		    __func__, ifp != NULL ? ifp->if_xname : "");
		return;
	}
	if (ifp == NULL) {
		goto failed;
	}

	if_ports_used_stats.ifpu_so_match_wake_pkt += 1;

	npi.npi_if_index = ifp->if_index;
	if (IFNET_IS_COMPANION_LINK(ifp)) {
		npi.npi_flags |= NPIF_COMPLINK;
	}
	npi.npi_flags |= NPIF_SOCKET; /* For logging */
	if (proto_family == PF_INET) {
		struct ip iphdr = {};

		if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;

		error = mbuf_copydata(m, 0, sizeof(struct ip), &iphdr);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip) error %d",
			    __func__, error);
			goto failed;
		}
		npi.npi_flags |= NPIF_IPV4;
		/* local = destination, foreign = source, for an inbound packet */
		npi.npi_local_addr_in = iphdr.ip_dst;
		npi.npi_foreign_addr_in = iphdr.ip_src;

		/*
		 * Check if this is a fragment that is not the first fragment
		 */
		if ((ntohs(iphdr.ip_off) & ~(IP_DF | IP_RF)) &&
		    (ntohs(iphdr.ip_off) & IP_OFFMASK) != 0) {
			npi.npi_flags |= NPIF_FRAG;
			if_ports_used_stats.ifpu_frag_wake_pkt += 1;
		}

		switch (iphdr.ip_p) {
		case IPPROTO_TCP: {
			if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
			npi.npi_flags |= NPIF_TCP;

			/* A non-first fragment has no TCP header to attribute */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct tcphdr th = {};
			/* ip_hl is in 32-bit words, hence the << 2 */
			error = mbuf_copydata(m, iphdr.ip_hl << 2, sizeof(struct tcphdr), &th);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(tcphdr) error %d",
				    __func__, error);
				goto failed;
			}
			npi.npi_local_port = th.th_dport;
			npi.npi_foreign_port = th.th_sport;
			break;
		}
		case IPPROTO_UDP: {
			if_ports_used_stats.ifpu_udp_wake_pkt += 1;
			npi.npi_flags |= NPIF_UDP;

			/* A non-first fragment has no UDP header to attribute */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}
			struct udphdr uh = {};
			size_t udp_offset = iphdr.ip_hl << 2;

			error = mbuf_copydata(m, udp_offset, sizeof(struct udphdr), &uh);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(udphdr) error %d",
				    __func__, error);
				goto failed;
			}
			npi.npi_local_port = uh.uh_dport;
			npi.npi_foreign_port = uh.uh_sport;
			/*
			 * Let the ESP layer handle wake packets
			 */
			if (ntohs(uh.uh_dport) == PORT_ISAKMP_NATT ||
			    ntohs(uh.uh_sport) == PORT_ISAKMP_NATT) {
				if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
				if (is_encapsulated_esp(m, udp_offset + sizeof(struct udphdr))) {
					if (net_wake_pkt_debug > 0) {
						net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
					}
					return;
				}
			}
			break;
		}
		case IPPROTO_ESP: {
			/*
			 * Let the ESP layer handle wake packets
			 */
			if_ports_used_stats.ifpu_esp_wake_pkt += 1;
			npi.npi_flags |= NPIF_ESP;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ESP matching", &npi);
			}
			return;
		}
		default:
			if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;
			os_log(OS_LOG_DEFAULT, "%s: unexpected IPv4 protocol %u from %s",
			    __func__, iphdr.ip_p, ifp->if_xname);
			goto failed;
		}
	} else if (proto_family == PF_INET6) {
		struct ip6_hdr ip6_hdr = {};

		if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;

		error = mbuf_copydata(m, 0, sizeof(struct ip6_hdr), &ip6_hdr);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip6_hdr) error %d",
			    __func__, error);
			goto failed;
		}
		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6, &ip6_hdr.ip6_dst, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6, &ip6_hdr.ip6_src, sizeof(struct in6_addr));

		/* l3_len tracks the offset of the transport header */
		size_t l3_len = sizeof(struct ip6_hdr);
		uint8_t l4_proto = ip6_hdr.ip6_nxt;

		/*
		 * Check if this is a fragment that is not the first fragment.
		 * Only a leading fragment extension header is handled here;
		 * other IPv6 extension headers fall to the default case below.
		 */
		if (l4_proto == IPPROTO_FRAGMENT) {
			struct ip6_frag ip6_frag;

			error = mbuf_copydata(m, sizeof(struct ip6_hdr), sizeof(struct ip6_frag), &ip6_frag);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip6_frag) error %d",
				    __func__, error);
				goto failed;
			}

			l3_len += sizeof(struct ip6_frag);
			l4_proto = ip6_frag.ip6f_nxt;

			if ((ip6_frag.ip6f_offlg & IP6F_OFF_MASK) != 0) {
				npi.npi_flags |= NPIF_FRAG;
				if_ports_used_stats.ifpu_frag_wake_pkt += 1;
			}
		}


		switch (l4_proto) {
		case IPPROTO_TCP: {
			if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
			npi.npi_flags |= NPIF_TCP;

			/*
			 * Cannot attribute a fragment that is not the first
			 * fragment as it does not have the TCP header
			 */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct tcphdr th = {};

			error = mbuf_copydata(m, l3_len, sizeof(struct tcphdr), &th);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(tcphdr) error %d",
				    __func__, error);
				if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
				goto failed;
			}
			npi.npi_local_port = th.th_dport;
			npi.npi_foreign_port = th.th_sport;
			break;
		}
		case IPPROTO_UDP: {
			if_ports_used_stats.ifpu_udp_wake_pkt += 1;
			npi.npi_flags |= NPIF_UDP;

			/*
			 * Cannot attribute a fragment that is not the first
			 * fragment as it does not have the UDP header
			 */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct udphdr uh = {};

			error = mbuf_copydata(m, l3_len, sizeof(struct udphdr), &uh);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(udphdr) error %d",
				    __func__, error);
				if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
				goto failed;
			}
			npi.npi_local_port = uh.uh_dport;
			npi.npi_foreign_port = uh.uh_sport;
			/*
			 * Let the ESP layer handle wake packets
			 */
			if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
			    ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
				if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
				if (is_encapsulated_esp(m, l3_len + sizeof(struct udphdr))) {
					if (net_wake_pkt_debug > 0) {
						net_port_info_log_npi("defer encapsulated ESP matching", &npi);
					}
					return;
				}
			}
			break;
		}
		case IPPROTO_ESP: {
			/*
			 * Let the ESP layer handle the wake packet
			 */
			if_ports_used_stats.ifpu_esp_wake_pkt += 1;
			npi.npi_flags |= NPIF_ESP;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ESP matching", &npi);
			}
			return;
		}
		default:
			if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;

			os_log(OS_LOG_DEFAULT, "%s: unexpected IPv6 protocol %u from %s",
			    __func__, ip6_hdr.ip6_nxt, ifp->if_xname);
			goto failed;
		}
	} else {
		if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol family %d from %s",
		    __func__, proto_family, ifp->if_xname);
		goto failed;
	}
	found = net_port_info_find_match(&npi);
	if (found) {
		if_notify_wake_packet(ifp, &npi);
	} else {
		if_notify_unattributed_wake_mbuf(ifp, m, &npi);
	}
	return;
failed:
	/* Whatever was parsed into npi so far goes into the unattributed event */
	if_notify_unattributed_wake_mbuf(ifp, m, &npi);
}
1464 
1465 #if SKYWALK
1466 
1467 static void
if_notify_unattributed_wake_pkt(struct ifnet * ifp,struct __kern_packet * pkt,struct net_port_info * npi)1468 if_notify_unattributed_wake_pkt(struct ifnet *ifp, struct __kern_packet *pkt,
1469     struct net_port_info *npi)
1470 {
1471 	struct kev_msg ev_msg = {};
1472 
1473 	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);
1474 
1475 	lck_mtx_lock(&net_port_entry_head_lock);
1476 	if (has_notified_unattributed_wake) {
1477 		lck_mtx_unlock(&net_port_entry_head_lock);
1478 		if_ports_used_stats.ifpu_dup_unattributed_wake_event += 1;
1479 
1480 		if (__improbable(net_wake_pkt_debug > 0)) {
1481 			net_port_info_log_npi("already notified unattributed wake packet", npi);
1482 		}
1483 		return;
1484 	}
1485 	has_notified_unattributed_wake = true;
1486 	lck_mtx_unlock(&net_port_entry_head_lock);
1487 
1488 	if_ports_used_stats.ifpu_unattributed_wake_event += 1;
1489 
1490 	if (ifp == NULL) {
1491 		os_log(OS_LOG_DEFAULT, "%s: receive interface is NULL",
1492 		    __func__);
1493 		if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
1494 	}
1495 
1496 	ev_msg.vendor_code = KEV_VENDOR_APPLE;
1497 	ev_msg.kev_class = KEV_NETWORK_CLASS;
1498 	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
1499 	ev_msg.event_code  = KEV_POWER_UNATTRIBUTED_WAKE;
1500 
1501 	struct net_port_info_una_wake_event event_data = {};
1502 	uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
1503 	event_data.una_wake_pkt_if_index = ifp != NULL ? ifp->if_index : 0;
1504 	event_data.una_wake_pkt_flags = npi->npi_flags;
1505 
1506 	uint16_t offset = kern_packet_get_network_header_offset(SK_PKT2PH(pkt));
1507 	event_data.una_wake_ptk_len =
1508 	    pkt->pkt_length - offset > NPI_MAX_UNA_WAKE_PKT_LEN ?
1509 	    NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t) pkt->pkt_length - offset;
1510 
1511 	kern_packet_copy_bytes(SK_PKT2PH(pkt), offset, event_data.una_wake_ptk_len,
1512 	    event_data.una_wake_pkt);
1513 
1514 	event_data.una_wake_pkt_local_port = npi->npi_local_port;
1515 	event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
1516 	event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
1517 	event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
1518 	if (ifp != NULL) {
1519 		strlcpy(event_data.una_wake_pkt_ifname, ifp->if_xname,
1520 		    sizeof(event_data.una_wake_pkt_ifname));
1521 	}
1522 
1523 	ev_msg.dv[0].data_ptr = &event_data;
1524 	ev_msg.dv[0].data_length = sizeof(event_data);
1525 
1526 	int result = kev_post_msg(&ev_msg);
1527 	if (result != 0) {
1528 		uuid_string_t wake_uuid_str;
1529 
1530 		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
1531 		os_log_error(OS_LOG_DEFAULT,
1532 		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
1533 		    __func__, result, wake_uuid_str);
1534 
1535 		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
1536 	}
1537 }
1538 
/*
 * Attempts to attribute a wake packet received as a skywalk __kern_packet
 * to the process owning the matching local port, relying on the flow
 * classification already attached to the packet (pkt_flow_*).  On a match
 * a KEV_POWER_WAKE_PACKET event is posted; otherwise a
 * KEV_POWER_UNATTRIBUTED_WAKE event is posted.
 *
 * ESP packets and UDP packets on the ISAKMP NAT-traversal port are
 * deliberately left for the ESP layer / ip_input to match.
 */
void
if_ports_used_match_pkt(struct ifnet *ifp, struct __kern_packet *pkt)
{
	struct net_port_info npi = {};
	bool found = false;

	/* Only packets explicitly marked as the wake packet are considered */
	if ((pkt->pkt_pflags & PKT_F_WAKE_PKT) == 0) {
		if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
		os_log_error(OS_LOG_DEFAULT, "%s: called PKT_F_WAKE_PKT not set from %s",
		    __func__, ifp != NULL ? ifp->if_xname : "");
		return;
	}
	if (ifp == NULL) {
		goto failed;
	}

	if_ports_used_stats.ifpu_ch_match_wake_pkt += 1;

	npi.npi_if_index = ifp->if_index;
	if (IFNET_IS_COMPANION_LINK(ifp)) {
		npi.npi_flags |= NPIF_COMPLINK;
	}
	npi.npi_flags |= NPIF_CHANNEL; /* For logging */
	switch (pkt->pkt_flow_ip_ver) {
	case IPVERSION:
		if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;

		npi.npi_flags |= NPIF_IPV4;
		/* local = destination, foreign = source, for an inbound packet */
		npi.npi_local_addr_in = pkt->pkt_flow_ipv4_dst;
		npi.npi_foreign_addr_in = pkt->pkt_flow_ipv4_src;
		break;
	case IPV6_VERSION:
		if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;

		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6, &pkt->pkt_flow_ipv6_dst,
		    sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6, &pkt->pkt_flow_ipv6_src,
		    sizeof(struct in6_addr));
		break;
	default:
		if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;

		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol family %u from %s",
		    __func__, pkt->pkt_flow_ip_ver, ifp->if_xname);
		goto failed;
	}

	/*
	 * Check if this is a fragment that is not the first fragment
	 */
	if (pkt->pkt_flow_ip_is_frag && !pkt->pkt_flow_ip_is_first_frag) {
		os_log(OS_LOG_DEFAULT, "%s: unexpected wake fragment from %s",
		    __func__, ifp->if_xname);
		npi.npi_flags |= NPIF_FRAG;
		if_ports_used_stats.ifpu_frag_wake_pkt += 1;
	}

	switch (pkt->pkt_flow_ip_proto) {
	case IPPROTO_TCP: {
		if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
		npi.npi_flags |= NPIF_TCP;

		/*
		 * Cannot attribute a fragment that is not the first
		 * fragment as it does not have the TCP header
		 */
		if (npi.npi_flags & NPIF_FRAG) {
			goto failed;
		}
		struct tcphdr *tcp = (struct tcphdr *)pkt->pkt_flow_tcp_hdr;
		if (tcp == NULL) {
			os_log(OS_LOG_DEFAULT, "%s: pkt with unassigned TCP header from %s",
			    __func__, ifp->if_xname);
			if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
			goto failed;
		}
		npi.npi_local_port = tcp->th_dport;
		npi.npi_foreign_port = tcp->th_sport;
		break;
	}
	case IPPROTO_UDP: {
		if_ports_used_stats.ifpu_udp_wake_pkt += 1;
		npi.npi_flags |= NPIF_UDP;

		/*
		 * Cannot attribute a fragment that is not the first
		 * fragment as it does not have the UDP header
		 */
		if (npi.npi_flags & NPIF_FRAG) {
			goto failed;
		}
		struct udphdr *uh = (struct udphdr *)pkt->pkt_flow_udp_hdr;
		if (uh == NULL) {
			os_log(OS_LOG_DEFAULT, "%s: pkt with unassigned UDP header from %s",
			    __func__, ifp->if_xname);
			if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
			goto failed;
		}
		npi.npi_local_port = uh->uh_dport;
		npi.npi_foreign_port = uh->uh_sport;

		/*
		 * Defer matching of UDP NAT traversal to ip_input
		 * (assumes IKE uses sockets)
		 */
		if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
		    ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
			if_ports_used_stats.ifpu_deferred_isakmp_natt_wake_pkt += 1;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
			}
			return;
		}
		break;
	}
	case IPPROTO_ESP: {
		/*
		 * Let the ESP layer handle the wake packet
		 */
		if_ports_used_stats.ifpu_esp_wake_pkt += 1;
		npi.npi_flags |= NPIF_ESP;
		if (net_wake_pkt_debug > 0) {
			net_port_info_log_npi("defer ESP matching", &npi);
		}
		return;
	}
	default:
		if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;

		os_log(OS_LOG_DEFAULT, "%s: unexpected IP protocol %u from %s",
		    __func__, pkt->pkt_flow_ip_proto, ifp->if_xname);
		goto failed;
	}

	found = net_port_info_find_match(&npi);
	if (found) {
		if_notify_wake_packet(ifp, &npi);
	} else {
		if_notify_unattributed_wake_pkt(ifp, pkt, &npi);
	}
	return;
failed:
	/* Whatever was parsed into npi so far goes into the unattributed event */
	if_notify_unattributed_wake_pkt(ifp, pkt, &npi);
}
1684 #endif /* SKYWALK */
1685