xref: /xnu-11417.140.69/bsd/net/if_ports_used.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2017-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/types.h>
30 #include <sys/time.h>
31 #include <sys/mcache.h>
32 #include <sys/malloc.h>
33 #include <sys/kauth.h>
34 #include <sys/kern_event.h>
35 #include <sys/bitstring.h>
36 #include <sys/priv.h>
37 #include <sys/proc.h>
38 #include <sys/protosw.h>
39 #include <sys/socket.h>
40 
41 #include <kern/locks.h>
42 #include <kern/zalloc.h>
43 
44 #include <libkern/libkern.h>
45 
46 #include <net/kpi_interface.h>
47 #include <net/if_var.h>
48 #include <net/if_ports_used.h>
49 #include <net/net_sysctl.h>
50 
51 #include <netinet/in_pcb.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip6.h>
54 #include <netinet/tcp_var.h>
55 #include <netinet/tcp_fsm.h>
56 #include <netinet/udp.h>
57 
58 #if SKYWALK
59 #include <skywalk/os_skywalk_private.h>
60 #include <skywalk/nexus/flowswitch/flow/flow_var.h>
61 #include <skywalk/namespace/netns.h>
62 #endif /* SKYWALK */
63 
64 #include <stdbool.h>
65 
66 #include <os/log.h>
67 
68 #include <IOKit/IOBSD.h>
69 
70 #define ESP_HDR_SIZE 4
71 #define PORT_ISAKMP 500
72 #define PORT_ISAKMP_NATT 4500   /* rfc3948 */
73 
74 #define IF_XNAME(ifp) ((ifp) != NULL ? (ifp)->if_xname : (const char * __null_terminated)"")
75 
76 extern bool IOPMCopySleepWakeUUIDKey(char *buffer, size_t buf_len);
77 
78 SYSCTL_DECL(_net_link_generic_system);
79 
80 SYSCTL_NODE(_net_link_generic_system, OID_AUTO, port_used,
81     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "if port used");
82 
83 struct if_ports_used_stats if_ports_used_stats = {};
84 static int sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS;
85 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, stats,
86     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
87     sysctl_if_ports_used_stats, "S,struct if_ports_used_stats", "");
88 
89 static uuid_t current_wakeuuid;
90 SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, current_wakeuuid,
91     CTLFLAG_RD | CTLFLAG_LOCKED,
92     current_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");
93 
94 static int sysctl_net_port_info_list SYSCTL_HANDLER_ARGS;
95 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, list,
96     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
97     sysctl_net_port_info_list, "S,xnpigen", "");
98 
99 static int use_test_wakeuuid = 0;
100 static uuid_t test_wakeuuid;
101 
102 #if (DEVELOPMENT || DEBUG)
103 SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, use_test_wakeuuid,
104     CTLFLAG_RW | CTLFLAG_LOCKED,
105     &use_test_wakeuuid, 0, "");
106 
107 int sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS;
108 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, new_test_wakeuuid,
109     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
110     sysctl_new_test_wakeuuid, "S,uuid_t", "");
111 
112 int sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS;
113 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, clear_test_wakeuuid,
114     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
115     sysctl_clear_test_wakeuuid, "S,uuid_t", "");
116 
117 SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, test_wakeuuid,
118     CTLFLAG_RD | CTLFLAG_LOCKED,
119     test_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");
120 #endif /* (DEVELOPMENT || DEBUG) */
121 
122 static int sysctl_get_ports_used SYSCTL_HANDLER_ARGS;
123 SYSCTL_NODE(_net_link_generic_system, OID_AUTO, get_ports_used,
124     CTLFLAG_RD | CTLFLAG_LOCKED,
125     sysctl_get_ports_used, "");
126 
127 int if_ports_used_verbose = 0;
128 SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, verbose,
129     CTLFLAG_RW | CTLFLAG_LOCKED,
130     &if_ports_used_verbose, 0, "");
131 
132 struct timeval wakeuuid_not_set_last_time;
133 int sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS;
134 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
135     wakeuuid_not_set_last_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
136     0, 0, sysctl_wakeuuid_not_set_last_time, "S,timeval", "");
137 
138 char wakeuuid_not_set_last_if[IFXNAMSIZ];
139 int sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS;
140 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
141     wakeuuid_not_set_last_if, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
142     0, 0, sysctl_wakeuuid_not_set_last_if, "A", "");
143 
144 struct timeval wakeuuid_last_update_time;
145 int sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS;
146 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
147     wakeuuid_last_update_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
148     0, 0, sysctl_wakeuuid_last_update_time, "S,timeval", "");
149 
/*
 * State describing the physical interface that received the last wake packet.
 * Reset on each new wake UUID (see if_ports_used_update_wakeuuid()).
 * Protected by net_port_entry_head_lock.
 */
static bool            last_wake_phy_if_set = false;
static char            last_wake_phy_if_name[IFNAMSIZ]; /* name + unit */
static uint32_t        last_wake_phy_if_family;
static uint32_t        last_wake_phy_if_subfamily;
static uint32_t        last_wake_phy_if_functional_type;
static bool            last_wake_phy_if_delay_wake_pkt = false;

/* Whether a (un)attributed wake event has already been posted for this wake cycle */
static bool has_notified_wake_pkt = false;
static bool has_notified_unattributed_wake = false;

/* Lock protecting the net_port_entry list, hash table and wake state above */
static LCK_GRP_DECLARE(net_port_entry_head_lock_group, "net port entry lock");
static LCK_MTX_DECLARE(net_port_entry_head_lock, &net_port_entry_head_lock_group);
162 
163 
/*
 * A single port-in-use record: linked both on the flat list (for dumps)
 * and on a per-local-port hash chain (for duplicate detection).
 */
struct net_port_entry {
	SLIST_ENTRY(net_port_entry)     npe_list_next;  /* net_port_entry_list linkage */
	TAILQ_ENTRY(net_port_entry)     npe_hash_next;  /* NPE_HASH_HEAD() chain linkage */
	struct net_port_info            npe_npi;        /* the exported port info payload */
};

static KALLOC_TYPE_DEFINE(net_port_entry_zone, struct net_port_entry, NET_KT_DEFAULT);

static SLIST_HEAD(net_port_entry_list, net_port_entry) net_port_entry_list =
    SLIST_HEAD_INITIALIZER(&net_port_entry_list);

/*
 * Uptime of the last wake UUID check.
 * NOTE(review): "wakeuiid" is a long-standing identifier typo; it is used
 * consistently across this file so renaming would touch every consumer.
 */
struct timeval wakeuiid_last_check;
176 
177 
178 #if (DEBUG | DEVELOPMENT)
179 static int64_t npi_search_list_total = 0;
180 SYSCTL_QUAD(_net_link_generic_system_port_used, OID_AUTO, npi_search_list_total,
181     CTLFLAG_RD | CTLFLAG_LOCKED,
182     &npi_search_list_total, "");
183 
184 static int64_t npi_search_list_max = 0;
185 SYSCTL_QUAD(_net_link_generic_system_port_used, OID_AUTO, npi_search_list_max,
186     CTLFLAG_RD | CTLFLAG_LOCKED,
187     &npi_search_list_max, "");
188 #endif /* (DEBUG | DEVELOPMENT) */
189 
190 /*
191  * Hashing of the net_port_entry list is based on the local port
192  *
 * The hash mask uses the least significant bits so we have to use host byte order
 * when applying the mask because the LSB have more entropy than the MSB (most local ports
195  * are in the high dynamic port range)
196  */
197 #define NPE_HASH_BUCKET_COUNT 32
198 #define NPE_HASH_MASK (NPE_HASH_BUCKET_COUNT - 1)
199 #define NPE_HASH_VAL(_lport) (ntohs(_lport) & NPE_HASH_MASK)
200 #define NPE_HASH_HEAD(_lport) (&net_port_entry_hash_table[NPE_HASH_VAL(_lport)])
201 
202 static TAILQ_HEAD(net_port_entry_hash_table, net_port_entry) * __indexable net_port_entry_hash_table = NULL;
203 
204 /*
 * For some types of physical interface we need to delay the notification of wake packet event
 * until a user land interface controller confirms the wake was caused by its packet
207  */
/*
 * Saved copy of the last wake packet event so it can be (re)delivered once a
 * user-land interface controller confirms the wake attribution.
 */
struct net_port_info_wake_pkt_event {
	uint32_t                npi_wp_code;   /* kernel event code to post */
	uint32_t                npi_wp_flags;  /* event flags */
	union {
		/* exactly one member is valid, selected by npi_wp_code */
		struct net_port_info_wake_event _npi_ev_wake_pkt_attributed;
		struct net_port_info_una_wake_event _npi_ev_wake_pkt_unattributed;
	} npi_ev_wake_pkt_;
};
216 
217 #define npi_ev_wake_pkt_attributed npi_ev_wake_pkt_._npi_ev_wake_pkt_attributed
218 #define npi_ev_wake_pkt_unattributed npi_ev_wake_pkt_._npi_ev_wake_pkt_unattributed
219 
220 int sysctl_wake_pkt_event_notify SYSCTL_HANDLER_ARGS;
221 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, wake_pkt_event_notify,
222     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY, 0, 0,
223     sysctl_wake_pkt_event_notify, "I", "");
224 
225 /* Bitmap of the interface families to delay the notification of wake packet events */
226 static uint32_t npi_wake_packet_event_delay_if_families = 0;
227 
228 /* How many interfaces families are supported */
229 #define NPI_MAX_IF_FAMILY_BITS 32
230 
231 int sysctl_wake_pkt_event_delay_if_families SYSCTL_HANDLER_ARGS;
232 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, wake_pkt_event_delay_if_families,
233     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0,
234     sysctl_wake_pkt_event_delay_if_families, "I", "");
235 
236 
237 static struct net_port_info_wake_pkt_event last_wake_pkt_event;
238 
239 int sysctl_last_attributed_wake_event SYSCTL_HANDLER_ARGS;
240 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
241     last_attributed_wake_event, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
242     0, 0, sysctl_last_attributed_wake_event, "S,net_port_info_wake_event", "");
243 
244 int sysctl_last_unattributed_wake_event SYSCTL_HANDLER_ARGS;
245 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
246     last_unattributed_wake_event, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
247     0, 0, sysctl_last_unattributed_wake_event, "S,net_port_info_una_wake_event", "");
248 
/*
 * Allocate the net_port_entry hash bucket table.
 *
 * NOTE(review): the previous comment said "IPv4 source address hash table",
 * which looks stale — this table is keyed on the local port (see NPE_HASH_VAL).
 *
 * Idempotent; the zalloc_permanent() allocation is never freed.
 */
void
if_ports_used_init(void)
{
	if (net_port_entry_hash_table != NULL) {
		/* Already initialized */
		return;
	}

	net_port_entry_hash_table = zalloc_permanent(
		NPE_HASH_BUCKET_COUNT * sizeof(*net_port_entry_hash_table),
		ZALIGN_PTR);
}
263 
/*
 * Free every net_port_entry, emptying both the flat list and all hash chains,
 * then reset the exported entry count and bump the generation counter so
 * user-space readers can detect that the list was rebuilt.
 *
 * Caller must hold net_port_entry_head_lock.
 */
static void
net_port_entry_list_clear(void)
{
	struct net_port_entry *npe;

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);

	while ((npe = SLIST_FIRST(&net_port_entry_list)) != NULL) {
		SLIST_REMOVE_HEAD(&net_port_entry_list, npe_list_next);
		/* Each entry is on exactly one hash chain, keyed by local port */
		TAILQ_REMOVE(NPE_HASH_HEAD(npe->npe_npi.npi_local_port), npe, npe_hash_next);

		zfree(net_port_entry_zone, npe);
	}

	/* Draining the flat list must have emptied every hash bucket */
	for (int i = 0; i < NPE_HASH_BUCKET_COUNT; i++) {
		VERIFY(TAILQ_EMPTY(&net_port_entry_hash_table[i]));
	}

	if_ports_used_stats.ifpu_npe_count = 0;
	if_ports_used_stats.ifpu_wakeuid_gen++;
}
285 
286 static bool
get_test_wake_uuid(uuid_string_t wakeuuid_str)287 get_test_wake_uuid(uuid_string_t wakeuuid_str)
288 {
289 	if (!uuid_is_null(test_wakeuuid)) {
290 		if (wakeuuid_str != NULL) {
291 			uuid_unparse(test_wakeuuid, wakeuuid_str);
292 		}
293 		return true;
294 	}
295 
296 	return false;
297 }
298 
299 static bool
is_wakeuuid_set(void)300 is_wakeuuid_set(void)
301 {
302 	if (__improbable(use_test_wakeuuid) && !uuid_is_null(test_wakeuuid)) {
303 		return true;
304 	}
305 
306 	/*
307 	 * IOPMCopySleepWakeUUIDKey() tells if SleepWakeUUID is currently set
308 	 * That means we are currently in a sleep/wake cycle
309 	 */
310 	return IOPMCopySleepWakeUUIDKey(NULL, 0);
311 }
312 
/*
 * Refresh the cached wake UUID from power management (or the test UUID).
 * When the UUID changed since the last call, flush the port-entry list and
 * reset all per-wake-cycle state.
 *
 * ifp: interface that triggered the check; may be NULL (e.g. from the test
 *      sysctls), in which case the "UUID not set" bookkeeping is skipped.
 */
void
if_ports_used_update_wakeuuid(struct ifnet *ifp)
{
	uuid_t wakeuuid;
	bool wakeuuid_is_set = false;
	bool updated = false;
	uuid_string_t wakeuuid_str;

	uuid_clear(wakeuuid);

	/* wakeuuid_str is written only when the corresponding call reports "set" */
	if (__improbable(use_test_wakeuuid)) {
		wakeuuid_is_set = get_test_wake_uuid(wakeuuid_str);
	} else {
		wakeuuid_is_set = IOPMCopySleepWakeUUIDKey(wakeuuid_str,
		    sizeof(wakeuuid_str));
	}

	if (wakeuuid_is_set) {
		/* Treat an unparseable UUID string the same as "not set" */
		if (uuid_parse(wakeuuid_str, wakeuuid) != 0) {
			os_log(OS_LOG_DEFAULT,
			    "%s: IOPMCopySleepWakeUUIDKey got bad value %s\n",
			    __func__, wakeuuid_str);
			wakeuuid_is_set = false;
		}
	}

	if (!wakeuuid_is_set) {
		/* Only record the miss when attributable to an interface */
		if (ifp != NULL) {
			if (if_ports_used_verbose > 0) {
				os_log_info(OS_LOG_DEFAULT,
				    "%s: SleepWakeUUID not set, "
				    "don't update the port list for %s\n",
				    __func__, ifp != NULL ? if_name(ifp) : "");
			}
			if_ports_used_stats.ifpu_wakeuuid_not_set_count += 1;
			microtime(&wakeuuid_not_set_last_time);
			strlcpy(wakeuuid_not_set_last_if, if_name(ifp),
			    sizeof(wakeuuid_not_set_last_if));
		}
		return;
	}

	lck_mtx_lock(&net_port_entry_head_lock);
	if (uuid_compare(wakeuuid, current_wakeuuid) != 0) {
		/* A still-delayed wake event from the previous cycle is now lost */
		if (last_wake_phy_if_delay_wake_pkt) {
			if_ports_used_stats.ifpu_delayed_wake_event_undelivered++;
		}

		/* New wake cycle: rebuild the port list from scratch */
		net_port_entry_list_clear();
		uuid_copy(current_wakeuuid, wakeuuid);
		microtime(&wakeuuid_last_update_time);
		updated = true;

		has_notified_wake_pkt = false;
		has_notified_unattributed_wake = false;

		memset(&last_wake_pkt_event, 0, sizeof(last_wake_pkt_event));

		last_wake_phy_if_set = false;
		memset(&last_wake_phy_if_name, 0, sizeof(last_wake_phy_if_name));
		last_wake_phy_if_family = IFRTYPE_FAMILY_ANY;
		last_wake_phy_if_subfamily = IFRTYPE_SUBFAMILY_ANY;
		last_wake_phy_if_functional_type = IFRTYPE_FUNCTIONAL_UNKNOWN;
		last_wake_phy_if_delay_wake_pkt = false;
	}
	/*
	 * Record the time last checked
	 */
	microuptime(&wakeuiid_last_check);
	lck_mtx_unlock(&net_port_entry_head_lock);

	if (updated && if_ports_used_verbose > 0) {
		uuid_string_t uuid_str;

		uuid_unparse(current_wakeuuid, uuid_str);
		os_log(OS_LOG_DEFAULT, "%s: current wakeuuid %s",
		    __func__, uuid_str);
	}
}
392 
393 static bool
net_port_info_equal(const struct net_port_info * x,const struct net_port_info * y)394 net_port_info_equal(const struct net_port_info *x,
395     const struct net_port_info *y)
396 {
397 	ASSERT(x != NULL && y != NULL);
398 
399 	if (x->npi_if_index == y->npi_if_index &&
400 	    x->npi_local_port == y->npi_local_port &&
401 	    x->npi_foreign_port == y->npi_foreign_port &&
402 	    x->npi_owner_pid == y->npi_owner_pid &&
403 	    x->npi_effective_pid == y->npi_effective_pid &&
404 	    x->npi_flags == y->npi_flags &&
405 	    memcmp(&x->npi_local_addr_, &y->npi_local_addr_,
406 	    sizeof(union in_addr_4_6)) == 0 &&
407 	    memcmp(&x->npi_foreign_addr_, &y->npi_foreign_addr_,
408 	    sizeof(union in_addr_4_6)) == 0) {
409 		return true;
410 	}
411 	return false;
412 }
413 
414 static bool
net_port_info_has_entry(const struct net_port_info * npi)415 net_port_info_has_entry(const struct net_port_info *npi)
416 {
417 	struct net_port_entry *npe;
418 	bool found = false;
419 	int32_t count = 0;
420 
421 	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);
422 
423 	TAILQ_FOREACH(npe, NPE_HASH_HEAD(npi->npi_local_port), npe_hash_next) {
424 		count += 1;
425 		if (net_port_info_equal(&npe->npe_npi, npi)) {
426 			found = true;
427 			break;
428 		}
429 	}
430 	if_ports_used_stats.ifpu_npi_hash_search_total += count;
431 	if (count > if_ports_used_stats.ifpu_npi_hash_search_max) {
432 		if_ports_used_stats.ifpu_npi_hash_search_max = count;
433 	}
434 
435 	return found;
436 }
437 
438 static bool
net_port_info_add_entry(const struct net_port_info * npi)439 net_port_info_add_entry(const struct net_port_info *npi)
440 {
441 	struct net_port_entry   *npe = NULL;
442 	uint32_t num = 0;
443 	bool entry_added = false;
444 
445 	ASSERT(npi != NULL);
446 
447 	if (__improbable(is_wakeuuid_set() == false)) {
448 		if_ports_used_stats.ifpu_npi_not_added_no_wakeuuid++;
449 		if (if_ports_used_verbose > 0) {
450 			os_log(OS_LOG_DEFAULT, "%s: wakeuuid not set not adding "
451 			    "port: %u flags: 0x%xif: %u pid: %u epid %u",
452 			    __func__,
453 			    ntohs(npi->npi_local_port),
454 			    npi->npi_flags,
455 			    npi->npi_if_index,
456 			    npi->npi_owner_pid,
457 			    npi->npi_effective_pid);
458 		}
459 		return false;
460 	}
461 
462 	npe = zalloc_flags(net_port_entry_zone, Z_WAITOK | Z_ZERO);
463 	if (__improbable(npe == NULL)) {
464 		os_log(OS_LOG_DEFAULT, "%s: zalloc() failed for "
465 		    "port: %u flags: 0x%x if: %u pid: %u epid %u",
466 		    __func__,
467 		    ntohs(npi->npi_local_port),
468 		    npi->npi_flags,
469 		    npi->npi_if_index,
470 		    npi->npi_owner_pid,
471 		    npi->npi_effective_pid);
472 		return false;
473 	}
474 
475 	memcpy(&npe->npe_npi, npi, sizeof(npe->npe_npi));
476 
477 	lck_mtx_lock(&net_port_entry_head_lock);
478 
479 	if (net_port_info_has_entry(npi) == false) {
480 		SLIST_INSERT_HEAD(&net_port_entry_list, npe, npe_list_next);
481 		TAILQ_INSERT_HEAD(NPE_HASH_HEAD(npi->npi_local_port), npe, npe_hash_next);
482 		num = (uint32_t)if_ports_used_stats.ifpu_npe_count++; /* rollover OK */
483 		entry_added = true;
484 
485 		if (if_ports_used_stats.ifpu_npe_count > if_ports_used_stats.ifpu_npe_max) {
486 			if_ports_used_stats.ifpu_npe_max = if_ports_used_stats.ifpu_npe_count;
487 		}
488 		if_ports_used_stats.ifpu_npe_total++;
489 
490 		if (if_ports_used_verbose > 1) {
491 			os_log(OS_LOG_DEFAULT, "%s: num %u for "
492 			    "port: %u flags: 0x%x if: %u pid: %u epid %u",
493 			    __func__,
494 			    num,
495 			    ntohs(npi->npi_local_port),
496 			    npi->npi_flags,
497 			    npi->npi_if_index,
498 			    npi->npi_owner_pid,
499 			    npi->npi_effective_pid);
500 		}
501 	} else {
502 		if_ports_used_stats.ifpu_npe_dup++;
503 		if (if_ports_used_verbose > 2) {
504 			os_log(OS_LOG_DEFAULT, "%s: already added "
505 			    "port: %u flags: 0x%x if: %u pid: %u epid %u",
506 			    __func__,
507 			    ntohs(npi->npi_local_port),
508 			    npi->npi_flags,
509 			    npi->npi_if_index,
510 			    npi->npi_owner_pid,
511 			    npi->npi_effective_pid);
512 		}
513 	}
514 
515 	lck_mtx_unlock(&net_port_entry_head_lock);
516 
517 	if (entry_added == false) {
518 		zfree(net_port_entry_zone, npe);
519 	}
520 	return entry_added;
521 }
522 
523 #if (DEVELOPMENT || DEBUG)
/*
 * DEVELOPMENT/DEBUG only: writing anything generates a fresh test wake UUID
 * and refreshes the wake state; reading returns the current test UUID.
 * Root only.
 */
int
sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}
	/* Size probe: report the required buffer length */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(uuid_t);
		return 0;
	}
	if (req->newptr != USER_ADDR_NULL) {
		uuid_generate(test_wakeuuid);
		if_ports_used_update_wakeuuid(NULL);
	}
	error = SYSCTL_OUT(req, test_wakeuuid,
	    MIN(sizeof(uuid_t), req->oldlen));

	return error;
}
546 
/*
 * DEVELOPMENT/DEBUG only: writing anything clears the test wake UUID and
 * refreshes the wake state; reading returns the (now null) test UUID.
 * Root only.
 */
int
sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}
	/* Size probe: report the required buffer length */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(uuid_t);
		return 0;
	}
	if (req->newptr != USER_ADDR_NULL) {
		uuid_clear(test_wakeuuid);
		if_ports_used_update_wakeuuid(NULL);
	}
	error = SYSCTL_OUT(req, test_wakeuuid,
	    MIN(sizeof(uuid_t), req->oldlen));

	return error;
}
569 
570 #endif /* (DEVELOPMENT || DEBUG) */
571 
572 static int
sysctl_timeval(struct sysctl_req * req,const struct timeval * tv)573 sysctl_timeval(struct sysctl_req *req, const struct timeval *tv)
574 {
575 	if (proc_is64bit(req->p)) {
576 		struct user64_timeval tv64 = {};
577 
578 		tv64.tv_sec = tv->tv_sec;
579 		tv64.tv_usec = tv->tv_usec;
580 		return SYSCTL_OUT(req, &tv64, sizeof(tv64));
581 	} else {
582 		struct user32_timeval tv32 = {};
583 
584 		tv32.tv_sec = (user32_time_t)tv->tv_sec;
585 		tv32.tv_usec = tv->tv_usec;
586 		return SYSCTL_OUT(req, &tv32, sizeof(tv32));
587 	}
588 }
589 
/* Read-only: time the current wake UUID was last updated. */
int
sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return sysctl_timeval(req, &wakeuuid_last_update_time);
}
597 
/* Read-only: last time a port-list update was skipped for lack of a wake UUID. */
int
sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return sysctl_timeval(req, &wakeuuid_not_set_last_time);
}
605 
/*
 * Read-only: name of the interface seen when the wake UUID was last found
 * unset. Exports the string including its NUL terminator.
 */
int
sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return SYSCTL_OUT(req, &wakeuuid_not_set_last_if, strbuflen(wakeuuid_not_set_last_if) + 1);
}
613 
614 int
615 sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS
616 {
617 #pragma unused(oidp, arg1, arg2)
618 	size_t len = sizeof(struct if_ports_used_stats);
619 
620 	if (req->oldptr != 0) {
621 		len = MIN(req->oldlen, sizeof(struct if_ports_used_stats));
622 	}
623 	return SYSCTL_OUT(req, &if_ports_used_stats, len);
624 }
625 
/*
 * Export the full list of net_port_info records, preceded by an xnpigen
 * header carrying the generation count and wake UUID. Requires the
 * privileged-network-statistics entitlement.
 */
static int
sysctl_net_port_info_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	struct xnpigen xnpigen;
	struct net_port_entry *npe;

	if ((error = priv_check_cred(kauth_cred_get(),
	    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0)) != 0) {
		return EPERM;
	}
	lck_mtx_lock(&net_port_entry_head_lock);

	if (req->oldptr == USER_ADDR_NULL) {
		/*
		 * Size probe: add a cushion for entries added before the
		 * follow-up read. NOTE(review): cnt >> 4 adds ~6%, but the
		 * original comment said "25% cushion" (which would be
		 * cnt >> 2) — confirm which was intended.
		 */
		size_t cnt = (size_t)if_ports_used_stats.ifpu_npe_count;
		cnt += cnt >> 4;
		req->oldidx = sizeof(struct xnpigen) +
		    cnt * sizeof(struct net_port_info);
		goto done;
	}

	/* Header first: length, generation, wake UUID, record count/size */
	memset(&xnpigen, 0, sizeof(struct xnpigen));
	xnpigen.xng_len = sizeof(struct xnpigen);
	xnpigen.xng_gen = (uint32_t)if_ports_used_stats.ifpu_wakeuid_gen;
	uuid_copy(xnpigen.xng_wakeuuid, current_wakeuuid);
	xnpigen.xng_npi_count = (uint32_t)if_ports_used_stats.ifpu_npe_count;
	xnpigen.xng_npi_size = sizeof(struct net_port_info);
	error = SYSCTL_OUT(req, &xnpigen, sizeof(xnpigen));
	if (error != 0) {
		printf("%s: SYSCTL_OUT(xnpigen) error %d\n",
		    __func__, error);
		goto done;
	}

	SLIST_FOREACH(npe, &net_port_entry_list, npe_list_next) {
		error = SYSCTL_OUT(req, &npe->npe_npi,
		    sizeof(struct net_port_info));
		if (error != 0) {
			printf("%s: SYSCTL_OUT(npi) error %d\n",
			    __func__, error);
			goto done;
		}
	}
done:
	lck_mtx_unlock(&net_port_entry_head_lock);

	return error;
}
676 
677 /*
678  * Mirror the arguments of ifnet_get_local_ports_extended()
679  *  ifindex
680  *  protocol
681  *  flags
682  */
/*
 * Return the bitmap of local ports in use on an interface, as computed by
 * ifnet_get_local_ports_extended(). The OID name carries three integers:
 * ifindex, protocol and flags. Read-only: any write attempt returns EPERM.
 */
static int
sysctl_get_ports_used SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	/*
	 * 3 is the required number of parameters: ifindex, protocol and flags
	 */
	DECLARE_SYSCTL_HANDLER_ARG_ARRAY(int, 3, name, namelen);
	int error = 0;
	int idx;
	protocol_family_t protocol;
	u_int32_t flags;
	ifnet_t ifp = NULL;
	u_int8_t *bitfield = NULL;

	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}

	/* Size probe: one bit per possible port */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = bitstr_size(IP_PORTRANGE_SIZE);
		goto done;
	}
	if (req->oldlen < bitstr_size(IP_PORTRANGE_SIZE)) {
		error = ENOMEM;
		goto done;
	}
	bitfield = (u_int8_t *) kalloc_data(bitstr_size(IP_PORTRANGE_SIZE),
	    Z_WAITOK | Z_ZERO);
	if (bitfield == NULL) {
		error = ENOMEM;
		goto done;
	}

	idx = name[0];
	protocol = name[1];
	flags = name[2];

	/* Translate ifindex to ifnet; an out-of-range index leaves ifp NULL */
	ifnet_head_lock_shared();
	if (IF_INDEX_IN_RANGE(idx)) {
		ifp = ifindex2ifnet[idx];
	}
	ifnet_head_done();

	error = ifnet_get_local_ports_extended(ifp, protocol, flags, bitfield);
	if (error != 0) {
		printf("%s: ifnet_get_local_ports_extended() error %d\n",
		    __func__, error);
		goto done;
	}
	error = SYSCTL_OUT(req, bitfield, bitstr_size(IP_PORTRANGE_SIZE));
done:
	if (bitfield != NULL) {
		kfree_data(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
	}
	return error;
}
741 
/*
 * Build a net_port_info record from a socket PCB and add it to the
 * port-in-use list for the current wake cycle.
 *
 * ifindex: interface to attribute the entry to; 0 falls back to the PCB's
 *          last output interface.
 * Returns true when a new entry was added.
 */
__private_extern__ bool
if_ports_used_add_inpcb(const uint32_t ifindex, const struct inpcb *inp)
{
	struct net_port_info npi = {};
	struct socket *so = inp->inp_socket;

	/* This is unlikely to happen but better be safe than sorry */
	if (ifindex > UINT16_MAX) {
		os_log(OS_LOG_DEFAULT, "%s: ifindex %u too big", __func__, ifindex);
		return false;
	}

	if (ifindex != 0) {
		npi.npi_if_index = (uint16_t)ifindex;
	} else if (inp->inp_last_outifp != NULL) {
		npi.npi_if_index = (uint16_t)inp->inp_last_outifp->if_index;
	}
	/* Flag entries on companion-link interfaces */
	if (IF_INDEX_IN_RANGE(npi.npi_if_index)) {
		struct ifnet *ifp = ifindex2ifnet[npi.npi_if_index];
		if (ifp != NULL && IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
	}

	npi.npi_flags |= NPIF_SOCKET;

	/* Stamp with the time of the last wake UUID check */
	npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
	npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

	if (so->so_options & SO_NOWAKEFROMSLEEP) {
		npi.npi_flags |= NPIF_NOWAKE;
	}

	/* Only TCP and UDP sockets are tracked */
	if (SOCK_PROTO(so) == IPPROTO_TCP) {
		struct tcpcb *tp = intotcpcb(inp);

		npi.npi_flags |= NPIF_TCP;
		if (tp != NULL && tp->t_state == TCPS_LISTEN) {
			npi.npi_flags |= NPIF_LISTEN;
		}
	} else if (SOCK_PROTO(so) == IPPROTO_UDP) {
		npi.npi_flags |= NPIF_UDP;
	} else {
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol %u for inp %p", __func__,
		    SOCK_PROTO(inp->inp_socket), inp);
		return false;
	}

	uuid_copy(npi.npi_flow_uuid, inp->necp_client_uuid);

	/* Ports are kept in network byte order */
	npi.npi_local_port = inp->inp_lport;
	npi.npi_foreign_port = inp->inp_fport;

	/*
	 * Take in account IPv4 addresses mapped on IPv6
	 */
	if ((inp->inp_vflag & INP_IPV6) != 0 && (inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
	    (inp->inp_vflag & (INP_IPV6 | INP_IPV4)) == (INP_IPV6 | INP_IPV4)) {
		npi.npi_flags |= NPIF_IPV6 | NPIF_IPV4;
		memcpy(&npi.npi_local_addr_in6,
		    &inp->in6p_laddr, sizeof(struct in6_addr));
	} else if (inp->inp_vflag & INP_IPV4) {
		npi.npi_flags |= NPIF_IPV4;
		npi.npi_local_addr_in = inp->inp_laddr;
		npi.npi_foreign_addr_in = inp->inp_faddr;
	} else {
		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6,
		    &inp->in6p_laddr, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6,
		    &inp->in6p_faddr, sizeof(struct in6_addr));

		/* Clear the embedded scope ID */
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
			npi.npi_local_addr_in6.s6_addr16[1] = 0;
		}
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
			npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
		}
	}

	npi.npi_owner_pid = so->last_pid;

	if (so->last_pid != 0) {
		proc_name(so->last_pid, npi.npi_owner_pname,
		    sizeof(npi.npi_owner_pname));
		uuid_copy(npi.npi_owner_uuid, so->last_uuid);
	}

	/* Effective identity: delegated process if any, otherwise the owner */
	if (so->so_flags & SOF_DELEGATED) {
		npi.npi_flags |= NPIF_DELEGATED;
		npi.npi_effective_pid = so->e_pid;
		if (so->e_pid != 0) {
			proc_name(so->e_pid, npi.npi_effective_pname,
			    sizeof(npi.npi_effective_pname));
		}
		uuid_copy(npi.npi_effective_uuid, so->e_uuid);
	} else {
		npi.npi_effective_pid = so->last_pid;
		if (so->last_pid != 0) {
			strbufcpy(npi.npi_effective_pname, npi.npi_owner_pname);
		}
		uuid_copy(npi.npi_effective_uuid, so->last_uuid);
	}

	return net_port_info_add_entry(&npi);
}
849 
850 #if SKYWALK
/*
 * Build a net_port_info record from a Skywalk flow entry and its namespace
 * flow info, then add it to the port-in-use list for the current wake cycle.
 *
 * Returns true when a new entry was added.
 */
__private_extern__ bool
if_ports_used_add_flow_entry(const struct flow_entry *fe, const uint32_t ifindex,
    const struct ns_flow_info *nfi, uint32_t ns_flags)
{
	struct net_port_info npi = {};

	/* This is unlikely to happen but better be safe than sorry */
	if (ifindex > UINT16_MAX) {
		os_log(OS_LOG_DEFAULT, "%s: ifindex %u too big", __func__, ifindex);
		return false;
	}
	npi.npi_if_index = (uint16_t)ifindex;
	/* Flag entries on companion-link interfaces */
	if (IF_INDEX_IN_RANGE(ifindex)) {
		struct ifnet *ifp = ifindex2ifnet[ifindex];
		if (ifp != NULL && IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
	}

	npi.npi_flags |= NPIF_CHANNEL;

	/* Stamp with the time of the last wake UUID check */
	npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
	npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

	if (ns_flags & NETNS_NOWAKEFROMSLEEP) {
		npi.npi_flags |= NPIF_NOWAKE;
	}
	if ((ns_flags & NETNS_OWNER_MASK) == NETNS_LISTENER) {
		npi.npi_flags |= NPIF_LISTEN;
	}

	uuid_copy(npi.npi_flow_uuid, nfi->nfi_flow_uuid);

	/* Only TCP and UDP flows are tracked */
	if (nfi->nfi_protocol == IPPROTO_TCP) {
		npi.npi_flags |= NPIF_TCP;
	} else if (nfi->nfi_protocol == IPPROTO_UDP) {
		npi.npi_flags |= NPIF_UDP;
	} else {
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol %u for nfi %p",
		    __func__, nfi->nfi_protocol, nfi);
		return false;
	}

	if (nfi->nfi_laddr.sa.sa_family == AF_INET) {
		npi.npi_flags |= NPIF_IPV4;

		npi.npi_local_port = nfi->nfi_laddr.sin.sin_port;
		npi.npi_foreign_port = nfi->nfi_faddr.sin.sin_port;

		npi.npi_local_addr_in = nfi->nfi_laddr.sin.sin_addr;
		npi.npi_foreign_addr_in = nfi->nfi_faddr.sin.sin_addr;
	} else {
		npi.npi_flags |= NPIF_IPV6;

		npi.npi_local_port = nfi->nfi_laddr.sin6.sin6_port;
		npi.npi_foreign_port = nfi->nfi_faddr.sin6.sin6_port;

		memcpy(&npi.npi_local_addr_in6,
		    &nfi->nfi_laddr.sin6.sin6_addr, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6,
		    &nfi->nfi_faddr.sin6.sin6_addr, sizeof(struct in6_addr));

		/* Clear the embedded scope ID */
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
			npi.npi_local_addr_in6.s6_addr16[1] = 0;
		}
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
			npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
		}
	}

	npi.npi_owner_pid = nfi->nfi_owner_pid;
	strbufcpy(npi.npi_owner_pname, nfi->nfi_owner_name);

	/*
	 * Get the proc UUID from the pid as the proc UUID is not present
	 * in the flow_entry
	 */
	proc_t proc = proc_find(npi.npi_owner_pid);
	if (proc != PROC_NULL) {
		proc_getexecutableuuid(proc, npi.npi_owner_uuid, sizeof(npi.npi_owner_uuid));
		proc_rele(proc);
	}
	/* Effective identity: nfi_effective_pid == -1 means "same as owner" */
	if (nfi->nfi_effective_pid != -1) {
		npi.npi_effective_pid = nfi->nfi_effective_pid;
		strbufcpy(npi.npi_effective_pname, nfi->nfi_effective_name);
		uuid_copy(npi.npi_effective_uuid, fe->fe_eproc_uuid);
	} else {
		npi.npi_effective_pid = npi.npi_owner_pid;
		strbufcpy(npi.npi_effective_pname, npi.npi_owner_pname);
		uuid_copy(npi.npi_effective_uuid, npi.npi_owner_uuid);
	}

	return net_port_info_add_entry(&npi);
}
946 
947 #endif /* SKYWALK */
948 
949 static void
net_port_info_log_npi(const char * s,const struct net_port_info * npi)950 net_port_info_log_npi(const char *s, const struct net_port_info *npi)
951 {
952 	char lbuf[MAX_IPv6_STR_LEN] = {};
953 	char fbuf[MAX_IPv6_STR_LEN] = {};
954 
955 	if (npi->npi_flags & NPIF_IPV4) {
956 		inet_ntop(PF_INET, &npi->npi_local_addr_in.s_addr,
957 		    lbuf, sizeof(lbuf));
958 		inet_ntop(PF_INET, &npi->npi_foreign_addr_in.s_addr,
959 		    fbuf, sizeof(fbuf));
960 	} else if (npi->npi_flags & NPIF_IPV6) {
961 		inet_ntop(PF_INET6, &npi->npi_local_addr_in6,
962 		    lbuf, sizeof(lbuf));
963 		inet_ntop(PF_INET6, &npi->npi_foreign_addr_in6,
964 		    fbuf, sizeof(fbuf));
965 	}
966 	os_log(OS_LOG_DEFAULT, "%s net_port_info if_index %u arch %s family %s proto %s local %s:%u foreign %s:%u pid: %u epid %u",
967 	    s != NULL ? s : "",
968 	    npi->npi_if_index,
969 	    (npi->npi_flags & NPIF_SOCKET) ? "so" : (npi->npi_flags & NPIF_CHANNEL) ? "ch" : "unknown",
970 	    (npi->npi_flags & NPIF_IPV4) ? "ipv4" : (npi->npi_flags & NPIF_IPV6) ? "ipv6" : "unknown",
971 	    npi->npi_flags & NPIF_TCP ? "tcp" : npi->npi_flags & NPIF_UDP ? "udp" :
972 	    npi->npi_flags & NPIF_ESP ? "esp" : "unknown",
973 	    lbuf, ntohs(npi->npi_local_port),
974 	    fbuf, ntohs(npi->npi_foreign_port),
975 	    npi->npi_owner_pid,
976 	    npi->npi_effective_pid);
977 }
978 
979 /*
980  * net_port_info_match_npi() returns true for an exact match that does not have "no wake" set
981  */
982 #define NPI_MATCH_IPV4 (NPIF_IPV4 | NPIF_TCP | NPIF_UDP)
983 #define NPI_MATCH_IPV6 (NPIF_IPV6 | NPIF_TCP | NPIF_UDP)
984 
985 static bool
net_port_info_match_npi(struct net_port_entry * npe,const struct net_port_info * in_npi,struct net_port_entry ** best_match)986 net_port_info_match_npi(struct net_port_entry *npe, const struct net_port_info *in_npi,
987     struct net_port_entry **best_match)
988 {
989 	if (__improbable(net_wake_pkt_debug > 1)) {
990 		net_port_info_log_npi("  ", &npe->npe_npi);
991 	}
992 
993 	/*
994 	 * The interfaces must match or be both companion link
995 	 */
996 	if (npe->npe_npi.npi_if_index != in_npi->npi_if_index &&
997 	    !((npe->npe_npi.npi_flags & NPIF_COMPLINK) && (in_npi->npi_flags & NPIF_COMPLINK))) {
998 		return false;
999 	}
1000 
1001 	/*
1002 	 * The local ports and protocols must match
1003 	 */
1004 	if (npe->npe_npi.npi_local_port != in_npi->npi_local_port ||
1005 	    ((npe->npe_npi.npi_flags & NPI_MATCH_IPV4) != (in_npi->npi_flags & NPI_MATCH_IPV4) &&
1006 	    (npe->npe_npi.npi_flags & NPI_MATCH_IPV6) != (in_npi->npi_flags & NPI_MATCH_IPV6))) {
1007 		return false;
1008 	}
1009 
1010 	/*
1011 	 * Search stops on an exact match
1012 	 */
1013 	if (npe->npe_npi.npi_foreign_port == in_npi->npi_foreign_port) {
1014 		if ((npe->npe_npi.npi_flags & NPIF_IPV4) && (npe->npe_npi.npi_flags & NPIF_IPV4)) {
1015 			if (in_npi->npi_local_addr_in.s_addr == npe->npe_npi.npi_local_addr_in.s_addr &&
1016 			    in_npi->npi_foreign_addr_in.s_addr == npe->npe_npi.npi_foreign_addr_in.s_addr) {
1017 				if (npe->npe_npi.npi_flags & NPIF_NOWAKE) {
1018 					/*
1019 					 * Do not overwrite an existing match when "no wake" is set
1020 					 */
1021 					if (*best_match == NULL) {
1022 						*best_match = npe;
1023 					}
1024 					return false;
1025 				}
1026 				*best_match = npe;
1027 				return true;
1028 			}
1029 		}
1030 		if ((npe->npe_npi.npi_flags & NPIF_IPV6) && (npe->npe_npi.npi_flags & NPIF_IPV6)) {
1031 			if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
1032 			    sizeof(union in_addr_4_6)) == 0 &&
1033 			    memcmp(&npe->npe_npi.npi_foreign_addr_, &in_npi->npi_foreign_addr_,
1034 			    sizeof(union in_addr_4_6)) == 0) {
1035 				if (npe->npe_npi.npi_flags & NPIF_NOWAKE) {
1036 					/*
1037 					 * Do not overwrite an existing match when "no wake" is set
1038 					 */
1039 					if (*best_match == NULL) {
1040 						*best_match = npe;
1041 					}
1042 					return false;
1043 				}
1044 				*best_match = npe;
1045 				return true;
1046 			}
1047 		}
1048 	}
1049 	/*
1050 	 * Skip connected entries as we are looking for a wildcard match
1051 	 * on the local address and port
1052 	 */
1053 	if (npe->npe_npi.npi_foreign_port != 0) {
1054 		return false;
1055 	}
1056 	/*
1057 	 * Do not overwrite an existing match when "no wake" is set
1058 	 */
1059 	if (*best_match != NULL && (npe->npe_npi.npi_flags & NPIF_NOWAKE) != 0) {
1060 		return false;
1061 	}
1062 	/*
1063 	 * The local address matches: this is our 2nd best match
1064 	 */
1065 	if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
1066 	    sizeof(union in_addr_4_6)) == 0) {
1067 		*best_match = npe;
1068 		return false;
1069 	}
1070 
1071 	/*
1072 	 * Only the local port matches, do not override a match
1073 	 * on the local address
1074 	 */
1075 	if (*best_match == NULL) {
1076 		*best_match = npe;
1077 	}
1078 	return false;
1079 }
1080 #undef NPI_MATCH_IPV4
1081 #undef NPI_MATCH_IPV6
1082 
1083 /*
1084  *
1085  */
1086 static bool
net_port_info_find_match(struct net_port_info * in_npi)1087 net_port_info_find_match(struct net_port_info *in_npi)
1088 {
1089 	struct net_port_entry *npe;
1090 	struct net_port_entry * __single best_match = NULL;
1091 
1092 	lck_mtx_lock(&net_port_entry_head_lock);
1093 
1094 	uint32_t count = 0;
1095 	TAILQ_FOREACH(npe, NPE_HASH_HEAD(in_npi->npi_local_port), npe_hash_next) {
1096 		count += 1;
1097 		/*
1098 		 * Search stop on an exact match
1099 		 */
1100 		if (net_port_info_match_npi(npe, in_npi, &best_match)) {
1101 			break;
1102 		}
1103 	}
1104 
1105 	if (best_match != NULL) {
1106 		best_match->npe_npi.npi_flags |= NPIF_WAKEPKT;
1107 		if (best_match->npe_npi.npi_flags & NPIF_NOWAKE) {
1108 			in_npi->npi_flags |= NPIF_NOWAKE;
1109 		}
1110 		in_npi->npi_owner_pid = best_match->npe_npi.npi_owner_pid;
1111 		in_npi->npi_effective_pid = best_match->npe_npi.npi_effective_pid;
1112 		strbufcpy(in_npi->npi_owner_pname, best_match->npe_npi.npi_owner_pname);
1113 		strbufcpy(in_npi->npi_effective_pname, best_match->npe_npi.npi_effective_pname);
1114 		uuid_copy(in_npi->npi_owner_uuid, best_match->npe_npi.npi_owner_uuid);
1115 		uuid_copy(in_npi->npi_effective_uuid, best_match->npe_npi.npi_effective_uuid);
1116 	}
1117 	lck_mtx_unlock(&net_port_entry_head_lock);
1118 
1119 	if (__improbable(net_wake_pkt_debug > 0)) {
1120 		if (best_match != NULL) {
1121 			net_port_info_log_npi("wake packet match", in_npi);
1122 		} else {
1123 			net_port_info_log_npi("wake packet no match", in_npi);
1124 		}
1125 	}
1126 
1127 	return best_match != NULL ? true : false;
1128 }
1129 
1130 #if (DEBUG || DEVELOPMENT)
1131 static void
net_port_info_log_una_wake_event(const char * s,struct net_port_info_una_wake_event * ev)1132 net_port_info_log_una_wake_event(const char *s, struct net_port_info_una_wake_event *ev)
1133 {
1134 	char lbuf[MAX_IPv6_STR_LEN] = {};
1135 	char fbuf[MAX_IPv6_STR_LEN] = {};
1136 
1137 	if (ev->una_wake_pkt_flags & NPIF_IPV4) {
1138 		inet_ntop(PF_INET, &ev->una_wake_pkt_local_addr_._in_a_4.s_addr,
1139 		    lbuf, sizeof(lbuf));
1140 		inet_ntop(PF_INET, &ev->una_wake_pkt_foreign_addr_._in_a_4.s_addr,
1141 		    fbuf, sizeof(fbuf));
1142 	} else if (ev->una_wake_pkt_flags & NPIF_IPV6) {
1143 		inet_ntop(PF_INET6, &ev->una_wake_pkt_local_addr_._in_a_6.s6_addr,
1144 		    lbuf, sizeof(lbuf));
1145 		inet_ntop(PF_INET6, &ev->una_wake_pkt_foreign_addr_._in_a_6.s6_addr,
1146 		    fbuf, sizeof(fbuf));
1147 	}
1148 	os_log(OS_LOG_DEFAULT, "%s if %s (%u) phy_if %s proto %s local %s:%u foreign %s:%u len: %u datalen: %u cflags: 0x%x proto: %u",
1149 	    s != NULL ? s : "",
1150 	    ev->una_wake_pkt_ifname, ev->una_wake_pkt_if_index, ev->una_wake_pkt_phy_ifname,
1151 	    ev->una_wake_pkt_flags & NPIF_TCP ? "tcp" : ev->una_wake_pkt_flags & NPIF_UDP ? "udp" :
1152 	    ev->una_wake_pkt_flags & NPIF_ESP ? "esp" : "unknown",
1153 	    lbuf, ntohs(ev->una_wake_pkt_local_port),
1154 	    fbuf, ntohs(ev->una_wake_pkt_foreign_port),
1155 	    ev->una_wake_pkt_total_len, ev->una_wake_pkt_data_len,
1156 	    ev->una_wake_pkt_control_flags, ev->una_wake_pkt_proto);
1157 }
1158 
1159 static void
net_port_info_log_wake_event(const char * s,struct net_port_info_wake_event * ev)1160 net_port_info_log_wake_event(const char *s, struct net_port_info_wake_event *ev)
1161 {
1162 	char lbuf[MAX_IPv6_STR_LEN] = {};
1163 	char fbuf[MAX_IPv6_STR_LEN] = {};
1164 
1165 	if (ev->wake_pkt_flags & NPIF_IPV4) {
1166 		inet_ntop(PF_INET, &ev->wake_pkt_local_addr_._in_a_4.s_addr,
1167 		    lbuf, sizeof(lbuf));
1168 		inet_ntop(PF_INET, &ev->wake_pkt_foreign_addr_._in_a_4.s_addr,
1169 		    fbuf, sizeof(fbuf));
1170 	} else if (ev->wake_pkt_flags & NPIF_IPV6) {
1171 		inet_ntop(PF_INET6, &ev->wake_pkt_local_addr_._in_a_6.s6_addr,
1172 		    lbuf, sizeof(lbuf));
1173 		inet_ntop(PF_INET6, &ev->wake_pkt_foreign_addr_._in_a_6.s6_addr,
1174 		    fbuf, sizeof(fbuf));
1175 	}
1176 	os_log(OS_LOG_DEFAULT, "%s if %s (%u) phy_if %s proto %s local %s:%u foreign %s:%u len: %u datalen: %u cflags: 0x%x proc %s eproc %s",
1177 	    s != NULL ? s : "",
1178 	    ev->wake_pkt_ifname, ev->wake_pkt_if_index, ev->wake_pkt_phy_ifname,
1179 	    ev->wake_pkt_flags & NPIF_TCP ? "tcp" : ev->wake_pkt_flags ? "udp" :
1180 	    ev->wake_pkt_flags & NPIF_ESP ? "esp" : "unknown",
1181 	    lbuf, ntohs(ev->wake_pkt_port),
1182 	    fbuf, ntohs(ev->wake_pkt_foreign_port),
1183 	    ev->wake_pkt_total_len, ev->wake_pkt_data_len, ev->wake_pkt_control_flags,
1184 	    ev->wake_pkt_owner_pname, ev->wake_pkt_effective_pname);
1185 }
1186 
1187 #endif /* (DEBUG || DEVELOPMENT) */
1188 
1189 /*
1190  * The process attribution of a wake packet can take several steps:
1191  *
1192  * 1) After device wakes, the first interface that sees a wake packet is the
1193  *    physical interface and we remember it via if_set_wake_physical_interface()
1194  *
1195  * 2) We try to attribute a packet to a flow or not based on the physical interface.
1196  *    If we find a flow, then the physical interface is the same as the interface used
1197  *    by the TCP/UDP flow.
1198  *
 * 3) If the packet is tunneled or redirected, we are going to do the attribution again
 *    and the physical interface will be different from the interface used by the TCP/UDP flow.
1201  */
1202 static bool
is_wake_pkt_event_delay(uint32_t ifrtype)1203 is_wake_pkt_event_delay(uint32_t ifrtype)
1204 {
1205 	// Prevent overflow of the bitstring
1206 	if (ifrtype >= NPI_MAX_IF_FAMILY_BITS) {
1207 		return false;
1208 	}
1209 	if (bitstr_test((bitstr_t *)&npi_wake_packet_event_delay_if_families, ifrtype)) {
1210 		return true;
1211 	}
1212 	return false;
1213 }
1214 
1215 static void
if_set_wake_physical_interface(struct ifnet * ifp)1216 if_set_wake_physical_interface(struct ifnet *ifp)
1217 {
1218 	if (last_wake_phy_if_set == true || ifp == NULL) {
1219 		return;
1220 	}
1221 	last_wake_phy_if_set = true;
1222 	strlcpy(last_wake_phy_if_name, IF_XNAME(ifp), sizeof(last_wake_phy_if_name));
1223 	last_wake_phy_if_family = ifp->if_family;
1224 	last_wake_phy_if_subfamily = ifp->if_subfamily;
1225 	last_wake_phy_if_functional_type = if_functional_type(ifp, true);
1226 	if ((ifp->if_xflags & IFXF_DELAYWAKEPKTEVENT) != 0 || is_wake_pkt_event_delay(last_wake_phy_if_family)) {
1227 		last_wake_phy_if_delay_wake_pkt = true;
1228 	}
1229 }
1230 
1231 static void
deliver_unattributed_wake_packet_event(struct net_port_info_una_wake_event * event_data)1232 deliver_unattributed_wake_packet_event(struct net_port_info_una_wake_event *event_data)
1233 {
1234 	struct kev_msg ev_msg = {};
1235 
1236 	ev_msg.vendor_code = KEV_VENDOR_APPLE;
1237 	ev_msg.kev_class = KEV_NETWORK_CLASS;
1238 	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
1239 	ev_msg.event_code  = KEV_POWER_UNATTRIBUTED_WAKE;
1240 
1241 	ev_msg.dv[0].data_ptr = event_data;
1242 	ev_msg.dv[0].data_length = sizeof(struct net_port_info_una_wake_event);
1243 
1244 	int result = kev_post_msg(&ev_msg);
1245 	if (result != 0) {
1246 		uuid_string_t wake_uuid_str;
1247 
1248 		uuid_unparse(event_data->una_wake_uuid, wake_uuid_str);
1249 		os_log_error(OS_LOG_DEFAULT,
1250 		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
1251 		    __func__, result, wake_uuid_str);
1252 
1253 		if_ports_used_stats.ifpu_wake_pkt_event_error += 1;
1254 	}
1255 #if (DEBUG || DEVELOPMENT)
1256 	net_port_info_log_una_wake_event("unattributed wake packet event", event_data);
1257 #endif /* (DEBUG || DEVELOPMENT) */
1258 }
1259 
1260 static void
deliver_attributed_wake_packet_event(struct net_port_info_wake_event * event_data)1261 deliver_attributed_wake_packet_event(struct net_port_info_wake_event *event_data)
1262 {
1263 	struct kev_msg ev_msg = {};
1264 
1265 	ev_msg.vendor_code = KEV_VENDOR_APPLE;
1266 	ev_msg.kev_class = KEV_NETWORK_CLASS;
1267 	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
1268 	ev_msg.event_code  = KEV_POWER_WAKE_PACKET;
1269 
1270 	ev_msg.dv[0].data_ptr = event_data;
1271 	ev_msg.dv[0].data_length = sizeof(struct net_port_info_wake_event);
1272 
1273 	int result = kev_post_msg(&ev_msg);
1274 	if (result != 0) {
1275 		uuid_string_t wake_uuid_str;
1276 
1277 		uuid_unparse(event_data->wake_uuid, wake_uuid_str);
1278 		os_log_error(OS_LOG_DEFAULT,
1279 		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
1280 		    __func__, result, wake_uuid_str);
1281 
1282 		if_ports_used_stats.ifpu_wake_pkt_event_error += 1;
1283 	}
1284 #if (DEBUG || DEVELOPMENT)
1285 	net_port_info_log_wake_event("attributed wake packet event", event_data);
1286 #endif /* (DEBUG || DEVELOPMENT) */
1287 }
1288 
/*
 * Build and deliver (or record for delayed delivery) an unattributed wake
 * packet event for a wake packet received as an mbuf that could not be
 * matched to any flow.  Only one unattributed wake event is generated per
 * wake cycle; duplicates are counted and dropped.  A copy of the start of
 * the packet (up to NPI_MAX_UNA_WAKE_PKT_LEN bytes) is embedded in the event.
 */
static void
if_notify_unattributed_wake_mbuf(struct ifnet *ifp, struct mbuf *m,
    struct net_port_info *npi, uint32_t pkt_total_len, uint32_t pkt_data_len,
    uint16_t pkt_control_flags, uint16_t proto)
{
	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);

	/*
	 * Test-and-set of the once-per-wake flag is done under the lock;
	 * the event itself is built after dropping it
	 */
	lck_mtx_lock(&net_port_entry_head_lock);
	if (has_notified_unattributed_wake) {
		lck_mtx_unlock(&net_port_entry_head_lock);
		if_ports_used_stats.ifpu_dup_unattributed_wake_event += 1;

		if (__improbable(net_wake_pkt_debug > 0)) {
			net_port_info_log_npi("already notified unattributed wake packet", npi);
		}
		return;
	}
	has_notified_unattributed_wake = true;
	lck_mtx_unlock(&net_port_entry_head_lock);

	if_ports_used_stats.ifpu_unattributed_wake_event += 1;

	struct net_port_info_una_wake_event event_data = {};
	uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
	event_data.una_wake_pkt_if_index = ifp != NULL ? ifp->if_index : 0;
	event_data.una_wake_pkt_flags = npi->npi_flags;

	event_data.una_wake_pkt_local_port = npi->npi_local_port;
	event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
	event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
	event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;

	event_data.una_wake_pkt_total_len = pkt_total_len;
	event_data.una_wake_pkt_data_len = pkt_data_len;
	event_data.una_wake_pkt_control_flags = pkt_control_flags;
	event_data.una_wake_pkt_proto = proto;

	if (ifp != NULL) {
		/* Receive interface information */
		strlcpy(event_data.una_wake_pkt_ifname, IF_XNAME(ifp),
		    sizeof(event_data.una_wake_pkt_ifname));
		event_data.una_wake_pkt_if_info.npi_if_family = ifp->if_family;
		event_data.una_wake_pkt_if_info.npi_if_subfamily = ifp->if_subfamily;
		event_data.una_wake_pkt_if_info.npi_if_functional_type = if_functional_type(ifp, true);

		/* Physical interface information recorded at wake time */
		strbufcpy(event_data.una_wake_pkt_phy_ifname, last_wake_phy_if_name);
		event_data.una_wake_pkt_phy_if_info.npi_if_family = last_wake_phy_if_family;
		event_data.una_wake_pkt_phy_if_info.npi_if_subfamily = last_wake_phy_if_subfamily;
		event_data.una_wake_pkt_phy_if_info.npi_if_functional_type = last_wake_phy_if_functional_type;
	} else {
		if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
	}

	/* Copy at most NPI_MAX_UNA_WAKE_PKT_LEN bytes of the packet into the event */
	event_data.una_wake_ptk_len = m->m_pkthdr.len > NPI_MAX_UNA_WAKE_PKT_LEN ?
	    NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t)m->m_pkthdr.len;

	errno_t error = mbuf_copydata(m, 0, event_data.una_wake_ptk_len,
	    (void *)event_data.una_wake_pkt);
	if (error != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: mbuf_copydata() failed with error %d for wake uuid %s",
		    __func__, error, wake_uuid_str);

		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
		return;
	}

	/* Keep a copy of the last wake packet event (also used for delayed delivery) */
	last_wake_pkt_event.npi_wp_code = KEV_POWER_UNATTRIBUTED_WAKE;
	memcpy(&last_wake_pkt_event.npi_ev_wake_pkt_unattributed, &event_data, sizeof(last_wake_pkt_event.npi_ev_wake_pkt_unattributed));

	/* Delivery is delayed when the physical interface requested it */
	if (last_wake_phy_if_delay_wake_pkt) {
#if (DEBUG || DEVELOPMENT)
		if (if_ports_used_verbose > 0) {
			net_port_info_log_una_wake_event("delay unattributed wake packet event", &event_data);
		}
#endif /* (DEBUG || DEVELOPMENT) */
		return;
	}

	deliver_unattributed_wake_packet_event(&event_data);
}
1372 
/*
 * Build and deliver (or record for delayed delivery) an attributed wake
 * packet event for a wake packet that was matched to a flow.  "npi" carries
 * the process attribution filled in by net_port_info_find_match().  Only one
 * attributed wake event is generated per wake cycle; duplicates are counted
 * and dropped.
 */
static void
if_notify_wake_packet(struct ifnet *ifp, struct net_port_info *npi,
    uint32_t pkt_total_len, uint32_t pkt_data_len, uint16_t pkt_control_flags)
{
	struct net_port_info_wake_event event_data = {};

	uuid_copy(event_data.wake_uuid, current_wakeuuid);
	event_data.wake_pkt_if_index = ifp->if_index;
	event_data.wake_pkt_port = npi->npi_local_port;
	event_data.wake_pkt_flags = npi->npi_flags;
	/* Process attribution copied from the matched entry */
	event_data.wake_pkt_owner_pid = npi->npi_owner_pid;
	event_data.wake_pkt_effective_pid = npi->npi_effective_pid;
	strbufcpy(event_data.wake_pkt_owner_pname, npi->npi_owner_pname);
	strbufcpy(event_data.wake_pkt_effective_pname, npi->npi_effective_pname);
	uuid_copy(event_data.wake_pkt_owner_uuid, npi->npi_owner_uuid);
	uuid_copy(event_data.wake_pkt_effective_uuid, npi->npi_effective_uuid);

	event_data.wake_pkt_foreign_port = npi->npi_foreign_port;
	event_data.wake_pkt_local_addr_ = npi->npi_local_addr_;
	event_data.wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
	strlcpy(event_data.wake_pkt_ifname, IF_XNAME(ifp), sizeof(event_data.wake_pkt_ifname));

	/* Receive interface information */
	event_data.wake_pkt_if_info.npi_if_family = ifp->if_family;
	event_data.wake_pkt_if_info.npi_if_subfamily = ifp->if_subfamily;
	event_data.wake_pkt_if_info.npi_if_functional_type = if_functional_type(ifp, true);

	/* Physical interface information recorded at wake time */
	strbufcpy(event_data.wake_pkt_phy_ifname, last_wake_phy_if_name);
	event_data.wake_pkt_phy_if_info.npi_if_family = last_wake_phy_if_family;
	event_data.wake_pkt_phy_if_info.npi_if_subfamily = last_wake_phy_if_subfamily;
	event_data.wake_pkt_phy_if_info.npi_if_functional_type = last_wake_phy_if_functional_type;

	event_data.wake_pkt_total_len = pkt_total_len;
	event_data.wake_pkt_data_len = pkt_data_len;
	event_data.wake_pkt_control_flags = pkt_control_flags;
	/* Propagate the "no wake" marking of the matched entry to user space */
	if (npi->npi_flags & NPIF_NOWAKE) {
		event_data.wake_pkt_control_flags |= NPICF_NOWAKE;
	}

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);

	lck_mtx_lock(&net_port_entry_head_lock);

	/* Only one attributed wake event is posted per wake cycle */
	if (has_notified_wake_pkt) {
		lck_mtx_unlock(&net_port_entry_head_lock);
		if_ports_used_stats.ifpu_dup_wake_pkt_event += 1;

		if (__improbable(net_wake_pkt_debug > 0)) {
			net_port_info_log_npi("already notified wake packet", npi);
		}
		return;
	}
	has_notified_wake_pkt = true;

	/* Keep a copy of the last wake packet event (also used for delayed delivery) */
	last_wake_pkt_event.npi_wp_code = KEV_POWER_WAKE_PACKET;
	memcpy(&last_wake_pkt_event.npi_ev_wake_pkt_attributed, &event_data, sizeof(last_wake_pkt_event.npi_ev_wake_pkt_attributed));

	lck_mtx_unlock(&net_port_entry_head_lock);

	if (npi->npi_flags & NPIF_NOWAKE) {
		if_ports_used_stats.ifpu_spurious_wake_event += 1;
	} else {
		if_ports_used_stats.ifpu_wake_pkt_event += 1;
	}

	/* Delivery is delayed when the physical interface requested it */
	if (last_wake_phy_if_delay_wake_pkt) {
#if (DEBUG || DEVELOPMENT)
		if (if_ports_used_verbose > 0) {
			net_port_info_log_wake_event("delay attributed wake packet event", &event_data);
		}
#endif /* (DEBUG || DEVELOPMENT) */
		return;
	}

	deliver_attributed_wake_packet_event(&event_data);
}
1448 
1449 static bool
is_encapsulated_esp(struct mbuf * m,size_t data_offset)1450 is_encapsulated_esp(struct mbuf *m, size_t data_offset)
1451 {
1452 	/*
1453 	 * They are three cases:
1454 	 * - Keep alive: 1 byte payload
1455 	 * - IKE: payload start with 4 bytes header set to zero before ISAKMP header
1456 	 * - otherwise it's ESP
1457 	 */
1458 	ASSERT(m->m_pkthdr.len >= data_offset);
1459 
1460 	size_t data_len = m->m_pkthdr.len - data_offset;
1461 	if (data_len == 1) {
1462 		return false;
1463 	} else if (data_len > ESP_HDR_SIZE) {
1464 		uint8_t payload[ESP_HDR_SIZE];
1465 
1466 		errno_t error = mbuf_copydata(m, data_offset, ESP_HDR_SIZE, &payload);
1467 		if (error != 0) {
1468 			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ESP_HDR_SIZE) error %d",
1469 			    __func__, error);
1470 		} else if (payload[0] == 0 && payload[1] == 0 &&
1471 		    payload[2] == 0 && payload[3] == 0) {
1472 			return false;
1473 		}
1474 	}
1475 	return true;
1476 }
1477 
/*
 * if_ports_used_match_mbuf() attributes a wake packet received as an mbuf
 * to a process.
 *
 * The IPv4/IPv6 and TCP/UDP headers are parsed out of the mbuf into a
 * net_port_info that is matched against the list of registered port entries.
 * On a match an attributed wake packet event is posted, otherwise an
 * unattributed wake packet event is posted.  ESP packets (including
 * UDP-encapsulated ESP on the ISAKMP NAT-T port) are left to the ESP layer.
 *
 * "m" must have PKTF_WAKE_PKT set; "ifp" is the receive interface and may
 * be NULL.
 */
void
if_ports_used_match_mbuf(struct ifnet *ifp, protocol_family_t proto_family, struct mbuf *m)
{
	errno_t error;
	struct net_port_info npi = {};
	bool found = false;
	uint32_t pkt_total_len = 0;
	uint32_t pkt_data_len = 0;
	uint16_t pkt_control_flags = 0;
	uint16_t pkt_proto = 0;

	/* Only packets flagged as wake packets are expected here */
	if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == 0) {
		if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
		os_log_error(OS_LOG_DEFAULT, "%s: called PKTF_WAKE_PKT not set from %s",
		    __func__, ifp != NULL ? IF_XNAME(ifp) : "");
		return;
	}

	if_ports_used_stats.ifpu_so_match_wake_pkt += 1;
	npi.npi_flags |= NPIF_SOCKET; /* For logging */
	pkt_total_len = m->m_pkthdr.len;
	/* pkt_data_len is whittled down as each header layer is accounted for */
	pkt_data_len = pkt_total_len;

	if (ifp != NULL) {
		npi.npi_if_index = ifp->if_index;
		if (IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
		/* The first interface to see a wake packet is the physical one */
		if_set_wake_physical_interface(ifp);
	}

	if (proto_family == PF_INET) {
		struct ip iphdr = {};

		if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;

		error = mbuf_copydata(m, 0, sizeof(struct ip), &iphdr);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip) error %d",
			    __func__, error);
			goto failed;
		}
		npi.npi_flags |= NPIF_IPV4;
		/* Inbound packet: local is the destination, foreign the source */
		npi.npi_local_addr_in = iphdr.ip_dst;
		npi.npi_foreign_addr_in = iphdr.ip_src;

		/*
		 * Check if this is a fragment that is not the first fragment
		 */
		if ((ntohs(iphdr.ip_off) & ~(IP_DF | IP_RF)) &&
		    (ntohs(iphdr.ip_off) & IP_OFFMASK) != 0) {
			npi.npi_flags |= NPIF_FRAG;
			if_ports_used_stats.ifpu_frag_wake_pkt += 1;
		}

		/* Subtract the IPv4 header length (ip_hl is in 32-bit words) */
		if ((iphdr.ip_hl << 2) < pkt_data_len) {
			pkt_data_len -= iphdr.ip_hl << 2;
		} else {
			pkt_data_len = 0;
		}

		pkt_proto = iphdr.ip_p;

		switch (iphdr.ip_p) {
		case IPPROTO_TCP: {
			if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
			npi.npi_flags |= NPIF_TCP;

			/* A non-first fragment does not carry the TCP header */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct tcphdr th = {};
			error = mbuf_copydata(m, iphdr.ip_hl << 2, sizeof(struct tcphdr), &th);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(tcphdr) error %d",
				    __func__, error);
				goto failed;
			}
			npi.npi_local_port = th.th_dport;
			npi.npi_foreign_port = th.th_sport;

			/* Subtract the TCP header length (th_off is in 32-bit words) */
			if (pkt_data_len < sizeof(struct tcphdr) ||
			    pkt_data_len < (th.th_off << 2)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= th.th_off << 2;
			}
			pkt_control_flags = th.th_flags;
			break;
		}
		case IPPROTO_UDP: {
			if_ports_used_stats.ifpu_udp_wake_pkt += 1;
			npi.npi_flags |= NPIF_UDP;

			/* A non-first fragment does not carry the UDP header */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}
			struct udphdr uh = {};
			size_t udp_offset = iphdr.ip_hl << 2;

			error = mbuf_copydata(m, udp_offset, sizeof(struct udphdr), &uh);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(udphdr) error %d",
				    __func__, error);
				goto failed;
			}
			npi.npi_local_port = uh.uh_dport;
			npi.npi_foreign_port = uh.uh_sport;
			/*
			 * Let the ESP layer handle wake packets
			 */
			if (ntohs(uh.uh_dport) == PORT_ISAKMP_NATT ||
			    ntohs(uh.uh_sport) == PORT_ISAKMP_NATT) {
				if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
				if (is_encapsulated_esp(m, udp_offset + sizeof(struct udphdr))) {
					if (net_wake_pkt_debug > 0) {
						net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
					}
					return;
				}
			}

			if (pkt_data_len < sizeof(struct udphdr)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= sizeof(struct udphdr);
			}
			break;
		}
		case IPPROTO_ESP: {
			/*
			 * Let the ESP layer handle wake packets
			 */
			if_ports_used_stats.ifpu_esp_wake_pkt += 1;
			npi.npi_flags |= NPIF_ESP;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ESP matching", &npi);
			}
			return;
		}
		default:
			if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;
			os_log(OS_LOG_DEFAULT, "%s: unexpected IPv4 protocol %u from %s",
			    __func__, iphdr.ip_p, IF_XNAME(ifp));
			goto failed;
		}
	} else if (proto_family == PF_INET6) {
		struct ip6_hdr ip6_hdr = {};

		if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;

		error = mbuf_copydata(m, 0, sizeof(struct ip6_hdr), &ip6_hdr);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip6_hdr) error %d",
			    __func__, error);
			goto failed;
		}
		npi.npi_flags |= NPIF_IPV6;
		/* Inbound packet: local is the destination, foreign the source */
		memcpy(&npi.npi_local_addr_in6, &ip6_hdr.ip6_dst, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6, &ip6_hdr.ip6_src, sizeof(struct in6_addr));

		size_t l3_len = sizeof(struct ip6_hdr);
		uint8_t l4_proto = ip6_hdr.ip6_nxt;

		pkt_proto = l4_proto;

		if (pkt_data_len < l3_len) {
			pkt_data_len = 0;
		} else {
			pkt_data_len -= l3_len;
		}

		/*
		 * Check if this is a fragment that is not the first fragment
		 */
		if (l4_proto == IPPROTO_FRAGMENT) {
			struct ip6_frag ip6_frag;

			error = mbuf_copydata(m, sizeof(struct ip6_hdr), sizeof(struct ip6_frag), &ip6_frag);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip6_frag) error %d",
				    __func__, error);
				goto failed;
			}

			/* Skip over the fragment header to the real payload protocol */
			l3_len += sizeof(struct ip6_frag);
			l4_proto = ip6_frag.ip6f_nxt;

			if ((ip6_frag.ip6f_offlg & IP6F_OFF_MASK) != 0) {
				npi.npi_flags |= NPIF_FRAG;
				if_ports_used_stats.ifpu_frag_wake_pkt += 1;
			}
		}


		switch (l4_proto) {
		case IPPROTO_TCP: {
			if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
			npi.npi_flags |= NPIF_TCP;

			/*
			 * Cannot attribute a fragment that is not the first fragment as it
			 * does not have the TCP header
			 */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct tcphdr th = {};

			error = mbuf_copydata(m, l3_len, sizeof(struct tcphdr), &th);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(tcphdr) error %d",
				    __func__, error);
				if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
				goto failed;
			}
			npi.npi_local_port = th.th_dport;
			npi.npi_foreign_port = th.th_sport;

			/* Subtract the TCP header length (th_off is in 32-bit words) */
			if (pkt_data_len < sizeof(struct tcphdr) ||
			    pkt_data_len < (th.th_off << 2)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= th.th_off << 2;
			}
			pkt_control_flags = th.th_flags;
			break;
		}
		case IPPROTO_UDP: {
			if_ports_used_stats.ifpu_udp_wake_pkt += 1;
			npi.npi_flags |= NPIF_UDP;

			/*
			 * Cannot attribute a fragment that is not the first fragment as it
			 * does not have the UDP header
			 */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct udphdr uh = {};

			error = mbuf_copydata(m, l3_len, sizeof(struct udphdr), &uh);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(udphdr) error %d",
				    __func__, error);
				if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
				goto failed;
			}
			npi.npi_local_port = uh.uh_dport;
			npi.npi_foreign_port = uh.uh_sport;
			/*
			 * Let the ESP layer handle wake packets
			 */
			if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
			    ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
				if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
				if (is_encapsulated_esp(m, l3_len + sizeof(struct udphdr))) {
					if (net_wake_pkt_debug > 0) {
						net_port_info_log_npi("defer encapsulated ESP matching", &npi);
					}
					return;
				}
			}

			if (pkt_data_len < sizeof(struct udphdr)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= sizeof(struct udphdr);
			}
			break;
		}
		case IPPROTO_ESP: {
			/*
			 * Let the ESP layer handle the wake packet
			 */
			if_ports_used_stats.ifpu_esp_wake_pkt += 1;
			npi.npi_flags |= NPIF_ESP;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ESP matching", &npi);
			}
			return;
		}
		default:
			if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;

			os_log(OS_LOG_DEFAULT, "%s: unexpected IPv6 protocol %u from %s",
			    __func__, ip6_hdr.ip6_nxt, IF_XNAME(ifp));
			goto failed;
		}
	} else {
		if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol family %d from %s",
		    __func__, proto_family, IF_XNAME(ifp));
		goto failed;
	}
	/* Cannot attribute the packet without a receive interface */
	if (ifp == NULL) {
		goto failed;
	}

	found = net_port_info_find_match(&npi);
	if (found) {
		if_notify_wake_packet(ifp, &npi,
		    pkt_total_len, pkt_data_len, pkt_control_flags);
	} else {
		if_notify_unattributed_wake_mbuf(ifp, m, &npi,
		    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
	}
	return;
failed:
	/* Parsing failed: report whatever was gathered as unattributed */
	if_notify_unattributed_wake_mbuf(ifp, m, &npi,
	    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
}
1793 
1794 #if SKYWALK
1795 
/*
 * Post a wake packet event for a skywalk packet that could not be
 * attributed to any registered local port.
 *
 * Builds a net_port_info_una_wake_event from the packet bytes and the
 * partially-filled net_port_info, records it as the last wake packet
 * event, then either delivers it immediately or leaves it pending when
 * the physical wake interface requested delayed wake packet events
 * (replayed later by sysctl_wake_pkt_event_notify).
 *
 * Must be called with net_port_entry_head_lock not held.
 */
static void
if_notify_unattributed_wake_pkt(struct ifnet *ifp, struct __kern_packet *pkt,
    struct net_port_info *npi, uint32_t pkt_total_len, uint32_t pkt_data_len,
    uint16_t pkt_control_flags, uint16_t proto)
{
	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);

	/*
	 * At most one unattributed wake event is reported per wake cycle:
	 * test-and-set has_notified_unattributed_wake under the lock.
	 */
	lck_mtx_lock(&net_port_entry_head_lock);
	if (has_notified_unattributed_wake) {
		lck_mtx_unlock(&net_port_entry_head_lock);
		if_ports_used_stats.ifpu_dup_unattributed_wake_event += 1;

		if (__improbable(net_wake_pkt_debug > 0)) {
			net_port_info_log_npi("already notified unattributed wake packet", npi);
		}
		return;
	}
	has_notified_unattributed_wake = true;
	lck_mtx_unlock(&net_port_entry_head_lock);

	if_ports_used_stats.ifpu_unattributed_wake_event += 1;

	/* A NULL receive interface is tolerated: counted and logged only */
	if (ifp == NULL) {
		os_log(OS_LOG_DEFAULT, "%s: receive interface is NULL",
		    __func__);
		if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
	}

	struct net_port_info_una_wake_event event_data = {};
	uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
	event_data.una_wake_pkt_if_index = ifp != NULL ? ifp->if_index : 0;
	event_data.una_wake_pkt_flags = npi->npi_flags;

	/*
	 * Snapshot the packet bytes starting at the network (L3) header,
	 * capped at NPI_MAX_UNA_WAKE_PKT_LEN.
	 */
	uint16_t offset = kern_packet_get_network_header_offset(SK_PKT2PH(pkt));
	event_data.una_wake_ptk_len =
	    pkt->pkt_length - offset > NPI_MAX_UNA_WAKE_PKT_LEN ?
	    NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t) pkt->pkt_length - offset;

	kern_packet_copy_bytes(SK_PKT2PH(pkt), offset, event_data.una_wake_ptk_len,
	    event_data.una_wake_pkt);

	event_data.una_wake_pkt_local_port = npi->npi_local_port;
	event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
	event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
	event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
	if (ifp != NULL) {
		strlcpy(event_data.una_wake_pkt_ifname, IF_XNAME(ifp),
		    sizeof(event_data.una_wake_pkt_ifname));
	}

	event_data.una_wake_pkt_total_len = pkt_total_len;
	event_data.una_wake_pkt_data_len = pkt_data_len;
	event_data.una_wake_pkt_control_flags = pkt_control_flags;
	event_data.una_wake_pkt_proto = proto;

	/* Record the event so sysctl readers and deferred delivery can use it */
	last_wake_pkt_event.npi_wp_code = KEV_POWER_UNATTRIBUTED_WAKE;
	memcpy(&last_wake_pkt_event.npi_ev_wake_pkt_unattributed, &event_data, sizeof(last_wake_pkt_event.npi_ev_wake_pkt_unattributed));

	/*
	 * When delayed delivery was requested for the physical wake
	 * interface, keep the event pending instead of delivering now.
	 */
	if (last_wake_phy_if_delay_wake_pkt) {
#if (DEBUG || DEVELOPMENT)
		if (if_ports_used_verbose > 0) {
			net_port_info_log_una_wake_event("delay unattributed wake packet event", &event_data);
		}
#endif /* (DEBUG || DEVELOPMENT) */
		return;
	}

	deliver_unattributed_wake_packet_event(&event_data);
}
1865 
/*
 * Try to attribute a skywalk channel wake packet to a registered local
 * port.
 *
 * Extracts addresses, protocol and ports from the packet's flow metadata
 * (the packet must carry PKT_F_WAKE_PKT).  On a match, posts an
 * attributed wake event; otherwise — including any parse failure — posts
 * an unattributed wake event.  ESP and ISAKMP NAT-traversal packets are
 * deliberately left for other layers to match.
 */
void
if_ports_used_match_pkt(struct ifnet *ifp, struct __kern_packet *pkt)
{
	struct net_port_info npi = {};
	bool found = false;
	uint32_t pkt_total_len = 0;
	uint32_t pkt_data_len = 0;
	uint16_t pkt_control_flags = 0;
	uint16_t pkt_proto = 0;

	/* Only the packet flagged as having caused the wake should get here */
	if ((pkt->pkt_pflags & PKT_F_WAKE_PKT) == 0) {
		if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
		os_log_error(OS_LOG_DEFAULT, "%s: called PKT_F_WAKE_PKT not set from %s",
		    __func__, IF_XNAME(ifp));
		return;
	}

	if_ports_used_stats.ifpu_ch_match_wake_pkt += 1;
	npi.npi_flags |= NPIF_CHANNEL; /* For logging */
	/* Lengths come from the flow metadata, not from re-parsing headers */
	pkt_total_len = pkt->pkt_flow_ip_hlen +
	    pkt->pkt_flow_tcp_hlen + pkt->pkt_flow_ulen;
	pkt_data_len = pkt->pkt_flow_ulen;

	if (ifp != NULL) {
		npi.npi_if_index = ifp->if_index;
		if (IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
		if_set_wake_physical_interface(ifp);
	}

	/* Fill in the local (dst) and foreign (src) addresses per IP version */
	switch (pkt->pkt_flow_ip_ver) {
	case IPVERSION:
		if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;

		npi.npi_flags |= NPIF_IPV4;
		npi.npi_local_addr_in = pkt->pkt_flow_ipv4_dst;
		npi.npi_foreign_addr_in = pkt->pkt_flow_ipv4_src;
		break;
	case IPV6_VERSION:
		if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;

		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6, &pkt->pkt_flow_ipv6_dst,
		    sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6, &pkt->pkt_flow_ipv6_src,
		    sizeof(struct in6_addr));
		break;
	default:
		if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;

		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol family %u from %s",
		    __func__, pkt->pkt_flow_ip_ver, IF_XNAME(ifp));
		goto failed;
	}
	pkt_proto = pkt->pkt_flow_ip_ver;

	/*
	 * Check if this is a fragment that is not the first fragment
	 */
	if (pkt->pkt_flow_ip_is_frag && !pkt->pkt_flow_ip_is_first_frag) {
		os_log(OS_LOG_DEFAULT, "%s: unexpected wake fragment from %s",
		    __func__, IF_XNAME(ifp));
		npi.npi_flags |= NPIF_FRAG;
		if_ports_used_stats.ifpu_frag_wake_pkt += 1;
	}

	switch (pkt->pkt_flow_ip_proto) {
	case IPPROTO_TCP: {
		if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
		npi.npi_flags |= NPIF_TCP;

		/*
		 * Cannot attribute a fragment that is not the first fragment as it
		 * not have the TCP header
		 */
		if (npi.npi_flags & NPIF_FRAG) {
			goto failed;
		}
		struct tcphdr * __single tcp = __unsafe_forge_single(struct tcphdr *, pkt->pkt_flow_tcp_hdr);
		if (tcp == NULL) {
			os_log(OS_LOG_DEFAULT, "%s: pkt with unassigned TCP header from %s",
			    __func__, IF_XNAME(ifp));
			if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
			goto failed;
		}
		/* Ports stay in network byte order in the npi */
		npi.npi_local_port = tcp->th_dport;
		npi.npi_foreign_port = tcp->th_sport;
		pkt_control_flags = tcp->th_flags;
		break;
	}
	case IPPROTO_UDP: {
		if_ports_used_stats.ifpu_udp_wake_pkt += 1;
		npi.npi_flags |= NPIF_UDP;

		/*
		 * Cannot attribute a fragment that is not the first fragment as it
		 * not have the UDP header
		 */
		if (npi.npi_flags & NPIF_FRAG) {
			goto failed;
		}
		struct udphdr * __single uh = __unsafe_forge_single(struct udphdr *, pkt->pkt_flow_udp_hdr);
		if (uh == NULL) {
			os_log(OS_LOG_DEFAULT, "%s: pkt with unassigned UDP header from %s",
			    __func__, IF_XNAME(ifp));
			if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
			goto failed;
		}
		npi.npi_local_port = uh->uh_dport;
		npi.npi_foreign_port = uh->uh_sport;

		/*
		 * Defer matching of UDP NAT traversal to ip_input
		 * (assumes IKE uses sockets)
		 */
		if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
		    ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
			if_ports_used_stats.ifpu_deferred_isakmp_natt_wake_pkt += 1;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
			}
			return;
		}
		break;
	}
	case IPPROTO_ESP: {
		/*
		 * Let the ESP layer handle the wake packet
		 */
		if_ports_used_stats.ifpu_esp_wake_pkt += 1;
		npi.npi_flags |= NPIF_ESP;
		if (net_wake_pkt_debug > 0) {
			net_port_info_log_npi("defer ESP matching", &npi);
		}
		return;
	}
	default:
		if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;

		os_log(OS_LOG_DEFAULT, "%s: unexpected IP protocol %u from %s",
		    __func__, pkt->pkt_flow_ip_proto, IF_XNAME(ifp));
		goto failed;
	}

	/* Without a receive interface the packet cannot be attributed */
	if (ifp == NULL) {
		goto failed;
	}

	found = net_port_info_find_match(&npi);
	if (found) {
		if_notify_wake_packet(ifp, &npi,
		    pkt_total_len, pkt_data_len, pkt_control_flags);
	} else {
		if_notify_unattributed_wake_pkt(ifp, pkt, &npi,
		    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
	}
	return;
failed:
	if_notify_unattributed_wake_pkt(ifp, pkt, &npi,
	    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
}
2028 #endif /* SKYWALK */
2029 
2030 int
2031 sysctl_last_attributed_wake_event SYSCTL_HANDLER_ARGS
2032 {
2033 #pragma unused(oidp, arg1, arg2)
2034 	struct net_port_info_wake_event net_port_info_wake_event = { 0 };
2035 	size_t len = sizeof(net_port_info_wake_event);
2036 	int error;
2037 
2038 	lck_mtx_lock(&net_port_entry_head_lock);
2039 	if (last_wake_pkt_event.npi_wp_code == KEV_POWER_WAKE_PACKET) {
2040 		memcpy(&net_port_info_wake_event, &last_wake_pkt_event.npi_ev_wake_pkt_attributed, len);
2041 	}
2042 	lck_mtx_unlock(&net_port_entry_head_lock);
2043 
2044 	if (req->oldptr != 0) {
2045 		len = MIN(req->oldlen, len);
2046 	}
2047 	error = SYSCTL_OUT(req, &net_port_info_wake_event, len);
2048 
2049 	return error;
2050 }
2051 
2052 int
2053 sysctl_last_unattributed_wake_event SYSCTL_HANDLER_ARGS
2054 {
2055 #pragma unused(oidp, arg1, arg2)
2056 	struct net_port_info_una_wake_event net_port_info_una_wake_event = { 0 };
2057 	size_t len = sizeof(net_port_info_una_wake_event);
2058 	int error;
2059 
2060 	lck_mtx_lock(&net_port_entry_head_lock);
2061 	if (last_wake_pkt_event.npi_wp_code == KEV_POWER_UNATTRIBUTED_WAKE) {
2062 		memcpy(&net_port_info_una_wake_event, &last_wake_pkt_event.npi_ev_wake_pkt_unattributed, len);
2063 	}
2064 	lck_mtx_unlock(&net_port_entry_head_lock);
2065 
2066 	if (req->oldptr != 0) {
2067 		len = MIN(req->oldlen, len);
2068 	}
2069 	error = SYSCTL_OUT(req, &net_port_info_una_wake_event, len);
2070 
2071 	return error;
2072 }
2073 
2074 /*
2075  * Pass the interface family of the interface that caused the wake
2076  */
2077 int
2078 sysctl_wake_pkt_event_notify SYSCTL_HANDLER_ARGS
2079 {
2080 #pragma unused(oidp, arg1, arg2)
2081 	long long val = 0;
2082 	int error = 0;
2083 	int changed = 0;
2084 	uint32_t if_family = 0;
2085 
2086 	error = sysctl_io_number(req, val, sizeof(val), &val, &changed);
2087 	if (error != 0 || req->newptr == 0 || changed == 0) {
2088 		return error;
2089 	}
2090 
2091 	if (val < 0 || val > UINT32_MAX) {
2092 		return EINVAL;
2093 	}
2094 	if_family = (uint32_t)val;
2095 
2096 	if (!IOCurrentTaskHasEntitlement(WAKE_PKT_EVENT_CONTROL_ENTITLEMENT)) {
2097 		return EPERM;
2098 	}
2099 
2100 	os_log(OS_LOG_DEFAULT, "sysctl_wake_pkt_event_notify proc %s:%u val %u last_wake_phy_if_delay_wake_pkt %d last_wake_phy_if_family %u",
2101 	    proc_best_name(current_proc()), proc_selfpid(),
2102 	    if_family, last_wake_phy_if_delay_wake_pkt, last_wake_phy_if_family);
2103 
2104 	if (last_wake_phy_if_delay_wake_pkt && val == last_wake_phy_if_family) {
2105 		last_wake_phy_if_delay_wake_pkt = false;
2106 		if (last_wake_pkt_event.npi_wp_code == KEV_POWER_WAKE_PACKET) {
2107 			deliver_attributed_wake_packet_event(&last_wake_pkt_event.npi_ev_wake_pkt_attributed);
2108 		} else {
2109 			deliver_unattributed_wake_packet_event(&last_wake_pkt_event.npi_ev_wake_pkt_unattributed);
2110 		}
2111 	}
2112 
2113 	return 0;
2114 }
2115 
2116 int
2117 sysctl_wake_pkt_event_delay_if_families SYSCTL_HANDLER_ARGS
2118 {
2119 #pragma unused(oidp, arg1, arg2)
2120 	long long val = npi_wake_packet_event_delay_if_families;
2121 	int error;
2122 	int changed = 0;
2123 	uint32_t if_families = 0;
2124 
2125 	error = sysctl_io_number(req, val, sizeof(val), &val, &changed);
2126 	if (error != 0 || req->newptr == 0 || changed == 0) {
2127 		return error;
2128 	}
2129 	if (val < 0 || val > UINT32_MAX) {
2130 		return EINVAL;
2131 	}
2132 	if_families = (uint32_t)val;
2133 
2134 	if (!IOCurrentTaskHasEntitlement(WAKE_PKT_EVENT_CONTROL_ENTITLEMENT)) {
2135 		return EPERM;
2136 	}
2137 
2138 	os_log(OS_LOG_DEFAULT, "sysctl_wake_pkt_event_delay_if_families proc %s:%u npi_wake_packet_event_delay_if_families 0x%x -> 0x%x",
2139 	    proc_best_name(current_proc()), proc_selfpid(),
2140 	    npi_wake_packet_event_delay_if_families, if_families);
2141 
2142 	/* The value is the bitmap of the functional types to delay */
2143 	npi_wake_packet_event_delay_if_families = if_families;
2144 
2145 	return 0;
2146 }
2147