xref: /xnu-12377.1.9/bsd/net/if_ports_used.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea) !
1 /*
2  * Copyright (c) 2017-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/types.h>
30 #include <sys/time.h>
31 #include <sys/mcache.h>
32 #include <sys/malloc.h>
33 #include <sys/kauth.h>
34 #include <sys/kern_event.h>
35 #include <sys/bitstring.h>
36 #include <sys/priv.h>
37 #include <sys/proc.h>
38 #include <sys/protosw.h>
39 #include <sys/socket.h>
40 
41 #include <kern/locks.h>
42 #include <kern/zalloc.h>
43 
44 #include <libkern/libkern.h>
45 
46 #include <net/kpi_interface.h>
47 #include <net/if_var.h>
48 #include <net/if_ports_used.h>
49 #include <net/net_sysctl.h>
50 
51 #include <netinet/in_pcb.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip6.h>
54 #include <netinet/tcp_var.h>
55 #include <netinet/tcp_fsm.h>
56 #include <netinet/udp.h>
57 
58 #if SKYWALK
59 #include <skywalk/os_skywalk_private.h>
60 #include <skywalk/nexus/flowswitch/flow/flow_var.h>
61 #include <skywalk/namespace/netns.h>
62 #endif /* SKYWALK */
63 
64 #include <stdbool.h>
65 
66 #include <os/log.h>
67 
68 #include <IOKit/IOBSD.h>
69 
70 #include <string.h>
71 
72 #define ESP_HDR_SIZE 4
73 #define PORT_ISAKMP 500
74 #define PORT_ISAKMP_NATT 4500   /* rfc3948 */
75 
76 #define IF_XNAME(ifp) ((ifp) != NULL ? (ifp)->if_xname : (const char * __null_terminated)"")
77 
78 extern bool IOPMCopySleepWakeUUIDKey(char *buffer, size_t buf_len);
79 
80 SYSCTL_DECL(_net_link_generic_system);
81 
82 SYSCTL_NODE(_net_link_generic_system, OID_AUTO, port_used,
83     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "if port used");
84 
85 struct if_ports_used_stats if_ports_used_stats = {};
86 static int sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS;
87 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, stats,
88     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
89     sysctl_if_ports_used_stats, "S,struct if_ports_used_stats", "");
90 
91 static uuid_t current_wakeuuid;
92 SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, current_wakeuuid,
93     CTLFLAG_RD | CTLFLAG_LOCKED,
94     current_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");
95 
96 static int sysctl_net_port_info_list SYSCTL_HANDLER_ARGS;
97 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, list,
98     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
99     sysctl_net_port_info_list, "S,xnpigen", "");
100 
101 static int use_test_wakeuuid = 0;
102 static uuid_t test_wakeuuid;
103 
104 #if (DEVELOPMENT || DEBUG)
105 SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, use_test_wakeuuid,
106     CTLFLAG_RW | CTLFLAG_LOCKED,
107     &use_test_wakeuuid, 0, "");
108 
109 static int sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS;
110 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, new_test_wakeuuid,
111     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
112     sysctl_new_test_wakeuuid, "S,uuid_t", "");
113 
114 static int sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS;
115 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, clear_test_wakeuuid,
116     CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
117     sysctl_clear_test_wakeuuid, "S,uuid_t", "");
118 
119 SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, test_wakeuuid,
120     CTLFLAG_RD | CTLFLAG_LOCKED,
121     test_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");
122 
123 /*
124  * use_fake_lpw is used for testing only
125  */
126 #define FAKE_LPW_OFF            0 /* fake LPW off */
127 #define FAKE_LPW_ON_ONCE        1 /* use fake LPW once */
128 #define FAKE_LPW_ALWAYS_ON      2 /* permanent fake LPW mode */
129 #define FAKE_LPW_FLIP_ON        3 /* LPW on, then switch to off */
130 #define FAKE_LPW_FLIP_OFF       4 /* LPW off, then switch to on */
131 
132 static int use_fake_lpw = 0;
133 static int sysctl_use_fake_lpw SYSCTL_HANDLER_ARGS;
134 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, use_fake_lpw,
135     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
136     &use_fake_lpw, 0, &sysctl_use_fake_lpw, "I", "");
137 
138 bool fake_lpw_mode_is_set = false;
139 
140 SYSCTL_NODE(_net_link_generic_system_port_used, OID_AUTO, mark_wake_packet,
141     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "if port used");
142 
143 static int sysctl_mark_wake_packet_port SYSCTL_HANDLER_ARGS;
144 static int sysctl_mark_wake_packet_if SYSCTL_HANDLER_ARGS;
145 
146 static int mark_wake_packet_local_port = 0;
147 SYSCTL_PROC(_net_link_generic_system_port_used_mark_wake_packet, OID_AUTO, local_port,
148     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
149     &mark_wake_packet_local_port, 0, &sysctl_mark_wake_packet_port, "I", "");
150 
151 static int mark_wake_packet_remote_port = 0;
152 SYSCTL_PROC(_net_link_generic_system_port_used_mark_wake_packet, OID_AUTO, remote_port,
153     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
154     &mark_wake_packet_remote_port, 0, &sysctl_mark_wake_packet_port, "I", "");
155 
156 static int mark_wake_packet_ipproto = 0;
157 SYSCTL_INT(_net_link_generic_system_port_used_mark_wake_packet, OID_AUTO, ipproto,
158     CTLFLAG_RW | CTLFLAG_LOCKED,
159     &mark_wake_packet_ipproto, 0, "");
160 
161 static char mark_wake_packet_if[IFNAMSIZ];
162 SYSCTL_PROC(_net_link_generic_system_port_used_mark_wake_packet, OID_AUTO, if,
163     CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
164     0, 0, sysctl_mark_wake_packet_if, "A", "");
165 
166 #endif /* (DEVELOPMENT || DEBUG) */
167 
168 static int sysctl_get_ports_used SYSCTL_HANDLER_ARGS;
169 SYSCTL_NODE(_net_link_generic_system, OID_AUTO, get_ports_used,
170     CTLFLAG_RD | CTLFLAG_LOCKED,
171     sysctl_get_ports_used, "");
172 
173 int if_ports_used_verbose = 0;
174 SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, verbose,
175     CTLFLAG_RW | CTLFLAG_LOCKED,
176     &if_ports_used_verbose, 0, "");
177 
178 struct timeval wakeuuid_not_set_last_time;
179 int sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS;
180 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
181     wakeuuid_not_set_last_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
182     0, 0, sysctl_wakeuuid_not_set_last_time, "S,timeval", "");
183 
184 char wakeuuid_not_set_last_if[IFXNAMSIZ];
185 int sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS;
186 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
187     wakeuuid_not_set_last_if, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
188     0, 0, sysctl_wakeuuid_not_set_last_if, "A", "");
189 
190 struct timeval wakeuuid_last_update_time;
191 int sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS;
192 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
193     wakeuuid_last_update_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
194     0, 0, sysctl_wakeuuid_last_update_time, "S,timeval", "");
195 
196 static bool            last_wake_phy_if_set = false;
197 static char            last_wake_phy_if_name[IFNAMSIZ]; /* name + unit */
198 static uint32_t        last_wake_phy_if_family;
199 static uint32_t        last_wake_phy_if_subfamily;
200 static uint32_t        last_wake_phy_if_functional_type;
201 static bool            last_wake_phy_if_delay_wake_pkt = false;
202 static bool            last_wake_phy_if_lpw = false;
203 
204 static bool has_notified_wake_pkt = false;
205 static bool has_notified_unattributed_wake = false;
206 
207 static bool is_lpw_mode = false;
208 
209 static LCK_GRP_DECLARE(net_port_entry_head_lock_group, "net port entry lock");
210 static LCK_MTX_DECLARE(net_port_entry_head_lock, &net_port_entry_head_lock_group);
211 
212 
213 struct net_port_entry {
214 	SLIST_ENTRY(net_port_entry)     npe_list_next;
215 	TAILQ_ENTRY(net_port_entry)     npe_hash_next;
216 	struct net_port_info            npe_npi;
217 };
218 
219 static KALLOC_TYPE_DEFINE(net_port_entry_zone, struct net_port_entry, NET_KT_DEFAULT);
220 
221 static SLIST_HEAD(net_port_entry_list, net_port_entry) net_port_entry_list =
222     SLIST_HEAD_INITIALIZER(&net_port_entry_list);
223 
224 struct timeval wakeuiid_last_check;
225 
226 /*
227  * Hashing of the net_port_entry list is based on the local port
228  *
229  * The hash masks uses the least significant bits so we have to use host byte order
230  * when applying the mask because the LSB have more entropy that the MSB (most local ports
231  * are in the high dynamic port range)
232  */
233 #define NPE_HASH_BUCKET_COUNT 32
234 #define NPE_HASH_MASK (NPE_HASH_BUCKET_COUNT - 1)
235 #define NPE_HASH_VAL(_lport) (ntohs(_lport) & NPE_HASH_MASK)
236 #define NPE_HASH_HEAD(_lport) (&net_port_entry_hash_table[NPE_HASH_VAL(_lport)])
237 
238 static TAILQ_HEAD(net_port_entry_hash_table, net_port_entry) * __indexable net_port_entry_hash_table = NULL;
239 
240 /*
241  * For some types of physical interface we need to delay the notification of wake packet events
242  * until a user land interface controller confirms the AP wake was caused by its packet
243  */
244 struct net_port_info_wake_pkt_event {
245 	uint32_t                npi_wp_code;
246 	uint32_t                npi_wp_flags;
247 	union {
248 		struct net_port_info_wake_event _npi_ev_wake_pkt_attributed;
249 		struct net_port_info_una_wake_event _npi_ev_wake_pkt_unattributed;
250 	} npi_ev_wake_pkt_;
251 };
252 
253 #define npi_ev_wake_pkt_attributed npi_ev_wake_pkt_._npi_ev_wake_pkt_attributed
254 #define npi_ev_wake_pkt_unattributed npi_ev_wake_pkt_._npi_ev_wake_pkt_unattributed
255 
256 int sysctl_wake_pkt_event_notify SYSCTL_HANDLER_ARGS;
257 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, wake_pkt_event_notify,
258     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY, 0, 0,
259     sysctl_wake_pkt_event_notify, "I", "");
260 
261 /* Bitmap of the interface families to delay the notification of wake packet events */
262 static uint32_t npi_wake_packet_event_delay_if_families = 0;
263 
264 /* How many interfaces families are supported */
265 #define NPI_MAX_IF_FAMILY_BITS 32
266 
267 int sysctl_wake_pkt_event_delay_if_families SYSCTL_HANDLER_ARGS;
268 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, wake_pkt_event_delay_if_families,
269     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0,
270     sysctl_wake_pkt_event_delay_if_families, "I", "");
271 
272 /* last_wake_pkt_event is informational */
273 static struct net_port_info_wake_pkt_event last_wake_pkt_event;
274 
275 /*
276  * delay_wake_pkt_event hold the current wake packet event that is delayed waiting for
277  * confirmation from a userspace agent
278  * It can be overwritten as a wake packet makes its way up the stack
279  */
280 static struct net_port_info_wake_pkt_event delay_wake_pkt_event;
281 
282 int sysctl_last_attributed_wake_event SYSCTL_HANDLER_ARGS;
283 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
284     last_attributed_wake_event, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
285     0, 0, sysctl_last_attributed_wake_event, "S,net_port_info_wake_event", "");
286 
287 int sysctl_last_unattributed_wake_event SYSCTL_HANDLER_ARGS;
288 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
289     last_unattributed_wake_event, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
290     0, 0, sysctl_last_unattributed_wake_event, "S,net_port_info_una_wake_event", "");
291 
292 os_log_t wake_packet_log_handle = NULL;
293 
294 static bool is_wake_pkt_event_delay(uint32_t ifrtype);
295 
296 static bool
_if_need_delayed_wake_pkt_event_inner(struct ifnet * ifp)297 _if_need_delayed_wake_pkt_event_inner(struct ifnet *ifp)
298 {
299 	if ((ifp->if_xflags & IFXF_DELAYWAKEPKTEVENT) != 0 ||
300 	    is_wake_pkt_event_delay(ifp->if_family)) {
301 		return true;
302 	}
303 	return false;
304 }
305 
306 static bool
if_need_delayed_wake_pkt_event(struct ifnet * ifp)307 if_need_delayed_wake_pkt_event(struct ifnet *ifp)
308 {
309 	if (ifp != NULL) {
310 		if (_if_need_delayed_wake_pkt_event_inner(ifp) == true) {
311 			return true;
312 		}
313 		if (ifp->if_delegated.ifp != NULL) {
314 			return _if_need_delayed_wake_pkt_event_inner(ifp->if_delegated.ifp);
315 		}
316 	}
317 	return false;
318 }
319 
320 /*
321  * Initialize IPv4 source address hash table.
322  */
323 void
if_ports_used_init(void)324 if_ports_used_init(void)
325 {
326 	if (net_port_entry_hash_table != NULL) {
327 		return;
328 	}
329 
330 	wake_packet_log_handle = os_log_create("com.apple.xnu.net.wake_packet", "");
331 
332 	net_port_entry_hash_table = zalloc_permanent(
333 		NPE_HASH_BUCKET_COUNT * sizeof(*net_port_entry_hash_table),
334 		ZALIGN_PTR);
335 }
336 
/*
 * Report whether Low Power Wake (LPW) mode is currently in effect for
 * packets received on 'ifp', refreshing the cached is_lpw_mode state.
 *
 * Returns false immediately when the interface neither carries
 * IFXF_LOW_POWER_WAKE nor was the last physical wake interface in LPW.
 */
bool
if_is_lpw_enabled(struct ifnet *ifp)
{
	bool old_is_lpw_mode = is_lpw_mode; /* remembered only to log changes */

	if (ifp == NULL) {
		return false;
	}

	if ((ifp->if_xflags & IFXF_LOW_POWER_WAKE) == 0 && last_wake_phy_if_lpw == false) {
		return false;
	}

#if (DEBUG || DEVELOPMENT)
	/*
	 * Test hook: when use_fake_lpw is engaged, LPW state is simulated
	 * for the interface named by mark_wake_packet_if and the real power
	 * management state is not consulted.
	 */
	if (use_fake_lpw != FAKE_LPW_OFF) {
		if (strlcmp(mark_wake_packet_if, IF_XNAME(ifp), IFNAMSIZ) == 0) {
			fake_lpw_mode_is_set = true;

			switch (use_fake_lpw) {
			case FAKE_LPW_ON_ONCE:
				/* One-shot: report LPW now, then stop faking */
				is_lpw_mode = true;
				use_fake_lpw = FAKE_LPW_OFF;
				break;
			case FAKE_LPW_ALWAYS_ON:
				is_lpw_mode = true;
				break;
			case FAKE_LPW_FLIP_ON:
				/* Alternating mode: on this call, off next call */
				is_lpw_mode = true;
				use_fake_lpw = FAKE_LPW_FLIP_OFF;
				break;
			case FAKE_LPW_FLIP_OFF:
				is_lpw_mode = false;
				use_fake_lpw = FAKE_LPW_FLIP_ON;
				break;
			}

			if (if_ports_used_verbose && is_lpw_mode != old_is_lpw_mode) {
				os_log(wake_packet_log_handle, "if_is_lpw_enabled %s set LPW to %d",
				    IF_XNAME(ifp), is_lpw_mode == true ? 1 : 0);
			}

			return is_lpw_mode;
		}
		/* In fake mode, ignore packets from other interfaces */
		return false;
	}
#endif /* (DEBUG || DEVELOPMENT) */

	/* Normal path: the power management subsystem is authoritative */
	if (IOPMIsLPWMode()) {
		is_lpw_mode = true;
	} else {
		is_lpw_mode = false;
	}
	if (if_ports_used_verbose && is_lpw_mode != old_is_lpw_mode) {
		os_log(wake_packet_log_handle, "if_is_lpw_enabled %s set LPW to %d",
		    IF_XNAME(ifp), is_lpw_mode == true ? 1 : 0);
	}

	return is_lpw_mode;
}
397 
/*
 * Request a transition from Low Power Wake to Full Wake on behalf of
 * traffic on 'ifp'.  No-op when LPW is not currently enabled for the
 * interface.  'lpw_exit_reason' is used for logging only.
 */
void
if_exit_lpw(struct ifnet *ifp, const char *lpw_exit_reason)
{
	if (if_is_lpw_enabled(ifp) == false) {
		return;
	}
	is_lpw_mode = false;

	if_ports_used_stats.ifpu_lpw_to_full_wake++;
	os_log_error(wake_packet_log_handle, "if_exit_lpw: LPW to Full Wake requested on %s reason %s",
	    IF_XNAME(ifp), lpw_exit_reason);

#if (DEVELOPMENT || DEBUG)
	/*
	 * When LPW is only simulated, skip the real full-wake request unless
	 * the system genuinely is in LPW.
	 */
	if (fake_lpw_mode_is_set == true) {
		/* Let's not mess up with the IO power management subsystem */
		if (IOPMIsLPWMode() == false) {
			return;
		}
	}
#endif /* (DEVELOPMENT || DEBUG) */

	IOPMNetworkStackFullWake(kIOPMNetworkStackFullWakeFlag, "Network.ConnectionNotIdle");
}
421 
422 static void
net_port_entry_list_clear(void)423 net_port_entry_list_clear(void)
424 {
425 	struct net_port_entry *npe;
426 
427 	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);
428 
429 	while ((npe = SLIST_FIRST(&net_port_entry_list)) != NULL) {
430 		SLIST_REMOVE_HEAD(&net_port_entry_list, npe_list_next);
431 		TAILQ_REMOVE(NPE_HASH_HEAD(npe->npe_npi.npi_local_port), npe, npe_hash_next);
432 
433 		zfree(net_port_entry_zone, npe);
434 	}
435 
436 	for (int i = 0; i < NPE_HASH_BUCKET_COUNT; i++) {
437 		VERIFY(TAILQ_EMPTY(&net_port_entry_hash_table[i]));
438 	}
439 
440 	if_ports_used_stats.ifpu_npe_count = 0;
441 	if_ports_used_stats.ifpu_wakeuid_gen++;
442 }
443 
444 static bool
get_test_wake_uuid(uuid_string_t wakeuuid_str)445 get_test_wake_uuid(uuid_string_t wakeuuid_str)
446 {
447 	if (!uuid_is_null(test_wakeuuid)) {
448 		if (wakeuuid_str != NULL) {
449 			uuid_unparse(test_wakeuuid, wakeuuid_str);
450 		}
451 		return true;
452 	}
453 
454 	return false;
455 }
456 
457 static bool
is_wakeuuid_set(void)458 is_wakeuuid_set(void)
459 {
460 	if (__improbable(use_test_wakeuuid) && !uuid_is_null(test_wakeuuid)) {
461 		return true;
462 	}
463 
464 	/*
465 	 * IOPMCopySleepWakeUUIDKey() tells if SleepWakeUUID is currently set
466 	 * That means we are currently in a sleep/wake cycle
467 	 */
468 	return IOPMCopySleepWakeUUIDKey(NULL, 0);
469 }
470 
/*
 * Refresh the current wake UUID from the power management subsystem (or
 * the test UUID when use_test_wakeuuid is on).  If the UUID changed
 * since the last call, clear the recorded port list and reset all
 * per-wake state (notification flags, last wake events, physical wake
 * interface info, LPW mode).  'ifp' is used only for logging and the
 * "wake UUID not set" bookkeeping; it may be NULL.
 */
void
if_ports_used_update_wakeuuid(struct ifnet *ifp)
{
	uuid_t wakeuuid;
	bool wakeuuid_is_set = false;
	bool updated = false;
	uuid_string_t wakeuuid_str;

	uuid_clear(wakeuuid);

	if (__improbable(use_test_wakeuuid)) {
		wakeuuid_is_set = get_test_wake_uuid(wakeuuid_str);
	} else {
		wakeuuid_is_set = IOPMCopySleepWakeUUIDKey(wakeuuid_str,
		    sizeof(wakeuuid_str));
	}

	if (wakeuuid_is_set) {
		/* Treat an unparseable UUID string the same as "not set" */
		if (uuid_parse(wakeuuid_str, wakeuuid) != 0) {
			os_log(wake_packet_log_handle,
			    "if_ports_used_update_wakeuuid: IOPMCopySleepWakeUUIDKey got bad value %s\n",
			    wakeuuid_str);
			wakeuuid_is_set = false;
		}
	}

	if (!wakeuuid_is_set) {
		/* Record when/where we noticed the wake UUID missing */
		if (ifp != NULL) {
			if (if_ports_used_verbose > 0) {
				os_log_info(wake_packet_log_handle,
				    "if_ports_used_update_wakeuuid: SleepWakeUUID not set, "
				    "don't update the port list for %s\n",
				    ifp != NULL ? if_name(ifp) : "");
			}
			if_ports_used_stats.ifpu_wakeuuid_not_set_count += 1;
			microtime(&wakeuuid_not_set_last_time);
			strlcpy(wakeuuid_not_set_last_if, if_name(ifp),
			    sizeof(wakeuuid_not_set_last_if));
		}
		return;
	}

	lck_mtx_lock(&net_port_entry_head_lock);
	if (uuid_compare(wakeuuid, current_wakeuuid) != 0) {
		/* A pending delayed wake event was never delivered */
		if (last_wake_phy_if_delay_wake_pkt) {
			if_ports_used_stats.ifpu_delayed_wake_event_undelivered++;
		}

		/* New sleep/wake cycle: start from a clean slate */
		net_port_entry_list_clear();
		uuid_copy(current_wakeuuid, wakeuuid);
		microtime(&wakeuuid_last_update_time);
		updated = true;

		has_notified_wake_pkt = false;
		has_notified_unattributed_wake = false;

		memset(&last_wake_pkt_event, 0, sizeof(last_wake_pkt_event));
		memset(&delay_wake_pkt_event, 0, sizeof(delay_wake_pkt_event));

		last_wake_phy_if_set = false;
		memset(&last_wake_phy_if_name, 0, sizeof(last_wake_phy_if_name));
		last_wake_phy_if_family = IFRTYPE_FAMILY_ANY;
		last_wake_phy_if_subfamily = IFRTYPE_SUBFAMILY_ANY;
		last_wake_phy_if_functional_type = IFRTYPE_FUNCTIONAL_UNKNOWN;
		last_wake_phy_if_delay_wake_pkt = false;
		last_wake_phy_if_lpw = false;

		is_lpw_mode = false;
#if (DEVELOPMENT || DEBUG)
		fake_lpw_mode_is_set = false;
#endif /* (DEVELOPMENT || DEBUG) */
	}
	/*
	 * Record the time last checked
	 */
	microuptime(&wakeuiid_last_check);
	lck_mtx_unlock(&net_port_entry_head_lock);

	/* Log outside the lock */
	if (updated && if_ports_used_verbose > 0) {
		uuid_string_t uuid_str;

		uuid_unparse(current_wakeuuid, uuid_str);
		os_log(wake_packet_log_handle, "if_ports_used_update_wakeuuid: current wakeuuid %s for %s",
		    uuid_str, ifp != NULL ? if_name(ifp) : "");
	}
}
557 
558 static bool
net_port_info_equal(const struct net_port_info * x,const struct net_port_info * y)559 net_port_info_equal(const struct net_port_info *x,
560     const struct net_port_info *y)
561 {
562 	ASSERT(x != NULL && y != NULL);
563 
564 	if (x->npi_if_index == y->npi_if_index &&
565 	    x->npi_local_port == y->npi_local_port &&
566 	    x->npi_foreign_port == y->npi_foreign_port &&
567 	    x->npi_owner_pid == y->npi_owner_pid &&
568 	    x->npi_effective_pid == y->npi_effective_pid &&
569 	    x->npi_flags == y->npi_flags &&
570 	    memcmp(&x->npi_local_addr_, &y->npi_local_addr_,
571 	    sizeof(union in_addr_4_6)) == 0 &&
572 	    memcmp(&x->npi_foreign_addr_, &y->npi_foreign_addr_,
573 	    sizeof(union in_addr_4_6)) == 0) {
574 		return true;
575 	}
576 	return false;
577 }
578 
/*
 * Return true if an entry equal to 'npi' is already present in the hash
 * bucket for its local port.  Also updates the hash probe statistics
 * (total entries examined and longest single search).
 *
 * Caller must hold net_port_entry_head_lock.
 */
static bool
net_port_info_has_entry(const struct net_port_info *npi)
{
	struct net_port_entry *npe;
	bool found = false;
	int32_t count = 0;	/* entries examined in this bucket */

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);

	TAILQ_FOREACH(npe, NPE_HASH_HEAD(npi->npi_local_port), npe_hash_next) {
		count += 1;
		if (net_port_info_equal(&npe->npe_npi, npi)) {
			found = true;
			break;
		}
	}
	if_ports_used_stats.ifpu_npi_hash_search_total += count;
	if (count > if_ports_used_stats.ifpu_npi_hash_search_max) {
		if_ports_used_stats.ifpu_npi_hash_search_max = count;
	}

	return found;
}
602 
603 static bool
net_port_info_add_entry(const struct net_port_info * npi)604 net_port_info_add_entry(const struct net_port_info *npi)
605 {
606 	struct net_port_entry   *npe = NULL;
607 	uint32_t num = 0;
608 	bool entry_added = false;
609 
610 	ASSERT(npi != NULL);
611 
612 	if (__improbable(is_wakeuuid_set() == false)) {
613 		if_ports_used_stats.ifpu_npi_not_added_no_wakeuuid++;
614 		if (if_ports_used_verbose > 0) {
615 			os_log(wake_packet_log_handle, "%s: wakeuuid not set not adding "
616 			    "port: %u flags: 0x%xif: %u pid: %u epid %u",
617 			    __func__,
618 			    ntohs(npi->npi_local_port),
619 			    npi->npi_flags,
620 			    npi->npi_if_index,
621 			    npi->npi_owner_pid,
622 			    npi->npi_effective_pid);
623 		}
624 		return false;
625 	}
626 
627 	npe = zalloc_flags(net_port_entry_zone, Z_WAITOK | Z_ZERO);
628 	if (__improbable(npe == NULL)) {
629 		os_log(wake_packet_log_handle, "%s: zalloc() failed for "
630 		    "port: %u flags: 0x%x if: %u pid: %u epid %u",
631 		    __func__,
632 		    ntohs(npi->npi_local_port),
633 		    npi->npi_flags,
634 		    npi->npi_if_index,
635 		    npi->npi_owner_pid,
636 		    npi->npi_effective_pid);
637 		return false;
638 	}
639 
640 	memcpy(&npe->npe_npi, npi, sizeof(npe->npe_npi));
641 
642 	if (IF_INDEX_IN_RANGE(npe->npe_npi.npi_if_index)) {
643 		struct ifnet *ifp = ifindex2ifnet[npe->npe_npi.npi_if_index];
644 		if (ifp != NULL) {
645 			if (IFNET_IS_COMPANION_LINK(ifp)) {
646 				npe->npe_npi.npi_flags |= NPIF_COMPLINK;
647 			}
648 			if (if_need_delayed_wake_pkt_event(ifp)) {
649 				npe->npe_npi.npi_flags |= NPIF_DELAYWAKEPKTEVENT;
650 			}
651 		}
652 	}
653 
654 	lck_mtx_lock(&net_port_entry_head_lock);
655 
656 	if (net_port_info_has_entry(npi) == false) {
657 		SLIST_INSERT_HEAD(&net_port_entry_list, npe, npe_list_next);
658 		TAILQ_INSERT_HEAD(NPE_HASH_HEAD(npi->npi_local_port), npe, npe_hash_next);
659 		num = (uint32_t)if_ports_used_stats.ifpu_npe_count++; /* rollover OK */
660 		entry_added = true;
661 
662 		if (if_ports_used_stats.ifpu_npe_count > if_ports_used_stats.ifpu_npe_max) {
663 			if_ports_used_stats.ifpu_npe_max = if_ports_used_stats.ifpu_npe_count;
664 		}
665 		if_ports_used_stats.ifpu_npe_total++;
666 
667 		if (if_ports_used_verbose > 1) {
668 			os_log(wake_packet_log_handle, "%s: num %u for "
669 			    "port: %u flags: 0x%x if: %u pid: %u epid %u",
670 			    __func__,
671 			    num,
672 			    ntohs(npi->npi_local_port),
673 			    npi->npi_flags,
674 			    npi->npi_if_index,
675 			    npi->npi_owner_pid,
676 			    npi->npi_effective_pid);
677 		}
678 	} else {
679 		if_ports_used_stats.ifpu_npe_dup++;
680 		if (if_ports_used_verbose > 2) {
681 			os_log(wake_packet_log_handle, "%s: already added "
682 			    "port: %u flags: 0x%x if: %u pid: %u epid %u",
683 			    __func__,
684 			    ntohs(npi->npi_local_port),
685 			    npi->npi_flags,
686 			    npi->npi_if_index,
687 			    npi->npi_owner_pid,
688 			    npi->npi_effective_pid);
689 		}
690 	}
691 
692 	lck_mtx_unlock(&net_port_entry_head_lock);
693 
694 	if (entry_added == false) {
695 		zfree(net_port_entry_zone, npe);
696 	}
697 	return entry_added;
698 }
699 
700 #if (DEVELOPMENT || DEBUG)
/*
 * sysctl handler (DEVELOPMENT/DEBUG only): writing any value generates
 * a fresh test wake UUID and refreshes the wake UUID state; reading
 * returns the current test UUID.  Root only.
 */
static int
sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}
	/* Size probe */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(uuid_t);
		return 0;
	}
	if (req->newptr != USER_ADDR_NULL) {
		uuid_generate(test_wakeuuid);
		if_ports_used_update_wakeuuid(NULL);
	}
	error = SYSCTL_OUT(req, test_wakeuuid,
	    MIN(sizeof(uuid_t), req->oldlen));

	return error;
}
723 
/*
 * sysctl handler (DEVELOPMENT/DEBUG only): writing any value clears the
 * test wake UUID and refreshes the wake UUID state; reading returns the
 * (now null) test UUID.  Root only.
 */
static int
sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}
	/* Size probe */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(uuid_t);
		return 0;
	}
	if (req->newptr != USER_ADDR_NULL) {
		uuid_clear(test_wakeuuid);
		if_ports_used_update_wakeuuid(NULL);
	}
	error = SYSCTL_OUT(req, test_wakeuuid,
	    MIN(sizeof(uuid_t), req->oldlen));

	return error;
}
746 
747 #endif /* (DEVELOPMENT || DEBUG) */
748 
749 static int
sysctl_timeval(struct sysctl_req * req,const struct timeval * tv)750 sysctl_timeval(struct sysctl_req *req, const struct timeval *tv)
751 {
752 	if (proc_is64bit(req->p)) {
753 		struct user64_timeval tv64 = {};
754 
755 		tv64.tv_sec = tv->tv_sec;
756 		tv64.tv_usec = tv->tv_usec;
757 		return SYSCTL_OUT(req, &tv64, sizeof(tv64));
758 	} else {
759 		struct user32_timeval tv32 = {};
760 
761 		tv32.tv_sec = (user32_time_t)tv->tv_sec;
762 		tv32.tv_usec = tv->tv_usec;
763 		return SYSCTL_OUT(req, &tv32, sizeof(tv32));
764 	}
765 }
766 
/* sysctl handler: report when the wake UUID was last updated */
int
sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return sysctl_timeval(req, &wakeuuid_last_update_time);
}
774 
/* sysctl handler: report when the wake UUID was last found not set */
int
sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return sysctl_timeval(req, &wakeuuid_not_set_last_time);
}
782 
/*
 * sysctl handler: report the name of the last interface for which the
 * wake UUID was found not set (copied out including the NUL terminator).
 */
int
sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return SYSCTL_OUT(req, &wakeuuid_not_set_last_if, strbuflen(wakeuuid_not_set_last_if) + 1);
}
790 
791 int
792 sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS
793 {
794 #pragma unused(oidp, arg1, arg2)
795 	size_t len = sizeof(struct if_ports_used_stats);
796 
797 	if (req->oldptr != 0) {
798 		len = MIN(req->oldlen, sizeof(struct if_ports_used_stats));
799 	}
800 	return SYSCTL_OUT(req, &if_ports_used_stats, len);
801 }
802 
/*
 * sysctl handler: copy out a struct xnpigen header followed by one
 * struct net_port_info per recorded wake-port entry.  Requires the
 * PRIV_NET_PRIVILEGED_NETWORK_STATISTICS privilege.
 */
static int
sysctl_net_port_info_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int error = 0;
	struct xnpigen xnpigen;
	struct net_port_entry *npe;

	if ((error = priv_check_cred(kauth_cred_get(),
	    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0)) != 0) {
		return EPERM;
	}
	lck_mtx_lock(&net_port_entry_head_lock);

	if (req->oldptr == USER_ADDR_NULL) {
		/*
		 * Size probe: pad the estimate for entries added before the
		 * actual copyout.  NOTE(review): cnt >> 4 adds ~6.25%, not
		 * the 25% the original comment claimed (that would be >> 2).
		 */
		size_t cnt = (size_t)if_ports_used_stats.ifpu_npe_count;
		cnt += cnt >> 4;
		req->oldidx = sizeof(struct xnpigen) +
		    cnt * sizeof(struct net_port_info);
		goto done;
	}

	/* Header describing the generation, wake UUID and record layout */
	memset(&xnpigen, 0, sizeof(struct xnpigen));
	xnpigen.xng_len = sizeof(struct xnpigen);
	xnpigen.xng_gen = (uint32_t)if_ports_used_stats.ifpu_wakeuid_gen;
	uuid_copy(xnpigen.xng_wakeuuid, current_wakeuuid);
	xnpigen.xng_npi_count = (uint32_t)if_ports_used_stats.ifpu_npe_count;
	xnpigen.xng_npi_size = sizeof(struct net_port_info);
	error = SYSCTL_OUT(req, &xnpigen, sizeof(xnpigen));
	if (error != 0) {
		printf("%s: SYSCTL_OUT(xnpigen) error %d\n",
		    __func__, error);
		goto done;
	}

	/* One record per entry */
	SLIST_FOREACH(npe, &net_port_entry_list, npe_list_next) {
		error = SYSCTL_OUT(req, &npe->npe_npi,
		    sizeof(struct net_port_info));
		if (error != 0) {
			printf("%s: SYSCTL_OUT(npi) error %d\n",
			    __func__, error);
			goto done;
		}
	}
done:
	lck_mtx_unlock(&net_port_entry_head_lock);

	return error;
}
853 
854 /*
855  * Mirror the arguments of ifnet_get_local_ports_extended()
856  *  ifindex
857  *  protocol
858  *  flags
859  */
860 static int
861 sysctl_get_ports_used SYSCTL_HANDLER_ARGS
862 {
863 #pragma unused(oidp)
864 	/*
865 	 * 3 is the required number of parameters: ifindex, protocol and flags
866 	 */
867 	DECLARE_SYSCTL_HANDLER_ARG_ARRAY(int, 3, name, namelen);
868 	int error = 0;
869 	int idx;
870 	protocol_family_t protocol;
871 	u_int32_t flags;
872 	ifnet_t ifp = NULL;
873 	u_int8_t *bitfield = NULL;
874 
875 	if (req->newptr != USER_ADDR_NULL) {
876 		error = EPERM;
877 		goto done;
878 	}
879 
880 	if (req->oldptr == USER_ADDR_NULL) {
881 		req->oldidx = bitstr_size(IP_PORTRANGE_SIZE);
882 		goto done;
883 	}
884 	if (req->oldlen < bitstr_size(IP_PORTRANGE_SIZE)) {
885 		error = ENOMEM;
886 		goto done;
887 	}
888 	bitfield = (u_int8_t *) kalloc_data(bitstr_size(IP_PORTRANGE_SIZE),
889 	    Z_WAITOK | Z_ZERO);
890 	if (bitfield == NULL) {
891 		error = ENOMEM;
892 		goto done;
893 	}
894 
895 	idx = name[0];
896 	protocol = name[1];
897 	flags = name[2];
898 
899 	ifnet_head_lock_shared();
900 	if (IF_INDEX_IN_RANGE(idx)) {
901 		ifp = ifindex2ifnet[idx];
902 	}
903 	ifnet_head_done();
904 
905 	error = ifnet_get_local_ports_extended(ifp, protocol, flags, bitfield);
906 	if (error != 0) {
907 		printf("%s: ifnet_get_local_ports_extended() error %d\n",
908 		    __func__, error);
909 		goto done;
910 	}
911 	error = SYSCTL_OUT(req, bitfield, bitstr_size(IP_PORTRANGE_SIZE));
912 done:
913 	if (bitfield != NULL) {
914 		kfree_data(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
915 	}
916 	return error;
917 }
918 
/*
 * Record the addressing and process ownership of an inpcb-based socket as
 * a net_port_info entry for wake packet attribution.
 *
 * Returns true when the entry was added to the table, false when the
 * ifindex does not fit in 16 bits or the protocol is neither TCP nor UDP.
 */
__private_extern__ bool
if_ports_used_add_inpcb(const uint32_t ifindex, const struct inpcb *inp)
{
	struct net_port_info npi = {};
	struct socket *so = inp->inp_socket;

	/* This is unlikely to happen but better be safe than sorry */
	if (ifindex > UINT16_MAX) {
		os_log(wake_packet_log_handle, "%s: ifindex %u too big", __func__, ifindex);
		return false;
	}

	/* Prefer the caller-supplied ifindex, else fall back to the last output interface */
	if (ifindex != 0) {
		npi.npi_if_index = (uint16_t)ifindex;
	} else if (inp->inp_last_outifp != NULL) {
		npi.npi_if_index = (uint16_t)inp->inp_last_outifp->if_index;
	}

	npi.npi_flags |= NPIF_SOCKET;

	/* Timestamp the entry with the time of the last wake UUID check */
	npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
	npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

	/* A socket that opted out of waking the system is tracked but flagged */
	if (so->so_options & SO_NOWAKEFROMSLEEP) {
		npi.npi_flags |= NPIF_NOWAKE;
	}

	if (inp->inp_flags2 & INP2_CONNECTION_IDLE) {
		npi.npi_flags |= NPIF_CONNECTION_IDLE;
	}

	if (SOCK_PROTO(so) == IPPROTO_TCP) {
		struct tcpcb *tp = intotcpcb(inp);

		npi.npi_flags |= NPIF_TCP;
		if (tp != NULL && tp->t_state == TCPS_LISTEN) {
			npi.npi_flags |= NPIF_LISTEN;
		}
	} else if (SOCK_PROTO(so) == IPPROTO_UDP) {
		npi.npi_flags |= NPIF_UDP;
	} else {
		/* Only TCP and UDP sockets are tracked for wake attribution */
		os_log(wake_packet_log_handle, "%s: unexpected protocol %u for inp %p", __func__,
		    SOCK_PROTO(inp->inp_socket), inp);
		return false;
	}

	uuid_copy(npi.npi_flow_uuid, inp->necp_client_uuid);

	/* Ports are kept in network byte order as stored in the inpcb */
	npi.npi_local_port = inp->inp_lport;
	npi.npi_foreign_port = inp->inp_fport;

	/*
	 * Take in account IPv4 addresses mapped on IPv6
	 */
	if ((inp->inp_vflag & INP_IPV6) != 0 && (inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
	    (inp->inp_vflag & (INP_IPV6 | INP_IPV4)) == (INP_IPV6 | INP_IPV4)) {
		/* Dual flags set: match either address family */
		npi.npi_flags |= NPIF_IPV6 | NPIF_IPV4;
		memcpy(&npi.npi_local_addr_in6,
		    &inp->in6p_laddr, sizeof(struct in6_addr));
	} else if (inp->inp_vflag & INP_IPV4) {
		npi.npi_flags |= NPIF_IPV4;
		npi.npi_local_addr_in = inp->inp_laddr;
		npi.npi_foreign_addr_in = inp->inp_faddr;
	} else {
		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6,
		    &inp->in6p_laddr, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6,
		    &inp->in6p_faddr, sizeof(struct in6_addr));

		/* Clear the embedded scope ID */
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
			npi.npi_local_addr_in6.s6_addr16[1] = 0;
		}
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
			npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
		}
	}

	npi.npi_owner_pid = so->last_pid;

	if (so->last_pid != 0) {
		proc_name(so->last_pid, npi.npi_owner_pname,
		    sizeof(npi.npi_owner_pname));
		uuid_copy(npi.npi_owner_uuid, so->last_uuid);
	}

	/* For delegated sockets record both the delegate and the effective process */
	if (so->so_flags & SOF_DELEGATED) {
		npi.npi_flags |= NPIF_DELEGATED;
		npi.npi_effective_pid = so->e_pid;
		if (so->e_pid != 0) {
			proc_name(so->e_pid, npi.npi_effective_pname,
			    sizeof(npi.npi_effective_pname));
		}
		uuid_copy(npi.npi_effective_uuid, so->e_uuid);
	} else {
		/* Not delegated: the effective process is the owner */
		npi.npi_effective_pid = so->last_pid;
		if (so->last_pid != 0) {
			strbufcpy(npi.npi_effective_pname, npi.npi_owner_pname);
		}
		uuid_copy(npi.npi_effective_uuid, so->last_uuid);
	}

	return net_port_info_add_entry(&npi);
}
1024 
1025 #if SKYWALK
/*
 * Record a Skywalk flow entry as a net_port_info entry for wake packet
 * attribution.
 *
 * Returns true when the entry was added to the table, false when the
 * ifindex does not fit in 16 bits or the flow protocol is neither TCP
 * nor UDP.
 */
__private_extern__ bool
if_ports_used_add_flow_entry(const struct flow_entry *fe, const uint32_t ifindex,
    const struct ns_flow_info *nfi, uint32_t ns_flags)
{
	struct net_port_info npi = {};

	/* This is unlikely to happen but better be safe than sorry */
	if (ifindex > UINT16_MAX) {
		os_log(wake_packet_log_handle, "%s: ifindex %u too big", __func__, ifindex);
		return false;
	}
	npi.npi_if_index = (uint16_t)ifindex;

	npi.npi_flags |= NPIF_CHANNEL;

	/* Timestamp the entry with the time of the last wake UUID check */
	npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
	npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

	/* Translate the netns flags into net_port_info flags */
	if (ns_flags & NETNS_NOWAKEFROMSLEEP) {
		npi.npi_flags |= NPIF_NOWAKE;
	}
	if (ns_flags & NETNS_CONNECTION_IDLE) {
		npi.npi_flags |= NPIF_CONNECTION_IDLE;
	}
	if ((ns_flags & NETNS_OWNER_MASK) == NETNS_LISTENER) {
		npi.npi_flags |= NPIF_LISTEN;
	}

	uuid_copy(npi.npi_flow_uuid, nfi->nfi_flow_uuid);

	if (nfi->nfi_protocol == IPPROTO_TCP) {
		npi.npi_flags |= NPIF_TCP;
	} else if (nfi->nfi_protocol == IPPROTO_UDP) {
		npi.npi_flags |= NPIF_UDP;
	} else {
		/* Only TCP and UDP flows are tracked for wake attribution */
		os_log(wake_packet_log_handle, "%s: unexpected protocol %u for nfi %p",
		    __func__, nfi->nfi_protocol, nfi);
		return false;
	}

	/* Copy ports and addresses for the flow's address family */
	if (nfi->nfi_laddr.sa.sa_family == AF_INET) {
		npi.npi_flags |= NPIF_IPV4;

		npi.npi_local_port = nfi->nfi_laddr.sin.sin_port;
		npi.npi_foreign_port = nfi->nfi_faddr.sin.sin_port;

		npi.npi_local_addr_in = nfi->nfi_laddr.sin.sin_addr;
		npi.npi_foreign_addr_in = nfi->nfi_faddr.sin.sin_addr;
	} else {
		npi.npi_flags |= NPIF_IPV6;

		npi.npi_local_port = nfi->nfi_laddr.sin6.sin6_port;
		npi.npi_foreign_port = nfi->nfi_faddr.sin6.sin6_port;

		memcpy(&npi.npi_local_addr_in6,
		    &nfi->nfi_laddr.sin6.sin6_addr, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6,
		    &nfi->nfi_faddr.sin6.sin6_addr, sizeof(struct in6_addr));

		/* Clear the embedded scope ID */
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
			npi.npi_local_addr_in6.s6_addr16[1] = 0;
		}
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
			npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
		}
	}

	npi.npi_owner_pid = nfi->nfi_owner_pid;
	strbufcpy(npi.npi_owner_pname, nfi->nfi_owner_name);

	/*
	 * Get the proc UUID from the pid as the proc UUID is not present
	 * in the flow_entry
	 */
	proc_t proc = proc_find(npi.npi_owner_pid);
	if (proc != PROC_NULL) {
		proc_getexecutableuuid(proc, npi.npi_owner_uuid, sizeof(npi.npi_owner_uuid));
		proc_rele(proc);
	}
	if (nfi->nfi_effective_pid != -1) {
		npi.npi_effective_pid = nfi->nfi_effective_pid;
		strbufcpy(npi.npi_effective_pname, nfi->nfi_effective_name);
		uuid_copy(npi.npi_effective_uuid, fe->fe_eproc_uuid);
	} else {
		/* No distinct effective process: fall back to the owner */
		npi.npi_effective_pid = npi.npi_owner_pid;
		strbufcpy(npi.npi_effective_pname, npi.npi_owner_pname);
		uuid_copy(npi.npi_effective_uuid, npi.npi_owner_uuid);
	}

	return net_port_info_add_entry(&npi);
}
1118 
1119 #endif /* SKYWALK */
1120 
1121 static void
net_port_info_log_npi(const char * s,const struct net_port_info * npi)1122 net_port_info_log_npi(const char *s, const struct net_port_info *npi)
1123 {
1124 	char lbuf[MAX_IPv6_STR_LEN] = {};
1125 	char fbuf[MAX_IPv6_STR_LEN] = {};
1126 
1127 	if (npi == NULL) {
1128 		os_log(wake_packet_log_handle, "%s", s);
1129 		return;
1130 	}
1131 
1132 	if (npi->npi_flags & NPIF_IPV4) {
1133 		inet_ntop(PF_INET, &npi->npi_local_addr_in.s_addr,
1134 		    lbuf, sizeof(lbuf));
1135 		inet_ntop(PF_INET, &npi->npi_foreign_addr_in.s_addr,
1136 		    fbuf, sizeof(fbuf));
1137 	} else if (npi->npi_flags & NPIF_IPV6) {
1138 		inet_ntop(PF_INET6, &npi->npi_local_addr_in6,
1139 		    lbuf, sizeof(lbuf));
1140 		inet_ntop(PF_INET6, &npi->npi_foreign_addr_in6,
1141 		    fbuf, sizeof(fbuf));
1142 	}
1143 	os_log(wake_packet_log_handle, "%s net_port_info if_index %u arch %s family %s proto %s local %s:%u foreign %s:%u pid: %u epid %u",
1144 	    s != NULL ? s : "",
1145 	    npi->npi_if_index,
1146 	    (npi->npi_flags & NPIF_SOCKET) ? "so" : (npi->npi_flags & NPIF_CHANNEL) ? "ch" : "unknown",
1147 	    (npi->npi_flags & NPIF_IPV4) ? "ipv4" : (npi->npi_flags & NPIF_IPV6) ? "ipv6" : "unknown",
1148 	    npi->npi_flags & NPIF_TCP ? "tcp" : npi->npi_flags & NPIF_UDP ? "udp" :
1149 	    npi->npi_flags & NPIF_ESP ? "esp" : "unknown",
1150 	    lbuf, ntohs(npi->npi_local_port),
1151 	    fbuf, ntohs(npi->npi_foreign_port),
1152 	    npi->npi_owner_pid,
1153 	    npi->npi_effective_pid);
1154 }
1155 
1156 /*
1157  * net_port_info_match_npi() returns true for an exact match that does not have "no wake" set
1158  */
1159 #define NPI_MATCH_IPV4 (NPIF_IPV4 | NPIF_TCP | NPIF_UDP)
1160 #define NPI_MATCH_IPV6 (NPIF_IPV6 | NPIF_TCP | NPIF_UDP)
1161 
1162 static bool
net_port_info_match_npi(struct net_port_entry * npe,const struct net_port_info * in_npi,struct net_port_entry ** best_match)1163 net_port_info_match_npi(struct net_port_entry *npe, const struct net_port_info *in_npi,
1164     struct net_port_entry **best_match)
1165 {
1166 	if (__improbable(net_wake_pkt_debug > 1)) {
1167 		net_port_info_log_npi("net_port_info_match_npi", &npe->npe_npi);
1168 	}
1169 
1170 	/*
1171 	 * The interfaces must match or be both companion link
1172 	 */
1173 	if (npe->npe_npi.npi_if_index != in_npi->npi_if_index &&
1174 	    !((npe->npe_npi.npi_flags & NPIF_COMPLINK) && (in_npi->npi_flags & NPIF_COMPLINK))) {
1175 		return false;
1176 	}
1177 
1178 	/*
1179 	 * The local ports and protocols must match
1180 	 */
1181 	if (npe->npe_npi.npi_local_port != in_npi->npi_local_port ||
1182 	    ((npe->npe_npi.npi_flags & NPI_MATCH_IPV4) != (in_npi->npi_flags & NPI_MATCH_IPV4) &&
1183 	    (npe->npe_npi.npi_flags & NPI_MATCH_IPV6) != (in_npi->npi_flags & NPI_MATCH_IPV6))) {
1184 		return false;
1185 	}
1186 
1187 	/*
1188 	 * Search stops on an exact match
1189 	 */
1190 	if (npe->npe_npi.npi_foreign_port == in_npi->npi_foreign_port) {
1191 		if ((npe->npe_npi.npi_flags & NPIF_IPV4) && (npe->npe_npi.npi_flags & NPIF_IPV4)) {
1192 			if (in_npi->npi_local_addr_in.s_addr == npe->npe_npi.npi_local_addr_in.s_addr &&
1193 			    in_npi->npi_foreign_addr_in.s_addr == npe->npe_npi.npi_foreign_addr_in.s_addr) {
1194 				if (npe->npe_npi.npi_flags & NPIF_NOWAKE) {
1195 					/*
1196 					 * Do not overwrite an existing match when "no wake" is set
1197 					 */
1198 					if (*best_match == NULL) {
1199 						*best_match = npe;
1200 					}
1201 					return false;
1202 				}
1203 				*best_match = npe;
1204 				return true;
1205 			}
1206 		}
1207 		if ((npe->npe_npi.npi_flags & NPIF_IPV6) && (npe->npe_npi.npi_flags & NPIF_IPV6)) {
1208 			if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
1209 			    sizeof(union in_addr_4_6)) == 0 &&
1210 			    memcmp(&npe->npe_npi.npi_foreign_addr_, &in_npi->npi_foreign_addr_,
1211 			    sizeof(union in_addr_4_6)) == 0) {
1212 				if (npe->npe_npi.npi_flags & NPIF_NOWAKE) {
1213 					/*
1214 					 * Do not overwrite an existing match when "no wake" is set
1215 					 */
1216 					if (*best_match == NULL) {
1217 						*best_match = npe;
1218 					}
1219 					return false;
1220 				}
1221 				*best_match = npe;
1222 				return true;
1223 			}
1224 		}
1225 	}
1226 	/*
1227 	 * Skip connected entries as we are looking for a wildcard match
1228 	 * on the local address and port
1229 	 */
1230 	if (npe->npe_npi.npi_foreign_port != 0) {
1231 		return false;
1232 	}
1233 	/*
1234 	 * Do not overwrite an existing match when "no wake" is set
1235 	 */
1236 	if (*best_match != NULL && (npe->npe_npi.npi_flags & NPIF_NOWAKE) != 0) {
1237 		return false;
1238 	}
1239 	/*
1240 	 * The local address matches: this is our 2nd best match
1241 	 */
1242 	if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
1243 	    sizeof(union in_addr_4_6)) == 0) {
1244 		*best_match = npe;
1245 		return false;
1246 	}
1247 
1248 	/*
1249 	 * Only the local port matches, do not override a match
1250 	 * on the local address
1251 	 */
1252 	if (*best_match == NULL) {
1253 		*best_match = npe;
1254 	}
1255 	return false;
1256 }
1257 #undef NPI_MATCH_IPV4
1258 #undef NPI_MATCH_IPV6
1259 
1260 /*
1261  *
1262  */
1263 static bool
net_port_info_find_match(struct net_port_info * in_npi)1264 net_port_info_find_match(struct net_port_info *in_npi)
1265 {
1266 	struct net_port_entry *npe;
1267 	struct net_port_entry * __single best_match = NULL;
1268 
1269 	lck_mtx_lock(&net_port_entry_head_lock);
1270 
1271 	uint32_t count = 0;
1272 	TAILQ_FOREACH(npe, NPE_HASH_HEAD(in_npi->npi_local_port), npe_hash_next) {
1273 		count += 1;
1274 		/*
1275 		 * Search stop on an exact match
1276 		 */
1277 		if (net_port_info_match_npi(npe, in_npi, &best_match)) {
1278 			break;
1279 		}
1280 	}
1281 
1282 	if (best_match != NULL) {
1283 		best_match->npe_npi.npi_flags |= NPIF_WAKEPKT;
1284 		in_npi->npi_flags = best_match->npe_npi.npi_flags;
1285 		in_npi->npi_owner_pid = best_match->npe_npi.npi_owner_pid;
1286 		in_npi->npi_effective_pid = best_match->npe_npi.npi_effective_pid;
1287 		strbufcpy(in_npi->npi_owner_pname, best_match->npe_npi.npi_owner_pname);
1288 		strbufcpy(in_npi->npi_effective_pname, best_match->npe_npi.npi_effective_pname);
1289 		uuid_copy(in_npi->npi_owner_uuid, best_match->npe_npi.npi_owner_uuid);
1290 		uuid_copy(in_npi->npi_effective_uuid, best_match->npe_npi.npi_effective_uuid);
1291 	}
1292 	lck_mtx_unlock(&net_port_entry_head_lock);
1293 
1294 	if (__improbable(net_wake_pkt_debug > 0)) {
1295 		if (best_match != NULL) {
1296 			net_port_info_log_npi("wake packet match", in_npi);
1297 		} else {
1298 			net_port_info_log_npi("wake packet no match", in_npi);
1299 		}
1300 	}
1301 
1302 	return best_match != NULL ? true : false;
1303 }
1304 
1305 #if (DEBUG || DEVELOPMENT)
1306 static void
net_port_info_log_una_wake_event(const char * s,struct net_port_info_una_wake_event * ev)1307 net_port_info_log_una_wake_event(const char *s, struct net_port_info_una_wake_event *ev)
1308 {
1309 	char lbuf[MAX_IPv6_STR_LEN] = {};
1310 	char fbuf[MAX_IPv6_STR_LEN] = {};
1311 
1312 	if (ev->una_wake_pkt_flags & NPIF_IPV4) {
1313 		inet_ntop(PF_INET, &ev->una_wake_pkt_local_addr_._in_a_4.s_addr,
1314 		    lbuf, sizeof(lbuf));
1315 		inet_ntop(PF_INET, &ev->una_wake_pkt_foreign_addr_._in_a_4.s_addr,
1316 		    fbuf, sizeof(fbuf));
1317 	} else if (ev->una_wake_pkt_flags & NPIF_IPV6) {
1318 		inet_ntop(PF_INET6, &ev->una_wake_pkt_local_addr_._in_a_6.s6_addr,
1319 		    lbuf, sizeof(lbuf));
1320 		inet_ntop(PF_INET6, &ev->una_wake_pkt_foreign_addr_._in_a_6.s6_addr,
1321 		    fbuf, sizeof(fbuf));
1322 	}
1323 	os_log(wake_packet_log_handle, "%s if %s (%u) phy_if %s proto %s local %s:%u foreign %s:%u len: %u datalen: %u cflags: 0x%x proto: %u lpw: %d",
1324 	    s != NULL ? s : "",
1325 	    ev->una_wake_pkt_ifname, ev->una_wake_pkt_if_index, ev->una_wake_pkt_phy_ifname,
1326 	    ev->una_wake_pkt_flags & NPIF_TCP ? "tcp" : ev->una_wake_pkt_flags & NPIF_UDP ? "udp" :
1327 	    ev->una_wake_pkt_flags & NPIF_ESP ? "esp" : "unknown",
1328 	    lbuf, ntohs(ev->una_wake_pkt_local_port),
1329 	    fbuf, ntohs(ev->una_wake_pkt_foreign_port),
1330 	    ev->una_wake_pkt_total_len, ev->una_wake_pkt_data_len,
1331 	    ev->una_wake_pkt_control_flags, ev->una_wake_pkt_proto,
1332 	    ev->una_wake_pkt_flags & NPIF_LPW ? 1 : 0);
1333 }
1334 
1335 static void
net_port_info_log_wake_event(const char * s,struct net_port_info_wake_event * ev)1336 net_port_info_log_wake_event(const char *s, struct net_port_info_wake_event *ev)
1337 {
1338 	char lbuf[MAX_IPv6_STR_LEN] = {};
1339 	char fbuf[MAX_IPv6_STR_LEN] = {};
1340 
1341 	if (ev->wake_pkt_flags & NPIF_IPV4) {
1342 		inet_ntop(PF_INET, &ev->wake_pkt_local_addr_._in_a_4.s_addr,
1343 		    lbuf, sizeof(lbuf));
1344 		inet_ntop(PF_INET, &ev->wake_pkt_foreign_addr_._in_a_4.s_addr,
1345 		    fbuf, sizeof(fbuf));
1346 	} else if (ev->wake_pkt_flags & NPIF_IPV6) {
1347 		inet_ntop(PF_INET6, &ev->wake_pkt_local_addr_._in_a_6.s6_addr,
1348 		    lbuf, sizeof(lbuf));
1349 		inet_ntop(PF_INET6, &ev->wake_pkt_foreign_addr_._in_a_6.s6_addr,
1350 		    fbuf, sizeof(fbuf));
1351 	}
1352 	os_log(wake_packet_log_handle, "%s if %s (%u) phy_if %s proto %s local %s:%u foreign %s:%u len: %u datalen: %u cflags: 0x%x proc %s eproc %s idle %d lpw %d",
1353 	    s != NULL ? s : "",
1354 	    ev->wake_pkt_ifname, ev->wake_pkt_if_index, ev->wake_pkt_phy_ifname,
1355 	    ev->wake_pkt_flags & NPIF_TCP ? "tcp" : ev->wake_pkt_flags ? "udp" :
1356 	    ev->wake_pkt_flags & NPIF_ESP ? "esp" : "unknown",
1357 	    lbuf, ntohs(ev->wake_pkt_port),
1358 	    fbuf, ntohs(ev->wake_pkt_foreign_port),
1359 	    ev->wake_pkt_total_len, ev->wake_pkt_data_len, ev->wake_pkt_control_flags,
1360 	    ev->wake_pkt_owner_pname, ev->wake_pkt_effective_pname,
1361 	    ev->wake_pkt_flags & NPIF_CONNECTION_IDLE ? 1 : 0,
1362 	    ev->wake_pkt_flags & NPIF_LPW ? 1 : 0);
1363 }
1364 
1365 #endif /* (DEBUG || DEVELOPMENT) */
1366 
1367 /*
1368  * The process attribution of a wake packet can take several steps:
1369  *
1370  * 1) After device wakes, the first interface that sees a wake packet is the
1371  *    physical interface and we remember it via if_set_wake_physical_interface()
1372  *
1373  * 2) We try to attribute a packet to a flow or not based on the physical interface.
1374  *    If we find a flow, then the physical interface is the same as the interface used
1375  *    by the TCP/UDP flow.
1376  *
1377  * 3) If the packet is tunneled or redirected we are going to do the attribution again
 *    and the physical interface will be different from the interface used by the TCP/UDP flow.
1379  */
1380 static bool
is_wake_pkt_event_delay(uint32_t ifrtype)1381 is_wake_pkt_event_delay(uint32_t ifrtype)
1382 {
1383 	// Prevent overflow of the bitstring
1384 	if (ifrtype >= NPI_MAX_IF_FAMILY_BITS) {
1385 		return false;
1386 	}
1387 	if (bitstr_test((bitstr_t *)&npi_wake_packet_event_delay_if_families, ifrtype)) {
1388 		return true;
1389 	}
1390 	return false;
1391 }
1392 
/*
 * Remember the physical interface that received the first wake packet of
 * the current wake cycle (step 1 of the wake packet attribution described
 * above).
 *
 * Returns 0 on success or when ifp is not considered physical,
 * EJUSTRETURN when a physical wake interface was already recorded.
 */
static int
if_set_wake_physical_interface(struct ifnet *ifp)
{
	/*
	 * A physical interface is either Ethernet, cellular or companion link over BT
	 * otherwise assumes it is some kind of tunnel
	 */
	if (ifp->if_family != IFNET_FAMILY_ETHERNET && ifp->if_family != IFNET_FAMILY_CELLULAR &&
	    IFNET_IS_COMPANION_LINK_BLUETOOTH(ifp) == false) {
		return 0;
	}

	/*
	 * Only handle a wake from a physical interface per wake cycle
	 */
	if (last_wake_phy_if_set == true) {
		if_ports_used_stats.ifpu_wake_pkt_event_error += 1;
		os_log(wake_packet_log_handle,
		    "if_set_wake_physical_interface ignored on %s because already set on %s",
		    IF_XNAME(ifp), last_wake_phy_if_name);
		return EJUSTRETURN;
	}

	/* Record the identity of the physical wake interface */
	last_wake_phy_if_set = true;
	strlcpy(last_wake_phy_if_name, IF_XNAME(ifp), sizeof(last_wake_phy_if_name));
	last_wake_phy_if_family = ifp->if_family;
	last_wake_phy_if_subfamily = ifp->if_subfamily;
	last_wake_phy_if_functional_type = if_functional_type(ifp, true);

	/* Note when attribution events for this interface must be delayed */
	if (if_need_delayed_wake_pkt_event(ifp)) {
		if_ports_used_stats.ifpu_delay_phy_wake_pkt += 1;
		last_wake_phy_if_delay_wake_pkt = true;
		os_log(wake_packet_log_handle, "if_set_wake_physical_interface %s last_wake_phy_if_delay_wake_pkt set",
		    IF_XNAME(ifp));
	}
	/*
	 * NOTE(review): IFXF_* constants normally pair with if_xflags, not
	 * if_flags -- confirm testing if_flags here is intended
	 */
	if ((ifp->if_flags & IFXF_LOW_POWER_WAKE) != 0) {
		last_wake_phy_if_lpw = true;
	}

	return 0;
}
1434 
1435 static void
deliver_unattributed_wake_packet_event(struct net_port_info_una_wake_event * event_data)1436 deliver_unattributed_wake_packet_event(struct net_port_info_una_wake_event *event_data)
1437 {
1438 	struct kev_msg ev_msg = {};
1439 
1440 	if_ports_used_stats.ifpu_unattributed_wake_event += 1;
1441 
1442 	last_wake_pkt_event.npi_wp_code = KEV_POWER_UNATTRIBUTED_WAKE;
1443 	memcpy(&last_wake_pkt_event.npi_ev_wake_pkt_unattributed, event_data,
1444 	    sizeof(struct net_port_info_una_wake_event));
1445 
1446 	ev_msg.vendor_code = KEV_VENDOR_APPLE;
1447 	ev_msg.kev_class = KEV_NETWORK_CLASS;
1448 	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
1449 	ev_msg.event_code  = KEV_POWER_UNATTRIBUTED_WAKE;
1450 
1451 	ev_msg.dv[0].data_ptr = event_data;
1452 	ev_msg.dv[0].data_length = sizeof(struct net_port_info_una_wake_event);
1453 
1454 	int result = kev_post_msg(&ev_msg);
1455 	if (result != 0) {
1456 		uuid_string_t wake_uuid_str;
1457 
1458 		uuid_unparse(event_data->una_wake_uuid, wake_uuid_str);
1459 		os_log_error(wake_packet_log_handle,
1460 		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
1461 		    __func__, result, wake_uuid_str);
1462 
1463 		if_ports_used_stats.ifpu_wake_pkt_event_error += 1;
1464 	}
1465 #if (DEBUG || DEVELOPMENT)
1466 	net_port_info_log_una_wake_event("unattributed wake packet event", event_data);
1467 #endif /* (DEBUG || DEVELOPMENT) */
1468 }
1469 
1470 static void
deliver_attributed_wake_packet_event(struct net_port_info_wake_event * event_data)1471 deliver_attributed_wake_packet_event(struct net_port_info_wake_event *event_data)
1472 {
1473 	struct kev_msg ev_msg = {};
1474 
1475 	has_notified_wake_pkt = true;
1476 
1477 	if_ports_used_stats.ifpu_wake_pkt_event += 1;
1478 
1479 	last_wake_pkt_event.npi_wp_code = KEV_POWER_WAKE_PACKET;
1480 	memcpy(&last_wake_pkt_event.npi_ev_wake_pkt_attributed, event_data,
1481 	    sizeof(struct net_port_info_wake_event));
1482 
1483 	ev_msg.vendor_code = KEV_VENDOR_APPLE;
1484 	ev_msg.kev_class = KEV_NETWORK_CLASS;
1485 	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
1486 	ev_msg.event_code  = KEV_POWER_WAKE_PACKET;
1487 
1488 	ev_msg.dv[0].data_ptr = event_data;
1489 	ev_msg.dv[0].data_length = sizeof(struct net_port_info_wake_event);
1490 
1491 	int result = kev_post_msg(&ev_msg);
1492 	if (result != 0) {
1493 		uuid_string_t wake_uuid_str;
1494 
1495 		uuid_unparse(event_data->wake_uuid, wake_uuid_str);
1496 		os_log_error(wake_packet_log_handle,
1497 		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
1498 		    __func__, result, wake_uuid_str);
1499 
1500 		if_ports_used_stats.ifpu_wake_pkt_event_error += 1;
1501 	}
1502 #if (DEBUG || DEVELOPMENT)
1503 	net_port_info_log_wake_event("attributed wake packet event", event_data);
1504 #endif /* (DEBUG || DEVELOPMENT) */
1505 }
1506 
1507 static bool
is_unattributed_wake_already_notified(struct net_port_info * npi)1508 is_unattributed_wake_already_notified(struct net_port_info *npi)
1509 {
1510 	bool retval = false;
1511 
1512 	if (has_notified_unattributed_wake == true || has_notified_wake_pkt == true) {
1513 		if_ports_used_stats.ifpu_dup_unattributed_wake_event += 1;
1514 
1515 		if (__improbable(net_wake_pkt_debug > 0)) {
1516 			net_port_info_log_npi("already notified unattributed wake packet", npi);
1517 		}
1518 		retval = true;
1519 	}
1520 
1521 	return retval;
1522 }
1523 
1524 static void
check_for_existing_delayed_wake_event()1525 check_for_existing_delayed_wake_event()
1526 {
1527 	/*
1528 	 * Count the delayed events that are ignored as the most recent delayed
1529 	 * wake event wins as the packet makes up its way up the stack
1530 	 */
1531 	if (delay_wake_pkt_event.npi_wp_code == KEV_POWER_WAKE_PACKET) {
1532 		if_ports_used_stats.ifpu_ignored_delayed_attributed_events += 1;
1533 	} else if (delay_wake_pkt_event.npi_wp_code == KEV_POWER_UNATTRIBUTED_WAKE) {
1534 		if_ports_used_stats.ifpu_ignored_delayed_unattributed_events += 1;
1535 	}
1536 }
1537 
/*
 * Common delivery path for an unattributed wake packet event: post the
 * event immediately, or stash it in delay_wake_pkt_event when the
 * interface requires delayed wake packet processing.
 */
static void
if_notify_unattributed_wake_common(struct ifnet *ifp, struct net_port_info *npi,
    struct net_port_info_una_wake_event *event_data)
{
	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(&net_port_entry_head_lock);

	/* At most one wake notification per wake cycle */
	if (is_unattributed_wake_already_notified(npi) == true) {
		goto done;
	}

	/*
	 * Check if this is a wake packet that we cannot process inline
	 */
	if (if_need_delayed_wake_pkt_event(ifp)) {
		/* The most recent delayed event wins; count the one being replaced */
		check_for_existing_delayed_wake_event();

		delay_wake_pkt_event.npi_wp_code = KEV_POWER_UNATTRIBUTED_WAKE;
		memcpy(&delay_wake_pkt_event.npi_ev_wake_pkt_unattributed, event_data,
		    sizeof(struct net_port_info_una_wake_event));

#if (DEBUG || DEVELOPMENT)
		if (if_ports_used_verbose > 0) {
			net_port_info_log_una_wake_event("delay unattributed wake packet event", event_data);
		}
#endif /* (DEBUG || DEVELOPMENT) */

		goto done;
	}
	deliver_unattributed_wake_packet_event(event_data);

done:
	lck_mtx_unlock(&net_port_entry_head_lock);
}
1572 
1573 static void
if_notify_unattributed_wake_mbuf(struct ifnet * ifp,struct mbuf * m,struct net_port_info * npi,uint32_t pkt_total_len,uint32_t pkt_data_len,uint16_t pkt_control_flags,uint16_t proto)1574 if_notify_unattributed_wake_mbuf(struct ifnet *ifp, struct mbuf *m,
1575     struct net_port_info *npi, uint32_t pkt_total_len, uint32_t pkt_data_len,
1576     uint16_t pkt_control_flags, uint16_t proto)
1577 {
1578 	struct net_port_info_una_wake_event event_data = {};
1579 
1580 	uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
1581 	event_data.una_wake_pkt_if_index = ifp->if_index;
1582 	event_data.una_wake_pkt_flags = npi->npi_flags;
1583 
1584 	event_data.una_wake_pkt_local_port = npi->npi_local_port;
1585 	event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
1586 	event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
1587 	event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
1588 
1589 	event_data.una_wake_pkt_total_len = pkt_total_len;
1590 	event_data.una_wake_pkt_data_len = pkt_data_len;
1591 	event_data.una_wake_pkt_control_flags = pkt_control_flags;
1592 	event_data.una_wake_pkt_proto = proto;
1593 
1594 	strlcpy(event_data.una_wake_pkt_ifname, IF_XNAME(ifp),
1595 	    sizeof(event_data.una_wake_pkt_ifname));
1596 	event_data.una_wake_pkt_if_info.npi_if_family = ifp->if_family;
1597 	event_data.una_wake_pkt_if_info.npi_if_subfamily = ifp->if_subfamily;
1598 	event_data.una_wake_pkt_if_info.npi_if_functional_type = if_functional_type(ifp, true);
1599 
1600 	strbufcpy(event_data.una_wake_pkt_phy_ifname, last_wake_phy_if_name);
1601 	event_data.una_wake_pkt_phy_if_info.npi_if_family = last_wake_phy_if_family;
1602 	event_data.una_wake_pkt_phy_if_info.npi_if_subfamily = last_wake_phy_if_subfamily;
1603 	event_data.una_wake_pkt_phy_if_info.npi_if_functional_type = last_wake_phy_if_functional_type;
1604 
1605 	event_data.una_wake_ptk_len = m->m_pkthdr.len > NPI_MAX_UNA_WAKE_PKT_LEN ?
1606 	    NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t)m->m_pkthdr.len;
1607 
1608 	errno_t error = mbuf_copydata(m, 0, event_data.una_wake_ptk_len,
1609 	    (void *)event_data.una_wake_pkt);
1610 	if (error != 0) {
1611 		uuid_string_t wake_uuid_str;
1612 
1613 		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
1614 		os_log_error(wake_packet_log_handle,
1615 		    "%s: mbuf_copydata() failed with error %d for wake uuid %s",
1616 		    __func__, error, wake_uuid_str);
1617 
1618 		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
1619 		return;
1620 	}
1621 
1622 	if_notify_unattributed_wake_common(ifp, npi, &event_data);
1623 }
1624 
1625 static bool
is_attributed_wake_already_notified(struct net_port_info * npi)1626 is_attributed_wake_already_notified(struct net_port_info *npi)
1627 {
1628 	if (has_notified_wake_pkt == true) {
1629 		if_ports_used_stats.ifpu_dup_wake_pkt_event += 1;
1630 		if (__improbable(net_wake_pkt_debug > 0)) {
1631 			net_port_info_log_npi("already notified attributed wake packet", npi);
1632 		}
1633 		return true;
1634 	}
1635 
1636 	return false;
1637 }
1638 
/*
 * Build and post (or delay) the kernel event for a wake packet attributed
 * to the flow described by npi.
 */
static void
if_notify_wake_packet(struct ifnet *ifp, struct net_port_info *npi,
    uint32_t pkt_total_len, uint32_t pkt_data_len, uint16_t pkt_control_flags)
{
	struct net_port_info_wake_event event_data = {};

	/* Identity of the wake cycle, the flow and its owning processes */
	uuid_copy(event_data.wake_uuid, current_wakeuuid);
	event_data.wake_pkt_if_index = ifp->if_index;
	event_data.wake_pkt_port = npi->npi_local_port;
	event_data.wake_pkt_flags = npi->npi_flags;
	event_data.wake_pkt_owner_pid = npi->npi_owner_pid;
	event_data.wake_pkt_effective_pid = npi->npi_effective_pid;
	strbufcpy(event_data.wake_pkt_owner_pname, npi->npi_owner_pname);
	strbufcpy(event_data.wake_pkt_effective_pname, npi->npi_effective_pname);
	uuid_copy(event_data.wake_pkt_owner_uuid, npi->npi_owner_uuid);
	uuid_copy(event_data.wake_pkt_effective_uuid, npi->npi_effective_uuid);

	event_data.wake_pkt_foreign_port = npi->npi_foreign_port;
	event_data.wake_pkt_local_addr_ = npi->npi_local_addr_;
	event_data.wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
	strlcpy(event_data.wake_pkt_ifname, IF_XNAME(ifp), sizeof(event_data.wake_pkt_ifname));

	/* The interface the packet was received on */
	event_data.wake_pkt_if_info.npi_if_family = ifp->if_family;
	event_data.wake_pkt_if_info.npi_if_subfamily = ifp->if_subfamily;
	event_data.wake_pkt_if_info.npi_if_functional_type = if_functional_type(ifp, true);

	/* The physical interface recorded at wake time */
	strbufcpy(event_data.wake_pkt_phy_ifname, last_wake_phy_if_name);
	event_data.wake_pkt_phy_if_info.npi_if_family = last_wake_phy_if_family;
	event_data.wake_pkt_phy_if_info.npi_if_subfamily = last_wake_phy_if_subfamily;
	event_data.wake_pkt_phy_if_info.npi_if_functional_type = last_wake_phy_if_functional_type;

	event_data.wake_pkt_total_len = pkt_total_len;
	event_data.wake_pkt_data_len = pkt_data_len;
	event_data.wake_pkt_control_flags = pkt_control_flags;
	/* Surface the flow's "no wake" opt-out in the event's control flags */
	if (npi->npi_flags & NPIF_NOWAKE) {
		event_data.wake_pkt_control_flags |= NPICF_NOWAKE;
	}

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);

	lck_mtx_lock(&net_port_entry_head_lock);

	/*
	 * Always immediately notify attributed wake for idle connections in LPW
	 * even if an attributed wake has already been notified or
	 * the interface requires delayed wake attribution
	 */
	if (if_is_lpw_enabled(ifp) &&
	    (npi->npi_flags & NPIF_CONNECTION_IDLE) != 0) {
		goto deliver;
	}

	if (is_attributed_wake_already_notified(npi) == true) {
		goto done;
	}

	/*
	 * Check if this is a wake packet that we cannot process inline
	 * We do not delay attributed idle connections in LPW because it is more
	 * important to get accurate count about attributed idle connections in LPW
	 * than an accurate count of attributed wake.
	 */
	if (if_need_delayed_wake_pkt_event(ifp)) {
		/* The most recent delayed event wins; count the one being replaced */
		check_for_existing_delayed_wake_event();

		delay_wake_pkt_event.npi_wp_code = KEV_POWER_WAKE_PACKET;
		memcpy(&delay_wake_pkt_event.npi_ev_wake_pkt_attributed, &event_data,
		    sizeof(struct net_port_info_wake_event));

#if (DEBUG || DEVELOPMENT)
		if (if_ports_used_verbose > 0) {
			net_port_info_log_wake_event("delay attributed wake packet event", &event_data);
		}
#endif /* (DEBUG || DEVELOPMENT) */

		goto done;
	}

deliver:
	/* A "no wake" flow that still woke the system counts as spurious */
	if (npi->npi_flags & NPIF_NOWAKE) {
		if_ports_used_stats.ifpu_spurious_wake_event += 1;
	}

	deliver_attributed_wake_packet_event(&event_data);
done:
	lck_mtx_unlock(&net_port_entry_head_lock);
}
1726 
1727 static bool
is_encapsulated_esp(struct mbuf * m,size_t data_offset)1728 is_encapsulated_esp(struct mbuf *m, size_t data_offset)
1729 {
1730 	/*
1731 	 * They are three cases:
1732 	 * - Keep alive: 1 byte payload
1733 	 * - IKE: payload start with 4 bytes header set to zero before ISAKMP header
1734 	 * - otherwise it's ESP
1735 	 */
1736 	ASSERT(m->m_pkthdr.len >= data_offset);
1737 
1738 	size_t data_len = m->m_pkthdr.len - data_offset;
1739 	if (data_len == 1) {
1740 		return false;
1741 	} else if (data_len > ESP_HDR_SIZE) {
1742 		uint8_t payload[ESP_HDR_SIZE];
1743 
1744 		errno_t error = mbuf_copydata(m, data_offset, ESP_HDR_SIZE, &payload);
1745 		if (error != 0) {
1746 			os_log(wake_packet_log_handle, "%s: mbuf_copydata(ESP_HDR_SIZE) error %d",
1747 			    __func__, error);
1748 		} else if (payload[0] == 0 && payload[1] == 0 &&
1749 		    payload[2] == 0 && payload[3] == 0) {
1750 			return false;
1751 		}
1752 	}
1753 	return true;
1754 }
1755 
1756 static void
log_hexdump(os_log_t log_handle,void * __sized_by (len)data,size_t len)1757 log_hexdump(os_log_t log_handle, void *__sized_by(len) data, size_t len)
1758 {
1759 	size_t i, j, k;
1760 	unsigned char *ptr = (unsigned char *)data;
1761 #define MAX_DUMP_BUF 32
1762 	unsigned char buf[3 * MAX_DUMP_BUF + 1];
1763 
1764 	for (i = 0; i < len; i += MAX_DUMP_BUF) {
1765 		for (j = i, k = 0; j < i + MAX_DUMP_BUF && j < len; j++) {
1766 			unsigned char msnbl = ptr[j] >> 4;
1767 			unsigned char lsnbl = ptr[j] & 0x0f;
1768 
1769 			buf[k++] = msnbl < 10 ? msnbl + '0' : msnbl + 'a' - 10;
1770 			buf[k++] = lsnbl < 10 ? lsnbl + '0' : lsnbl + 'a' - 10;
1771 
1772 			if ((j % 2) == 1) {
1773 				buf[k++] = ' ';
1774 			}
1775 			if ((j % MAX_DUMP_BUF) == MAX_DUMP_BUF - 1) {
1776 				buf[k++] = ' ';
1777 			}
1778 		}
1779 		buf[k] = 0;
1780 		os_log(log_handle, "%3lu: %s", i, buf);
1781 	}
1782 }
1783 
1784 __attribute__((noinline))
1785 static void
log_wake_mbuf(struct ifnet * ifp,struct mbuf * m)1786 log_wake_mbuf(struct ifnet *ifp, struct mbuf *m)
1787 {
1788 	char buffer[64];
1789 	size_t buflen = MIN(mbuf_pkthdr_len(m), sizeof(buffer));
1790 
1791 	os_log(wake_packet_log_handle, "wake packet from %s len %d",
1792 	    ifp->if_xname, m_pktlen(m));
1793 	if (mbuf_copydata(m, 0, buflen, buffer) == 0) {
1794 		log_hexdump(wake_packet_log_handle, buffer, buflen);
1795 	}
1796 }
1797 
/*
 * Attempt to attribute a wake packet received as an mbuf to a local port.
 *
 * Parses the IPv4/IPv6 header and the TCP/UDP transport header out of the
 * mbuf, looks the destination port up in the net_port_info table and posts
 * either an attributed or an unattributed wake packet event.
 * ESP packets and UDP packets on the ISAKMP NAT-traversal port that carry
 * encapsulated ESP are deferred to the IPsec layer (early return, no event).
 *
 * Note: 'goto failed' does not abort — it skips the lookup so 'found' stays
 * false and the packet is reported as an unattributed wake.
 */
void
if_ports_used_match_mbuf(struct ifnet *ifp, protocol_family_t proto_family, struct mbuf *m)
{
	errno_t error;
	struct net_port_info npi = {};
	bool found = false;
	uint32_t pkt_total_len = 0;
	uint32_t pkt_data_len = 0;
	uint16_t pkt_control_flags = 0;
	uint16_t pkt_proto = 0;

	if (ifp == NULL) {
		os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: receive interface is NULL");
		if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
		return;
	}

	/* Only packets explicitly tagged as the wake packet are considered */
	if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == 0) {
		if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
		os_log_error(wake_packet_log_handle, "if_ports_used_match_mbuf: called PKTF_WAKE_PKT not set from %s",
		    IF_XNAME(ifp));
		return;
	}

	if (__improbable(net_wake_pkt_debug > 0)) {
		log_wake_mbuf(ifp, m);
	}

	/*
	 * Only accept one wake from a physical interface per wake cycle
	 */
	if (if_set_wake_physical_interface(ifp) == EJUSTRETURN) {
		m->m_pkthdr.pkt_flags &= ~PKTF_WAKE_PKT;
		return;
	}

	if_ports_used_stats.ifpu_so_match_wake_pkt += 1;
	npi.npi_flags |= NPIF_SOCKET; /* For logging */
	pkt_total_len = m->m_pkthdr.len;
	pkt_data_len = pkt_total_len;

	npi.npi_if_index = ifp->if_index;
	if (IFNET_IS_COMPANION_LINK(ifp)) {
		npi.npi_flags |= NPIF_COMPLINK;
	}

	if (proto_family == PF_INET) {
		struct ip iphdr = {};

		if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;

		error = mbuf_copydata(m, 0, sizeof(struct ip), &iphdr);
		if (error != 0) {
			os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(ip) error %d",
			    error);
			goto failed;
		}
		npi.npi_flags |= NPIF_IPV4;
		npi.npi_local_addr_in = iphdr.ip_dst;
		npi.npi_foreign_addr_in = iphdr.ip_src;

		/*
		 * Check if this is a fragment that is not the first fragment
		 */
		if ((ntohs(iphdr.ip_off) & ~(IP_DF | IP_RF)) &&
		    (ntohs(iphdr.ip_off) & IP_OFFMASK) != 0) {
			npi.npi_flags |= NPIF_FRAG;
			if_ports_used_stats.ifpu_frag_wake_pkt += 1;
		}

		/* Subtract the IPv4 header length (ip_hl is in 32-bit words) */
		if ((iphdr.ip_hl << 2) < pkt_data_len) {
			pkt_data_len -= iphdr.ip_hl << 2;
		} else {
			pkt_data_len = 0;
		}

		pkt_proto = iphdr.ip_p;

		switch (iphdr.ip_p) {
		case IPPROTO_TCP: {
			if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
			npi.npi_flags |= NPIF_TCP;

			/* A non-first fragment does not carry the TCP header */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct tcphdr th = {};
			error = mbuf_copydata(m, iphdr.ip_hl << 2, sizeof(struct tcphdr), &th);
			if (error != 0) {
				os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(tcphdr) error %d",
				    error);
				goto failed;
			}
			npi.npi_local_port = th.th_dport;
			npi.npi_foreign_port = th.th_sport;

			/* Subtract the TCP header (th_off is in 32-bit words) */
			if (pkt_data_len < sizeof(struct tcphdr) ||
			    pkt_data_len < (th.th_off << 2)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= th.th_off << 2;
			}
			pkt_control_flags = th.th_flags;
			break;
		}
		case IPPROTO_UDP: {
			if_ports_used_stats.ifpu_udp_wake_pkt += 1;
			npi.npi_flags |= NPIF_UDP;

			/* A non-first fragment does not carry the UDP header */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}
			struct udphdr uh = {};
			size_t udp_offset = iphdr.ip_hl << 2;

			error = mbuf_copydata(m, udp_offset, sizeof(struct udphdr), &uh);
			if (error != 0) {
				os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(udphdr) error %d",
				    error);
				goto failed;
			}
			npi.npi_local_port = uh.uh_dport;
			npi.npi_foreign_port = uh.uh_sport;
			/*
			 * Let the ESP layer handle wake packets
			 */
			if (ntohs(uh.uh_dport) == PORT_ISAKMP_NATT ||
			    ntohs(uh.uh_sport) == PORT_ISAKMP_NATT) {
				if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
				if (is_encapsulated_esp(m, udp_offset + sizeof(struct udphdr))) {
					if (net_wake_pkt_debug > 0) {
						net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
					}
					return;
				}
			}

			if (pkt_data_len < sizeof(struct udphdr)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= sizeof(struct udphdr);
			}
			break;
		}
		case IPPROTO_ESP: {
			/*
			 * Let the ESP layer handle wake packets
			 */
			if_ports_used_stats.ifpu_esp_wake_pkt += 1;
			npi.npi_flags |= NPIF_ESP;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ESP matching", &npi);
			}
			return;
		}
		default:
			if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;
			os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: unexpected IPv4 protocol %u from %s",
			    iphdr.ip_p, IF_XNAME(ifp));
			goto failed;
		}
	} else if (proto_family == PF_INET6) {
		struct ip6_hdr ip6_hdr = {};

		if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;

		error = mbuf_copydata(m, 0, sizeof(struct ip6_hdr), &ip6_hdr);
		if (error != 0) {
			os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(ip6_hdr) error %d",
			    error);
			goto failed;
		}
		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6, &ip6_hdr.ip6_dst, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6, &ip6_hdr.ip6_src, sizeof(struct in6_addr));

		size_t l3_len = sizeof(struct ip6_hdr);
		uint8_t l4_proto = ip6_hdr.ip6_nxt;

		pkt_proto = l4_proto;

		if (pkt_data_len < l3_len) {
			pkt_data_len = 0;
		} else {
			pkt_data_len -= l3_len;
		}

		/*
		 * Check if this is a fragment that is not the first fragment
		 */
		if (l4_proto == IPPROTO_FRAGMENT) {
			struct ip6_frag ip6_frag;

			error = mbuf_copydata(m, sizeof(struct ip6_hdr), sizeof(struct ip6_frag), &ip6_frag);
			if (error != 0) {
				os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(ip6_frag) error %d",
				    error);
				goto failed;
			}

			l3_len += sizeof(struct ip6_frag);
			l4_proto = ip6_frag.ip6f_nxt;

			if ((ip6_frag.ip6f_offlg & IP6F_OFF_MASK) != 0) {
				npi.npi_flags |= NPIF_FRAG;
				if_ports_used_stats.ifpu_frag_wake_pkt += 1;
			}
		}


		switch (l4_proto) {
		case IPPROTO_TCP: {
			if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
			npi.npi_flags |= NPIF_TCP;

			/*
			 * Cannot attribute a fragment that is not the first fragment as it
			 * not have the TCP header
			 */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct tcphdr th = {};

			error = mbuf_copydata(m, l3_len, sizeof(struct tcphdr), &th);
			if (error != 0) {
				os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(tcphdr) error %d",
				    error);
				if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
				goto failed;
			}
			npi.npi_local_port = th.th_dport;
			npi.npi_foreign_port = th.th_sport;

			/* Subtract the TCP header (th_off is in 32-bit words) */
			if (pkt_data_len < sizeof(struct tcphdr) ||
			    pkt_data_len < (th.th_off << 2)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= th.th_off << 2;
			}
			pkt_control_flags = th.th_flags;
			break;
		}
		case IPPROTO_UDP: {
			if_ports_used_stats.ifpu_udp_wake_pkt += 1;
			npi.npi_flags |= NPIF_UDP;

			/*
			 * Cannot attribute a fragment that is not the first fragment as it
			 * not have the UDP header
			 */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct udphdr uh = {};

			error = mbuf_copydata(m, l3_len, sizeof(struct udphdr), &uh);
			if (error != 0) {
				os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(udphdr) error %d",
				    error);
				if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
				goto failed;
			}
			npi.npi_local_port = uh.uh_dport;
			npi.npi_foreign_port = uh.uh_sport;
			/*
			 * Let the ESP layer handle wake packets
			 */
			if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
			    ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
				if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
				if (is_encapsulated_esp(m, l3_len + sizeof(struct udphdr))) {
					if (net_wake_pkt_debug > 0) {
						net_port_info_log_npi("defer encapsulated ESP matching", &npi);
					}
					return;
				}
			}

			if (pkt_data_len < sizeof(struct udphdr)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= sizeof(struct udphdr);
			}
			break;
		}
		case IPPROTO_ESP: {
			/*
			 * Let the ESP layer handle the wake packet
			 */
			if_ports_used_stats.ifpu_esp_wake_pkt += 1;
			npi.npi_flags |= NPIF_ESP;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ESP matching", &npi);
			}
			return;
		}
		default:
			if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;

			os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: unexpected IPv6 protocol %u from %s",
			    ip6_hdr.ip6_nxt, IF_XNAME(ifp));
			goto failed;
		}
	} else {
		if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;
		os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: unexpected protocol family %d from %s",
		    proto_family, IF_XNAME(ifp));
		goto failed;
	}

	found = net_port_info_find_match(&npi);

failed:
	/* Fall-through on success as well: tag low-power-wake state for stats */
	if (__improbable(if_is_lpw_enabled(ifp))) {
		npi.npi_flags |= NPIF_LPW;

		if (found && (npi.npi_flags & NPIF_CONNECTION_IDLE)) {
			os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: idle connection in LPW on %s",
			    IF_XNAME(ifp));

			if_ports_used_stats.ifpu_lpw_connection_idle_wake++;
		} else {
			os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: not idle connection in LPW on %s",
			    IF_XNAME(ifp));

			if_ports_used_stats.ifpu_lpw_not_idle_wake++;
		}
	}
	if (found) {
		if_notify_wake_packet(ifp, &npi,
		    pkt_total_len, pkt_data_len, pkt_control_flags);
	} else {
		if_notify_unattributed_wake_mbuf(ifp, m, &npi,
		    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
	}
}
2138 
2139 #if SKYWALK
2140 
2141 static void
if_notify_unattributed_wake_pkt(struct ifnet * ifp,struct __kern_packet * pkt,struct net_port_info * npi,uint32_t pkt_total_len,uint32_t pkt_data_len,uint16_t pkt_control_flags,uint16_t proto)2142 if_notify_unattributed_wake_pkt(struct ifnet *ifp, struct __kern_packet *pkt,
2143     struct net_port_info *npi, uint32_t pkt_total_len, uint32_t pkt_data_len,
2144     uint16_t pkt_control_flags, uint16_t proto)
2145 {
2146 	struct net_port_info_una_wake_event event_data = {};
2147 
2148 	uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
2149 	event_data.una_wake_pkt_if_index = ifp->if_index;
2150 	event_data.una_wake_pkt_flags = npi->npi_flags;
2151 
2152 	uint16_t offset = kern_packet_get_network_header_offset(SK_PKT2PH(pkt));
2153 	event_data.una_wake_ptk_len =
2154 	    pkt->pkt_length - offset > NPI_MAX_UNA_WAKE_PKT_LEN ?
2155 	    NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t) pkt->pkt_length - offset;
2156 
2157 	kern_packet_copy_bytes(SK_PKT2PH(pkt), offset, event_data.una_wake_ptk_len,
2158 	    event_data.una_wake_pkt);
2159 
2160 	event_data.una_wake_pkt_local_port = npi->npi_local_port;
2161 	event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
2162 	event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
2163 	event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
2164 	strlcpy(event_data.una_wake_pkt_ifname, IF_XNAME(ifp),
2165 	    sizeof(event_data.una_wake_pkt_ifname));
2166 
2167 	event_data.una_wake_pkt_total_len = pkt_total_len;
2168 	event_data.una_wake_pkt_data_len = pkt_data_len;
2169 	event_data.una_wake_pkt_control_flags = pkt_control_flags;
2170 	event_data.una_wake_pkt_proto = proto;
2171 
2172 	if_notify_unattributed_wake_common(ifp, npi, &event_data);
2173 }
2174 
2175 __attribute__((noinline))
2176 static void
log_wake_pkt(struct ifnet * ifp,struct __kern_packet * pkt)2177 log_wake_pkt(struct ifnet *ifp, struct __kern_packet *pkt)
2178 {
2179 	uint32_t len;
2180 
2181 	if (pkt->pkt_pflags & PKT_F_MBUF_DATA) {
2182 		len = m_pktlen(pkt->pkt_mbuf);
2183 	} else {
2184 		len = __packet_get_real_data_length(pkt);
2185 	}
2186 
2187 	os_log(wake_packet_log_handle, "wake packet from %s len %d",
2188 	    ifp->if_xname, len);
2189 }
2190 
2191 void
if_ports_used_match_pkt(struct ifnet * ifp,struct __kern_packet * pkt)2192 if_ports_used_match_pkt(struct ifnet *ifp, struct __kern_packet *pkt)
2193 {
2194 	struct net_port_info npi = {};
2195 	bool found = false;
2196 	uint32_t pkt_total_len = 0;
2197 	uint32_t pkt_data_len = 0;
2198 	uint16_t pkt_control_flags = 0;
2199 	uint16_t pkt_proto = 0;
2200 
2201 	if (ifp == NULL) {
2202 		os_log(wake_packet_log_handle, "if_ports_used_match_pkt: receive interface is NULL");
2203 		if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
2204 		return;
2205 	}
2206 
2207 	if ((pkt->pkt_pflags & PKT_F_WAKE_PKT) == 0) {
2208 		if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
2209 		os_log_error(wake_packet_log_handle, "%s: called PKT_F_WAKE_PKT not set from %s",
2210 		    __func__, IF_XNAME(ifp));
2211 		return;
2212 	}
2213 
2214 
2215 	if (__improbable(net_wake_pkt_debug > 0)) {
2216 		log_wake_pkt(ifp, pkt);
2217 	}
2218 
2219 	/*
2220 	 * Only accept one wake from a physical interface per wake cycle
2221 	 */
2222 	if (if_set_wake_physical_interface(ifp) == EJUSTRETURN) {
2223 		pkt->pkt_pflags &= ~PKT_F_WAKE_PKT;
2224 		return;
2225 	}
2226 
2227 	if_ports_used_stats.ifpu_ch_match_wake_pkt += 1;
2228 	npi.npi_flags |= NPIF_CHANNEL; /* For logging */
2229 	pkt_total_len = pkt->pkt_flow_ip_hlen +
2230 	    pkt->pkt_flow_tcp_hlen + pkt->pkt_flow_ulen;
2231 	pkt_data_len = pkt->pkt_flow_ulen;
2232 
2233 	npi.npi_if_index = ifp->if_index;
2234 	if (IFNET_IS_COMPANION_LINK(ifp)) {
2235 		npi.npi_flags |= NPIF_COMPLINK;
2236 	}
2237 
2238 
2239 	switch (pkt->pkt_flow_ip_ver) {
2240 	case IPVERSION:
2241 		if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;
2242 
2243 		npi.npi_flags |= NPIF_IPV4;
2244 		npi.npi_local_addr_in = pkt->pkt_flow_ipv4_dst;
2245 		npi.npi_foreign_addr_in = pkt->pkt_flow_ipv4_src;
2246 		break;
2247 	case IPV6_VERSION:
2248 		if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;
2249 
2250 		npi.npi_flags |= NPIF_IPV6;
2251 		memcpy(&npi.npi_local_addr_in6, &pkt->pkt_flow_ipv6_dst,
2252 		    sizeof(struct in6_addr));
2253 		memcpy(&npi.npi_foreign_addr_in6, &pkt->pkt_flow_ipv6_src,
2254 		    sizeof(struct in6_addr));
2255 		break;
2256 	default:
2257 		if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;
2258 
2259 		os_log(wake_packet_log_handle, "%s: unexpected protocol family %u from %s",
2260 		    __func__, pkt->pkt_flow_ip_ver, IF_XNAME(ifp));
2261 		goto failed;
2262 	}
2263 	pkt_proto = pkt->pkt_flow_ip_ver;
2264 
2265 	/*
2266 	 * Check if this is a fragment that is not the first fragment
2267 	 */
2268 	if (pkt->pkt_flow_ip_is_frag && !pkt->pkt_flow_ip_is_first_frag) {
2269 		os_log(wake_packet_log_handle, "%s: unexpected wake fragment from %s",
2270 		    __func__, IF_XNAME(ifp));
2271 		npi.npi_flags |= NPIF_FRAG;
2272 		if_ports_used_stats.ifpu_frag_wake_pkt += 1;
2273 	}
2274 
2275 	switch (pkt->pkt_flow_ip_proto) {
2276 	case IPPROTO_TCP: {
2277 		if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
2278 		npi.npi_flags |= NPIF_TCP;
2279 
2280 		/*
2281 		 * Cannot attribute a fragment that is not the first fragment as it
2282 		 * not have the TCP header
2283 		 */
2284 		if (npi.npi_flags & NPIF_FRAG) {
2285 			goto failed;
2286 		}
2287 		struct tcphdr * __single tcp = __unsafe_forge_single(struct tcphdr *, pkt->pkt_flow_tcp_hdr);
2288 		if (tcp == NULL) {
2289 			os_log(wake_packet_log_handle, "%s: pkt with unassigned TCP header from %s",
2290 			    __func__, IF_XNAME(ifp));
2291 			if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
2292 			goto failed;
2293 		}
2294 		npi.npi_local_port = tcp->th_dport;
2295 		npi.npi_foreign_port = tcp->th_sport;
2296 		pkt_control_flags = tcp->th_flags;
2297 		break;
2298 	}
2299 	case IPPROTO_UDP: {
2300 		if_ports_used_stats.ifpu_udp_wake_pkt += 1;
2301 		npi.npi_flags |= NPIF_UDP;
2302 
2303 		/*
2304 		 * Cannot attribute a fragment that is not the first fragment as it
2305 		 * not have the UDP header
2306 		 */
2307 		if (npi.npi_flags & NPIF_FRAG) {
2308 			goto failed;
2309 		}
2310 		struct udphdr * __single uh = __unsafe_forge_single(struct udphdr *, pkt->pkt_flow_udp_hdr);
2311 		if (uh == NULL) {
2312 			os_log(wake_packet_log_handle, "%s: pkt with unassigned UDP header from %s",
2313 			    __func__, IF_XNAME(ifp));
2314 			if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
2315 			goto failed;
2316 		}
2317 		npi.npi_local_port = uh->uh_dport;
2318 		npi.npi_foreign_port = uh->uh_sport;
2319 
2320 		/*
2321 		 * Defer matching of UDP NAT traversal to ip_input
2322 		 * (assumes IKE uses sockets)
2323 		 */
2324 		if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
2325 		    ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
2326 			if_ports_used_stats.ifpu_deferred_isakmp_natt_wake_pkt += 1;
2327 			if (net_wake_pkt_debug > 0) {
2328 				net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
2329 			}
2330 			return;
2331 		}
2332 		break;
2333 	}
2334 	case IPPROTO_ESP: {
2335 		/*
2336 		 * Let the ESP layer handle the wake packet
2337 		 */
2338 		if_ports_used_stats.ifpu_esp_wake_pkt += 1;
2339 		npi.npi_flags |= NPIF_ESP;
2340 		if (net_wake_pkt_debug > 0) {
2341 			net_port_info_log_npi("defer ESP matching", &npi);
2342 		}
2343 		return;
2344 	}
2345 	default:
2346 		if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;
2347 
2348 		os_log(wake_packet_log_handle, "%s: unexpected IP protocol %u from %s",
2349 		    __func__, pkt->pkt_flow_ip_proto, IF_XNAME(ifp));
2350 		goto failed;
2351 	}
2352 
2353 	found = net_port_info_find_match(&npi);
2354 
2355 failed:
2356 	if (__improbable(if_is_lpw_enabled(ifp))) {
2357 		npi.npi_flags |= NPIF_LPW;
2358 
2359 		if (found && (npi.npi_flags & NPIF_CONNECTION_IDLE)) {
2360 			os_log(wake_packet_log_handle, "if_ports_used_match_pkt: idle connection in LPW on %s",
2361 			    IF_XNAME(ifp));
2362 
2363 			if_ports_used_stats.ifpu_lpw_connection_idle_wake++;
2364 		} else {
2365 			os_log(wake_packet_log_handle, "if_ports_used_match_pkt: not idle connection in LPW on %s",
2366 			    IF_XNAME(ifp));
2367 
2368 			if_ports_used_stats.ifpu_lpw_not_idle_wake++;
2369 		}
2370 	}
2371 
2372 	if (found) {
2373 		if_notify_wake_packet(ifp, &npi,
2374 		    pkt_total_len, pkt_data_len, pkt_control_flags);
2375 	} else {
2376 		if_notify_unattributed_wake_pkt(ifp, pkt, &npi,
2377 		    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
2378 	}
2379 }
2380 #endif /* SKYWALK */
2381 
2382 int
2383 sysctl_last_attributed_wake_event SYSCTL_HANDLER_ARGS
2384 {
2385 #pragma unused(oidp, arg1, arg2)
2386 	struct net_port_info_wake_event net_port_info_wake_event = { 0 };
2387 	size_t len = sizeof(net_port_info_wake_event);
2388 	int error;
2389 
2390 	lck_mtx_lock(&net_port_entry_head_lock);
2391 	if (last_wake_pkt_event.npi_wp_code == KEV_POWER_WAKE_PACKET) {
2392 		memcpy(&net_port_info_wake_event, &last_wake_pkt_event.npi_ev_wake_pkt_attributed, len);
2393 	}
2394 	lck_mtx_unlock(&net_port_entry_head_lock);
2395 
2396 	if (req->oldptr != 0) {
2397 		len = MIN(req->oldlen, len);
2398 	}
2399 	error = SYSCTL_OUT(req, &net_port_info_wake_event, len);
2400 
2401 	return error;
2402 }
2403 
2404 int
2405 sysctl_last_unattributed_wake_event SYSCTL_HANDLER_ARGS
2406 {
2407 #pragma unused(oidp, arg1, arg2)
2408 	struct net_port_info_una_wake_event net_port_info_una_wake_event = { 0 };
2409 	size_t len = sizeof(net_port_info_una_wake_event);
2410 	int error;
2411 
2412 	lck_mtx_lock(&net_port_entry_head_lock);
2413 	if (last_wake_pkt_event.npi_wp_code == KEV_POWER_UNATTRIBUTED_WAKE) {
2414 		memcpy(&net_port_info_una_wake_event, &last_wake_pkt_event.npi_ev_wake_pkt_unattributed, len);
2415 	}
2416 	lck_mtx_unlock(&net_port_entry_head_lock);
2417 
2418 	if (req->oldptr != 0) {
2419 		len = MIN(req->oldlen, len);
2420 	}
2421 	error = SYSCTL_OUT(req, &net_port_info_una_wake_event, len);
2422 
2423 	return error;
2424 }
2425 
2426 /*
2427  * Pass the interface family of the interface that caused the wake
2428  */
2429 int
2430 sysctl_wake_pkt_event_notify SYSCTL_HANDLER_ARGS
2431 {
2432 #pragma unused(oidp, arg1, arg2)
2433 	long long val = 0;
2434 	int error = 0;
2435 	int changed = 0;
2436 	uint32_t if_family = 0;
2437 
2438 	error = sysctl_io_number(req, val, sizeof(val), &val, &changed);
2439 	if (error != 0 || req->newptr == 0 || changed == 0) {
2440 		return error;
2441 	}
2442 
2443 	if (val < 0 || val > UINT32_MAX) {
2444 		return EINVAL;
2445 	}
2446 	if_family = (uint32_t)val;
2447 
2448 	if (!IOCurrentTaskHasEntitlement(WAKE_PKT_EVENT_CONTROL_ENTITLEMENT)) {
2449 		return EPERM;
2450 	}
2451 
2452 	os_log(wake_packet_log_handle, "sysctl_wake_pkt_event_notify proc %s:%u val %u last_wake_phy_if_delay_wake_pkt %d last_wake_phy_if_family %u delay_wake_pkt_event %d",
2453 	    proc_best_name(current_proc()), proc_selfpid(),
2454 	    if_family, last_wake_phy_if_delay_wake_pkt, last_wake_phy_if_family,
2455 	    delay_wake_pkt_event.npi_wp_code);
2456 #if (DEBUG || DEVELOPMENT)
2457 	if (if_ports_used_verbose > 0) {
2458 		if (delay_wake_pkt_event.npi_wp_code == KEV_POWER_WAKE_PACKET) {
2459 			net_port_info_log_wake_event("sysctl_wake_pkt_event_notify", &delay_wake_pkt_event.npi_ev_wake_pkt_attributed);
2460 		} else if (delay_wake_pkt_event.npi_wp_code == KEV_POWER_UNATTRIBUTED_WAKE) {
2461 			net_port_info_log_una_wake_event("sysctl_wake_pkt_event_notify", &delay_wake_pkt_event.npi_ev_wake_pkt_unattributed);
2462 		}
2463 	}
2464 #endif /* (DEBUG || DEVELOPMENT) */
2465 
2466 	lck_mtx_lock(&net_port_entry_head_lock);
2467 
2468 	if (last_wake_phy_if_delay_wake_pkt == true && val == last_wake_phy_if_family) {
2469 		last_wake_phy_if_delay_wake_pkt = false;
2470 
2471 		if (delay_wake_pkt_event.npi_wp_code == KEV_POWER_WAKE_PACKET) {
2472 			if (is_attributed_wake_already_notified(NULL) == false) {
2473 				deliver_attributed_wake_packet_event(&delay_wake_pkt_event.npi_ev_wake_pkt_attributed);
2474 			} else {
2475 				os_log(wake_packet_log_handle, "sysctl_wake_pkt_event_notify attributed_wake_already_notified");
2476 			}
2477 		} else if (delay_wake_pkt_event.npi_wp_code == KEV_POWER_UNATTRIBUTED_WAKE) {
2478 			if (is_unattributed_wake_already_notified(NULL)) {
2479 				deliver_unattributed_wake_packet_event(&delay_wake_pkt_event.npi_ev_wake_pkt_unattributed);
2480 			} else {
2481 				os_log(wake_packet_log_handle, "sysctl_wake_pkt_event_notify unattributed_wake_already_notified");
2482 			}
2483 		} else {
2484 			if_ports_used_stats.ifpu_wake_pkt_event_notify_in_vain += 1;
2485 			os_log(wake_packet_log_handle, "sysctl_wake_pkt_event_notify bad npi_wp_code");
2486 		}
2487 	} else {
2488 		if_ports_used_stats.ifpu_wake_pkt_event_notify_in_vain += 1;
2489 		os_log(wake_packet_log_handle, "sysctl_wake_pkt_event_notify in vain");
2490 	}
2491 	lck_mtx_unlock(&net_port_entry_head_lock);
2492 
2493 	return 0;
2494 }
2495 
2496 static void
if_set_delay_wake_flags(ifnet_t ifp,bool delay)2497 if_set_delay_wake_flags(ifnet_t ifp, bool delay)
2498 {
2499 	if (delay) {
2500 		if_set_xflags(ifp, IFXF_DELAYWAKEPKTEVENT);
2501 		if_clear_xflags(ifp, IFXF_INBAND_WAKE_PKT_TAGGING);
2502 	} else {
2503 		if_clear_xflags(ifp, IFXF_DELAYWAKEPKTEVENT);
2504 		if_set_xflags(ifp, IFXF_INBAND_WAKE_PKT_TAGGING);
2505 	}
2506 }
2507 
/*
 * Set the bitmap of interface functional families for which delivery of the
 * wake packet event is delayed until user space asks for it via
 * sysctl_wake_pkt_event_notify.
 * Requires WAKE_PKT_EVENT_CONTROL_ENTITLEMENT.  When a non-zero bitmap is
 * set, the delay/in-band-tagging flags are re-derived for every interface.
 */
int
sysctl_wake_pkt_event_delay_if_families SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	long long val = npi_wake_packet_event_delay_if_families;
	int error;
	int changed = 0;
	uint32_t old_value = npi_wake_packet_event_delay_if_families;

	error = sysctl_io_number(req, val, sizeof(val), &val, &changed);
	if (error != 0 || req->newptr == 0 || changed == 0) {
		return error;
	}
	if (!IOCurrentTaskHasEntitlement(WAKE_PKT_EVENT_CONTROL_ENTITLEMENT)) {
		return EPERM;
	}
	if (val < 0 || val > UINT32_MAX) {
		return EINVAL;
	}

	/* The value is the bitmap of the functional types to delay */
	old_value = npi_wake_packet_event_delay_if_families;
	npi_wake_packet_event_delay_if_families = (uint32_t)val;

	/* Need to reevaluate the capability of doing in-band wake packet tagging */
	if (npi_wake_packet_event_delay_if_families != 0) {
		uint32_t count, i;
		ifnet_t *__counted_by(count) ifp_list;

		error = ifnet_list_get_all(IFNET_FAMILY_ANY, &ifp_list, &count);
		if (error != 0) {
			os_log_error(wake_packet_log_handle,
			    "%s: ifnet_list_get_all() failed %d",
			    __func__, error);
			/* Roll back the new bitmap when the interface walk fails */
			npi_wake_packet_event_delay_if_families = old_value;
			return error;
		}
		for (i = 0; i < count; i++) {
			ifnet_t ifp = ifp_list[i];
			bool delay = is_wake_pkt_event_delay(ifp->if_family);
			const uint32_t flags = IFXF_INBAND_WAKE_PKT_TAGGING | IFXF_DELAYWAKEPKTEVENT;

			/* Only touch interfaces whose flags disagree with the policy */
			if ((delay && (ifp->if_xflags & flags) != IFXF_DELAYWAKEPKTEVENT) ||
			    (!delay && (ifp->if_xflags & flags) != IFXF_INBAND_WAKE_PKT_TAGGING)) {
				if_set_delay_wake_flags(ifp, delay);

				if (if_ports_used_verbose || ifp->if_family == IFNET_FAMILY_CELLULAR) {
					os_log(wake_packet_log_handle, "interface %s reset INBAND_WAKE_PKT_TAGGING %d DELAYWAKEPKTEVENT %d",
					    ifp->if_xname,
					    ifp->if_xflags & IFXF_INBAND_WAKE_PKT_TAGGING ? 1 : 0,
					    ifp->if_xflags & IFXF_DELAYWAKEPKTEVENT ? 1 : 0);
				}
			}
		}
		ifnet_list_free_counted_by(ifp_list, count);
	}

	os_log(wake_packet_log_handle, "sysctl_wake_pkt_event_delay_if_families proc %s:%u npi_wake_packet_event_delay_if_families 0x%x -> 0x%x",
	    proc_best_name(current_proc()), proc_selfpid(),
	    old_value, npi_wake_packet_event_delay_if_families);


	return 0;
}
2572 
2573 void
init_inband_wake_pkt_tagging_for_family(struct ifnet * ifp)2574 init_inband_wake_pkt_tagging_for_family(struct ifnet *ifp)
2575 {
2576 	bool delay = is_wake_pkt_event_delay(ifp->if_family);
2577 
2578 	if_set_delay_wake_flags(ifp, delay);
2579 
2580 	if (if_ports_used_verbose || ifp->if_family == IFNET_FAMILY_CELLULAR) {
2581 		os_log(wake_packet_log_handle, "interface %s initialized INBAND_WAKE_PKT_TAGGING %d DELAYWAKEPKTEVENT %d",
2582 		    ifp->if_xname,
2583 		    ifp->if_xflags & IFXF_INBAND_WAKE_PKT_TAGGING ? 1 : 0,
2584 		    ifp->if_xflags & IFXF_DELAYWAKEPKTEVENT ? 1 : 0);
2585 	}
2586 }
2587 
2588 #if (DEBUG | DEVELOPMENT)
2589 
2590 static int
2591 sysctl_use_fake_lpw SYSCTL_HANDLER_ARGS
2592 {
2593 #pragma unused(arg1, arg2)
2594 	int error = 0;
2595 	int old_value = use_fake_lpw;
2596 	int new_value = *(int *)oidp->oid_arg1;
2597 
2598 	error = sysctl_handle_int(oidp, &new_value, 0, req);
2599 	if (error == 0) {
2600 		*(int *)oidp->oid_arg1 = new_value;
2601 
2602 		if (new_value != old_value) {
2603 			os_log(wake_packet_log_handle, "use_fake_lpw %d", new_value);
2604 		}
2605 	}
2606 	return error;
2607 }
2608 
2609 static int
2610 sysctl_mark_wake_packet_port SYSCTL_HANDLER_ARGS
2611 {
2612 #pragma unused(arg1, arg2)
2613 	int error = 0;
2614 	int new_value = *(int *)oidp->oid_arg1;
2615 
2616 	error = sysctl_handle_int(oidp, &new_value, 0, req);
2617 	if (error == 0) {
2618 		if (new_value < 0 || new_value >= UINT16_MAX) {
2619 			error = EINVAL;
2620 			goto done;
2621 		}
2622 		*(int *)oidp->oid_arg1 = new_value;
2623 	}
2624 done:
2625 	return error;
2626 }
2627 
2628 static int
2629 sysctl_mark_wake_packet_if SYSCTL_HANDLER_ARGS
2630 {
2631 #pragma unused(arg1, arg2)
2632 	int error = 0;
2633 	char new_value[IFNAMSIZ] = { 0 };
2634 	int changed = 0;
2635 
2636 	strbufcpy(new_value, IFNAMSIZ, mark_wake_packet_if, IFNAMSIZ);
2637 	error = sysctl_io_string(req, new_value, IFNAMSIZ, 0, &changed);
2638 	if (error == 0) {
2639 		strbufcpy(mark_wake_packet_if, IFNAMSIZ, new_value, IFNAMSIZ);
2640 	}
2641 
2642 	return error;
2643 }
2644 
2645 bool
check_wake_mbuf(ifnet_t ifp,protocol_family_t protocol_family,mbuf_ref_t m)2646 check_wake_mbuf(ifnet_t ifp, protocol_family_t protocol_family, mbuf_ref_t m)
2647 {
2648 	uint8_t ipproto = 0;
2649 	size_t offset = 0;
2650 
2651 	/* The protocol and interface must both be specified */
2652 	if (mark_wake_packet_ipproto == 0 || mark_wake_packet_if[0] == 0) {
2653 		return false;
2654 	}
2655 	/* The interface must match */
2656 	if (strlcmp(mark_wake_packet_if, IF_XNAME(ifp), IFNAMSIZ) != 0) {
2657 		return false;
2658 	}
2659 	/* The protocol must match */
2660 	if (protocol_family == PF_INET6) {
2661 		struct ip6_hdr ip6;
2662 
2663 		if ((size_t)(m)->m_pkthdr.len < sizeof(struct ip6_hdr)) {
2664 			os_log(wake_packet_log_handle, "check_wake_mbuf: IP6 too short");
2665 			return false;
2666 		}
2667 		mbuf_copydata(m, 0, sizeof(struct ip6_hdr), &ip6);
2668 
2669 		if ((ipproto = ip6.ip6_nxt) != mark_wake_packet_ipproto) {
2670 			return false;
2671 		}
2672 		offset = sizeof(struct ip6_hdr);
2673 	} else if (protocol_family == PF_INET) {
2674 		struct ip ip;
2675 
2676 		if ((size_t)(m)->m_pkthdr.len < sizeof(struct ip)) {
2677 			os_log(wake_packet_log_handle, "check_wake_mbuf: IP too short");
2678 			return false;
2679 		}
2680 		mbuf_copydata(m, 0, sizeof(struct ip), &ip);
2681 
2682 		if ((ipproto = ip.ip_p) != mark_wake_packet_ipproto) {
2683 			return false;
2684 		}
2685 		offset = sizeof(struct ip);
2686 	}
2687 
2688 	/* Check the ports for TCP and UDP */
2689 	if (ipproto == IPPROTO_TCP) {
2690 		struct tcphdr th;
2691 
2692 		if ((size_t)(m)->m_pkthdr.len < offset + sizeof(struct tcphdr)) {
2693 			os_log(wake_packet_log_handle, "check_wake_mbuf: TCP too short");
2694 			return false;
2695 		}
2696 		mbuf_copydata(m, offset, sizeof(struct tcphdr), &th);
2697 
2698 		if (mark_wake_packet_local_port != 0 &&
2699 		    ntohs(th.th_dport) != mark_wake_packet_local_port) {
2700 			return false;
2701 		}
2702 		if (mark_wake_packet_remote_port != 0 &&
2703 		    ntohs(th.th_sport) != mark_wake_packet_remote_port) {
2704 			return false;
2705 		}
2706 		return true;
2707 	} else if (ipproto == IPPROTO_UDP) {
2708 		struct udphdr uh;
2709 
2710 		if ((size_t)(m)->m_pkthdr.len < offset + sizeof(struct udphdr)) {
2711 			os_log(wake_packet_log_handle, "check_wake_mbufL UDP too short");
2712 			return false;
2713 		}
2714 		mbuf_copydata(m, offset, sizeof(struct udphdr), &uh);
2715 
2716 		if (mark_wake_packet_local_port != 0 &&
2717 		    ntohs(uh.uh_dport) != mark_wake_packet_local_port) {
2718 			return false;
2719 		}
2720 		if (mark_wake_packet_remote_port != 0 &&
2721 		    ntohs(uh.uh_sport) != mark_wake_packet_remote_port) {
2722 			return false;
2723 		}
2724 		return true;
2725 	}
2726 
2727 	return ipproto == mark_wake_packet_ipproto;
2728 }
2729 
2730 bool
check_wake_pkt(ifnet_t ifp __unused,struct __kern_packet * pkt)2731 check_wake_pkt(ifnet_t ifp __unused, struct __kern_packet *pkt)
2732 {
2733 	/* The protocol and interface must both be specified */
2734 	if (mark_wake_packet_ipproto == 0 || mark_wake_packet_if[0] == 0) {
2735 		return false;
2736 	}
2737 	/* The interface must match */
2738 	if (strlcmp(mark_wake_packet_if, IF_XNAME(ifp), IFNAMSIZ) != 0) {
2739 		return false;
2740 	}
2741 	/* Cannot deal with fragments */
2742 	if (pkt->pkt_flow_ip_is_frag && !pkt->pkt_flow_ip_is_first_frag) {
2743 		return false;
2744 	}
2745 	/* Check the ports for TCP and UDP */
2746 	if (pkt->pkt_flow_ip_proto == IPPROTO_TCP) {
2747 		struct tcphdr * __single th = __unsafe_forge_single(struct tcphdr *, pkt->pkt_flow_tcp_hdr);
2748 		if (th == NULL) {
2749 			return false;
2750 		}
2751 		if (mark_wake_packet_local_port != 0 &&
2752 		    ntohs(th->th_dport) != mark_wake_packet_local_port) {
2753 			return false;
2754 		}
2755 		if (mark_wake_packet_remote_port != 0 &&
2756 		    ntohs(th->th_sport) != mark_wake_packet_remote_port) {
2757 			return false;
2758 		}
2759 		return true;
2760 	} else if (pkt->pkt_flow_ip_proto == IPPROTO_UDP) {
2761 		struct udphdr * __single uh = __unsafe_forge_single(struct udphdr *, pkt->pkt_flow_udp_hdr);
2762 		if (uh == NULL) {
2763 			return false;
2764 		}
2765 		if (mark_wake_packet_local_port != 0 &&
2766 		    ntohs(uh->uh_dport) != mark_wake_packet_local_port) {
2767 			return false;
2768 		}
2769 		if (mark_wake_packet_remote_port != 0 &&
2770 		    ntohs(uh->uh_sport) != mark_wake_packet_remote_port) {
2771 			return false;
2772 		}
2773 	}
2774 	return pkt->pkt_flow_ip_proto == mark_wake_packet_ipproto;
2775 }
2776 
2777 #endif /* (DEBUG | DEVELOPMENT) */
2778