/*
 * Copyright (c) 2017-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/types.h>
#include <sys/time.h>
#include <sys/mcache.h>
#include <sys/malloc.h>
#include <sys/kauth.h>
#include <sys/kern_event.h>
#include <sys/bitstring.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>

#include <kern/locks.h>
#include <kern/zalloc.h>

#include <libkern/libkern.h>

#include <net/kpi_interface.h>
#include <net/if_var.h>
#include <net/if_ports_used.h>
#include <net/net_sysctl.h>

#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/udp.h>

#if SKYWALK
#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/flowswitch/flow/flow_var.h>
#include <skywalk/namespace/netns.h>
#endif /* SKYWALK */

#include <stdbool.h>

#include <os/log.h>

#include <IOKit/IOBSD.h>

#include <string.h>

#define ESP_HDR_SIZE 4
#define PORT_ISAKMP 500
#define PORT_ISAKMP_NATT 4500 /* rfc3948 */

#define IF_XNAME(ifp) ((ifp) != NULL ? (ifp)->if_xname : (const char * __null_terminated)"")

extern bool IOPMCopySleepWakeUUIDKey(char *buffer, size_t buf_len);

SYSCTL_DECL(_net_link_generic_system);

SYSCTL_NODE(_net_link_generic_system, OID_AUTO, port_used,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "if port used");

struct if_ports_used_stats if_ports_used_stats = {};
static int sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
    sysctl_if_ports_used_stats, "S,struct if_ports_used_stats", "");

static uuid_t current_wakeuuid;
SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, current_wakeuuid,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    current_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");

static int sysctl_net_port_info_list SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, list,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    sysctl_net_port_info_list, "S,xnpigen", "");
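
/*
 * Note: userland consumes the "list" sysctl above with the usual two-step
 * sizing pattern; a minimal sketch (error handling omitted):
 *
 *   size_t len = 0;
 *   sysctlbyname("net.link.generic.system.port_used.list", NULL, &len, NULL, 0);
 *   void *buf = malloc(len);
 *   sysctlbyname("net.link.generic.system.port_used.list", buf, &len, NULL, 0);
 *
 * The returned buffer starts with a struct xnpigen header followed by
 * xng_npi_count struct net_port_info records, as laid out by
 * sysctl_net_port_info_list() below.
 */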

static int use_test_wakeuuid = 0;
static uuid_t test_wakeuuid;

#if (DEVELOPMENT || DEBUG)
SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, use_test_wakeuuid,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &use_test_wakeuuid, 0, "");

static int sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, new_test_wakeuuid,
    CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
    sysctl_new_test_wakeuuid, "S,uuid_t", "");

static int sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, clear_test_wakeuuid,
    CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
    sysctl_clear_test_wakeuuid, "S,uuid_t", "");

SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, test_wakeuuid,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    test_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");

/*
 * use_fake_lpw is used for testing only
 */
#define FAKE_LPW_OFF 0 /* fake LPW off */
#define FAKE_LPW_ON_ONCE 1 /* use fake LPW once */
#define FAKE_LPW_ALWAYS_ON 2 /* permanent fake LPW mode */
#define FAKE_LPW_FLIP_ON 3 /* LPW on, then switch to off */
#define FAKE_LPW_FLIP_OFF 4 /* LPW off, then switch to on */
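
/*
 * The two FLIP modes alternate on every query: FAKE_LPW_FLIP_ON reports
 * LPW enabled and rearms itself as FAKE_LPW_FLIP_OFF, and vice versa, so
 * consecutive calls to if_is_lpw_enabled() on the test interface observe
 * LPW toggling on and off (see the switch statement in if_is_lpw_enabled()).
 */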

static int use_fake_lpw = 0;
static int sysctl_use_fake_lpw SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, use_fake_lpw,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &use_fake_lpw, 0, &sysctl_use_fake_lpw, "I", "");

bool fake_lpw_mode_is_set = false;

SYSCTL_NODE(_net_link_generic_system_port_used, OID_AUTO, mark_wake_packet,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "if port used");

static int sysctl_mark_wake_packet_port SYSCTL_HANDLER_ARGS;
static int sysctl_mark_wake_packet_if SYSCTL_HANDLER_ARGS;

static int mark_wake_packet_local_port = 0;
SYSCTL_PROC(_net_link_generic_system_port_used_mark_wake_packet, OID_AUTO, local_port,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mark_wake_packet_local_port, 0, &sysctl_mark_wake_packet_port, "I", "");

static int mark_wake_packet_remote_port = 0;
SYSCTL_PROC(_net_link_generic_system_port_used_mark_wake_packet, OID_AUTO, remote_port,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mark_wake_packet_remote_port, 0, &sysctl_mark_wake_packet_port, "I", "");

static int mark_wake_packet_ipproto = 0;
SYSCTL_INT(_net_link_generic_system_port_used_mark_wake_packet, OID_AUTO, ipproto,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &mark_wake_packet_ipproto, 0, "");

static char mark_wake_packet_if[IFNAMSIZ];
SYSCTL_PROC(_net_link_generic_system_port_used_mark_wake_packet, OID_AUTO, if,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_mark_wake_packet_if, "A", "");

#endif /* (DEVELOPMENT || DEBUG) */

static int sysctl_get_ports_used SYSCTL_HANDLER_ARGS;
SYSCTL_NODE(_net_link_generic_system, OID_AUTO, get_ports_used,
    CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_get_ports_used, "");

int if_ports_used_verbose = 0;
SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, verbose,
    CTLFLAG_RW | CTLFLAG_LOCKED,
    &if_ports_used_verbose, 0, "");

struct timeval wakeuuid_not_set_last_time;
int sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS;
static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
    wakeuuid_not_set_last_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_wakeuuid_not_set_last_time, "S,timeval", "");

char wakeuuid_not_set_last_if[IFXNAMSIZ];
int sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS;
static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
    wakeuuid_not_set_last_if, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_wakeuuid_not_set_last_if, "A", "");

struct timeval wakeuuid_last_update_time;
int sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS;
static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
    wakeuuid_last_update_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_wakeuuid_last_update_time, "S,timeval", "");

static bool last_wake_phy_if_set = false;
static char last_wake_phy_if_name[IFNAMSIZ]; /* name + unit */
static uint32_t last_wake_phy_if_family;
static uint32_t last_wake_phy_if_subfamily;
static uint32_t last_wake_phy_if_functional_type;
static bool last_wake_phy_if_delay_wake_pkt = false;
static bool last_wake_phy_if_lpw = false;

static bool has_notified_wake_pkt = false;
static bool has_notified_unattributed_wake = false;

static bool is_lpw_mode = false;

static LCK_GRP_DECLARE(net_port_entry_head_lock_group, "net port entry lock");
static LCK_MTX_DECLARE(net_port_entry_head_lock, &net_port_entry_head_lock_group);

struct net_port_entry {
    SLIST_ENTRY(net_port_entry) npe_list_next;
    TAILQ_ENTRY(net_port_entry) npe_hash_next;
    struct net_port_info npe_npi;
};

static KALLOC_TYPE_DEFINE(net_port_entry_zone, struct net_port_entry, NET_KT_DEFAULT);

static SLIST_HEAD(net_port_entry_list, net_port_entry) net_port_entry_list =
    SLIST_HEAD_INITIALIZER(&net_port_entry_list);

struct timeval wakeuiid_last_check;

/*
 * Hashing of the net_port_entry list is based on the local port.
 *
 * The hash mask uses the least significant bits, so we apply it in host
 * byte order: the LSBs have more entropy than the MSBs (most local ports
 * are in the high dynamic port range).
 */
#define NPE_HASH_BUCKET_COUNT 32
#define NPE_HASH_MASK (NPE_HASH_BUCKET_COUNT - 1)
#define NPE_HASH_VAL(_lport) (ntohs(_lport) & NPE_HASH_MASK)
#define NPE_HASH_HEAD(_lport) (&net_port_entry_hash_table[NPE_HASH_VAL(_lport)])
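
/*
 * Worked example: a socket bound to local port 49160 stores the port in
 * network byte order, so NPE_HASH_VAL first converts back with ntohs();
 * 49160 & NPE_HASH_MASK == 49160 & 31 == 8, i.e. bucket 8 of the table.
 * Masking the raw big-endian representation instead would select on the
 * low byte of the wire format, which is the *high* byte of the port value.
 */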

static TAILQ_HEAD(net_port_entry_hash_table, net_port_entry) * __indexable net_port_entry_hash_table = NULL;

/*
 * For some types of physical interfaces we need to delay the notification of
 * wake packet events until a userland interface controller confirms that the
 * AP wake was caused by its packet.
 */
struct net_port_info_wake_pkt_event {
    uint32_t npi_wp_code;
    uint32_t npi_wp_flags;
    union {
        struct net_port_info_wake_event _npi_ev_wake_pkt_attributed;
        struct net_port_info_una_wake_event _npi_ev_wake_pkt_unattributed;
    } npi_ev_wake_pkt_;
};
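
/*
 * npi_wp_code acts as the discriminant for the union above:
 * KEV_POWER_WAKE_PACKET means _npi_ev_wake_pkt_attributed is the valid
 * member, KEV_POWER_UNATTRIBUTED_WAKE means _npi_ev_wake_pkt_unattributed
 * is (see deliver_attributed_wake_packet_event() and
 * deliver_unattributed_wake_packet_event() below).
 */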

#define npi_ev_wake_pkt_attributed npi_ev_wake_pkt_._npi_ev_wake_pkt_attributed
#define npi_ev_wake_pkt_unattributed npi_ev_wake_pkt_._npi_ev_wake_pkt_unattributed

int sysctl_wake_pkt_event_notify SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, wake_pkt_event_notify,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY, 0, 0,
    sysctl_wake_pkt_event_notify, "I", "");

/* Bitmap of the interface families to delay the notification of wake packet events */
static uint32_t npi_wake_packet_event_delay_if_families = 0;

/* How many interface families are supported */
#define NPI_MAX_IF_FAMILY_BITS 32
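
/*
 * For example, writing (1 << IFNET_FAMILY_ETHERNET) through the sysctl
 * below would delay wake packet events for every Ethernet-family
 * interface; is_wake_pkt_event_delay() tests the bit that matches
 * ifp->if_family.
 */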

int sysctl_wake_pkt_event_delay_if_families SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, wake_pkt_event_delay_if_families,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0,
    sysctl_wake_pkt_event_delay_if_families, "I", "");

/* last_wake_pkt_event is informational */
static struct net_port_info_wake_pkt_event last_wake_pkt_event;

/*
 * delay_wake_pkt_event holds the current wake packet event that is delayed
 * waiting for confirmation from a userspace agent.
 * It can be overwritten as a wake packet makes its way up the stack.
 */
static struct net_port_info_wake_pkt_event delay_wake_pkt_event;

int sysctl_last_attributed_wake_event SYSCTL_HANDLER_ARGS;
static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
    last_attributed_wake_event, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_last_attributed_wake_event, "S,net_port_info_wake_event", "");

int sysctl_last_unattributed_wake_event SYSCTL_HANDLER_ARGS;
static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
    last_unattributed_wake_event, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_last_unattributed_wake_event, "S,net_port_info_una_wake_event", "");

os_log_t wake_packet_log_handle = NULL;

static bool is_wake_pkt_event_delay(uint32_t ifrtype);

static bool
_if_need_delayed_wake_pkt_event_inner(struct ifnet *ifp)
{
    if ((ifp->if_xflags & IFXF_DELAYWAKEPKTEVENT) != 0 ||
        is_wake_pkt_event_delay(ifp->if_family)) {
        return true;
    }
    return false;
}

static bool
if_need_delayed_wake_pkt_event(struct ifnet *ifp)
{
    if (ifp != NULL) {
        if (_if_need_delayed_wake_pkt_event_inner(ifp) == true) {
            return true;
        }
        if (ifp->if_delegated.ifp != NULL) {
            return _if_need_delayed_wake_pkt_event_inner(ifp->if_delegated.ifp);
        }
    }
    return false;
}

/*
 * Initialize the net_port_entry hash table.
 */
void
if_ports_used_init(void)
{
    if (net_port_entry_hash_table != NULL) {
        return;
    }

    wake_packet_log_handle = os_log_create("com.apple.xnu.net.wake_packet", "");

    net_port_entry_hash_table = zalloc_permanent(
        NPE_HASH_BUCKET_COUNT * sizeof(*net_port_entry_hash_table),
        ZALIGN_PTR);
}

bool
if_is_lpw_enabled(struct ifnet *ifp)
{
    bool old_is_lpw_mode = is_lpw_mode;

    if (ifp == NULL) {
        return false;
    }

    if ((ifp->if_xflags & IFXF_LOW_POWER_WAKE) == 0 && last_wake_phy_if_lpw == false) {
        return false;
    }

#if (DEBUG || DEVELOPMENT)
    if (use_fake_lpw != FAKE_LPW_OFF) {
        if (strlcmp(mark_wake_packet_if, IF_XNAME(ifp), IFNAMSIZ) == 0) {
            fake_lpw_mode_is_set = true;

            switch (use_fake_lpw) {
            case FAKE_LPW_ON_ONCE:
                is_lpw_mode = true;
                use_fake_lpw = FAKE_LPW_OFF;
                break;
            case FAKE_LPW_ALWAYS_ON:
                is_lpw_mode = true;
                break;
            case FAKE_LPW_FLIP_ON:
                is_lpw_mode = true;
                use_fake_lpw = FAKE_LPW_FLIP_OFF;
                break;
            case FAKE_LPW_FLIP_OFF:
                is_lpw_mode = false;
                use_fake_lpw = FAKE_LPW_FLIP_ON;
                break;
            }

            if (if_ports_used_verbose && is_lpw_mode != old_is_lpw_mode) {
                os_log(wake_packet_log_handle, "if_is_lpw_enabled %s set LPW to %d",
                    IF_XNAME(ifp), is_lpw_mode == true ? 1 : 0);
            }

            return is_lpw_mode;
        }
        /* In fake mode, ignore packets from other interfaces */
        return false;
    }
#endif /* (DEBUG || DEVELOPMENT) */

    if (IOPMIsLPWMode()) {
        is_lpw_mode = true;
    } else {
        is_lpw_mode = false;
    }
    if (if_ports_used_verbose && is_lpw_mode != old_is_lpw_mode) {
        os_log(wake_packet_log_handle, "if_is_lpw_enabled %s set LPW to %d",
            IF_XNAME(ifp), is_lpw_mode == true ? 1 : 0);
    }

    return is_lpw_mode;
}

void
if_exit_lpw(struct ifnet *ifp, const char *lpw_exit_reason)
{
    if (if_is_lpw_enabled(ifp) == false) {
        return;
    }
    is_lpw_mode = false;

    if_ports_used_stats.ifpu_lpw_to_full_wake++;
    os_log(wake_packet_log_handle, "if_exit_lpw: LPW to Full Wake requested on %s reason %s",
        IF_XNAME(ifp), lpw_exit_reason);

#if (DEVELOPMENT || DEBUG)
    if (fake_lpw_mode_is_set == true) {
        /* Let's not mess with the I/O power management subsystem */
        if (IOPMIsLPWMode() == false) {
            return;
        }
    }
#endif /* (DEVELOPMENT || DEBUG) */

    IOPMNetworkStackFullWake(kIOPMNetworkStackFullWakeFlag, "Network.ConnectionNotIdle");
}

static void
net_port_entry_list_clear(void)
{
    struct net_port_entry *npe;

    LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);

    while ((npe = SLIST_FIRST(&net_port_entry_list)) != NULL) {
        SLIST_REMOVE_HEAD(&net_port_entry_list, npe_list_next);
        TAILQ_REMOVE(NPE_HASH_HEAD(npe->npe_npi.npi_local_port), npe, npe_hash_next);

        zfree(net_port_entry_zone, npe);
    }

    for (int i = 0; i < NPE_HASH_BUCKET_COUNT; i++) {
        VERIFY(TAILQ_EMPTY(&net_port_entry_hash_table[i]));
    }

    if_ports_used_stats.ifpu_npe_count = 0;
    if_ports_used_stats.ifpu_wakeuid_gen++;
}

static bool
get_test_wake_uuid(uuid_string_t wakeuuid_str)
{
    if (!uuid_is_null(test_wakeuuid)) {
        if (wakeuuid_str != NULL) {
            uuid_unparse(test_wakeuuid, wakeuuid_str);
        }
        return true;
    }

    return false;
}

static bool
is_wakeuuid_set(void)
{
    if (__improbable(use_test_wakeuuid) && !uuid_is_null(test_wakeuuid)) {
        return true;
    }

    /*
     * IOPMCopySleepWakeUUIDKey() tells whether the SleepWakeUUID is
     * currently set, which means we are currently in a sleep/wake cycle.
     */
    return IOPMCopySleepWakeUUIDKey(NULL, 0);
}

static void
if_ports_reset_wake_attribution_state(void)
{
    has_notified_wake_pkt = false;
    has_notified_unattributed_wake = false;

    memset(&last_wake_pkt_event, 0, sizeof(last_wake_pkt_event));
    memset(&delay_wake_pkt_event, 0, sizeof(delay_wake_pkt_event));

    last_wake_phy_if_set = false;
    memset(&last_wake_phy_if_name, 0, sizeof(last_wake_phy_if_name));
    last_wake_phy_if_family = IFRTYPE_FAMILY_ANY;
    last_wake_phy_if_subfamily = IFRTYPE_SUBFAMILY_ANY;
    last_wake_phy_if_functional_type = IFRTYPE_FUNCTIONAL_UNKNOWN;
    last_wake_phy_if_delay_wake_pkt = false;
    last_wake_phy_if_lpw = false;

    is_lpw_mode = false;
#if (DEVELOPMENT || DEBUG)
    fake_lpw_mode_is_set = false;
#endif /* (DEVELOPMENT || DEBUG) */
}

void
if_ports_used_update_wakeuuid(struct ifnet *ifp)
{
    uuid_t wakeuuid;
    bool wakeuuid_is_set = false;
    bool updated = false;
    uuid_string_t wakeuuid_str;

    uuid_clear(wakeuuid);

    if (__improbable(use_test_wakeuuid)) {
        wakeuuid_is_set = get_test_wake_uuid(wakeuuid_str);
    } else {
        wakeuuid_is_set = IOPMCopySleepWakeUUIDKey(wakeuuid_str,
            sizeof(wakeuuid_str));
    }

    if (wakeuuid_is_set) {
        if (uuid_parse(wakeuuid_str, wakeuuid) != 0) {
            os_log(wake_packet_log_handle,
                "if_ports_used_update_wakeuuid: IOPMCopySleepWakeUUIDKey got bad value %s\n",
                wakeuuid_str);
            wakeuuid_is_set = false;
        }
    }

    if (!wakeuuid_is_set) {
        if (ifp != NULL) {
            if (if_ports_used_verbose > 0) {
                os_log_info(wake_packet_log_handle,
                    "if_ports_used_update_wakeuuid: SleepWakeUUID not set, "
                    "don't update the port list for %s\n", if_name(ifp));
            }
            if_ports_used_stats.ifpu_wakeuuid_not_set_count += 1;
            microtime(&wakeuuid_not_set_last_time);
            strlcpy(wakeuuid_not_set_last_if, if_name(ifp),
                sizeof(wakeuuid_not_set_last_if));
        }
        return;
    }

    lck_mtx_lock(&net_port_entry_head_lock);
    if (uuid_compare(wakeuuid, current_wakeuuid) != 0) {
        if (last_wake_phy_if_delay_wake_pkt) {
            if_ports_used_stats.ifpu_delayed_wake_event_undelivered++;
        }

        net_port_entry_list_clear();
        uuid_copy(current_wakeuuid, wakeuuid);
        microtime(&wakeuuid_last_update_time);
        updated = true;

        if_ports_reset_wake_attribution_state();
    }
    /*
     * Record the time last checked
     */
    microuptime(&wakeuiid_last_check);
    lck_mtx_unlock(&net_port_entry_head_lock);

    if (updated && if_ports_used_verbose > 0) {
        uuid_string_t uuid_str;

        uuid_unparse(current_wakeuuid, uuid_str);
        os_log(wake_packet_log_handle, "if_ports_used_update_wakeuuid: current wakeuuid %s for %s",
            uuid_str, ifp != NULL ? if_name(ifp) : "");
    }
}

void
IOPMNetworkStackWillSleepFromAOT(void)
{
    /*
     * We do not take a lock because we assume there won't be any concurrent
     * power state event when going to sleep from AOT
     */
    if_ports_reset_wake_attribution_state();

    if (if_ports_used_verbose > 0) {
        os_log(wake_packet_log_handle, "IOPMNetworkStackWillSleepFromAOT");
    }
}

static bool
net_port_info_equal(const struct net_port_info *x,
    const struct net_port_info *y)
{
    ASSERT(x != NULL && y != NULL);

    if (x->npi_if_index == y->npi_if_index &&
        x->npi_local_port == y->npi_local_port &&
        x->npi_foreign_port == y->npi_foreign_port &&
        x->npi_owner_pid == y->npi_owner_pid &&
        x->npi_effective_pid == y->npi_effective_pid &&
        x->npi_flags == y->npi_flags &&
        memcmp(&x->npi_local_addr_, &y->npi_local_addr_,
        sizeof(union in_addr_4_6)) == 0 &&
        memcmp(&x->npi_foreign_addr_, &y->npi_foreign_addr_,
        sizeof(union in_addr_4_6)) == 0) {
        return true;
    }
    return false;
}

static bool
net_port_info_has_entry(const struct net_port_info *npi)
{
    struct net_port_entry *npe;
    bool found = false;
    int32_t count = 0;

    LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);

    TAILQ_FOREACH(npe, NPE_HASH_HEAD(npi->npi_local_port), npe_hash_next) {
        count += 1;
        if (net_port_info_equal(&npe->npe_npi, npi)) {
            found = true;
            break;
        }
    }
    if_ports_used_stats.ifpu_npi_hash_search_total += count;
    if (count > if_ports_used_stats.ifpu_npi_hash_search_max) {
        if_ports_used_stats.ifpu_npi_hash_search_max = count;
    }

    return found;
}

static bool
net_port_info_add_entry(const struct net_port_info *npi)
{
    struct net_port_entry *npe = NULL;
    uint32_t num = 0;
    bool entry_added = false;

    ASSERT(npi != NULL);

    if (__improbable(is_wakeuuid_set() == false)) {
        if_ports_used_stats.ifpu_npi_not_added_no_wakeuuid++;
        if (if_ports_used_verbose > 0) {
            os_log(wake_packet_log_handle, "%s: wakeuuid not set not adding "
                "port: %u flags: 0x%x if: %u pid: %u epid %u",
                __func__,
                ntohs(npi->npi_local_port),
                npi->npi_flags,
                npi->npi_if_index,
                npi->npi_owner_pid,
                npi->npi_effective_pid);
        }
        return false;
    }

    npe = zalloc_flags(net_port_entry_zone, Z_WAITOK | Z_ZERO);
    if (__improbable(npe == NULL)) {
        os_log(wake_packet_log_handle, "%s: zalloc() failed for "
            "port: %u flags: 0x%x if: %u pid: %u epid %u",
            __func__,
            ntohs(npi->npi_local_port),
            npi->npi_flags,
            npi->npi_if_index,
            npi->npi_owner_pid,
            npi->npi_effective_pid);
        return false;
    }

    memcpy(&npe->npe_npi, npi, sizeof(npe->npe_npi));

    if (IF_INDEX_IN_RANGE(npe->npe_npi.npi_if_index)) {
        struct ifnet *ifp = ifindex2ifnet[npe->npe_npi.npi_if_index];
        if (ifp != NULL) {
            if (IFNET_IS_COMPANION_LINK(ifp)) {
                npe->npe_npi.npi_flags |= NPIF_COMPLINK;
            }
            if (if_need_delayed_wake_pkt_event(ifp)) {
                npe->npe_npi.npi_flags |= NPIF_DELAYWAKEPKTEVENT;
            }
        }
    }

    lck_mtx_lock(&net_port_entry_head_lock);

    if (net_port_info_has_entry(npi) == false) {
        SLIST_INSERT_HEAD(&net_port_entry_list, npe, npe_list_next);
        TAILQ_INSERT_HEAD(NPE_HASH_HEAD(npi->npi_local_port), npe, npe_hash_next);
        num = (uint32_t)if_ports_used_stats.ifpu_npe_count++; /* rollover OK */
        entry_added = true;

        if (if_ports_used_stats.ifpu_npe_count > if_ports_used_stats.ifpu_npe_max) {
            if_ports_used_stats.ifpu_npe_max = if_ports_used_stats.ifpu_npe_count;
        }
        if_ports_used_stats.ifpu_npe_total++;

        if (if_ports_used_verbose > 1) {
            os_log(wake_packet_log_handle, "%s: num %u for "
                "port: %u flags: 0x%x if: %u pid: %u epid %u",
                __func__,
                num,
                ntohs(npi->npi_local_port),
                npi->npi_flags,
                npi->npi_if_index,
                npi->npi_owner_pid,
                npi->npi_effective_pid);
        }
    } else {
        if_ports_used_stats.ifpu_npe_dup++;
        if (if_ports_used_verbose > 2) {
            os_log(wake_packet_log_handle, "%s: already added "
                "port: %u flags: 0x%x if: %u pid: %u epid %u",
                __func__,
                ntohs(npi->npi_local_port),
                npi->npi_flags,
                npi->npi_if_index,
                npi->npi_owner_pid,
                npi->npi_effective_pid);
        }
    }

    lck_mtx_unlock(&net_port_entry_head_lock);

    if (entry_added == false) {
        zfree(net_port_entry_zone, npe);
    }
    return entry_added;
}

#if (DEVELOPMENT || DEBUG)
static int
sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error = 0;

    if (kauth_cred_issuser(kauth_cred_get()) == 0) {
        return EPERM;
    }
    if (req->oldptr == USER_ADDR_NULL) {
        req->oldidx = sizeof(uuid_t);
        return 0;
    }
    if (req->newptr != USER_ADDR_NULL) {
        uuid_generate(test_wakeuuid);
        if_ports_used_update_wakeuuid(NULL);
    }
    error = SYSCTL_OUT(req, test_wakeuuid,
        MIN(sizeof(uuid_t), req->oldlen));

    return error;
}

static int
sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error = 0;

    if (kauth_cred_issuser(kauth_cred_get()) == 0) {
        return EPERM;
    }
    if (req->oldptr == USER_ADDR_NULL) {
        req->oldidx = sizeof(uuid_t);
        return 0;
    }
    if (req->newptr != USER_ADDR_NULL) {
        uuid_clear(test_wakeuuid);
        if_ports_used_update_wakeuuid(NULL);
    }
    error = SYSCTL_OUT(req, test_wakeuuid,
        MIN(sizeof(uuid_t), req->oldlen));

    return error;
}

#endif /* (DEVELOPMENT || DEBUG) */

static int
sysctl_timeval(struct sysctl_req *req, const struct timeval *tv)
{
    if (proc_is64bit(req->p)) {
        struct user64_timeval tv64 = {};

        tv64.tv_sec = tv->tv_sec;
        tv64.tv_usec = tv->tv_usec;
        return SYSCTL_OUT(req, &tv64, sizeof(tv64));
    } else {
        struct user32_timeval tv32 = {};

        tv32.tv_sec = (user32_time_t)tv->tv_sec;
        tv32.tv_usec = tv->tv_usec;
        return SYSCTL_OUT(req, &tv32, sizeof(tv32));
    }
}

int
sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

    return sysctl_timeval(req, &wakeuuid_last_update_time);
}

int
sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

    return sysctl_timeval(req, &wakeuuid_not_set_last_time);
}

int
sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

    return SYSCTL_OUT(req, &wakeuuid_not_set_last_if, strbuflen(wakeuuid_not_set_last_if) + 1);
}

int
sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    size_t len = sizeof(struct if_ports_used_stats);

    if (req->oldptr != 0) {
        len = MIN(req->oldlen, sizeof(struct if_ports_used_stats));
    }
    return SYSCTL_OUT(req, &if_ports_used_stats, len);
}

static int
sysctl_net_port_info_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error = 0;
    struct xnpigen xnpigen;
    struct net_port_entry *npe;

    if ((error = priv_check_cred(kauth_cred_get(),
        PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0)) != 0) {
        return EPERM;
    }
    lck_mtx_lock(&net_port_entry_head_lock);

    if (req->oldptr == USER_ADDR_NULL) {
        /* Add a 25% cushion */
        size_t cnt = (size_t)if_ports_used_stats.ifpu_npe_count;
        cnt += cnt >> 2;
        req->oldidx = sizeof(struct xnpigen) +
            cnt * sizeof(struct net_port_info);
        goto done;
    }

    memset(&xnpigen, 0, sizeof(struct xnpigen));
    xnpigen.xng_len = sizeof(struct xnpigen);
    xnpigen.xng_gen = (uint32_t)if_ports_used_stats.ifpu_wakeuid_gen;
    uuid_copy(xnpigen.xng_wakeuuid, current_wakeuuid);
    xnpigen.xng_npi_count = (uint32_t)if_ports_used_stats.ifpu_npe_count;
    xnpigen.xng_npi_size = sizeof(struct net_port_info);
    error = SYSCTL_OUT(req, &xnpigen, sizeof(xnpigen));
    if (error != 0) {
        printf("%s: SYSCTL_OUT(xnpigen) error %d\n",
            __func__, error);
        goto done;
    }

    SLIST_FOREACH(npe, &net_port_entry_list, npe_list_next) {
        error = SYSCTL_OUT(req, &npe->npe_npi,
            sizeof(struct net_port_info));
        if (error != 0) {
            printf("%s: SYSCTL_OUT(npi) error %d\n",
                __func__, error);
            goto done;
        }
    }
done:
    lck_mtx_unlock(&net_port_entry_head_lock);

    return error;
}

/*
 * Mirror the arguments of ifnet_get_local_ports_extended():
 *  ifindex
 *  protocol
 *  flags
 */
static int
sysctl_get_ports_used SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
    /*
     * 3 is the required number of parameters: ifindex, protocol and flags
     */
    DECLARE_SYSCTL_HANDLER_ARG_ARRAY(int, 3, name, namelen);
    int error = 0;
    int idx;
    protocol_family_t protocol;
    u_int32_t flags;
    ifnet_t ifp = NULL;
    u_int8_t *bitfield = NULL;

    if (req->newptr != USER_ADDR_NULL) {
        error = EPERM;
        goto done;
    }

    if (req->oldptr == USER_ADDR_NULL) {
        req->oldidx = bitstr_size(IP_PORTRANGE_SIZE);
        goto done;
    }
    if (req->oldlen < bitstr_size(IP_PORTRANGE_SIZE)) {
        error = ENOMEM;
        goto done;
    }
    bitfield = (u_int8_t *)kalloc_data(bitstr_size(IP_PORTRANGE_SIZE),
        Z_WAITOK | Z_ZERO);
    if (bitfield == NULL) {
        error = ENOMEM;
        goto done;
    }

    idx = name[0];
    protocol = name[1];
    flags = name[2];

    ifnet_head_lock_shared();
    if (IF_INDEX_IN_RANGE(idx)) {
        ifp = ifindex2ifnet[idx];
    }
    ifnet_head_done();

    error = ifnet_get_local_ports_extended(ifp, protocol, flags, bitfield);
    if (error != 0) {
        printf("%s: ifnet_get_local_ports_extended() error %d\n",
            __func__, error);
        goto done;
    }
    error = SYSCTL_OUT(req, bitfield, bitstr_size(IP_PORTRANGE_SIZE));
done:
    if (bitfield != NULL) {
        kfree_data(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
    }
    return error;
}
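
/*
 * Example userland caller (a sketch; the interface name and flag choice
 * are illustrative): the three trailing OID words are ifindex, protocol
 * and flags, mirroring ifnet_get_local_ports_extended():
 *
 *   int mib[CTL_MAXNAME];
 *   size_t miblen = CTL_MAXNAME;
 *   sysctlnametomib("net.link.generic.system.get_ports_used", mib, &miblen);
 *   mib[miblen + 0] = (int)if_nametoindex("en0");
 *   mib[miblen + 1] = PF_UNSPEC;                         // protocol
 *   mib[miblen + 2] = IFNET_GET_LOCAL_PORTS_WILDCARDOK;  // flags
 *   uint8_t bitfield[8192];      // bitstr_size(65536) == 8192 bytes
 *   size_t len = sizeof(bitfield);
 *   sysctl(mib, (u_int)miblen + 3, bitfield, &len, NULL, 0);
 *
 * Bit N of the result is set when local port N is in use.
 */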

__private_extern__ bool
if_ports_used_add_inpcb(const uint32_t ifindex, const struct inpcb *inp)
{
    struct net_port_info npi = {};
    struct socket *so = inp->inp_socket;

    /* This is unlikely to happen but better be safe than sorry */
    if (ifindex > UINT16_MAX) {
        os_log(wake_packet_log_handle, "%s: ifindex %u too big", __func__, ifindex);
        return false;
    }

    if (ifindex != 0) {
        npi.npi_if_index = (uint16_t)ifindex;
    } else if (inp->inp_last_outifp != NULL) {
        npi.npi_if_index = (uint16_t)inp->inp_last_outifp->if_index;
    }

    npi.npi_flags |= NPIF_SOCKET;

    npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
    npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

    if (so->so_options & SO_NOWAKEFROMSLEEP) {
        npi.npi_flags |= NPIF_NOWAKE;
    }

    if (inp->inp_flags2 & INP2_CONNECTION_IDLE) {
        npi.npi_flags |= NPIF_CONNECTION_IDLE;
    }

    if (SOCK_PROTO(so) == IPPROTO_TCP) {
        struct tcpcb *tp = intotcpcb(inp);

        npi.npi_flags |= NPIF_TCP;
        if (tp != NULL && tp->t_state == TCPS_LISTEN) {
            npi.npi_flags |= NPIF_LISTEN;
        }
    } else if (SOCK_PROTO(so) == IPPROTO_UDP) {
        npi.npi_flags |= NPIF_UDP;
    } else {
        os_log(wake_packet_log_handle, "%s: unexpected protocol %u for inp %p", __func__,
            SOCK_PROTO(inp->inp_socket), inp);
        return false;
    }

    uuid_copy(npi.npi_flow_uuid, inp->necp_client_uuid);

    npi.npi_local_port = inp->inp_lport;
    npi.npi_foreign_port = inp->inp_fport;

    /*
     * Take into account IPv4 addresses mapped onto IPv6
     */
    if ((inp->inp_vflag & INP_IPV6) != 0 && (inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
        (inp->inp_vflag & (INP_IPV6 | INP_IPV4)) == (INP_IPV6 | INP_IPV4)) {
        npi.npi_flags |= NPIF_IPV6 | NPIF_IPV4;
        memcpy(&npi.npi_local_addr_in6,
            &inp->in6p_laddr, sizeof(struct in6_addr));
    } else if (inp->inp_vflag & INP_IPV4) {
        npi.npi_flags |= NPIF_IPV4;
        npi.npi_local_addr_in = inp->inp_laddr;
        npi.npi_foreign_addr_in = inp->inp_faddr;
    } else {
        npi.npi_flags |= NPIF_IPV6;
        memcpy(&npi.npi_local_addr_in6,
            &inp->in6p_laddr, sizeof(struct in6_addr));
        memcpy(&npi.npi_foreign_addr_in6,
            &inp->in6p_faddr, sizeof(struct in6_addr));

        /* Clear the embedded scope ID */
        if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
            npi.npi_local_addr_in6.s6_addr16[1] = 0;
        }
        if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
            npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
        }
    }

    npi.npi_owner_pid = so->last_pid;

    if (so->last_pid != 0) {
        proc_name(so->last_pid, npi.npi_owner_pname,
            sizeof(npi.npi_owner_pname));
        uuid_copy(npi.npi_owner_uuid, so->last_uuid);
    }

    if (so->so_flags & SOF_DELEGATED) {
        npi.npi_flags |= NPIF_DELEGATED;
        npi.npi_effective_pid = so->e_pid;
        if (so->e_pid != 0) {
            proc_name(so->e_pid, npi.npi_effective_pname,
                sizeof(npi.npi_effective_pname));
        }
        uuid_copy(npi.npi_effective_uuid, so->e_uuid);
    } else {
        npi.npi_effective_pid = so->last_pid;
        if (so->last_pid != 0) {
            strbufcpy(npi.npi_effective_pname, npi.npi_owner_pname);
        }
        uuid_copy(npi.npi_effective_uuid, so->last_uuid);
    }

    return net_port_info_add_entry(&npi);
}

#if SKYWALK
__private_extern__ bool
if_ports_used_add_flow_entry(const struct flow_entry *fe, const uint32_t ifindex,
    const struct ns_flow_info *nfi, uint32_t ns_flags)
{
    struct net_port_info npi = {};

    /* This is unlikely to happen but better be safe than sorry */
    if (ifindex > UINT16_MAX) {
        os_log(wake_packet_log_handle, "%s: ifindex %u too big", __func__, ifindex);
        return false;
    }
    npi.npi_if_index = (uint16_t)ifindex;

    npi.npi_flags |= NPIF_CHANNEL;

    npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
    npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

    if (ns_flags & NETNS_NOWAKEFROMSLEEP) {
        npi.npi_flags |= NPIF_NOWAKE;
    }
    if (ns_flags & NETNS_CONNECTION_IDLE) {
        npi.npi_flags |= NPIF_CONNECTION_IDLE;
    }
    if ((ns_flags & NETNS_OWNER_MASK) == NETNS_LISTENER) {
        npi.npi_flags |= NPIF_LISTEN;
    }

    uuid_copy(npi.npi_flow_uuid, nfi->nfi_flow_uuid);

    if (nfi->nfi_protocol == IPPROTO_TCP) {
        npi.npi_flags |= NPIF_TCP;
    } else if (nfi->nfi_protocol == IPPROTO_UDP) {
        npi.npi_flags |= NPIF_UDP;
    } else {
        os_log(wake_packet_log_handle, "%s: unexpected protocol %u for nfi %p",
            __func__, nfi->nfi_protocol, nfi);
        return false;
    }

    if (nfi->nfi_laddr.sa.sa_family == AF_INET) {
        npi.npi_flags |= NPIF_IPV4;

        npi.npi_local_port = nfi->nfi_laddr.sin.sin_port;
        npi.npi_foreign_port = nfi->nfi_faddr.sin.sin_port;

        npi.npi_local_addr_in = nfi->nfi_laddr.sin.sin_addr;
        npi.npi_foreign_addr_in = nfi->nfi_faddr.sin.sin_addr;
    } else {
        npi.npi_flags |= NPIF_IPV6;

        npi.npi_local_port = nfi->nfi_laddr.sin6.sin6_port;
        npi.npi_foreign_port = nfi->nfi_faddr.sin6.sin6_port;

        memcpy(&npi.npi_local_addr_in6,
            &nfi->nfi_laddr.sin6.sin6_addr, sizeof(struct in6_addr));
        memcpy(&npi.npi_foreign_addr_in6,
            &nfi->nfi_faddr.sin6.sin6_addr, sizeof(struct in6_addr));

        /* Clear the embedded scope ID */
        if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
            npi.npi_local_addr_in6.s6_addr16[1] = 0;
        }
        if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
            npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
        }
    }

    npi.npi_owner_pid = nfi->nfi_owner_pid;
    strbufcpy(npi.npi_owner_pname, nfi->nfi_owner_name);

    /*
     * Get the proc UUID from the pid as the proc UUID is not present
     * in the flow_entry
     */
    proc_t proc = proc_find(npi.npi_owner_pid);
    if (proc != PROC_NULL) {
        proc_getexecutableuuid(proc, npi.npi_owner_uuid, sizeof(npi.npi_owner_uuid));
        proc_rele(proc);
    }
    if (nfi->nfi_effective_pid != -1) {
        npi.npi_effective_pid = nfi->nfi_effective_pid;
        strbufcpy(npi.npi_effective_pname, nfi->nfi_effective_name);
        uuid_copy(npi.npi_effective_uuid, fe->fe_eproc_uuid);
    } else {
        npi.npi_effective_pid = npi.npi_owner_pid;
        strbufcpy(npi.npi_effective_pname, npi.npi_owner_pname);
        uuid_copy(npi.npi_effective_uuid, npi.npi_owner_uuid);
    }

    return net_port_info_add_entry(&npi);
}

#endif /* SKYWALK */

static void
net_port_info_log_npi(const char *s, const struct net_port_info *npi)
{
    char lbuf[MAX_IPv6_STR_LEN] = {};
    char fbuf[MAX_IPv6_STR_LEN] = {};

    if (npi == NULL) {
        os_log(wake_packet_log_handle, "%s", s);
        return;
    }

    if (npi->npi_flags & NPIF_IPV4) {
        inet_ntop(PF_INET, &npi->npi_local_addr_in.s_addr,
            lbuf, sizeof(lbuf));
        inet_ntop(PF_INET, &npi->npi_foreign_addr_in.s_addr,
            fbuf, sizeof(fbuf));
    } else if (npi->npi_flags & NPIF_IPV6) {
        inet_ntop(PF_INET6, &npi->npi_local_addr_in6,
            lbuf, sizeof(lbuf));
        inet_ntop(PF_INET6, &npi->npi_foreign_addr_in6,
            fbuf, sizeof(fbuf));
    }
    os_log(wake_packet_log_handle, "%s net_port_info if_index %u arch %s family %s proto %s local %s:%u foreign %s:%u pid: %u epid %u",
        s != NULL ? s : "",
        npi->npi_if_index,
        (npi->npi_flags & NPIF_SOCKET) ? "so" : (npi->npi_flags & NPIF_CHANNEL) ? "ch" : "unknown",
        (npi->npi_flags & NPIF_IPV4) ? "ipv4" : (npi->npi_flags & NPIF_IPV6) ? "ipv6" : "unknown",
        npi->npi_flags & NPIF_TCP ? "tcp" : npi->npi_flags & NPIF_UDP ? "udp" :
        npi->npi_flags & NPIF_ESP ? "esp" : "unknown",
        lbuf, ntohs(npi->npi_local_port),
        fbuf, ntohs(npi->npi_foreign_port),
        npi->npi_owner_pid,
        npi->npi_effective_pid);
}

/*
 * net_port_info_match_npi() returns true for an exact match that does not have "no wake" set
 */
#define NPI_MATCH_IPV4 (NPIF_IPV4 | NPIF_TCP | NPIF_UDP)
#define NPI_MATCH_IPV6 (NPIF_IPV6 | NPIF_TCP | NPIF_UDP)

static bool
net_port_info_match_npi(struct net_port_entry *npe, const struct net_port_info *in_npi,
    struct net_port_entry **best_match)
{
    if (__improbable(net_wake_pkt_debug > 1)) {
        net_port_info_log_npi("net_port_info_match_npi", &npe->npe_npi);
    }

    /*
     * The interfaces must match or both be companion link
     */
    if (npe->npe_npi.npi_if_index != in_npi->npi_if_index &&
        !((npe->npe_npi.npi_flags & NPIF_COMPLINK) && (in_npi->npi_flags & NPIF_COMPLINK))) {
        return false;
    }

    /*
     * The local ports and protocols must match
     */
    if (npe->npe_npi.npi_local_port != in_npi->npi_local_port ||
        ((npe->npe_npi.npi_flags & NPI_MATCH_IPV4) != (in_npi->npi_flags & NPI_MATCH_IPV4) &&
        (npe->npe_npi.npi_flags & NPI_MATCH_IPV6) != (in_npi->npi_flags & NPI_MATCH_IPV6))) {
        return false;
    }

    /*
     * Search stops on an exact match
     */
    if (npe->npe_npi.npi_foreign_port == in_npi->npi_foreign_port) {
        if ((npe->npe_npi.npi_flags & NPIF_IPV4) && (in_npi->npi_flags & NPIF_IPV4)) {
            if (in_npi->npi_local_addr_in.s_addr == npe->npe_npi.npi_local_addr_in.s_addr &&
                in_npi->npi_foreign_addr_in.s_addr == npe->npe_npi.npi_foreign_addr_in.s_addr) {
                if (npe->npe_npi.npi_flags & NPIF_NOWAKE) {
                    /*
                     * Do not overwrite an existing match when "no wake" is set
                     */
                    if (*best_match == NULL) {
                        *best_match = npe;
                    }
                    return false;
                }
                *best_match = npe;
                return true;
            }
        }
        if ((npe->npe_npi.npi_flags & NPIF_IPV6) && (in_npi->npi_flags & NPIF_IPV6)) {
            if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
                sizeof(union in_addr_4_6)) == 0 &&
                memcmp(&npe->npe_npi.npi_foreign_addr_, &in_npi->npi_foreign_addr_,
                sizeof(union in_addr_4_6)) == 0) {
                if (npe->npe_npi.npi_flags & NPIF_NOWAKE) {
                    /*
                     * Do not overwrite an existing match when "no wake" is set
                     */
                    if (*best_match == NULL) {
                        *best_match = npe;
                    }
                    return false;
                }
                *best_match = npe;
                return true;
            }
        }
    }
    /*
     * Skip connected entries as we are looking for a wildcard match
     * on the local address and port
     */
    if (npe->npe_npi.npi_foreign_port != 0) {
        return false;
    }
    /*
     * Do not overwrite an existing match when "no wake" is set
     */
    if (*best_match != NULL && (npe->npe_npi.npi_flags & NPIF_NOWAKE) != 0) {
        return false;
    }
    /*
     * The local address matches: this is our 2nd best match
     */
    if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
        sizeof(union in_addr_4_6)) == 0) {
        *best_match = npe;
        return false;
    }

    /*
     * Only the local port matches, do not override a match
     * on the local address
     */
    if (*best_match == NULL) {
        *best_match = npe;
    }
    return false;
}
#undef NPI_MATCH_IPV4
#undef NPI_MATCH_IPV6
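
/*
 * To summarize the precedence implemented above: an exact match on
 * interface, protocol, addresses and ports wins and stops the search;
 * otherwise a wildcard entry (foreign port 0) whose local address matches
 * beats one that matches on local port only, and an entry flagged
 * NPIF_NOWAKE never displaces a previously found candidate.
 */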

/*
 * net_port_info_find_match() returns true when a best matching entry is
 * found: the entry is flagged with NPIF_WAKEPKT and its attribution
 * information (flags, pids, process names and UUIDs) is copied into in_npi.
 */
static bool
net_port_info_find_match(struct net_port_info *in_npi)
{
    struct net_port_entry *npe;
    struct net_port_entry * __single best_match = NULL;

    lck_mtx_lock(&net_port_entry_head_lock);

    uint32_t count = 0;
    TAILQ_FOREACH(npe, NPE_HASH_HEAD(in_npi->npi_local_port), npe_hash_next) {
        count += 1;
        /*
         * Search stops on an exact match
         */
        if (net_port_info_match_npi(npe, in_npi, &best_match)) {
            break;
        }
    }

    if (best_match != NULL) {
        best_match->npe_npi.npi_flags |= NPIF_WAKEPKT;
        in_npi->npi_flags = best_match->npe_npi.npi_flags;
        in_npi->npi_owner_pid = best_match->npe_npi.npi_owner_pid;
        in_npi->npi_effective_pid = best_match->npe_npi.npi_effective_pid;
        strbufcpy(in_npi->npi_owner_pname, best_match->npe_npi.npi_owner_pname);
        strbufcpy(in_npi->npi_effective_pname, best_match->npe_npi.npi_effective_pname);
        uuid_copy(in_npi->npi_owner_uuid, best_match->npe_npi.npi_owner_uuid);
        uuid_copy(in_npi->npi_effective_uuid, best_match->npe_npi.npi_effective_uuid);
    }
    lck_mtx_unlock(&net_port_entry_head_lock);

    if (__improbable(net_wake_pkt_debug > 0)) {
        if (best_match != NULL) {
            net_port_info_log_npi("wake packet match", in_npi);
        } else {
            net_port_info_log_npi("wake packet no match", in_npi);
        }
    }

    return best_match != NULL;
}

#if (DEBUG || DEVELOPMENT)
static void
net_port_info_log_una_wake_event(const char *s, struct net_port_info_una_wake_event *ev)
{
    char lbuf[MAX_IPv6_STR_LEN] = {};
    char fbuf[MAX_IPv6_STR_LEN] = {};

    if (ev->una_wake_pkt_flags & NPIF_IPV4) {
        inet_ntop(PF_INET, &ev->una_wake_pkt_local_addr_._in_a_4.s_addr,
            lbuf, sizeof(lbuf));
        inet_ntop(PF_INET, &ev->una_wake_pkt_foreign_addr_._in_a_4.s_addr,
            fbuf, sizeof(fbuf));
    } else if (ev->una_wake_pkt_flags & NPIF_IPV6) {
        inet_ntop(PF_INET6, &ev->una_wake_pkt_local_addr_._in_a_6.s6_addr,
            lbuf, sizeof(lbuf));
        inet_ntop(PF_INET6, &ev->una_wake_pkt_foreign_addr_._in_a_6.s6_addr,
            fbuf, sizeof(fbuf));
    }
    os_log(wake_packet_log_handle, "%s if %s (%u) phy_if %s proto %s local %s:%u foreign %s:%u len: %u datalen: %u cflags: 0x%x proto: %u lpw: %d",
        s != NULL ? s : "",
        ev->una_wake_pkt_ifname, ev->una_wake_pkt_if_index, ev->una_wake_pkt_phy_ifname,
        ev->una_wake_pkt_flags & NPIF_TCP ? "tcp" : ev->una_wake_pkt_flags & NPIF_UDP ? "udp" :
        ev->una_wake_pkt_flags & NPIF_ESP ? "esp" : "unknown",
        lbuf, ntohs(ev->una_wake_pkt_local_port),
        fbuf, ntohs(ev->una_wake_pkt_foreign_port),
        ev->una_wake_pkt_total_len, ev->una_wake_pkt_data_len,
        ev->una_wake_pkt_control_flags, ev->una_wake_pkt_proto,
        ev->una_wake_pkt_flags & NPIF_LPW ? 1 : 0);
}

static void
net_port_info_log_wake_event(const char *s, struct net_port_info_wake_event *ev)
{
    char lbuf[MAX_IPv6_STR_LEN] = {};
    char fbuf[MAX_IPv6_STR_LEN] = {};

    if (ev->wake_pkt_flags & NPIF_IPV4) {
        inet_ntop(PF_INET, &ev->wake_pkt_local_addr_._in_a_4.s_addr,
            lbuf, sizeof(lbuf));
        inet_ntop(PF_INET, &ev->wake_pkt_foreign_addr_._in_a_4.s_addr,
            fbuf, sizeof(fbuf));
    } else if (ev->wake_pkt_flags & NPIF_IPV6) {
        inet_ntop(PF_INET6, &ev->wake_pkt_local_addr_._in_a_6.s6_addr,
            lbuf, sizeof(lbuf));
        inet_ntop(PF_INET6, &ev->wake_pkt_foreign_addr_._in_a_6.s6_addr,
            fbuf, sizeof(fbuf));
    }
    os_log(wake_packet_log_handle, "%s if %s (%u) phy_if %s proto %s local %s:%u foreign %s:%u len: %u datalen: %u cflags: 0x%x proc %s eproc %s idle %d lpw %d",
        s != NULL ? s : "",
        ev->wake_pkt_ifname, ev->wake_pkt_if_index, ev->wake_pkt_phy_ifname,
        ev->wake_pkt_flags & NPIF_TCP ? "tcp" : ev->wake_pkt_flags & NPIF_UDP ? "udp" :
        ev->wake_pkt_flags & NPIF_ESP ? "esp" : "unknown",
        lbuf, ntohs(ev->wake_pkt_port),
        fbuf, ntohs(ev->wake_pkt_foreign_port),
        ev->wake_pkt_total_len, ev->wake_pkt_data_len, ev->wake_pkt_control_flags,
        ev->wake_pkt_owner_pname, ev->wake_pkt_effective_pname,
        ev->wake_pkt_flags & NPIF_CONNECTION_IDLE ? 1 : 0,
        ev->wake_pkt_flags & NPIF_LPW ? 1 : 0);
}

#endif /* (DEBUG || DEVELOPMENT) */

/*
 * The process attribution of a wake packet can take several steps:
 *
 * 1) After the device wakes, the first interface that sees a wake packet is
 *    the physical interface and we remember it via
 *    if_set_wake_physical_interface()
 *
 * 2) We try to attribute the packet to a flow based on the physical
 *    interface. If we find a flow, then the physical interface is the same
 *    as the interface used by the TCP/UDP flow.
 *
 * 3) If the packet is tunneled or redirected, we do the attribution again
 *    and the physical interface will be different from the interface used
 *    by the TCP/UDP flow.
 */
static bool
is_wake_pkt_event_delay(uint32_t ifrtype)
{
    // Prevent overflow of the bitstring
    if (ifrtype >= NPI_MAX_IF_FAMILY_BITS) {
        return false;
    }
    if (bitstr_test((bitstr_t *)&npi_wake_packet_event_delay_if_families, ifrtype)) {
        return true;
    }
    return false;
}

static int
if_set_wake_physical_interface(struct ifnet *ifp)
{
    /*
     * A physical interface is either Ethernet, cellular or companion link
     * over Bluetooth; otherwise assume it is some kind of tunnel
     */
    if (ifp->if_family != IFNET_FAMILY_ETHERNET && ifp->if_family != IFNET_FAMILY_CELLULAR &&
        IFNET_IS_COMPANION_LINK_BLUETOOTH(ifp) == false) {
        return 0;
    }

    /*
     * Only handle one wake from a physical interface per wake cycle
     */
    if (last_wake_phy_if_set == true) {
        if_ports_used_stats.ifpu_wake_pkt_event_error += 1;
        os_log(wake_packet_log_handle,
            "if_set_wake_physical_interface ignored on %s because already set on %s",
            IF_XNAME(ifp), last_wake_phy_if_name);
        return EJUSTRETURN;
    }

    last_wake_phy_if_set = true;
    strlcpy(last_wake_phy_if_name, IF_XNAME(ifp), sizeof(last_wake_phy_if_name));
    last_wake_phy_if_family = ifp->if_family;
    last_wake_phy_if_subfamily = ifp->if_subfamily;
    last_wake_phy_if_functional_type = if_functional_type(ifp, true);

    if (if_need_delayed_wake_pkt_event(ifp)) {
        if_ports_used_stats.ifpu_delay_phy_wake_pkt += 1;
        last_wake_phy_if_delay_wake_pkt = true;
        os_log(wake_packet_log_handle, "if_set_wake_physical_interface %s last_wake_phy_if_delay_wake_pkt set",
            IF_XNAME(ifp));
    }
    if ((ifp->if_xflags & IFXF_LOW_POWER_WAKE) != 0) {
        last_wake_phy_if_lpw = true;
    }

    return 0;
}

static void
deliver_unattributed_wake_packet_event(struct net_port_info_una_wake_event *event_data)
{
    struct kev_msg ev_msg = {};

    has_notified_unattributed_wake = true;

    if_ports_used_stats.ifpu_unattributed_wake_event += 1;

    last_wake_pkt_event.npi_wp_code = KEV_POWER_UNATTRIBUTED_WAKE;
    memcpy(&last_wake_pkt_event.npi_ev_wake_pkt_unattributed, event_data,
        sizeof(struct net_port_info_una_wake_event));

    ev_msg.vendor_code = KEV_VENDOR_APPLE;
    ev_msg.kev_class = KEV_NETWORK_CLASS;
    ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
    ev_msg.event_code = KEV_POWER_UNATTRIBUTED_WAKE;

    ev_msg.dv[0].data_ptr = event_data;
    ev_msg.dv[0].data_length = sizeof(struct net_port_info_una_wake_event);

    int result = kev_post_msg(&ev_msg);
    if (result != 0) {
        uuid_string_t wake_uuid_str;

        uuid_unparse(event_data->una_wake_uuid, wake_uuid_str);
        os_log_error(wake_packet_log_handle,
            "%s: kev_post_msg() failed with error %d for wake uuid %s",
            __func__, result, wake_uuid_str);

        if_ports_used_stats.ifpu_wake_pkt_event_error += 1;
    }
#if (DEBUG || DEVELOPMENT)
    net_port_info_log_una_wake_event("unattributed wake packet event", event_data);
#endif /* (DEBUG || DEVELOPMENT) */
}

static void
deliver_attributed_wake_packet_event(struct net_port_info_wake_event *event_data)
{
    struct kev_msg ev_msg = {};

    has_notified_wake_pkt = true;

    if_ports_used_stats.ifpu_wake_pkt_event += 1;

    last_wake_pkt_event.npi_wp_code = KEV_POWER_WAKE_PACKET;
    memcpy(&last_wake_pkt_event.npi_ev_wake_pkt_attributed, event_data,
        sizeof(struct net_port_info_wake_event));

    ev_msg.vendor_code = KEV_VENDOR_APPLE;
    ev_msg.kev_class = KEV_NETWORK_CLASS;
    ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
    ev_msg.event_code = KEV_POWER_WAKE_PACKET;

    ev_msg.dv[0].data_ptr = event_data;
    ev_msg.dv[0].data_length = sizeof(struct net_port_info_wake_event);

    int result = kev_post_msg(&ev_msg);
    if (result != 0) {
        uuid_string_t wake_uuid_str;

        uuid_unparse(event_data->wake_uuid, wake_uuid_str);
        os_log_error(wake_packet_log_handle,
            "%s: kev_post_msg() failed with error %d for wake uuid %s",
            __func__, result, wake_uuid_str);

        if_ports_used_stats.ifpu_wake_pkt_event_error += 1;
    }
#if (DEBUG || DEVELOPMENT)
    net_port_info_log_wake_event("attributed wake packet event", event_data);
#endif /* (DEBUG || DEVELOPMENT) */
}

static bool
is_unattributed_wake_already_notified(struct net_port_info *npi)
{
    bool retval = false;

    if (has_notified_unattributed_wake == true || has_notified_wake_pkt == true) {
        if_ports_used_stats.ifpu_dup_unattributed_wake_event += 1;

        if (__improbable(net_wake_pkt_debug > 0)) {
            net_port_info_log_npi("already notified unattributed wake packet", npi);
        }
        retval = true;
    }

    return retval;
}

static void
check_for_existing_delayed_wake_event(void)
{
    /*
     * Count the delayed events that are ignored: the most recent delayed
     * wake event wins as the packet makes its way up the stack
     */
    if (delay_wake_pkt_event.npi_wp_code == KEV_POWER_WAKE_PACKET) {
        if_ports_used_stats.ifpu_ignored_delayed_attributed_events += 1;
    } else if (delay_wake_pkt_event.npi_wp_code == KEV_POWER_UNATTRIBUTED_WAKE) {
        if_ports_used_stats.ifpu_ignored_delayed_unattributed_events += 1;
    }
}

static void
if_notify_unattributed_wake_common(struct ifnet *ifp, struct net_port_info *npi,
    struct net_port_info_una_wake_event *event_data)
{
    LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);
    lck_mtx_lock(&net_port_entry_head_lock);

    if (is_unattributed_wake_already_notified(npi) == true) {
        goto done;
    }

    /*
     * Check if this is a wake packet that we cannot process inline
     */
    if (if_need_delayed_wake_pkt_event(ifp)) {
        check_for_existing_delayed_wake_event();

        delay_wake_pkt_event.npi_wp_code = KEV_POWER_UNATTRIBUTED_WAKE;
        memcpy(&delay_wake_pkt_event.npi_ev_wake_pkt_unattributed, event_data,
            sizeof(struct net_port_info_una_wake_event));

#if (DEBUG || DEVELOPMENT)
        if (if_ports_used_verbose > 0) {
            net_port_info_log_una_wake_event("delay unattributed wake packet event", event_data);
        }
#endif /* (DEBUG || DEVELOPMENT) */

        goto done;
    }
    deliver_unattributed_wake_packet_event(event_data);

done:
    lck_mtx_unlock(&net_port_entry_head_lock);
}

static void
if_notify_unattributed_wake_mbuf(struct ifnet *ifp, struct mbuf *m,
    struct net_port_info *npi, uint32_t pkt_total_len, uint32_t pkt_data_len,
    uint16_t pkt_control_flags, uint16_t proto)
{
    struct net_port_info_una_wake_event event_data = {};

    uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
    event_data.una_wake_pkt_if_index = ifp->if_index;
    event_data.una_wake_pkt_flags = npi->npi_flags;

    event_data.una_wake_pkt_local_port = npi->npi_local_port;
    event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
    event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
    event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;

    event_data.una_wake_pkt_total_len = pkt_total_len;
    event_data.una_wake_pkt_data_len = pkt_data_len;
    event_data.una_wake_pkt_control_flags = pkt_control_flags;
    event_data.una_wake_pkt_proto = proto;

    strlcpy(event_data.una_wake_pkt_ifname, IF_XNAME(ifp),
        sizeof(event_data.una_wake_pkt_ifname));
    event_data.una_wake_pkt_if_info.npi_if_family = ifp->if_family;
    event_data.una_wake_pkt_if_info.npi_if_subfamily = ifp->if_subfamily;
    event_data.una_wake_pkt_if_info.npi_if_functional_type = if_functional_type(ifp, true);

    strbufcpy(event_data.una_wake_pkt_phy_ifname, last_wake_phy_if_name);
    event_data.una_wake_pkt_phy_if_info.npi_if_family = last_wake_phy_if_family;
    event_data.una_wake_pkt_phy_if_info.npi_if_subfamily = last_wake_phy_if_subfamily;
    event_data.una_wake_pkt_phy_if_info.npi_if_functional_type = last_wake_phy_if_functional_type;

    event_data.una_wake_ptk_len = m->m_pkthdr.len > NPI_MAX_UNA_WAKE_PKT_LEN ?
        NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t)m->m_pkthdr.len;

    errno_t error = mbuf_copydata(m, 0, event_data.una_wake_ptk_len,
        (void *)event_data.una_wake_pkt);
    if (error != 0) {
        uuid_string_t wake_uuid_str;

        uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
        os_log_error(wake_packet_log_handle,
            "%s: mbuf_copydata() failed with error %d for wake uuid %s",
            __func__, error, wake_uuid_str);

        if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
        return;
    }

    if_notify_unattributed_wake_common(ifp, npi, &event_data);
}

static bool
is_attributed_wake_already_notified(struct net_port_info *npi)
{
    if (has_notified_wake_pkt == true) {
        if_ports_used_stats.ifpu_dup_wake_pkt_event += 1;
        if (__improbable(net_wake_pkt_debug > 0)) {
            net_port_info_log_npi("already notified attributed wake packet", npi);
        }
        return true;
    }

    return false;
}
1658
1659 static void
1660 if_notify_wake_packet(struct ifnet *ifp, struct net_port_info *npi,
1661 uint32_t pkt_total_len, uint32_t pkt_data_len, uint16_t pkt_control_flags)
1662 {
1663 struct net_port_info_wake_event event_data = {};
1664
1665 uuid_copy(event_data.wake_uuid, current_wakeuuid);
1666 event_data.wake_pkt_if_index = ifp->if_index;
1667 event_data.wake_pkt_port = npi->npi_local_port;
1668 event_data.wake_pkt_flags = npi->npi_flags;
1669 event_data.wake_pkt_owner_pid = npi->npi_owner_pid;
1670 event_data.wake_pkt_effective_pid = npi->npi_effective_pid;
1671 strbufcpy(event_data.wake_pkt_owner_pname, npi->npi_owner_pname);
1672 strbufcpy(event_data.wake_pkt_effective_pname, npi->npi_effective_pname);
1673 uuid_copy(event_data.wake_pkt_owner_uuid, npi->npi_owner_uuid);
1674 uuid_copy(event_data.wake_pkt_effective_uuid, npi->npi_effective_uuid);
1675
1676 event_data.wake_pkt_foreign_port = npi->npi_foreign_port;
1677 event_data.wake_pkt_local_addr_ = npi->npi_local_addr_;
1678 event_data.wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
1679 strlcpy(event_data.wake_pkt_ifname, IF_XNAME(ifp), sizeof(event_data.wake_pkt_ifname));
1680
1681 event_data.wake_pkt_if_info.npi_if_family = ifp->if_family;
1682 event_data.wake_pkt_if_info.npi_if_subfamily = ifp->if_subfamily;
1683 event_data.wake_pkt_if_info.npi_if_functional_type = if_functional_type(ifp, true);
1684
1685 strbufcpy(event_data.wake_pkt_phy_ifname, last_wake_phy_if_name);
1686 event_data.wake_pkt_phy_if_info.npi_if_family = last_wake_phy_if_family;
1687 event_data.wake_pkt_phy_if_info.npi_if_subfamily = last_wake_phy_if_subfamily;
1688 event_data.wake_pkt_phy_if_info.npi_if_functional_type = last_wake_phy_if_functional_type;
1689
1690 event_data.wake_pkt_total_len = pkt_total_len;
1691 event_data.wake_pkt_data_len = pkt_data_len;
1692 event_data.wake_pkt_control_flags = pkt_control_flags;
1693 if (npi->npi_flags & NPIF_NOWAKE) {
1694 event_data.wake_pkt_control_flags |= NPICF_NOWAKE;
1695 }
1696
1697 LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);
1698
1699 lck_mtx_lock(&net_port_entry_head_lock);
1700
1701 /*
1702 * Always immediately notify attributed wake for idle connections in LPW
1703 * even if an attributed wake has already been notified or
1704 * the interface requires delayed wake attribution
1705 */
1706 if (if_is_lpw_enabled(ifp) &&
1707 (npi->npi_flags & NPIF_CONNECTION_IDLE) != 0) {
1708 goto deliver;
1709 }
1710
1711 if (is_attributed_wake_already_notified(npi) == true) {
1712 goto done;
1713 }
1714
1715 /*
1716 * Check if this is a wake packet that we cannot process inline
1717 * We do not delay attributed idle connections in LPW because it is more
1718 * important to get an accurate count of attributed idle connections in LPW
1719 * than an accurate count of attributed wakes.
1720 */
1721 if (if_need_delayed_wake_pkt_event(ifp)) {
1722 check_for_existing_delayed_wake_event();
1723
1724 delay_wake_pkt_event.npi_wp_code = KEV_POWER_WAKE_PACKET;
1725 memcpy(&delay_wake_pkt_event.npi_ev_wake_pkt_attributed, &event_data,
1726 sizeof(struct net_port_info_wake_event));
1727
1728 #if (DEBUG || DEVELOPMENT)
1729 if (if_ports_used_verbose > 0) {
1730 net_port_info_log_wake_event("delay attributed wake packet event", &event_data);
1731 }
1732 #endif /* (DEBUG || DEVELOPMENT) */
1733
1734 goto done;
1735 }
1736
1737 deliver:
1738 if (npi->npi_flags & NPIF_NOWAKE) {
1739 if_ports_used_stats.ifpu_spurious_wake_event += 1;
1740 }
1741
1742 deliver_attributed_wake_packet_event(&event_data);
1743 done:
1744 lck_mtx_unlock(&net_port_entry_head_lock);
1745 }
1746
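/*
 * Illustrative sketch (not compiled): the delivery policy implemented by
 * if_notify_wake_packet() above, restated as a single decision function.
 * The enum and helper name are hypothetical; what matters is the order of
 * the checks: idle-connection wakes in LPW are always delivered inline,
 * duplicate attributed wakes are dropped, and interfaces that require
 * delayed attribution queue the event in delay_wake_pkt_event for
 * sysctl_wake_pkt_event_notify() to release later.
 */
#if 0
enum wake_delivery { WAKE_DELIVER_NOW, WAKE_DROP_DUP, WAKE_DELAY };

static enum wake_delivery
wake_pkt_delivery_policy(struct ifnet *ifp, struct net_port_info *npi)
{
	if (if_is_lpw_enabled(ifp) &&
	    (npi->npi_flags & NPIF_CONNECTION_IDLE) != 0) {
		return WAKE_DELIVER_NOW;    /* always notify idle LPW wakes */
	}
	if (is_attributed_wake_already_notified(npi)) {
		return WAKE_DROP_DUP;       /* one attributed wake per cycle */
	}
	if (if_need_delayed_wake_pkt_event(ifp)) {
		return WAKE_DELAY;          /* queue for delayed delivery */
	}
	return WAKE_DELIVER_NOW;
}
#endif
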
1747 static bool
1748 is_encapsulated_esp(struct mbuf *m, size_t data_offset)
1749 {
1750 /*
1751 * There are three cases:
1752 * - Keep alive: 1 byte payload
1753 * - IKE: payload starts with a 4-byte header set to zero before the ISAKMP header
1754 * - otherwise it's ESP
1755 */
1756 ASSERT(m->m_pkthdr.len >= data_offset);
1757
1758 size_t data_len = m->m_pkthdr.len - data_offset;
1759 if (data_len == 1) {
1760 return false;
1761 } else if (data_len > ESP_HDR_SIZE) {
1762 uint8_t payload[ESP_HDR_SIZE];
1763
1764 errno_t error = mbuf_copydata(m, data_offset, ESP_HDR_SIZE, &payload);
1765 if (error != 0) {
1766 os_log(wake_packet_log_handle, "%s: mbuf_copydata(ESP_HDR_SIZE) error %d",
1767 __func__, error);
1768 } else if (payload[0] == 0 && payload[1] == 0 &&
1769 payload[2] == 0 && payload[3] == 0) {
1770 return false;
1771 }
1772 }
1773 return true;
1774 }
1775
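/*
 * Illustrative sketch (not compiled): the RFC 3948 demultiplexing rule that
 * is_encapsulated_esp() above applies to port 4500 traffic, restated over a
 * contiguous payload buffer. A 1-byte payload is a NAT-keepalive, a payload
 * starting with a 4-byte zero marker is IKE, and anything else is ESP whose
 * first four bytes are a non-zero SPI.
 */
#if 0
static bool
udp_payload_is_esp(const uint8_t *payload, size_t len)
{
	if (len == 1) {
		return false;   /* NAT-keepalive */
	}
	if (len > ESP_HDR_SIZE &&
	    payload[0] == 0 && payload[1] == 0 &&
	    payload[2] == 0 && payload[3] == 0) {
		return false;   /* non-ESP marker: IKE */
	}
	return true;            /* non-zero SPI: UDP-encapsulated ESP */
}
#endif
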
1776 extern void log_hexdump(os_log_t log_handle, void *__sized_by(len) data, size_t len);
1777
1778 void
1779 log_hexdump(os_log_t log_handle, void *__sized_by(len) data, size_t len)
1780 {
1781 size_t i, j, k;
1782 unsigned char *ptr = (unsigned char *)data;
1783 #define MAX_DUMP_BUF 32
1784 unsigned char buf[3 * MAX_DUMP_BUF + 1];
1785
1786 for (i = 0; i < len; i += MAX_DUMP_BUF) {
1787 for (j = i, k = 0; j < i + MAX_DUMP_BUF && j < len; j++) {
1788 unsigned char msnbl = ptr[j] >> 4;
1789 unsigned char lsnbl = ptr[j] & 0x0f;
1790
1791 buf[k++] = msnbl < 10 ? msnbl + '0' : msnbl + 'a' - 10;
1792 buf[k++] = lsnbl < 10 ? lsnbl + '0' : lsnbl + 'a' - 10;
1793
1794 if ((j % 2) == 1) {
1795 buf[k++] = ' ';
1796 }
1797 if ((j % MAX_DUMP_BUF) == MAX_DUMP_BUF - 1) {
1798 buf[k++] = ' ';
1799 }
1800 }
1801 buf[k] = 0;
1802 os_log(log_handle, "%3lu: %s", i, buf);
1803 }
1804 }
1805
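/*
 * Usage sketch (not compiled): log_hexdump() emits one log line per
 * MAX_DUMP_BUF input bytes, each prefixed with the starting offset and
 * showing hex digits grouped two bytes at a time.
 */
#if 0
static void
log_hexdump_example(void)
{
	uint8_t sample[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01, 0x02, 0x03 };

	log_hexdump(wake_packet_log_handle, sample, sizeof(sample));
	/* logs: "  0: dead beef 0001 0203 " */
}
#endif
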
1806 __attribute__((noinline))
1807 static void
1808 log_wake_mbuf(struct ifnet *ifp, struct mbuf *m)
1809 {
1810 char buffer[64];
1811 size_t buflen = MIN(mbuf_pkthdr_len(m), sizeof(buffer));
1812
1813 os_log(wake_packet_log_handle, "wake packet from %s len %d",
1814 ifp->if_xname, m_pktlen(m));
1815 if (mbuf_copydata(m, 0, buflen, buffer) == 0) {
1816 log_hexdump(wake_packet_log_handle, buffer, buflen);
1817 }
1818 }
1819
1820 void
1821 if_ports_used_match_mbuf(struct ifnet *ifp, protocol_family_t proto_family, struct mbuf *m)
1822 {
1823 errno_t error;
1824 struct net_port_info npi = {};
1825 bool found = false;
1826 uint32_t pkt_total_len = 0;
1827 uint32_t pkt_data_len = 0;
1828 uint16_t pkt_control_flags = 0;
1829 uint16_t pkt_proto = 0;
1830
1831 if (ifp == NULL) {
1832 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: receive interface is NULL");
1833 if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
1834 return;
1835 }
1836
1837 if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == 0) {
1838 if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
1839 os_log_error(wake_packet_log_handle, "if_ports_used_match_mbuf: called with PKTF_WAKE_PKT not set from %s",
1840 IF_XNAME(ifp));
1841 return;
1842 }
1843
1844 if (__improbable(net_wake_pkt_debug > 0)) {
1845 log_wake_mbuf(ifp, m);
1846 }
1847
1848 /*
1849 * Only accept one wake from a physical interface per wake cycle
1850 */
1851 if (if_set_wake_physical_interface(ifp) == EJUSTRETURN) {
1852 if (if_is_lpw_enabled(ifp) == false) {
1853 m->m_pkthdr.pkt_flags &= ~PKTF_WAKE_PKT;
1854 }
1855 return;
1856 }
1857
1858 if_ports_used_stats.ifpu_so_match_wake_pkt += 1;
1859 npi.npi_flags |= NPIF_SOCKET; /* For logging */
1860 pkt_total_len = m->m_pkthdr.len;
1861 pkt_data_len = pkt_total_len;
1862
1863 npi.npi_if_index = ifp->if_index;
1864 if (IFNET_IS_COMPANION_LINK(ifp)) {
1865 npi.npi_flags |= NPIF_COMPLINK;
1866 }
1867
1868 if (proto_family == PF_INET) {
1869 struct ip iphdr = {};
1870
1871 if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;
1872
1873 error = mbuf_copydata(m, 0, sizeof(struct ip), &iphdr);
1874 if (error != 0) {
1875 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(ip) error %d",
1876 error);
1877 goto failed;
1878 }
1879 npi.npi_flags |= NPIF_IPV4;
1880 npi.npi_local_addr_in = iphdr.ip_dst;
1881 npi.npi_foreign_addr_in = iphdr.ip_src;
1882
1883 /*
1884 * Check if this is a fragment that is not the first fragment
1885 */
1886 if ((ntohs(iphdr.ip_off) & ~(IP_DF | IP_RF)) &&
1887 (ntohs(iphdr.ip_off) & IP_OFFMASK) != 0) {
1888 npi.npi_flags |= NPIF_FRAG;
1889 if_ports_used_stats.ifpu_frag_wake_pkt += 1;
1890 }
1891
1892 if ((iphdr.ip_hl << 2) < pkt_data_len) {
1893 pkt_data_len -= iphdr.ip_hl << 2;
1894 } else {
1895 pkt_data_len = 0;
1896 }
1897
1898 pkt_proto = iphdr.ip_p;
1899
1900 switch (iphdr.ip_p) {
1901 case IPPROTO_TCP: {
1902 if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
1903 npi.npi_flags |= NPIF_TCP;
1904
1905 if (npi.npi_flags & NPIF_FRAG) {
1906 goto failed;
1907 }
1908
1909 struct tcphdr th = {};
1910 error = mbuf_copydata(m, iphdr.ip_hl << 2, sizeof(struct tcphdr), &th);
1911 if (error != 0) {
1912 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(tcphdr) error %d",
1913 error);
1914 goto failed;
1915 }
1916 npi.npi_local_port = th.th_dport;
1917 npi.npi_foreign_port = th.th_sport;
1918
1919 if (pkt_data_len < sizeof(struct tcphdr) ||
1920 pkt_data_len < (th.th_off << 2)) {
1921 pkt_data_len = 0;
1922 } else {
1923 pkt_data_len -= th.th_off << 2;
1924 }
1925 pkt_control_flags = th.th_flags;
1926 break;
1927 }
1928 case IPPROTO_UDP: {
1929 if_ports_used_stats.ifpu_udp_wake_pkt += 1;
1930 npi.npi_flags |= NPIF_UDP;
1931
1932 if (npi.npi_flags & NPIF_FRAG) {
1933 goto failed;
1934 }
1935 struct udphdr uh = {};
1936 size_t udp_offset = iphdr.ip_hl << 2;
1937
1938 error = mbuf_copydata(m, udp_offset, sizeof(struct udphdr), &uh);
1939 if (error != 0) {
1940 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(udphdr) error %d",
1941 error);
1942 goto failed;
1943 }
1944 npi.npi_local_port = uh.uh_dport;
1945 npi.npi_foreign_port = uh.uh_sport;
1946 /*
1947 * Let the ESP layer handle wake packets
1948 */
1949 if (ntohs(uh.uh_dport) == PORT_ISAKMP_NATT ||
1950 ntohs(uh.uh_sport) == PORT_ISAKMP_NATT) {
1951 if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
1952 if (is_encapsulated_esp(m, udp_offset + sizeof(struct udphdr))) {
1953 if (net_wake_pkt_debug > 0) {
1954 net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
1955 }
1956 return;
1957 }
1958 }
1959
1960 if (pkt_data_len < sizeof(struct udphdr)) {
1961 pkt_data_len = 0;
1962 } else {
1963 pkt_data_len -= sizeof(struct udphdr);
1964 }
1965 break;
1966 }
1967 case IPPROTO_ESP: {
1968 /*
1969 * Let the ESP layer handle wake packets
1970 */
1971 if_ports_used_stats.ifpu_esp_wake_pkt += 1;
1972 npi.npi_flags |= NPIF_ESP;
1973 if (net_wake_pkt_debug > 0) {
1974 net_port_info_log_npi("defer ESP matching", &npi);
1975 }
1976 return;
1977 }
1978 default:
1979 if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;
1980 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: unexpected IPv4 protocol %u from %s",
1981 iphdr.ip_p, IF_XNAME(ifp));
1982 goto failed;
1983 }
1984 } else if (proto_family == PF_INET6) {
1985 struct ip6_hdr ip6_hdr = {};
1986
1987 if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;
1988
1989 error = mbuf_copydata(m, 0, sizeof(struct ip6_hdr), &ip6_hdr);
1990 if (error != 0) {
1991 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(ip6_hdr) error %d",
1992 error);
1993 goto failed;
1994 }
1995 npi.npi_flags |= NPIF_IPV6;
1996 memcpy(&npi.npi_local_addr_in6, &ip6_hdr.ip6_dst, sizeof(struct in6_addr));
1997 memcpy(&npi.npi_foreign_addr_in6, &ip6_hdr.ip6_src, sizeof(struct in6_addr));
1998
1999 size_t l3_len = sizeof(struct ip6_hdr);
2000 uint8_t l4_proto = ip6_hdr.ip6_nxt;
2001
2002 pkt_proto = l4_proto;
2003
2004 if (pkt_data_len < l3_len) {
2005 pkt_data_len = 0;
2006 } else {
2007 pkt_data_len -= l3_len;
2008 }
2009
2010 /*
2011 * Check if this is a fragment that is not the first fragment
2012 */
2013 if (l4_proto == IPPROTO_FRAGMENT) {
2014 struct ip6_frag ip6_frag;
2015
2016 error = mbuf_copydata(m, sizeof(struct ip6_hdr), sizeof(struct ip6_frag), &ip6_frag);
2017 if (error != 0) {
2018 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(ip6_frag) error %d",
2019 error);
2020 goto failed;
2021 }
2022
2023 l3_len += sizeof(struct ip6_frag);
2024 l4_proto = ip6_frag.ip6f_nxt;
2025
2026 if ((ip6_frag.ip6f_offlg & IP6F_OFF_MASK) != 0) {
2027 npi.npi_flags |= NPIF_FRAG;
2028 if_ports_used_stats.ifpu_frag_wake_pkt += 1;
2029 }
2030 }
2031
2032
2033 switch (l4_proto) {
2034 case IPPROTO_TCP: {
2035 if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
2036 npi.npi_flags |= NPIF_TCP;
2037
2038 /*
2039 * Cannot attribute a fragment that is not the first fragment as it
2040 * does not have the TCP header
2041 */
2042 if (npi.npi_flags & NPIF_FRAG) {
2043 goto failed;
2044 }
2045
2046 struct tcphdr th = {};
2047
2048 error = mbuf_copydata(m, l3_len, sizeof(struct tcphdr), &th);
2049 if (error != 0) {
2050 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(tcphdr) error %d",
2051 error);
2052 if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
2053 goto failed;
2054 }
2055 npi.npi_local_port = th.th_dport;
2056 npi.npi_foreign_port = th.th_sport;
2057
2058 if (pkt_data_len < sizeof(struct tcphdr) ||
2059 pkt_data_len < (th.th_off << 2)) {
2060 pkt_data_len = 0;
2061 } else {
2062 pkt_data_len -= th.th_off << 2;
2063 }
2064 pkt_control_flags = th.th_flags;
2065 break;
2066 }
2067 case IPPROTO_UDP: {
2068 if_ports_used_stats.ifpu_udp_wake_pkt += 1;
2069 npi.npi_flags |= NPIF_UDP;
2070
2071 /*
2072 * Cannot attribute a fragment that is not the first fragment as it
2073 * does not have the UDP header
2074 */
2075 if (npi.npi_flags & NPIF_FRAG) {
2076 goto failed;
2077 }
2078
2079 struct udphdr uh = {};
2080
2081 error = mbuf_copydata(m, l3_len, sizeof(struct udphdr), &uh);
2082 if (error != 0) {
2083 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: mbuf_copydata(udphdr) error %d",
2084 error);
2085 if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
2086 goto failed;
2087 }
2088 npi.npi_local_port = uh.uh_dport;
2089 npi.npi_foreign_port = uh.uh_sport;
2090 /*
2091 * Let the ESP layer handle wake packets
2092 */
2093 if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
2094 ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
2095 if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
2096 if (is_encapsulated_esp(m, l3_len + sizeof(struct udphdr))) {
2097 if (net_wake_pkt_debug > 0) {
2098 net_port_info_log_npi("defer encapsulated ESP matching", &npi);
2099 }
2100 return;
2101 }
2102 }
2103
2104 if (pkt_data_len < sizeof(struct udphdr)) {
2105 pkt_data_len = 0;
2106 } else {
2107 pkt_data_len -= sizeof(struct udphdr);
2108 }
2109 break;
2110 }
2111 case IPPROTO_ESP: {
2112 /*
2113 * Let the ESP layer handle the wake packet
2114 */
2115 if_ports_used_stats.ifpu_esp_wake_pkt += 1;
2116 npi.npi_flags |= NPIF_ESP;
2117 if (net_wake_pkt_debug > 0) {
2118 net_port_info_log_npi("defer ESP matching", &npi);
2119 }
2120 return;
2121 }
2122 default:
2123 if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;
2124
2125 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: unexpected IPv6 protocol %u from %s",
2126 ip6_hdr.ip6_nxt, IF_XNAME(ifp));
2127 goto failed;
2128 }
2129 } else {
2130 if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;
2131 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: unexpected protocol family %d from %s",
2132 proto_family, IF_XNAME(ifp));
2133 goto failed;
2134 }
2135
2136 found = net_port_info_find_match(&npi);
2137
2138 failed:
2139 if (__improbable(if_is_lpw_enabled(ifp))) {
2140 npi.npi_flags |= NPIF_LPW;
2141
2142 if (found && (npi.npi_flags & NPIF_CONNECTION_IDLE)) {
2143 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: idle connection in LPW on %s",
2144 IF_XNAME(ifp));
2145
2146 if_ports_used_stats.ifpu_lpw_connection_idle_wake++;
2147 } else {
2148 os_log(wake_packet_log_handle, "if_ports_used_match_mbuf: non-idle connection in LPW on %s",
2149 IF_XNAME(ifp));
2150
2151 if_ports_used_stats.ifpu_lpw_not_idle_wake++;
2152 }
2153 }
2154 if (found) {
2155 if_notify_wake_packet(ifp, &npi,
2156 pkt_total_len, pkt_data_len, pkt_control_flags);
2157 } else {
2158 if_notify_unattributed_wake_mbuf(ifp, m, &npi,
2159 pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
2160 }
2161 }
2162
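/*
 * Illustrative sketch (not compiled): the header-length arithmetic used by
 * if_ports_used_match_mbuf() above, over a contiguous IPv4/TCP packet for
 * clarity. ip_hl and th_off both count 32-bit words, hence the << 2 when
 * computing byte offsets; ports are kept in network byte order to match
 * the net_port_info fields. The helper name is hypothetical.
 */
#if 0
static bool
extract_ipv4_tcp_ports(const uint8_t *pkt, size_t len,
    uint16_t *local_port, uint16_t *foreign_port)
{
	const struct ip *ip = (const struct ip *)(const void *)pkt;

	if (len < sizeof(struct ip)) {
		return false;
	}
	size_t l3_len = (size_t)ip->ip_hl << 2;   /* IPv4 header bytes */
	if (len < l3_len + sizeof(struct tcphdr)) {
		return false;
	}
	const struct tcphdr *th =
	    (const struct tcphdr *)(const void *)(pkt + l3_len);
	*local_port = th->th_dport;     /* destination = local, like npi */
	*foreign_port = th->th_sport;
	return true;
}
#endif
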
2163 #if SKYWALK
2164
2165 static void
2166 if_notify_unattributed_wake_pkt(struct ifnet *ifp, struct __kern_packet *pkt,
2167 struct net_port_info *npi, uint32_t pkt_total_len, uint32_t pkt_data_len,
2168 uint16_t pkt_control_flags, uint16_t proto)
2169 {
2170 struct net_port_info_una_wake_event event_data = {};
2171
2172 uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
2173 event_data.una_wake_pkt_if_index = ifp->if_index;
2174 event_data.una_wake_pkt_flags = npi->npi_flags;
2175
2176 uint16_t offset = kern_packet_get_network_header_offset(SK_PKT2PH(pkt));
2177 event_data.una_wake_ptk_len =
2178 pkt->pkt_length - offset > NPI_MAX_UNA_WAKE_PKT_LEN ?
2179 NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t) pkt->pkt_length - offset;
2180
2181 kern_packet_copy_bytes(SK_PKT2PH(pkt), offset, event_data.una_wake_ptk_len,
2182 event_data.una_wake_pkt);
2183
2184 event_data.una_wake_pkt_local_port = npi->npi_local_port;
2185 event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
2186 event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
2187 event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
2188 strlcpy(event_data.una_wake_pkt_ifname, IF_XNAME(ifp),
2189 sizeof(event_data.una_wake_pkt_ifname));
2190
2191 event_data.una_wake_pkt_total_len = pkt_total_len;
2192 event_data.una_wake_pkt_data_len = pkt_data_len;
2193 event_data.una_wake_pkt_control_flags = pkt_control_flags;
2194 event_data.una_wake_pkt_proto = proto;
2195
2196 if_notify_unattributed_wake_common(ifp, npi, &event_data);
2197 }
2198
2199 __attribute__((noinline))
2200 static void
2201 log_wake_pkt(struct ifnet *ifp, struct __kern_packet *pkt)
2202 {
2203 uint32_t len;
2204
2205 if (pkt->pkt_pflags & PKT_F_MBUF_DATA) {
2206 len = m_pktlen(pkt->pkt_mbuf);
2207 } else {
2208 len = __packet_get_real_data_length(pkt);
2209 }
2210
2211 os_log(wake_packet_log_handle, "wake packet from %s len %d",
2212 ifp->if_xname, len);
2213 }
2214
2215 void
2216 if_ports_used_match_pkt(struct ifnet *ifp, struct __kern_packet *pkt)
2217 {
2218 struct net_port_info npi = {};
2219 bool found = false;
2220 uint32_t pkt_total_len = 0;
2221 uint32_t pkt_data_len = 0;
2222 uint16_t pkt_control_flags = 0;
2223 uint16_t pkt_proto = 0;
2224
2225 if (ifp == NULL) {
2226 os_log(wake_packet_log_handle, "if_ports_used_match_pkt: receive interface is NULL");
2227 if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
2228 return;
2229 }
2230
2231 if ((pkt->pkt_pflags & PKT_F_WAKE_PKT) == 0) {
2232 if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
2233 os_log_error(wake_packet_log_handle, "%s: called with PKT_F_WAKE_PKT not set from %s",
2234 __func__, IF_XNAME(ifp));
2235 return;
2236 }
2237
2238
2239 if (__improbable(net_wake_pkt_debug > 0)) {
2240 log_wake_pkt(ifp, pkt);
2241 }
2242
2243 /*
2244 * Only accept one wake from a physical interface per wake cycle
2245 */
2246 if (if_set_wake_physical_interface(ifp) == EJUSTRETURN) {
2247 pkt->pkt_pflags &= ~PKT_F_WAKE_PKT;
2248 return;
2249 }
2250
2251 if_ports_used_stats.ifpu_ch_match_wake_pkt += 1;
2252 npi.npi_flags |= NPIF_CHANNEL; /* For logging */
2253 pkt_total_len = pkt->pkt_flow_ip_hlen +
2254 pkt->pkt_flow_tcp_hlen + pkt->pkt_flow_ulen;
2255 pkt_data_len = pkt->pkt_flow_ulen;
2256
2257 npi.npi_if_index = ifp->if_index;
2258 if (IFNET_IS_COMPANION_LINK(ifp)) {
2259 npi.npi_flags |= NPIF_COMPLINK;
2260 }
2261
2262
2263 switch (pkt->pkt_flow_ip_ver) {
2264 case IPVERSION:
2265 if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;
2266
2267 npi.npi_flags |= NPIF_IPV4;
2268 npi.npi_local_addr_in = pkt->pkt_flow_ipv4_dst;
2269 npi.npi_foreign_addr_in = pkt->pkt_flow_ipv4_src;
2270 break;
2271 case IPV6_VERSION:
2272 if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;
2273
2274 npi.npi_flags |= NPIF_IPV6;
2275 memcpy(&npi.npi_local_addr_in6, &pkt->pkt_flow_ipv6_dst,
2276 sizeof(struct in6_addr));
2277 memcpy(&npi.npi_foreign_addr_in6, &pkt->pkt_flow_ipv6_src,
2278 sizeof(struct in6_addr));
2279 break;
2280 default:
2281 if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;
2282
2283 os_log(wake_packet_log_handle, "%s: unexpected protocol family %u from %s",
2284 __func__, pkt->pkt_flow_ip_ver, IF_XNAME(ifp));
2285 goto failed;
2286 }
2287 pkt_proto = pkt->pkt_flow_ip_ver;
2288
2289 /*
2290 * Check if this is a fragment that is not the first fragment
2291 */
2292 if (pkt->pkt_flow_ip_is_frag && !pkt->pkt_flow_ip_is_first_frag) {
2293 os_log(wake_packet_log_handle, "%s: unexpected wake fragment from %s",
2294 __func__, IF_XNAME(ifp));
2295 npi.npi_flags |= NPIF_FRAG;
2296 if_ports_used_stats.ifpu_frag_wake_pkt += 1;
2297 }
2298
2299 switch (pkt->pkt_flow_ip_proto) {
2300 case IPPROTO_TCP: {
2301 if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
2302 npi.npi_flags |= NPIF_TCP;
2303
2304 /*
2305 * Cannot attribute a fragment that is not the first fragment as it
2306 * does not have the TCP header
2307 */
2308 if (npi.npi_flags & NPIF_FRAG) {
2309 goto failed;
2310 }
2311 struct tcphdr * __single tcp = __unsafe_forge_single(struct tcphdr *, pkt->pkt_flow_tcp_hdr);
2312 if (tcp == NULL) {
2313 os_log(wake_packet_log_handle, "%s: pkt with unassigned TCP header from %s",
2314 __func__, IF_XNAME(ifp));
2315 if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
2316 goto failed;
2317 }
2318 npi.npi_local_port = tcp->th_dport;
2319 npi.npi_foreign_port = tcp->th_sport;
2320 pkt_control_flags = tcp->th_flags;
2321 break;
2322 }
2323 case IPPROTO_UDP: {
2324 if_ports_used_stats.ifpu_udp_wake_pkt += 1;
2325 npi.npi_flags |= NPIF_UDP;
2326
2327 /*
2328 * Cannot attribute a fragment that is not the first fragment as it
2329 * does not have the UDP header
2330 */
2331 if (npi.npi_flags & NPIF_FRAG) {
2332 goto failed;
2333 }
2334 struct udphdr * __single uh = __unsafe_forge_single(struct udphdr *, pkt->pkt_flow_udp_hdr);
2335 if (uh == NULL) {
2336 os_log(wake_packet_log_handle, "%s: pkt with unassigned UDP header from %s",
2337 __func__, IF_XNAME(ifp));
2338 if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
2339 goto failed;
2340 }
2341 npi.npi_local_port = uh->uh_dport;
2342 npi.npi_foreign_port = uh->uh_sport;
2343
2344 /*
2345 * Defer matching of UDP NAT traversal to ip_input
2346 * (assumes IKE uses sockets)
2347 */
2348 if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
2349 ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
2350 if_ports_used_stats.ifpu_deferred_isakmp_natt_wake_pkt += 1;
2351 if (net_wake_pkt_debug > 0) {
2352 net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
2353 }
2354 return;
2355 }
2356 break;
2357 }
2358 case IPPROTO_ESP: {
2359 /*
2360 * Let the ESP layer handle the wake packet
2361 */
2362 if_ports_used_stats.ifpu_esp_wake_pkt += 1;
2363 npi.npi_flags |= NPIF_ESP;
2364 if (net_wake_pkt_debug > 0) {
2365 net_port_info_log_npi("defer ESP matching", &npi);
2366 }
2367 return;
2368 }
2369 default:
2370 if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;
2371
2372 os_log(wake_packet_log_handle, "%s: unexpected IP protocol %u from %s",
2373 __func__, pkt->pkt_flow_ip_proto, IF_XNAME(ifp));
2374 goto failed;
2375 }
2376
2377 found = net_port_info_find_match(&npi);
2378
2379 failed:
2380 if (__improbable(if_is_lpw_enabled(ifp))) {
2381 npi.npi_flags |= NPIF_LPW;
2382
2383 if (found && (npi.npi_flags & NPIF_CONNECTION_IDLE)) {
2384 os_log(wake_packet_log_handle, "if_ports_used_match_pkt: idle connection in LPW on %s",
2385 IF_XNAME(ifp));
2386
2387 if_ports_used_stats.ifpu_lpw_connection_idle_wake++;
2388 } else {
2389 os_log(wake_packet_log_handle, "if_ports_used_match_pkt: non-idle connection in LPW on %s",
2390 IF_XNAME(ifp));
2391
2392 if_ports_used_stats.ifpu_lpw_not_idle_wake++;
2393 }
2394 }
2395
2396 if (found) {
2397 if_notify_wake_packet(ifp, &npi,
2398 pkt_total_len, pkt_data_len, pkt_control_flags);
2399 } else {
2400 if_notify_unattributed_wake_pkt(ifp, pkt, &npi,
2401 pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
2402 }
2403 }
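
/*
 * Illustrative note (not compiled): unlike the mbuf path above, the Skywalk
 * path does not re-parse the headers; the flowswitch classifier has already
 * filled in the pkt_flow_* metadata, so the lengths come straight from
 * those fields. The helper name is hypothetical.
 */
#if 0
static void
pkt_lengths_from_flow(const struct __kern_packet *pkt,
    uint32_t *total_len, uint32_t *data_len)
{
	/* L3 header + L4 header + payload, as classified by the flowswitch */
	*total_len = pkt->pkt_flow_ip_hlen +
	    pkt->pkt_flow_tcp_hlen + pkt->pkt_flow_ulen;
	*data_len = pkt->pkt_flow_ulen;
}
#endif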
2404 #endif /* SKYWALK */
2405
2406 int
2407 sysctl_last_attributed_wake_event SYSCTL_HANDLER_ARGS
2408 {
2409 #pragma unused(oidp, arg1, arg2)
2410 struct net_port_info_wake_event net_port_info_wake_event = { 0 };
2411 size_t len = sizeof(net_port_info_wake_event);
2412 int error;
2413
2414 lck_mtx_lock(&net_port_entry_head_lock);
2415 if (last_wake_pkt_event.npi_wp_code == KEV_POWER_WAKE_PACKET) {
2416 memcpy(&net_port_info_wake_event, &last_wake_pkt_event.npi_ev_wake_pkt_attributed, len);
2417 }
2418 lck_mtx_unlock(&net_port_entry_head_lock);
2419
2420 if (req->oldptr != 0) {
2421 len = MIN(req->oldlen, len);
2422 }
2423 error = SYSCTL_OUT(req, &net_port_info_wake_event, len);
2424
2425 return error;
2426 }
2427
2428 int
2429 sysctl_last_unattributed_wake_event SYSCTL_HANDLER_ARGS
2430 {
2431 #pragma unused(oidp, arg1, arg2)
2432 struct net_port_info_una_wake_event net_port_info_una_wake_event = { 0 };
2433 size_t len = sizeof(net_port_info_una_wake_event);
2434 int error;
2435
2436 lck_mtx_lock(&net_port_entry_head_lock);
2437 if (last_wake_pkt_event.npi_wp_code == KEV_POWER_UNATTRIBUTED_WAKE) {
2438 memcpy(&net_port_info_una_wake_event, &last_wake_pkt_event.npi_ev_wake_pkt_unattributed, len);
2439 }
2440 lck_mtx_unlock(&net_port_entry_head_lock);
2441
2442 if (req->oldptr != 0) {
2443 len = MIN(req->oldlen, len);
2444 }
2445 error = SYSCTL_OUT(req, &net_port_info_una_wake_event, len);
2446
2447 return error;
2448 }
2449
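/*
 * Usage sketch (user space, not compiled here): reading the last recorded
 * wake event through the two handlers above. The OID leaf name below is an
 * assumption for illustration; only the net.link.generic.system.port_used
 * node is established in this file, and the handlers are registered
 * outside this excerpt.
 */
#if 0
#include <sys/sysctl.h>

static int
read_last_attributed_wake(struct net_port_info_wake_event *ev)
{
	size_t len = sizeof(*ev);

	/* hypothetical leaf name under the port_used node */
	return sysctlbyname(
		"net.link.generic.system.port_used.last_attributed_wake_event",
		ev, &len, NULL, 0);
}
#endif
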
2450 /*
2451 * Pass the interface family of the interface that caused the wake
2452 */
2453 int
2454 sysctl_wake_pkt_event_notify SYSCTL_HANDLER_ARGS
2455 {
2456 #pragma unused(oidp, arg1, arg2)
2457 long long val = 0;
2458 int error = 0;
2459 int changed = 0;
2460 uint32_t if_family = 0;
2461
2462 error = sysctl_io_number(req, val, sizeof(val), &val, &changed);
2463 if (error != 0 || req->newptr == 0 || changed == 0) {
2464 return error;
2465 }
2466
2467 if (val < 0 || val > UINT32_MAX) {
2468 return EINVAL;
2469 }
2470 if_family = (uint32_t)val;
2471
2472 if (!IOCurrentTaskHasEntitlement(WAKE_PKT_EVENT_CONTROL_ENTITLEMENT)) {
2473 return EPERM;
2474 }
2475
2476 os_log(wake_packet_log_handle, "sysctl_wake_pkt_event_notify proc %s:%u val %u last_wake_phy_if_delay_wake_pkt %d last_wake_phy_if_family %u delay_wake_pkt_event %d",
2477 proc_best_name(current_proc()), proc_selfpid(),
2478 if_family, last_wake_phy_if_delay_wake_pkt, last_wake_phy_if_family,
2479 delay_wake_pkt_event.npi_wp_code);
2480 #if (DEBUG || DEVELOPMENT)
2481 if (if_ports_used_verbose > 0) {
2482 if (delay_wake_pkt_event.npi_wp_code == KEV_POWER_WAKE_PACKET) {
2483 net_port_info_log_wake_event("sysctl_wake_pkt_event_notify", &delay_wake_pkt_event.npi_ev_wake_pkt_attributed);
2484 } else if (delay_wake_pkt_event.npi_wp_code == KEV_POWER_UNATTRIBUTED_WAKE) {
2485 net_port_info_log_una_wake_event("sysctl_wake_pkt_event_notify", &delay_wake_pkt_event.npi_ev_wake_pkt_unattributed);
2486 }
2487 }
2488 #endif /* (DEBUG || DEVELOPMENT) */
2489
2490 lck_mtx_lock(&net_port_entry_head_lock);
2491
2492 if (last_wake_phy_if_delay_wake_pkt == true && val == last_wake_phy_if_family) {
2493 last_wake_phy_if_delay_wake_pkt = false;
2494
2495 if (delay_wake_pkt_event.npi_wp_code == KEV_POWER_WAKE_PACKET) {
2496 if (is_attributed_wake_already_notified(NULL) == false) {
2497 deliver_attributed_wake_packet_event(&delay_wake_pkt_event.npi_ev_wake_pkt_attributed);
2498 } else {
2499 os_log(wake_packet_log_handle, "sysctl_wake_pkt_event_notify attributed_wake_already_notified");
2500 }
2501 } else if (delay_wake_pkt_event.npi_wp_code == KEV_POWER_UNATTRIBUTED_WAKE) {
2502 if (is_unattributed_wake_already_notified(NULL) == false) {
2503 deliver_unattributed_wake_packet_event(&delay_wake_pkt_event.npi_ev_wake_pkt_unattributed);
2504 } else {
2505 os_log(wake_packet_log_handle, "sysctl_wake_pkt_event_notify unattributed_wake_already_notified");
2506 }
2507 } else {
2508 if_ports_used_stats.ifpu_wake_pkt_event_notify_in_vain += 1;
2509 os_log(wake_packet_log_handle, "sysctl_wake_pkt_event_notify bad npi_wp_code");
2510 }
2511 } else {
2512 if_ports_used_stats.ifpu_wake_pkt_event_notify_in_vain += 1;
2513 os_log(wake_packet_log_handle, "sysctl_wake_pkt_event_notify in vain");
2514 }
2515 lck_mtx_unlock(&net_port_entry_head_lock);
2516
2517 return 0;
2518 }
2519
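/*
 * Usage sketch (user space, not compiled here): an entitled daemon releases
 * a delayed wake event by writing the interface family of the physical wake
 * interface. The leaf name is an assumption; the handler above expects a
 * long long and requires WAKE_PKT_EVENT_CONTROL_ENTITLEMENT.
 */
#if 0
#include <sys/sysctl.h>

static int
notify_wake_pkt_event(uint32_t if_family)
{
	long long val = if_family;

	/* hypothetical leaf name under the port_used node */
	return sysctlbyname(
		"net.link.generic.system.port_used.wake_pkt_event_notify",
		NULL, NULL, &val, sizeof(val));
}
#endif
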
2520 static void
2521 if_set_delay_wake_flags(ifnet_t ifp, bool delay)
2522 {
2523 if (delay) {
2524 if_set_xflags(ifp, IFXF_DELAYWAKEPKTEVENT);
2525 if_clear_xflags(ifp, IFXF_INBAND_WAKE_PKT_TAGGING);
2526 } else {
2527 if_clear_xflags(ifp, IFXF_DELAYWAKEPKTEVENT);
2528 if_set_xflags(ifp, IFXF_INBAND_WAKE_PKT_TAGGING);
2529 }
2530 }
2531
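/*
 * Illustrative sketch (not compiled): if_set_delay_wake_flags() keeps
 * IFXF_DELAYWAKEPKTEVENT and IFXF_INBAND_WAKE_PKT_TAGGING mutually
 * exclusive, so exactly one of the two is set at any time. This is the
 * same consistency test the handler below uses before resetting the flags.
 */
#if 0
static bool
delay_wake_flags_consistent(const struct ifnet *ifp, bool delay)
{
	const uint32_t flags =
	    IFXF_INBAND_WAKE_PKT_TAGGING | IFXF_DELAYWAKEPKTEVENT;
	const uint32_t expected = delay ?
	    IFXF_DELAYWAKEPKTEVENT : IFXF_INBAND_WAKE_PKT_TAGGING;

	return (ifp->if_xflags & flags) == expected;
}
#endif
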
2532 int
2533 sysctl_wake_pkt_event_delay_if_families SYSCTL_HANDLER_ARGS
2534 {
2535 #pragma unused(oidp, arg1, arg2)
2536 long long val = npi_wake_packet_event_delay_if_families;
2537 int error;
2538 int changed = 0;
2539 uint32_t old_value = npi_wake_packet_event_delay_if_families;
2540
2541 error = sysctl_io_number(req, val, sizeof(val), &val, &changed);
2542 if (error != 0 || req->newptr == 0 || changed == 0) {
2543 return error;
2544 }
2545 if (!IOCurrentTaskHasEntitlement(WAKE_PKT_EVENT_CONTROL_ENTITLEMENT)) {
2546 return EPERM;
2547 }
2548 if (val < 0 || val > UINT32_MAX) {
2549 return EINVAL;
2550 }
2551
2552 /* The value is a bitmap of the interface families to delay */
2553 old_value = npi_wake_packet_event_delay_if_families;
2554 npi_wake_packet_event_delay_if_families = (uint32_t)val;
2555
2556 /* Need to reevalute the capability of doing in-band wake packet tagging */
2557 if (npi_wake_packet_event_delay_if_families != 0) {
2558 uint32_t count, i;
2559 ifnet_t *__counted_by(count) ifp_list;
2560
2561 error = ifnet_list_get_all(IFNET_FAMILY_ANY, &ifp_list, &count);
2562 if (error != 0) {
2563 os_log_error(wake_packet_log_handle,
2564 "%s: ifnet_list_get_all() failed %d",
2565 __func__, error);
2566 npi_wake_packet_event_delay_if_families = old_value;
2567 return error;
2568 }
2569 for (i = 0; i < count; i++) {
2570 ifnet_t ifp = ifp_list[i];
2571 bool delay = is_wake_pkt_event_delay(ifp->if_family);
2572 const uint32_t flags = IFXF_INBAND_WAKE_PKT_TAGGING | IFXF_DELAYWAKEPKTEVENT;
2573
2574 if ((delay && (ifp->if_xflags & flags) != IFXF_DELAYWAKEPKTEVENT) ||
2575 (!delay && (ifp->if_xflags & flags) != IFXF_INBAND_WAKE_PKT_TAGGING)) {
2576 if_set_delay_wake_flags(ifp, delay);
2577
2578 if (if_ports_used_verbose || ifp->if_family == IFNET_FAMILY_CELLULAR) {
2579 os_log(wake_packet_log_handle, "interface %s reset INBAND_WAKE_PKT_TAGGING %d DELAYWAKEPKTEVENT %d",
2580 ifp->if_xname,
2581 ifp->if_xflags & IFXF_INBAND_WAKE_PKT_TAGGING ? 1 : 0,
2582 ifp->if_xflags & IFXF_DELAYWAKEPKTEVENT ? 1 : 0);
2583 }
2584 }
2585 }
2586 ifnet_list_free_counted_by(ifp_list, count);
2587 }
2588
2589 os_log(wake_packet_log_handle, "sysctl_wake_pkt_event_delay_if_families proc %s:%u npi_wake_packet_event_delay_if_families 0x%x -> 0x%x",
2590 proc_best_name(current_proc()), proc_selfpid(),
2591 old_value, npi_wake_packet_event_delay_if_families);
2592
2593
2594 return 0;
2595 }
2596
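/*
 * Illustrative sketch (not compiled): is_wake_pkt_event_delay() is defined
 * elsewhere; a plausible reading of the bitmap comment above is a per
 * interface-family bit test, sketched here purely as an assumption.
 */
#if 0
static bool
is_wake_pkt_event_delay_sketch(uint32_t if_family)
{
	if (if_family >= 32) {
		return false;   /* a 32-bit bitmap covers families 0..31 */
	}
	return (npi_wake_packet_event_delay_if_families &
	       (1u << if_family)) != 0;
}
#endif
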
2597 void
2598 init_inband_wake_pkt_tagging_for_family(struct ifnet *ifp)
2599 {
2600 bool delay = is_wake_pkt_event_delay(ifp->if_family);
2601
2602 if_set_delay_wake_flags(ifp, delay);
2603
2604 if (if_ports_used_verbose || ifp->if_family == IFNET_FAMILY_CELLULAR) {
2605 os_log(wake_packet_log_handle, "interface %s initialized INBAND_WAKE_PKT_TAGGING %d DELAYWAKEPKTEVENT %d",
2606 ifp->if_xname,
2607 ifp->if_xflags & IFXF_INBAND_WAKE_PKT_TAGGING ? 1 : 0,
2608 ifp->if_xflags & IFXF_DELAYWAKEPKTEVENT ? 1 : 0);
2609 }
2610 }
2611
2612 #if (DEBUG || DEVELOPMENT)
2613
2614 static int
2615 sysctl_use_fake_lpw SYSCTL_HANDLER_ARGS
2616 {
2617 #pragma unused(arg1, arg2)
2618 int error = 0;
2619 int old_value = use_fake_lpw;
2620 int new_value = *(int *)oidp->oid_arg1;
2621
2622 error = sysctl_handle_int(oidp, &new_value, 0, req);
2623 if (error == 0) {
2624 *(int *)oidp->oid_arg1 = new_value;
2625
2626 if (new_value != old_value) {
2627 os_log(wake_packet_log_handle, "use_fake_lpw %d", new_value);
2628 }
2629 }
2630 return error;
2631 }
2632
2633 static int
2634 sysctl_mark_wake_packet_port SYSCTL_HANDLER_ARGS
2635 {
2636 #pragma unused(arg1, arg2)
2637 int error = 0;
2638 int new_value = *(int *)oidp->oid_arg1;
2639
2640 error = sysctl_handle_int(oidp, &new_value, 0, req);
2641 if (error == 0) {
2642 if (new_value < 0 || new_value >= UINT16_MAX) {
2643 error = EINVAL;
2644 goto done;
2645 }
2646 *(int *)oidp->oid_arg1 = new_value;
2647 }
2648 done:
2649 return error;
2650 }
2651
2652 static int
2653 sysctl_mark_wake_packet_if SYSCTL_HANDLER_ARGS
2654 {
2655 #pragma unused(arg1, arg2)
2656 int error = 0;
2657 char new_value[IFNAMSIZ] = { 0 };
2658 int changed = 0;
2659
2660 strbufcpy(new_value, IFNAMSIZ, mark_wake_packet_if, IFNAMSIZ);
2661 error = sysctl_io_string(req, new_value, IFNAMSIZ, 0, &changed);
2662 if (error == 0) {
2663 strbufcpy(mark_wake_packet_if, IFNAMSIZ, new_value, IFNAMSIZ);
2664 }
2665
2666 return error;
2667 }
2668
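/*
 * Usage sketch (user space, not compiled here): configuring the DEBUG /
 * DEVELOPMENT fake wake packet matching consumed by check_wake_mbuf() and
 * check_wake_pkt() below. The leaf names mirror the handler names above
 * but are assumptions; the exact OIDs are registered outside this excerpt.
 */
#if 0
#include <sys/sysctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <string.h>

static int
setup_fake_wake_match(const char *ifname, int local_port)
{
	char name[IFNAMSIZ];
	int ipproto = IPPROTO_TCP;

	strlcpy(name, ifname, sizeof(name));

	/* hypothetical leaf names under the port_used node */
	if (sysctlbyname("net.link.generic.system.port_used.mark_wake_packet_if",
	    NULL, NULL, name, strlen(name) + 1) != 0) {
		return -1;
	}
	if (sysctlbyname("net.link.generic.system.port_used.mark_wake_packet_ipproto",
	    NULL, NULL, &ipproto, sizeof(ipproto)) != 0) {
		return -1;
	}
	return sysctlbyname("net.link.generic.system.port_used.mark_wake_packet_local_port",
	           NULL, NULL, &local_port, sizeof(local_port));
}
#endif
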
2669 bool
2670 check_wake_mbuf(ifnet_t ifp, protocol_family_t protocol_family, mbuf_ref_t m)
2671 {
2672 uint8_t ipproto = 0;
2673 size_t offset = 0;
2674
2675 /* The protocol and interface must both be specified */
2676 if (mark_wake_packet_ipproto == 0 || mark_wake_packet_if[0] == 0) {
2677 return false;
2678 }
2679 /* The interface must match */
2680 if (strlcmp(mark_wake_packet_if, IF_XNAME(ifp), IFNAMSIZ) != 0) {
2681 return false;
2682 }
2683 /* The protocol must match */
2684 if (protocol_family == PF_INET6) {
2685 struct ip6_hdr ip6;
2686
2687 if ((size_t)(m)->m_pkthdr.len < sizeof(struct ip6_hdr)) {
2688 os_log(wake_packet_log_handle, "check_wake_mbuf: IP6 too short");
2689 return false;
2690 }
2691 mbuf_copydata(m, 0, sizeof(struct ip6_hdr), &ip6);
2692
2693 if ((ipproto = ip6.ip6_nxt) != mark_wake_packet_ipproto) {
2694 return false;
2695 }
2696 offset = sizeof(struct ip6_hdr);
2697 } else if (protocol_family == PF_INET) {
2698 struct ip ip;
2699
2700 if ((size_t)(m)->m_pkthdr.len < sizeof(struct ip)) {
2701 os_log(wake_packet_log_handle, "check_wake_mbuf: IP too short");
2702 return false;
2703 }
2704 mbuf_copydata(m, 0, sizeof(struct ip), &ip);
2705
2706 if ((ipproto = ip.ip_p) != mark_wake_packet_ipproto) {
2707 return false;
2708 }
2709 offset = sizeof(struct ip);
2710 }
2711
2712 /* Check the ports for TCP and UDP */
2713 if (ipproto == IPPROTO_TCP) {
2714 struct tcphdr th;
2715
2716 if ((size_t)(m)->m_pkthdr.len < offset + sizeof(struct tcphdr)) {
2717 os_log(wake_packet_log_handle, "check_wake_mbuf: TCP too short");
2718 return false;
2719 }
2720 mbuf_copydata(m, offset, sizeof(struct tcphdr), &th);
2721
2722 if (mark_wake_packet_local_port != 0 &&
2723 ntohs(th.th_dport) != mark_wake_packet_local_port) {
2724 return false;
2725 }
2726 if (mark_wake_packet_remote_port != 0 &&
2727 ntohs(th.th_sport) != mark_wake_packet_remote_port) {
2728 return false;
2729 }
2730 return true;
2731 } else if (ipproto == IPPROTO_UDP) {
2732 struct udphdr uh;
2733
2734 if ((size_t)(m)->m_pkthdr.len < offset + sizeof(struct udphdr)) {
2735 os_log(wake_packet_log_handle, "check_wake_mbufL UDP too short");
2736 return false;
2737 }
2738 mbuf_copydata(m, offset, sizeof(struct udphdr), &uh);
2739
2740 if (mark_wake_packet_local_port != 0 &&
2741 ntohs(uh.uh_dport) != mark_wake_packet_local_port) {
2742 return false;
2743 }
2744 if (mark_wake_packet_remote_port != 0 &&
2745 ntohs(uh.uh_sport) != mark_wake_packet_remote_port) {
2746 return false;
2747 }
2748 return true;
2749 }
2750
2751 return ipproto == mark_wake_packet_ipproto;
2752 }
2753
2754 bool
2755 check_wake_pkt(ifnet_t ifp __unused, struct __kern_packet *pkt)
2756 {
2757 /* The protocol and interface must both be specified */
2758 if (mark_wake_packet_ipproto == 0 || mark_wake_packet_if[0] == 0) {
2759 return false;
2760 }
2761 /* The interface must match */
2762 if (strlcmp(mark_wake_packet_if, IF_XNAME(ifp), IFNAMSIZ) != 0) {
2763 return false;
2764 }
2765 /* Cannot deal with fragments */
2766 if (pkt->pkt_flow_ip_is_frag && !pkt->pkt_flow_ip_is_first_frag) {
2767 return false;
2768 }
2769 /* Check the ports for TCP and UDP */
2770 if (pkt->pkt_flow_ip_proto == IPPROTO_TCP) {
2771 struct tcphdr * __single th = __unsafe_forge_single(struct tcphdr *, pkt->pkt_flow_tcp_hdr);
2772 if (th == NULL) {
2773 return false;
2774 }
2775 if (mark_wake_packet_local_port != 0 &&
2776 ntohs(th->th_dport) != mark_wake_packet_local_port) {
2777 return false;
2778 }
2779 if (mark_wake_packet_remote_port != 0 &&
2780 ntohs(th->th_sport) != mark_wake_packet_remote_port) {
2781 return false;
2782 }
2783 return true;
2784 } else if (pkt->pkt_flow_ip_proto == IPPROTO_UDP) {
2785 struct udphdr * __single uh = __unsafe_forge_single(struct udphdr *, pkt->pkt_flow_udp_hdr);
2786 if (uh == NULL) {
2787 return false;
2788 }
2789 if (mark_wake_packet_local_port != 0 &&
2790 ntohs(uh->uh_dport) != mark_wake_packet_local_port) {
2791 return false;
2792 }
2793 if (mark_wake_packet_remote_port != 0 &&
2794 ntohs(uh->uh_sport) != mark_wake_packet_remote_port) {
2795 return false;
2796 }
2797 }
2798 return pkt->pkt_flow_ip_proto == mark_wake_packet_ipproto;
2799 }
2800
2801 #endif /* (DEBUG || DEVELOPMENT) */
2802