1 /*
2 * Copyright (c) 2017-2023 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/types.h>
30 #include <sys/sysctl.h>
31 #include <sys/time.h>
32 #include <sys/mcache.h>
33 #include <sys/malloc.h>
34 #include <sys/kauth.h>
35 #include <sys/kern_event.h>
36 #include <sys/bitstring.h>
37 #include <sys/priv.h>
38 #include <sys/proc.h>
39 #include <sys/protosw.h>
40 #include <sys/socket.h>
41
42 #include <kern/locks.h>
43 #include <kern/zalloc.h>
44
45 #include <libkern/libkern.h>
46
47 #include <net/kpi_interface.h>
48 #include <net/if_var.h>
49 #include <net/if_ports_used.h>
50
51 #include <netinet/in_pcb.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip6.h>
54 #include <netinet/tcp_var.h>
55 #include <netinet/tcp_fsm.h>
56 #include <netinet/udp.h>
57
58 #if SKYWALK
59 #include <skywalk/os_skywalk_private.h>
60 #include <skywalk/nexus/flowswitch/flow/flow_var.h>
61 #include <skywalk/namespace/netns.h>
62 #endif /* SKYWALK */
63
64 #include <stdbool.h>
65
66 #include <os/log.h>
67
68 #define ESP_HDR_SIZE 4
69 #define PORT_ISAKMP 500
70 #define PORT_ISAKMP_NATT 4500 /* rfc3948 */
71
72 #define IF_XNAME(ifp) ((ifp) != NULL ? (ifp)->if_xname : "")
73
74 extern bool IOPMCopySleepWakeUUIDKey(char *buffer, size_t buf_len);
75
76 SYSCTL_DECL(_net_link_generic_system);
77
78 SYSCTL_NODE(_net_link_generic_system, OID_AUTO, port_used,
79 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "if port used");
80
81 struct if_ports_used_stats if_ports_used_stats = {};
82 static int sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS;
83 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, stats,
84 CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
85 sysctl_if_ports_used_stats, "S,struct if_ports_used_stats", "");
86
87 static uuid_t current_wakeuuid;
88 SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, current_wakeuuid,
89 CTLFLAG_RD | CTLFLAG_LOCKED,
90 current_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");
91
92 static int sysctl_net_port_info_list SYSCTL_HANDLER_ARGS;
93 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, list,
94 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
95 sysctl_net_port_info_list, "S,xnpigen", "");
96
97 static int use_test_wakeuuid = 0;
98 static uuid_t test_wakeuuid;
99
100 #if (DEVELOPMENT || DEBUG)
101 SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, use_test_wakeuuid,
102 CTLFLAG_RW | CTLFLAG_LOCKED,
103 &use_test_wakeuuid, 0, "");
104
105 int sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS;
106 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, new_test_wakeuuid,
107 CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
108 sysctl_new_test_wakeuuid, "S,uuid_t", "");
109
110 int sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS;
111 SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, clear_test_wakeuuid,
112 CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
113 sysctl_clear_test_wakeuuid, "S,uuid_t", "");
114
115 SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, test_wakeuuid,
116 CTLFLAG_RD | CTLFLAG_LOCKED,
117 test_wakeuuid, sizeof(uuid_t), "S,uuid_t", "");
118 #endif /* (DEVELOPMENT || DEBUG) */
119
120 static int sysctl_get_ports_used SYSCTL_HANDLER_ARGS;
121 SYSCTL_NODE(_net_link_generic_system, OID_AUTO, get_ports_used,
122 CTLFLAG_RD | CTLFLAG_LOCKED,
123 sysctl_get_ports_used, "");
124
125 static int if_ports_used_verbose = 0;
126 SYSCTL_INT(_net_link_generic_system_port_used, OID_AUTO, verbose,
127 CTLFLAG_RW | CTLFLAG_LOCKED,
128 &if_ports_used_verbose, 0, "");
129
130 struct timeval wakeuuid_not_set_last_time;
131 int sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS;
132 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
133 wakeuuid_not_set_last_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
134 0, 0, sysctl_wakeuuid_not_set_last_time, "S,timeval", "");
135
136 char wakeuuid_not_set_last_if[IFXNAMSIZ];
137 int sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS;
138 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
139 wakeuuid_not_set_last_if, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
140 0, 0, sysctl_wakeuuid_not_set_last_if, "A", "");
141
142 struct timeval wakeuuid_last_update_time;
143 int sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS;
144 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
145 wakeuuid_last_update_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
146 0, 0, sysctl_wakeuuid_last_update_time, "S,timeval", "");
147
148 struct net_port_info_wake_event last_attributed_wake_event;
149 int sysctl_last_attributed_wake_event SYSCTL_HANDLER_ARGS;
150 static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO,
151 last_attributed_wake_event, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
152 0, 0, sysctl_last_attributed_wake_event, "S,net_port_info_wake_event", "");
153
154 static bool last_wake_phy_if_set = false;
155 static char last_wake_phy_if_name[IFNAMSIZ]; /* name + unit */
156 static uint32_t last_wake_phy_if_family;
157 static uint32_t last_wake_phy_if_subfamily;
158 static uint32_t last_wake_phy_if_functional_type;
159
160
161 static bool has_notified_wake_pkt = false;
162 static bool has_notified_unattributed_wake = false;
163
164 static LCK_GRP_DECLARE(net_port_entry_head_lock_group, "net port entry lock");
165 static LCK_MTX_DECLARE(net_port_entry_head_lock, &net_port_entry_head_lock_group);
166
167
/*
 * One tracked local port.  Each entry is linked both on the global
 * SLIST (net_port_entry_list) and on one hash bucket keyed on the
 * local port (see NPE_HASH_HEAD()).
 */
struct net_port_entry {
	SLIST_ENTRY(net_port_entry) npe_list_next;      /* global list linkage */
	TAILQ_ENTRY(net_port_entry) npe_hash_next;      /* hash bucket linkage */
	struct net_port_info npe_npi;                   /* the recorded port info */
};
173
174 static KALLOC_TYPE_DEFINE(net_port_entry_zone, struct net_port_entry, NET_KT_DEFAULT);
175
176 static SLIST_HEAD(net_port_entry_list, net_port_entry) net_port_entry_list =
177 SLIST_HEAD_INITIALIZER(&net_port_entry_list);
178
179 struct timeval wakeuiid_last_check;
180
181
182 #if (DEBUG | DEVELOPMENT)
183 static int64_t npi_search_list_total = 0;
184 SYSCTL_QUAD(_net_link_generic_system_port_used, OID_AUTO, npi_search_list_total,
185 CTLFLAG_RD | CTLFLAG_LOCKED,
186 &npi_search_list_total, "");
187
188 static int64_t npi_search_list_max = 0;
189 SYSCTL_QUAD(_net_link_generic_system_port_used, OID_AUTO, npi_search_list_max,
190 CTLFLAG_RD | CTLFLAG_LOCKED,
191 &npi_search_list_max, "");
192 #endif /* (DEBUG | DEVELOPMENT) */
193
194 /*
195 * Hashing of the net_port_entry list is based on the local port
196 *
197 * The hash masks uses the least significant bits so we have to use host byte order
198 * when applying the mask because the LSB have more entropy that the MSB (most local ports
199 * are in the high dynamic port range)
200 */
201 #define NPE_HASH_BUCKET_COUNT 32
202 #define NPE_HASH_MASK (NPE_HASH_BUCKET_COUNT - 1)
203 #define NPE_HASH_VAL(_lport) (ntohs(_lport) & NPE_HASH_MASK)
204 #define NPE_HASH_HEAD(_lport) (&net_port_entry_hash_table[NPE_HASH_VAL(_lport)])
205
206 static TAILQ_HEAD(net_port_entry_hash_table, net_port_entry) * net_port_entry_hash_table = NULL;
207
208 /*
209 * Initialize IPv4 source address hash table.
210 */
211 void
if_ports_used_init(void)212 if_ports_used_init(void)
213 {
214 if (net_port_entry_hash_table != NULL) {
215 return;
216 }
217
218 net_port_entry_hash_table = zalloc_permanent(
219 NPE_HASH_BUCKET_COUNT * sizeof(*net_port_entry_hash_table),
220 ZALIGN_PTR);
221 }
222
/*
 * Drain the global net_port_entry list and the hash buckets, returning
 * every entry to its zone, then reset the entry count and bump the
 * generation counter.
 *
 * Called with net_port_entry_head_lock held (asserted below), when a
 * new wake UUID is observed.
 */
static void
net_port_entry_list_clear(void)
{
	struct net_port_entry *npe;

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);

	while ((npe = SLIST_FIRST(&net_port_entry_list)) != NULL) {
		SLIST_REMOVE_HEAD(&net_port_entry_list, npe_list_next);
		/* Each entry is also on exactly one hash bucket */
		TAILQ_REMOVE(NPE_HASH_HEAD(npe->npe_npi.npi_local_port), npe, npe_hash_next);

		zfree(net_port_entry_zone, npe);
	}

	/* Every bucket must be empty once the list is drained */
	for (int i = 0; i < NPE_HASH_BUCKET_COUNT; i++) {
		VERIFY(TAILQ_EMPTY(&net_port_entry_hash_table[i]));
	}

	if_ports_used_stats.ifpu_npe_count = 0;
	if_ports_used_stats.ifpu_wakeuid_gen++;
}
244
245 static bool
get_test_wake_uuid(uuid_string_t wakeuuid_str,size_t len)246 get_test_wake_uuid(uuid_string_t wakeuuid_str, size_t len)
247 {
248 if (__improbable(use_test_wakeuuid)) {
249 if (!uuid_is_null(test_wakeuuid)) {
250 if (wakeuuid_str != NULL && len != 0) {
251 uuid_unparse(test_wakeuuid, wakeuuid_str);
252 }
253 return true;
254 } else {
255 return false;
256 }
257 } else {
258 return false;
259 }
260 }
261
/*
 * Return true when a wake UUID is currently set, either the test
 * UUID (DEVELOPMENT/DEBUG) or the system SleepWakeUUID.
 */
static bool
is_wakeuuid_set(void)
{
	/*
	 * IOPMCopySleepWakeUUIDKey() tells if SleepWakeUUID is currently set
	 * That means we are currently in a sleep/wake cycle
	 */
	return get_test_wake_uuid(NULL, 0) || IOPMCopySleepWakeUUIDKey(NULL, 0);
}
271
272 void
if_ports_used_update_wakeuuid(struct ifnet * ifp)273 if_ports_used_update_wakeuuid(struct ifnet *ifp)
274 {
275 uuid_t wakeuuid;
276 bool wakeuuid_is_set = false;
277 bool updated = false;
278 uuid_string_t wakeuuid_str;
279
280 uuid_clear(wakeuuid);
281
282 if (__improbable(use_test_wakeuuid)) {
283 wakeuuid_is_set = get_test_wake_uuid(wakeuuid_str,
284 sizeof(wakeuuid_str));
285 } else {
286 wakeuuid_is_set = IOPMCopySleepWakeUUIDKey(wakeuuid_str,
287 sizeof(wakeuuid_str));
288 }
289
290 if (wakeuuid_is_set) {
291 if (uuid_parse(wakeuuid_str, wakeuuid) != 0) {
292 os_log(OS_LOG_DEFAULT,
293 "%s: IOPMCopySleepWakeUUIDKey got bad value %s\n",
294 __func__, wakeuuid_str);
295 wakeuuid_is_set = false;
296 }
297 }
298
299 if (!wakeuuid_is_set) {
300 if (ifp != NULL) {
301 if (if_ports_used_verbose > 0) {
302 os_log_info(OS_LOG_DEFAULT,
303 "%s: SleepWakeUUID not set, "
304 "don't update the port list for %s\n",
305 __func__, ifp != NULL ? if_name(ifp) : "");
306 }
307 if_ports_used_stats.ifpu_wakeuuid_not_set_count += 1;
308 microtime(&wakeuuid_not_set_last_time);
309 strlcpy(wakeuuid_not_set_last_if, if_name(ifp),
310 sizeof(wakeuuid_not_set_last_if));
311 }
312 return;
313 }
314
315 lck_mtx_lock(&net_port_entry_head_lock);
316 if (uuid_compare(wakeuuid, current_wakeuuid) != 0) {
317 net_port_entry_list_clear();
318 uuid_copy(current_wakeuuid, wakeuuid);
319 microtime(&wakeuuid_last_update_time);
320 updated = true;
321
322 has_notified_wake_pkt = false;
323 has_notified_unattributed_wake = false;
324
325 memset(&last_attributed_wake_event, 0, sizeof(last_attributed_wake_event));
326
327 last_wake_phy_if_set = false;
328 memset(&last_wake_phy_if_name, 0, sizeof(last_wake_phy_if_name));
329 last_wake_phy_if_family = IFRTYPE_FAMILY_ANY;
330 last_wake_phy_if_subfamily = IFRTYPE_SUBFAMILY_ANY;
331 last_wake_phy_if_functional_type = IFRTYPE_FUNCTIONAL_UNKNOWN;
332 }
333 /*
334 * Record the time last checked
335 */
336 microuptime(&wakeuiid_last_check);
337 lck_mtx_unlock(&net_port_entry_head_lock);
338
339 if (updated && if_ports_used_verbose > 0) {
340 uuid_string_t uuid_str;
341
342 uuid_unparse(current_wakeuuid, uuid_str);
343 os_log(OS_LOG_DEFAULT, "%s: current wakeuuid %s",
344 __func__, uuid_str);
345 }
346 }
347
348 static bool
net_port_info_equal(const struct net_port_info * x,const struct net_port_info * y)349 net_port_info_equal(const struct net_port_info *x,
350 const struct net_port_info *y)
351 {
352 ASSERT(x != NULL && y != NULL);
353
354 if (x->npi_if_index == y->npi_if_index &&
355 x->npi_local_port == y->npi_local_port &&
356 x->npi_foreign_port == y->npi_foreign_port &&
357 x->npi_owner_pid == y->npi_owner_pid &&
358 x->npi_effective_pid == y->npi_effective_pid &&
359 x->npi_flags == y->npi_flags &&
360 memcmp(&x->npi_local_addr_, &y->npi_local_addr_,
361 sizeof(union in_addr_4_6)) == 0 &&
362 memcmp(&x->npi_foreign_addr_, &y->npi_foreign_addr_,
363 sizeof(union in_addr_4_6)) == 0) {
364 return true;
365 }
366 return false;
367 }
368
369 static bool
net_port_info_has_entry(const struct net_port_info * npi)370 net_port_info_has_entry(const struct net_port_info *npi)
371 {
372 struct net_port_entry *npe;
373 bool found = false;
374 int32_t count = 0;
375
376 LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_OWNED);
377
378 TAILQ_FOREACH(npe, NPE_HASH_HEAD(npi->npi_local_port), npe_hash_next) {
379 count += 1;
380 if (net_port_info_equal(&npe->npe_npi, npi)) {
381 found = true;
382 break;
383 }
384 }
385 if_ports_used_stats.ifpu_npi_hash_search_total += count;
386 if (count > if_ports_used_stats.ifpu_npi_hash_search_max) {
387 if_ports_used_stats.ifpu_npi_hash_search_max = count;
388 }
389
390 return found;
391 }
392
393 static bool
net_port_info_add_entry(const struct net_port_info * npi)394 net_port_info_add_entry(const struct net_port_info *npi)
395 {
396 struct net_port_entry *npe = NULL;
397 uint32_t num = 0;
398 bool entry_added = false;
399
400 ASSERT(npi != NULL);
401
402 if (__improbable(is_wakeuuid_set() == false)) {
403 if_ports_used_stats.ifpu_npi_not_added_no_wakeuuid++;
404 if (if_ports_used_verbose > 0) {
405 os_log(OS_LOG_DEFAULT, "%s: wakeuuid not set not adding "
406 "port: %u flags: 0x%xif: %u pid: %u epid %u",
407 __func__,
408 ntohs(npi->npi_local_port),
409 npi->npi_flags,
410 npi->npi_if_index,
411 npi->npi_owner_pid,
412 npi->npi_effective_pid);
413 }
414 return false;
415 }
416
417 npe = zalloc_flags(net_port_entry_zone, Z_WAITOK | Z_ZERO);
418 if (__improbable(npe == NULL)) {
419 os_log(OS_LOG_DEFAULT, "%s: zalloc() failed for "
420 "port: %u flags: 0x%x if: %u pid: %u epid %u",
421 __func__,
422 ntohs(npi->npi_local_port),
423 npi->npi_flags,
424 npi->npi_if_index,
425 npi->npi_owner_pid,
426 npi->npi_effective_pid);
427 return false;
428 }
429
430 memcpy(&npe->npe_npi, npi, sizeof(npe->npe_npi));
431
432 lck_mtx_lock(&net_port_entry_head_lock);
433
434 if (net_port_info_has_entry(npi) == false) {
435 SLIST_INSERT_HEAD(&net_port_entry_list, npe, npe_list_next);
436 TAILQ_INSERT_HEAD(NPE_HASH_HEAD(npi->npi_local_port), npe, npe_hash_next);
437 num = (uint32_t)if_ports_used_stats.ifpu_npe_count++; /* rollover OK */
438 entry_added = true;
439
440 if (if_ports_used_stats.ifpu_npe_count > if_ports_used_stats.ifpu_npe_max) {
441 if_ports_used_stats.ifpu_npe_max = if_ports_used_stats.ifpu_npe_count;
442 }
443 if_ports_used_stats.ifpu_npe_total++;
444
445 if (if_ports_used_verbose > 1) {
446 os_log(OS_LOG_DEFAULT, "%s: num %u for "
447 "port: %u flags: 0x%x if: %u pid: %u epid %u",
448 __func__,
449 num,
450 ntohs(npi->npi_local_port),
451 npi->npi_flags,
452 npi->npi_if_index,
453 npi->npi_owner_pid,
454 npi->npi_effective_pid);
455 }
456 } else {
457 if_ports_used_stats.ifpu_npe_dup++;
458 if (if_ports_used_verbose > 2) {
459 os_log(OS_LOG_DEFAULT, "%s: already added "
460 "port: %u flags: 0x%x if: %u pid: %u epid %u",
461 __func__,
462 ntohs(npi->npi_local_port),
463 npi->npi_flags,
464 npi->npi_if_index,
465 npi->npi_owner_pid,
466 npi->npi_effective_pid);
467 }
468 }
469
470 lck_mtx_unlock(&net_port_entry_head_lock);
471
472 if (entry_added == false) {
473 zfree(net_port_entry_zone, npe);
474 }
475 return entry_added;
476 }
477
478 #if (DEVELOPMENT || DEBUG)
479 int
480 sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS
481 {
482 #pragma unused(oidp, arg1, arg2)
483 int error = 0;
484
485 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
486 return EPERM;
487 }
488 if (req->oldptr == USER_ADDR_NULL) {
489 req->oldidx = sizeof(uuid_t);
490 return 0;
491 }
492 if (req->newptr != USER_ADDR_NULL) {
493 uuid_generate(test_wakeuuid);
494 if_ports_used_update_wakeuuid(NULL);
495 }
496 error = SYSCTL_OUT(req, test_wakeuuid,
497 MIN(sizeof(uuid_t), req->oldlen));
498
499 return error;
500 }
501
502 int
503 sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS
504 {
505 #pragma unused(oidp, arg1, arg2)
506 int error = 0;
507
508 if (kauth_cred_issuser(kauth_cred_get()) == 0) {
509 return EPERM;
510 }
511 if (req->oldptr == USER_ADDR_NULL) {
512 req->oldidx = sizeof(uuid_t);
513 return 0;
514 }
515 if (req->newptr != USER_ADDR_NULL) {
516 uuid_clear(test_wakeuuid);
517 if_ports_used_update_wakeuuid(NULL);
518 }
519 error = SYSCTL_OUT(req, test_wakeuuid,
520 MIN(sizeof(uuid_t), req->oldlen));
521
522 return error;
523 }
524
525 #endif /* (DEVELOPMENT || DEBUG) */
526
527 static int
sysctl_timeval(struct sysctl_req * req,const struct timeval * tv)528 sysctl_timeval(struct sysctl_req *req, const struct timeval *tv)
529 {
530 if (proc_is64bit(req->p)) {
531 struct user64_timeval tv64 = {};
532
533 tv64.tv_sec = tv->tv_sec;
534 tv64.tv_usec = tv->tv_usec;
535 return SYSCTL_OUT(req, &tv64, sizeof(tv64));
536 } else {
537 struct user32_timeval tv32 = {};
538
539 tv32.tv_sec = (user32_time_t)tv->tv_sec;
540 tv32.tv_usec = tv->tv_usec;
541 return SYSCTL_OUT(req, &tv32, sizeof(tv32));
542 }
543 }
544
/* Report when current_wakeuuid was last changed */
int
sysctl_wakeuuid_last_update_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return sysctl_timeval(req, &wakeuuid_last_update_time);
}
552
/* Report when a wake UUID update was last skipped because none was set */
int
sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return sysctl_timeval(req, &wakeuuid_not_set_last_time);
}
560
/* Report the interface name recorded when the wake UUID was last unset */
int
sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)

	return SYSCTL_OUT(req, &wakeuuid_not_set_last_if, strlen(wakeuuid_not_set_last_if) + 1);
}
568
569 int
570 sysctl_if_ports_used_stats SYSCTL_HANDLER_ARGS
571 {
572 #pragma unused(oidp, arg1, arg2)
573 size_t len = sizeof(struct if_ports_used_stats);
574
575 if (req->oldptr != 0) {
576 len = MIN(req->oldlen, sizeof(struct if_ports_used_stats));
577 }
578 return SYSCTL_OUT(req, &if_ports_used_stats, len);
579 }
580
581 static int
582 sysctl_net_port_info_list SYSCTL_HANDLER_ARGS
583 {
584 #pragma unused(oidp, arg1, arg2)
585 int error = 0;
586 struct xnpigen xnpigen;
587 struct net_port_entry *npe;
588
589 if ((error = priv_check_cred(kauth_cred_get(),
590 PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0)) != 0) {
591 return EPERM;
592 }
593 lck_mtx_lock(&net_port_entry_head_lock);
594
595 if (req->oldptr == USER_ADDR_NULL) {
596 /* Add a 25% cushion */
597 size_t cnt = (size_t)if_ports_used_stats.ifpu_npe_count;
598 cnt += cnt >> 4;
599 req->oldidx = sizeof(struct xnpigen) +
600 cnt * sizeof(struct net_port_info);
601 goto done;
602 }
603
604 memset(&xnpigen, 0, sizeof(struct xnpigen));
605 xnpigen.xng_len = sizeof(struct xnpigen);
606 xnpigen.xng_gen = (uint32_t)if_ports_used_stats.ifpu_wakeuid_gen;
607 uuid_copy(xnpigen.xng_wakeuuid, current_wakeuuid);
608 xnpigen.xng_npi_count = (uint32_t)if_ports_used_stats.ifpu_npe_count;
609 xnpigen.xng_npi_size = sizeof(struct net_port_info);
610 error = SYSCTL_OUT(req, &xnpigen, sizeof(xnpigen));
611 if (error != 0) {
612 printf("%s: SYSCTL_OUT(xnpigen) error %d\n",
613 __func__, error);
614 goto done;
615 }
616
617 SLIST_FOREACH(npe, &net_port_entry_list, npe_list_next) {
618 error = SYSCTL_OUT(req, &npe->npe_npi,
619 sizeof(struct net_port_info));
620 if (error != 0) {
621 printf("%s: SYSCTL_OUT(npi) error %d\n",
622 __func__, error);
623 goto done;
624 }
625 }
626 done:
627 lck_mtx_unlock(&net_port_entry_head_lock);
628
629 return error;
630 }
631
632 /*
633 * Mirror the arguments of ifnet_get_local_ports_extended()
634 * ifindex
635 * protocol
636 * flags
637 */
/*
 * Read-only sysctl wrapping ifnet_get_local_ports_extended(): takes
 * three OID name elements (ifindex, protocol, flags) and copies out a
 * bitmap with one bit per local port in use.
 */
static int
sysctl_get_ports_used SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int *name = (int *)arg1;        /* OID name elements after the node */
	int namelen = arg2;
	int error = 0;
	int idx;
	protocol_family_t protocol;
	u_int32_t flags;
	ifnet_t ifp = NULL;
	u_int8_t *bitfield = NULL;

	/* Read-only: reject any attempt to write */
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}
	/*
	 * 3 is the required number of parameters: ifindex, protocol and flags
	 */
	if (namelen != 3) {
		error = ENOENT;
		goto done;
	}

	/* Size probe */
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = bitstr_size(IP_PORTRANGE_SIZE);
		goto done;
	}
	if (req->oldlen < bitstr_size(IP_PORTRANGE_SIZE)) {
		error = ENOMEM;
		goto done;
	}
	bitfield = (u_int8_t *) kalloc_data(bitstr_size(IP_PORTRANGE_SIZE),
	    Z_WAITOK | Z_ZERO);
	if (bitfield == NULL) {
		error = ENOMEM;
		goto done;
	}

	idx = name[0];
	protocol = name[1];
	flags = name[2];

	/* Resolve the ifindex to an ifnet under the head lock */
	ifnet_head_lock_shared();
	if (IF_INDEX_IN_RANGE(idx)) {
		ifp = ifindex2ifnet[idx];
	}
	ifnet_head_done();

	error = ifnet_get_local_ports_extended(ifp, protocol, flags, bitfield);
	if (error != 0) {
		printf("%s: ifnet_get_local_ports_extended() error %d\n",
		    __func__, error);
		goto done;
	}
	error = SYSCTL_OUT(req, bitfield, bitstr_size(IP_PORTRANGE_SIZE));
done:
	if (bitfield != NULL) {
		kfree_data(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
	}
	return error;
}
701
/*
 * Record the local port of a TCP or UDP inpcb in the wake port list.
 *
 * Returns true when a new entry was added; false when the ifindex does
 * not fit in 16 bits, the protocol is neither TCP nor UDP, or the
 * entry was already present / no wake UUID is set (via
 * net_port_info_add_entry()).
 */
__private_extern__ bool
if_ports_used_add_inpcb(const uint32_t ifindex, const struct inpcb *inp)
{
	struct net_port_info npi = {};
	struct socket *so = inp->inp_socket;

	/* This is unlikely to happen but better be safe than sorry */
	if (ifindex > UINT16_MAX) {
		os_log(OS_LOG_DEFAULT, "%s: ifindex %u too big", __func__, ifindex);
		return false;
	}

	if (ifindex != 0) {
		npi.npi_if_index = (uint16_t)ifindex;
	} else if (inp->inp_last_outifp != NULL) {
		/* No explicit ifindex: fall back to the last output interface */
		npi.npi_if_index = (uint16_t)inp->inp_last_outifp->if_index;
	}
	if (IF_INDEX_IN_RANGE(npi.npi_if_index)) {
		struct ifnet *ifp = ifindex2ifnet[npi.npi_if_index];
		if (ifp != NULL && IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
	}

	npi.npi_flags |= NPIF_SOCKET;

	/* Timestamp with the last wake UUID check time */
	npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
	npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

	if (so->so_options & SO_NOWAKEFROMSLEEP) {
		npi.npi_flags |= NPIF_NOWAKE;
	}

	if (SOCK_PROTO(so) == IPPROTO_TCP) {
		struct tcpcb *tp = intotcpcb(inp);

		npi.npi_flags |= NPIF_TCP;
		if (tp != NULL && tp->t_state == TCPS_LISTEN) {
			npi.npi_flags |= NPIF_LISTEN;
		}
	} else if (SOCK_PROTO(so) == IPPROTO_UDP) {
		npi.npi_flags |= NPIF_UDP;
	} else {
		/* Only TCP and UDP sockets are tracked */
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol %u for inp %p", __func__,
		    SOCK_PROTO(inp->inp_socket), inp);
		return false;
	}

	uuid_copy(npi.npi_flow_uuid, inp->necp_client_uuid);

	npi.npi_local_port = inp->inp_lport;
	npi.npi_foreign_port = inp->inp_fport;

	/*
	 * Take in account IPv4 addresses mapped on IPv6
	 */
	if ((inp->inp_vflag & INP_IPV6) != 0 && (inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
	    (inp->inp_vflag & (INP_IPV6 | INP_IPV4)) == (INP_IPV6 | INP_IPV4)) {
		npi.npi_flags |= NPIF_IPV6 | NPIF_IPV4;
		memcpy(&npi.npi_local_addr_in6,
		    &inp->in6p_laddr, sizeof(struct in6_addr));
	} else if (inp->inp_vflag & INP_IPV4) {
		npi.npi_flags |= NPIF_IPV4;
		npi.npi_local_addr_in = inp->inp_laddr;
		npi.npi_foreign_addr_in = inp->inp_faddr;
	} else {
		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6,
		    &inp->in6p_laddr, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6,
		    &inp->in6p_faddr, sizeof(struct in6_addr));

		/* Clear the embedded scope ID */
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
			npi.npi_local_addr_in6.s6_addr16[1] = 0;
		}
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
			npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
		}
	}

	npi.npi_owner_pid = so->last_pid;

	if (so->last_pid != 0) {
		proc_name(so->last_pid, npi.npi_owner_pname,
		    sizeof(npi.npi_owner_pname));
		uuid_copy(npi.npi_owner_uuid, so->last_uuid);
	}

	if (so->so_flags & SOF_DELEGATED) {
		npi.npi_flags |= NPIF_DELEGATED;
		npi.npi_effective_pid = so->e_pid;
		if (so->e_pid != 0) {
			proc_name(so->e_pid, npi.npi_effective_pname,
			    sizeof(npi.npi_effective_pname));
		}
		uuid_copy(npi.npi_effective_uuid, so->e_uuid);
	} else {
		/* Not delegated: effective process == owner process */
		npi.npi_effective_pid = so->last_pid;
		if (so->last_pid != 0) {
			strlcpy(npi.npi_effective_pname, npi.npi_owner_pname,
			    sizeof(npi.npi_effective_pname));
		}
		uuid_copy(npi.npi_effective_uuid, so->last_uuid);
	}

	return net_port_info_add_entry(&npi);
}
810
811 #if SKYWALK
/*
 * Record the local port of a Skywalk flow in the wake port list.
 *
 * Returns true when a new entry was added; false when the ifindex does
 * not fit in 16 bits, the protocol is neither TCP nor UDP, or the
 * entry was already present / no wake UUID is set (via
 * net_port_info_add_entry()).
 */
__private_extern__ bool
if_ports_used_add_flow_entry(const struct flow_entry *fe, const uint32_t ifindex,
    const struct ns_flow_info *nfi, uint32_t ns_flags)
{
	struct net_port_info npi = {};

	/* This is unlikely to happen but better be safe than sorry */
	if (ifindex > UINT16_MAX) {
		os_log(OS_LOG_DEFAULT, "%s: ifindex %u too big", __func__, ifindex);
		return false;
	}
	npi.npi_if_index = (uint16_t)ifindex;
	if (IF_INDEX_IN_RANGE(ifindex)) {
		struct ifnet *ifp = ifindex2ifnet[ifindex];
		if (ifp != NULL && IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
	}

	npi.npi_flags |= NPIF_CHANNEL;

	/* Timestamp with the last wake UUID check time */
	npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec;
	npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec;

	if (ns_flags & NETNS_NOWAKEFROMSLEEP) {
		npi.npi_flags |= NPIF_NOWAKE;
	}
	if ((ns_flags & NETNS_OWNER_MASK) == NETNS_LISTENER) {
		npi.npi_flags |= NPIF_LISTEN;
	}

	uuid_copy(npi.npi_flow_uuid, nfi->nfi_flow_uuid);

	if (nfi->nfi_protocol == IPPROTO_TCP) {
		npi.npi_flags |= NPIF_TCP;
	} else if (nfi->nfi_protocol == IPPROTO_UDP) {
		npi.npi_flags |= NPIF_UDP;
	} else {
		/* Only TCP and UDP flows are tracked */
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol %u for nfi %p",
		    __func__, nfi->nfi_protocol, nfi);
		return false;
	}

	if (nfi->nfi_laddr.sa.sa_family == AF_INET) {
		npi.npi_flags |= NPIF_IPV4;

		npi.npi_local_port = nfi->nfi_laddr.sin.sin_port;
		npi.npi_foreign_port = nfi->nfi_faddr.sin.sin_port;

		npi.npi_local_addr_in = nfi->nfi_laddr.sin.sin_addr;
		npi.npi_foreign_addr_in = nfi->nfi_faddr.sin.sin_addr;
	} else {
		npi.npi_flags |= NPIF_IPV6;

		npi.npi_local_port = nfi->nfi_laddr.sin6.sin6_port;
		npi.npi_foreign_port = nfi->nfi_faddr.sin6.sin6_port;

		memcpy(&npi.npi_local_addr_in6,
		    &nfi->nfi_laddr.sin6.sin6_addr, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6,
		    &nfi->nfi_faddr.sin6.sin6_addr, sizeof(struct in6_addr));

		/* Clear the embedded scope ID */
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) {
			npi.npi_local_addr_in6.s6_addr16[1] = 0;
		}
		if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) {
			npi.npi_foreign_addr_in6.s6_addr16[1] = 0;
		}
	}

	npi.npi_owner_pid = nfi->nfi_owner_pid;
	strlcpy(npi.npi_owner_pname, nfi->nfi_owner_name,
	    sizeof(npi.npi_owner_pname));

	/*
	 * Get the proc UUID from the pid as the proc UUID is not present
	 * in the flow_entry
	 */
	proc_t proc = proc_find(npi.npi_owner_pid);
	if (proc != PROC_NULL) {
		proc_getexecutableuuid(proc, npi.npi_owner_uuid, sizeof(npi.npi_owner_uuid));
		proc_rele(proc);
	}
	if (nfi->nfi_effective_pid != -1) {
		npi.npi_effective_pid = nfi->nfi_effective_pid;
		strlcpy(npi.npi_effective_pname, nfi->nfi_effective_name,
		    sizeof(npi.npi_effective_pname));
		uuid_copy(npi.npi_effective_uuid, fe->fe_eproc_uuid);
	} else {
		/* No distinct effective process: effective == owner */
		npi.npi_effective_pid = npi.npi_owner_pid;
		strlcpy(npi.npi_effective_pname, npi.npi_owner_pname,
		    sizeof(npi.npi_effective_pname));
		uuid_copy(npi.npi_effective_uuid, npi.npi_owner_uuid);
	}

	return net_port_info_add_entry(&npi);
}
910
911 #endif /* SKYWALK */
912
/*
 * Log a net_port_info in human readable form: entry origin (socket or
 * channel), address family, protocol, local/foreign endpoints and pids.
 * 's' is an optional log line prefix (may be NULL).
 */
static void
net_port_info_log_npi(const char *s, const struct net_port_info *npi)
{
	char lbuf[MAX_IPv6_STR_LEN] = {};
	char fbuf[MAX_IPv6_STR_LEN] = {};

	/* Buffers stay empty strings when neither family flag is set */
	if (npi->npi_flags & NPIF_IPV4) {
		inet_ntop(PF_INET, &npi->npi_local_addr_in.s_addr,
		    lbuf, sizeof(lbuf));
		inet_ntop(PF_INET, &npi->npi_foreign_addr_in.s_addr,
		    fbuf, sizeof(fbuf));
	} else if (npi->npi_flags & NPIF_IPV6) {
		inet_ntop(PF_INET6, &npi->npi_local_addr_in6,
		    lbuf, sizeof(lbuf));
		inet_ntop(PF_INET6, &npi->npi_foreign_addr_in6,
		    fbuf, sizeof(fbuf));
	}
	os_log(OS_LOG_DEFAULT, "%s net_port_info if_index %u arch %s family %s proto %s local %s:%u foreign %s:%u pid: %u epid %u",
	    s != NULL ? s : "",
	    npi->npi_if_index,
	    (npi->npi_flags & NPIF_SOCKET) ? "so" : (npi->npi_flags & NPIF_CHANNEL) ? "ch" : "unknown",
	    (npi->npi_flags & NPIF_IPV4) ? "ipv4" : (npi->npi_flags & NPIF_IPV6) ? "ipv6" : "unknown",
	    npi->npi_flags & NPIF_TCP ? "tcp" : npi->npi_flags & NPIF_UDP ? "udp" :
	    npi->npi_flags & NPIF_ESP ? "esp" : "unknown",
	    lbuf, ntohs(npi->npi_local_port),
	    fbuf, ntohs(npi->npi_foreign_port),
	    npi->npi_owner_pid,
	    npi->npi_effective_pid);
}
942
943 #define NPI_MATCH_IPV4 (NPIF_IPV4 | NPIF_TCP | NPIF_UDP)
944 #define NPI_MATCH_IPV6 (NPIF_IPV6 | NPIF_TCP | NPIF_UDP)
945
946 static bool
net_port_info_match_npi(struct net_port_entry * npe,const struct net_port_info * in_npi,struct net_port_entry ** best_match)947 net_port_info_match_npi(struct net_port_entry *npe, const struct net_port_info *in_npi,
948 struct net_port_entry **best_match)
949 {
950 if (__improbable(net_wake_pkt_debug > 1)) {
951 net_port_info_log_npi(" ", &npe->npe_npi);
952 }
953
954 /*
955 * The interfaces must match or be both companion link
956 */
957 if (npe->npe_npi.npi_if_index != in_npi->npi_if_index &&
958 !((npe->npe_npi.npi_flags & NPIF_COMPLINK) && (in_npi->npi_flags & NPIF_COMPLINK))) {
959 return false;
960 }
961
962 /*
963 * The local ports and protocols must match
964 */
965 if (npe->npe_npi.npi_local_port != in_npi->npi_local_port ||
966 ((npe->npe_npi.npi_flags & NPI_MATCH_IPV4) != (in_npi->npi_flags & NPI_MATCH_IPV4) &&
967 (npe->npe_npi.npi_flags & NPI_MATCH_IPV6) != (in_npi->npi_flags & NPI_MATCH_IPV6))) {
968 return false;
969 }
970 /*
971 * Search stops on an exact match
972 */
973 if (npe->npe_npi.npi_foreign_port == in_npi->npi_foreign_port) {
974 if ((npe->npe_npi.npi_flags & NPIF_IPV4) && (npe->npe_npi.npi_flags & NPIF_IPV4)) {
975 if (in_npi->npi_local_addr_in.s_addr == npe->npe_npi.npi_local_addr_in.s_addr &&
976 in_npi->npi_foreign_addr_in.s_addr == npe->npe_npi.npi_foreign_addr_in.s_addr) {
977 *best_match = npe;
978 return true;
979 }
980 }
981 if ((npe->npe_npi.npi_flags & NPIF_IPV6) && (npe->npe_npi.npi_flags & NPIF_IPV6)) {
982 if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
983 sizeof(union in_addr_4_6)) == 0 &&
984 memcmp(&npe->npe_npi.npi_foreign_addr_, &in_npi->npi_foreign_addr_,
985 sizeof(union in_addr_4_6)) == 0) {
986 *best_match = npe;
987 return true;
988 }
989 }
990 }
991 /*
992 * Skip connected entries as we are looking for a wildcard match
993 * on the local address and port
994 */
995 if (npe->npe_npi.npi_foreign_port != 0) {
996 return false;
997 }
998 /*
999 * The local address matches: this is our 2nd best match
1000 */
1001 if (memcmp(&npe->npe_npi.npi_local_addr_, &in_npi->npi_local_addr_,
1002 sizeof(union in_addr_4_6)) == 0) {
1003 *best_match = npe;
1004 return false;
1005 }
1006 /*
1007 * Only the local port matches, do not override a match
1008 * on the local address
1009 */
1010 if (*best_match == NULL) {
1011 *best_match = npe;
1012 }
1013 return false;
1014 }
1015
1016 /*
1017 *
1018 */
1019 static bool
net_port_info_find_match(struct net_port_info * in_npi)1020 net_port_info_find_match(struct net_port_info *in_npi)
1021 {
1022 struct net_port_entry *npe;
1023 struct net_port_entry *best_match = NULL;
1024
1025 lck_mtx_lock(&net_port_entry_head_lock);
1026
1027 uint32_t count = 0;
1028 TAILQ_FOREACH(npe, NPE_HASH_HEAD(in_npi->npi_local_port), npe_hash_next) {
1029 count += 1;
1030 if (net_port_info_match_npi(npe, in_npi, &best_match)) {
1031 break;
1032 }
1033 }
1034
1035 if (best_match != NULL) {
1036 best_match->npe_npi.npi_flags |= NPIF_WAKEPKT;
1037 in_npi->npi_owner_pid = best_match->npe_npi.npi_owner_pid;
1038 in_npi->npi_effective_pid = best_match->npe_npi.npi_effective_pid;
1039 strlcpy(in_npi->npi_owner_pname, best_match->npe_npi.npi_owner_pname,
1040 sizeof(in_npi->npi_owner_pname));
1041 strlcpy(in_npi->npi_effective_pname, best_match->npe_npi.npi_effective_pname,
1042 sizeof(in_npi->npi_effective_pname));
1043 uuid_copy(in_npi->npi_owner_uuid, best_match->npe_npi.npi_owner_uuid);
1044 uuid_copy(in_npi->npi_effective_uuid, best_match->npe_npi.npi_effective_uuid);
1045 }
1046 lck_mtx_unlock(&net_port_entry_head_lock);
1047
1048 if (__improbable(net_wake_pkt_debug > 0)) {
1049 if (best_match != NULL) {
1050 net_port_info_log_npi("wake packet match", in_npi);
1051 } else {
1052 net_port_info_log_npi("wake packet no match", in_npi);
1053 }
1054 }
1055
1056 return best_match != NULL ? true : false;
1057 }
1058
1059 #if (DEBUG || DEVELOPMENT)
1060 static void
net_port_info_log_una_wake_event(const char * s,struct net_port_info_una_wake_event * ev)1061 net_port_info_log_una_wake_event(const char *s, struct net_port_info_una_wake_event *ev)
1062 {
1063 char lbuf[MAX_IPv6_STR_LEN] = {};
1064 char fbuf[MAX_IPv6_STR_LEN] = {};
1065
1066 if (ev->una_wake_pkt_flags & NPIF_IPV4) {
1067 inet_ntop(PF_INET, &ev->una_wake_pkt_local_addr_._in_a_4.s_addr,
1068 lbuf, sizeof(lbuf));
1069 inet_ntop(PF_INET, &ev->una_wake_pkt_foreign_addr_._in_a_4.s_addr,
1070 fbuf, sizeof(fbuf));
1071 } else if (ev->una_wake_pkt_flags & NPIF_IPV6) {
1072 inet_ntop(PF_INET6, &ev->una_wake_pkt_local_addr_._in_a_6.s6_addr,
1073 lbuf, sizeof(lbuf));
1074 inet_ntop(PF_INET6, &ev->una_wake_pkt_foreign_addr_._in_a_6.s6_addr,
1075 fbuf, sizeof(fbuf));
1076 }
1077 os_log(OS_LOG_DEFAULT, "%s if %s (%u) phy_if %s proto %s local %s:%u foreign %s:%u len: %u datalen: %u cflags: 0x%x proto: %u",
1078 s != NULL ? s : "",
1079 ev->una_wake_pkt_ifname, ev->una_wake_pkt_if_index, ev->una_wake_pkt_phy_ifname,
1080 ev->una_wake_pkt_flags & NPIF_TCP ? "tcp" : ev->una_wake_pkt_flags ? "udp" :
1081 ev->una_wake_pkt_flags & NPIF_ESP ? "esp" : "unknown",
1082 lbuf, ntohs(ev->una_wake_pkt_local_port),
1083 fbuf, ntohs(ev->una_wake_pkt_foreign_port),
1084 ev->una_wake_pkt_total_len, ev->una_wake_pkt_data_len,
1085 ev->una_wake_pkt_control_flags, ev->una_wake_pkt_proto);
1086 }
1087
1088 static void
net_port_info_log_wake_event(const char * s,struct net_port_info_wake_event * ev)1089 net_port_info_log_wake_event(const char *s, struct net_port_info_wake_event *ev)
1090 {
1091 char lbuf[MAX_IPv6_STR_LEN] = {};
1092 char fbuf[MAX_IPv6_STR_LEN] = {};
1093
1094 if (ev->wake_pkt_flags & NPIF_IPV4) {
1095 inet_ntop(PF_INET, &ev->wake_pkt_local_addr_._in_a_4.s_addr,
1096 lbuf, sizeof(lbuf));
1097 inet_ntop(PF_INET, &ev->wake_pkt_foreign_addr_._in_a_4.s_addr,
1098 fbuf, sizeof(fbuf));
1099 } else if (ev->wake_pkt_flags & NPIF_IPV6) {
1100 inet_ntop(PF_INET6, &ev->wake_pkt_local_addr_._in_a_6.s6_addr,
1101 lbuf, sizeof(lbuf));
1102 inet_ntop(PF_INET6, &ev->wake_pkt_foreign_addr_._in_a_6.s6_addr,
1103 fbuf, sizeof(fbuf));
1104 }
1105 os_log(OS_LOG_DEFAULT, "%s if %s (%u) phy_if %s proto %s local %s:%u foreign %s:%u len: %u datalen: %u cflags: 0x%x proc %s eproc %s",
1106 s != NULL ? s : "",
1107 ev->wake_pkt_ifname, ev->wake_pkt_if_index, ev->wake_pkt_phy_ifname,
1108 ev->wake_pkt_flags & NPIF_TCP ? "tcp" : ev->wake_pkt_flags ? "udp" :
1109 ev->wake_pkt_flags & NPIF_ESP ? "esp" : "unknown",
1110 lbuf, ntohs(ev->wake_pkt_port),
1111 fbuf, ntohs(ev->wake_pkt_foreign_port),
1112 ev->wake_pkt_total_len, ev->wake_pkt_data_len, ev->wake_pkt_control_flags,
1113 ev->wake_pkt_owner_pname, ev->wake_pkt_effective_pname);
1114 }
1115
1116 #endif /* (DEBUG || DEVELOPMENT) */
1117
1118 /*
1119 * The process attribution of a wake packet can take several steps:
1120 *
1121 * 1) After device wakes, the first interface that sees a wake packet is the
1122 * physical interface and we remember it via if_set_wake_physical_interface()
1123 *
1124 * 2) We try to attribute a packet to a flow or not based on the physical interface.
1125 * If we find a flow, then the physical interface is the same as the interface used
1126 * by the TCP/UDP flow.
1127 *
 * 3) If the packet is tunneled or redirected, we are going to do the attribution again
 * and the physical interface will be different from the interface used by the TCP/UDP flow.
1130 */
1131 static void
if_set_wake_physical_interface(struct ifnet * ifp)1132 if_set_wake_physical_interface(struct ifnet *ifp)
1133 {
1134 if (last_wake_phy_if_set == true || ifp == NULL) {
1135 return;
1136 }
1137 last_wake_phy_if_set = true;
1138 strlcpy(last_wake_phy_if_name, IF_XNAME(ifp), sizeof(last_wake_phy_if_name));
1139 last_wake_phy_if_family = ifp->if_family;
1140 last_wake_phy_if_subfamily = ifp->if_subfamily;
1141 last_wake_phy_if_functional_type = if_functional_type(ifp, true);
1142 }
1143
/*
 * Post a KEV_POWER_UNATTRIBUTED_WAKE kernel event for a wake packet
 * (received as an mbuf) that could not be attributed to a process.
 *
 * Only the first unattributed wake of a wake cycle is reported; duplicates
 * only bump a counter.  The event carries the parsed packet metadata plus
 * a copy of up to NPI_MAX_UNA_WAKE_PKT_LEN bytes of the packet itself.
 */
static void
if_notify_unattributed_wake_mbuf(struct ifnet *ifp, struct mbuf *m,
    struct net_port_info *npi, uint32_t pkt_total_len, uint32_t pkt_data_len,
    uint16_t pkt_control_flags, uint16_t proto)
{
	struct kev_msg ev_msg = {};

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);

	/* The lock protects the has_notified_unattributed_wake once-per-wake flag */
	lck_mtx_lock(&net_port_entry_head_lock);
	if (has_notified_unattributed_wake) {
		lck_mtx_unlock(&net_port_entry_head_lock);
		if_ports_used_stats.ifpu_dup_unattributed_wake_event += 1;

		if (__improbable(net_wake_pkt_debug > 0)) {
			net_port_info_log_npi("already notified unattributed wake packet", npi);
		}
		return;
	}
	has_notified_unattributed_wake = true;
	lck_mtx_unlock(&net_port_entry_head_lock);

	if_ports_used_stats.ifpu_unattributed_wake_event += 1;

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
	ev_msg.event_code = KEV_POWER_UNATTRIBUTED_WAKE;

	struct net_port_info_una_wake_event event_data = {};
	uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
	event_data.una_wake_pkt_if_index = ifp != NULL ? ifp->if_index : 0;
	event_data.una_wake_pkt_flags = npi->npi_flags;

	event_data.una_wake_pkt_local_port = npi->npi_local_port;
	event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
	event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
	event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;

	event_data.una_wake_pkt_total_len = pkt_total_len;
	event_data.una_wake_pkt_data_len = pkt_data_len;
	event_data.una_wake_pkt_control_flags = pkt_control_flags;
	event_data.una_wake_pkt_proto = proto;

	if (ifp != NULL) {
		/* Report both the receive interface and the physical wake interface */
		strlcpy(event_data.una_wake_pkt_ifname, IF_XNAME(ifp),
		    sizeof(event_data.una_wake_pkt_ifname));
		event_data.una_wake_pkt_if_info.npi_if_family = ifp->if_family;
		event_data.una_wake_pkt_if_info.npi_if_subfamily = ifp->if_subfamily;
		event_data.una_wake_pkt_if_info.npi_if_functional_type = if_functional_type(ifp, true);

		strlcpy(event_data.una_wake_pkt_phy_ifname, last_wake_phy_if_name,
		    sizeof(event_data.una_wake_pkt_phy_ifname));
		event_data.una_wake_pkt_phy_if_info.npi_if_family = last_wake_phy_if_family;
		event_data.una_wake_pkt_phy_if_info.npi_if_subfamily = last_wake_phy_if_subfamily;
		event_data.una_wake_pkt_phy_if_info.npi_if_functional_type = last_wake_phy_if_functional_type;
	} else {
		if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
	}

	/* Copy at most NPI_MAX_UNA_WAKE_PKT_LEN bytes of the packet into the event */
	event_data.una_wake_ptk_len = m->m_pkthdr.len > NPI_MAX_UNA_WAKE_PKT_LEN ?
	    NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t)m->m_pkthdr.len;

	errno_t error = mbuf_copydata(m, 0, event_data.una_wake_ptk_len,
	    (void *)event_data.una_wake_pkt);
	if (error != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: mbuf_copydata() failed with error %d for wake uuid %s",
		    __func__, error, wake_uuid_str);

		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
		return;
	}

	ev_msg.dv[0].data_ptr = &event_data;
	ev_msg.dv[0].data_length = sizeof(event_data);

	int result = kev_post_msg(&ev_msg);
	if (result != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
		    __func__, result, wake_uuid_str);

		if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
	}

#if (DEBUG || DEVELOPMENT)
	net_port_info_log_una_wake_event("unattributed wake packet event", &event_data);
#endif /* (DEBUG || DEVELOPMENT) */
}
1240
/*
 * Post a KEV_POWER_WAKE_PACKET kernel event for a wake packet that was
 * attributed to a process (npi carries the attribution from
 * net_port_info_find_match()).
 *
 * Only the first attributed wake of a wake cycle is reported; duplicates
 * only bump a counter.  A copy of the event is kept in
 * last_attributed_wake_event for the sysctl handler.
 */
static void
if_notify_wake_packet(struct ifnet *ifp, struct net_port_info *npi,
    uint32_t pkt_total_len, uint32_t pkt_data_len, uint16_t pkt_control_flags)
{
	struct kev_msg ev_msg = {};

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
	ev_msg.event_code = KEV_POWER_WAKE_PACKET;

	struct net_port_info_wake_event event_data = {};

	uuid_copy(event_data.wake_uuid, current_wakeuuid);
	event_data.wake_pkt_if_index = ifp->if_index;
	event_data.wake_pkt_port = npi->npi_local_port;
	event_data.wake_pkt_flags = npi->npi_flags;
	event_data.wake_pkt_owner_pid = npi->npi_owner_pid;
	event_data.wake_pkt_effective_pid = npi->npi_effective_pid;
	strlcpy(event_data.wake_pkt_owner_pname, npi->npi_owner_pname,
	    sizeof(event_data.wake_pkt_owner_pname));
	strlcpy(event_data.wake_pkt_effective_pname, npi->npi_effective_pname,
	    sizeof(event_data.wake_pkt_effective_pname));
	uuid_copy(event_data.wake_pkt_owner_uuid, npi->npi_owner_uuid);
	uuid_copy(event_data.wake_pkt_effective_uuid, npi->npi_effective_uuid);

	event_data.wake_pkt_foreign_port = npi->npi_foreign_port;
	event_data.wake_pkt_local_addr_ = npi->npi_local_addr_;
	event_data.wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
	strlcpy(event_data.wake_pkt_ifname, IF_XNAME(ifp), sizeof(event_data.wake_pkt_ifname));

	/* Report both the receive interface and the physical wake interface */
	event_data.wake_pkt_if_info.npi_if_family = ifp->if_family;
	event_data.wake_pkt_if_info.npi_if_subfamily = ifp->if_subfamily;
	event_data.wake_pkt_if_info.npi_if_functional_type = if_functional_type(ifp, true);

	strlcpy(event_data.wake_pkt_phy_ifname, last_wake_phy_if_name,
	    sizeof(event_data.wake_pkt_phy_ifname));
	event_data.wake_pkt_phy_if_info.npi_if_family = last_wake_phy_if_family;
	event_data.wake_pkt_phy_if_info.npi_if_subfamily = last_wake_phy_if_subfamily;
	event_data.wake_pkt_phy_if_info.npi_if_functional_type = last_wake_phy_if_functional_type;

	event_data.wake_pkt_total_len = pkt_total_len;
	event_data.wake_pkt_data_len = pkt_data_len;
	event_data.wake_pkt_control_flags = pkt_control_flags;

	ev_msg.dv[0].data_ptr = &event_data;
	ev_msg.dv[0].data_length = sizeof(event_data);

	LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);

	/* The lock protects the has_notified_wake_pkt once-per-wake flag */
	lck_mtx_lock(&net_port_entry_head_lock);

	if (has_notified_wake_pkt) {
		lck_mtx_unlock(&net_port_entry_head_lock);
		if_ports_used_stats.ifpu_dup_wake_pkt_event += 1;

		if (__improbable(net_wake_pkt_debug > 0)) {
			net_port_info_log_npi("already notified wake packet", npi);
		}
		return;
	}
	has_notified_wake_pkt = true;

	/* Keep a copy for sysctl_last_attributed_wake_event */
	memcpy(&last_attributed_wake_event, &event_data, sizeof(last_attributed_wake_event));

	lck_mtx_unlock(&net_port_entry_head_lock);

	if_ports_used_stats.ifpu_wake_pkt_event += 1;


	int result = kev_post_msg(&ev_msg);
	if (result != 0) {
		uuid_string_t wake_uuid_str;

		uuid_unparse(event_data.wake_uuid, wake_uuid_str);
		os_log_error(OS_LOG_DEFAULT,
		    "%s: kev_post_msg() failed with error %d for wake uuid %s",
		    __func__, result, wake_uuid_str);

		if_ports_used_stats.ifpu_wake_pkt_event_error += 1;
	}
#if (DEBUG || DEVELOPMENT)
	net_port_info_log_wake_event("attributed wake packet event", &event_data);
#endif /* (DEBUG || DEVELOPMENT) */
}
1326
1327 static bool
is_encapsulated_esp(struct mbuf * m,size_t data_offset)1328 is_encapsulated_esp(struct mbuf *m, size_t data_offset)
1329 {
1330 /*
1331 * They are three cases:
1332 * - Keep alive: 1 byte payload
1333 * - IKE: payload start with 4 bytes header set to zero before ISAKMP header
1334 * - otherwise it's ESP
1335 */
1336 ASSERT(m->m_pkthdr.len >= data_offset);
1337
1338 size_t data_len = m->m_pkthdr.len - data_offset;
1339 if (data_len == 1) {
1340 return false;
1341 } else if (data_len > ESP_HDR_SIZE) {
1342 uint8_t payload[ESP_HDR_SIZE];
1343
1344 errno_t error = mbuf_copydata(m, data_offset, ESP_HDR_SIZE, &payload);
1345 if (error != 0) {
1346 os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ESP_HDR_SIZE) error %d",
1347 __func__, error);
1348 } else if (payload[0] == 0 && payload[1] == 0 &&
1349 payload[2] == 0 && payload[3] == 0) {
1350 return false;
1351 }
1352 }
1353 return true;
1354 }
1355
/*
 * Attribute a wake packet received as an mbuf (socket path) to a process.
 *
 * Parses the IPv4/IPv6 and TCP/UDP headers out of the mbuf to build a
 * net_port_info, then matches it against the recorded list of ports in
 * use.  On a match, an attributed wake event is posted; on any parse
 * failure or missing match, an unattributed wake event is posted instead.
 * ESP and UDP-encapsulated ESP (ISAKMP NAT-T) packets are deferred to the
 * ESP layer and generate no event here.
 */
void
if_ports_used_match_mbuf(struct ifnet *ifp, protocol_family_t proto_family, struct mbuf *m)
{
	errno_t error;
	struct net_port_info npi = {};
	bool found = false;
	uint32_t pkt_total_len = 0;
	uint32_t pkt_data_len = 0;
	uint16_t pkt_control_flags = 0;
	uint16_t pkt_proto = 0;

	/* Callers must only hand us packets marked as wake packets */
	if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == 0) {
		if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
		os_log_error(OS_LOG_DEFAULT, "%s: called PKTF_WAKE_PKT not set from %s",
		    __func__, ifp != NULL ? IF_XNAME(ifp) : "");
		return;
	}

	if_ports_used_stats.ifpu_so_match_wake_pkt += 1;
	npi.npi_flags |= NPIF_SOCKET; /* For logging */
	pkt_total_len = m->m_pkthdr.len;
	pkt_data_len = pkt_total_len;

	if (ifp != NULL) {
		npi.npi_if_index = ifp->if_index;
		if (IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
		if_set_wake_physical_interface(ifp);
	}

	if (proto_family == PF_INET) {
		struct ip iphdr = {};

		if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;

		error = mbuf_copydata(m, 0, sizeof(struct ip), &iphdr);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip) error %d",
			    __func__, error);
			goto failed;
		}
		npi.npi_flags |= NPIF_IPV4;
		npi.npi_local_addr_in = iphdr.ip_dst;
		npi.npi_foreign_addr_in = iphdr.ip_src;

		/*
		 * Check if this is a fragment that is not the first fragment
		 */
		if ((ntohs(iphdr.ip_off) & ~(IP_DF | IP_RF)) &&
		    (ntohs(iphdr.ip_off) & IP_OFFMASK) != 0) {
			npi.npi_flags |= NPIF_FRAG;
			if_ports_used_stats.ifpu_frag_wake_pkt += 1;
		}

		/* Subtract the IPv4 header (ip_hl is in 32-bit words) */
		if ((iphdr.ip_hl << 2) < pkt_data_len) {
			pkt_data_len -= iphdr.ip_hl << 2;
		} else {
			pkt_data_len = 0;
		}

		pkt_proto = iphdr.ip_p;

		switch (iphdr.ip_p) {
		case IPPROTO_TCP: {
			if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
			npi.npi_flags |= NPIF_TCP;

			/* A non-first fragment does not carry the TCP header */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct tcphdr th = {};
			error = mbuf_copydata(m, iphdr.ip_hl << 2, sizeof(struct tcphdr), &th);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(tcphdr) error %d",
				    __func__, error);
				goto failed;
			}
			npi.npi_local_port = th.th_dport;
			npi.npi_foreign_port = th.th_sport;

			/* Subtract the TCP header (th_off is in 32-bit words) */
			if (pkt_data_len < sizeof(struct tcphdr) ||
			    pkt_data_len < (th.th_off << 2)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= th.th_off << 2;
			}
			pkt_control_flags = th.th_flags;
			break;
		}
		case IPPROTO_UDP: {
			if_ports_used_stats.ifpu_udp_wake_pkt += 1;
			npi.npi_flags |= NPIF_UDP;

			/* A non-first fragment does not carry the UDP header */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}
			struct udphdr uh = {};
			size_t udp_offset = iphdr.ip_hl << 2;

			error = mbuf_copydata(m, udp_offset, sizeof(struct udphdr), &uh);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(udphdr) error %d",
				    __func__, error);
				goto failed;
			}
			npi.npi_local_port = uh.uh_dport;
			npi.npi_foreign_port = uh.uh_sport;
			/*
			 * Let the ESP layer handle wake packets
			 */
			if (ntohs(uh.uh_dport) == PORT_ISAKMP_NATT ||
			    ntohs(uh.uh_sport) == PORT_ISAKMP_NATT) {
				if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
				if (is_encapsulated_esp(m, udp_offset + sizeof(struct udphdr))) {
					if (net_wake_pkt_debug > 0) {
						net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
					}
					return;
				}
			}

			if (pkt_data_len < sizeof(struct udphdr)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= sizeof(struct udphdr);
			}
			break;
		}
		case IPPROTO_ESP: {
			/*
			 * Let the ESP layer handle wake packets
			 */
			if_ports_used_stats.ifpu_esp_wake_pkt += 1;
			npi.npi_flags |= NPIF_ESP;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ESP matching", &npi);
			}
			return;
		}
		default:
			if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;
			os_log(OS_LOG_DEFAULT, "%s: unexpected IPv4 protocol %u from %s",
			    __func__, iphdr.ip_p, IF_XNAME(ifp));
			goto failed;
		}
	} else if (proto_family == PF_INET6) {
		struct ip6_hdr ip6_hdr = {};

		if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;

		error = mbuf_copydata(m, 0, sizeof(struct ip6_hdr), &ip6_hdr);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip6_hdr) error %d",
			    __func__, error);
			goto failed;
		}
		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6, &ip6_hdr.ip6_dst, sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6, &ip6_hdr.ip6_src, sizeof(struct in6_addr));

		size_t l3_len = sizeof(struct ip6_hdr);
		uint8_t l4_proto = ip6_hdr.ip6_nxt;

		pkt_proto = l4_proto;

		if (pkt_data_len < l3_len) {
			pkt_data_len = 0;
		} else {
			pkt_data_len -= l3_len;
		}

		/*
		 * Check if this is a fragment that is not the first fragment
		 * (only a single fragment extension header is parsed here)
		 */
		if (l4_proto == IPPROTO_FRAGMENT) {
			struct ip6_frag ip6_frag;

			error = mbuf_copydata(m, sizeof(struct ip6_hdr), sizeof(struct ip6_frag), &ip6_frag);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(ip6_frag) error %d",
				    __func__, error);
				goto failed;
			}

			l3_len += sizeof(struct ip6_frag);
			l4_proto = ip6_frag.ip6f_nxt;

			if ((ip6_frag.ip6f_offlg & IP6F_OFF_MASK) != 0) {
				npi.npi_flags |= NPIF_FRAG;
				if_ports_used_stats.ifpu_frag_wake_pkt += 1;
			}
		}


		switch (l4_proto) {
		case IPPROTO_TCP: {
			if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
			npi.npi_flags |= NPIF_TCP;

			/*
			 * Cannot attribute a fragment that is not the first fragment
			 * as it does not have the TCP header
			 */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct tcphdr th = {};

			error = mbuf_copydata(m, l3_len, sizeof(struct tcphdr), &th);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(tcphdr) error %d",
				    __func__, error);
				if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
				goto failed;
			}
			npi.npi_local_port = th.th_dport;
			npi.npi_foreign_port = th.th_sport;

			/* Subtract the TCP header (th_off is in 32-bit words) */
			if (pkt_data_len < sizeof(struct tcphdr) ||
			    pkt_data_len < (th.th_off << 2)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= th.th_off << 2;
			}
			pkt_control_flags = th.th_flags;
			break;
		}
		case IPPROTO_UDP: {
			if_ports_used_stats.ifpu_udp_wake_pkt += 1;
			npi.npi_flags |= NPIF_UDP;

			/*
			 * Cannot attribute a fragment that is not the first fragment
			 * as it does not have the UDP header
			 */
			if (npi.npi_flags & NPIF_FRAG) {
				goto failed;
			}

			struct udphdr uh = {};

			error = mbuf_copydata(m, l3_len, sizeof(struct udphdr), &uh);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "%s: mbuf_copydata(udphdr) error %d",
				    __func__, error);
				if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
				goto failed;
			}
			npi.npi_local_port = uh.uh_dport;
			npi.npi_foreign_port = uh.uh_sport;
			/*
			 * Let the ESP layer handle wake packets
			 */
			if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
			    ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
				if_ports_used_stats.ifpu_isakmp_natt_wake_pkt += 1;
				if (is_encapsulated_esp(m, l3_len + sizeof(struct udphdr))) {
					if (net_wake_pkt_debug > 0) {
						net_port_info_log_npi("defer encapsulated ESP matching", &npi);
					}
					return;
				}
			}

			if (pkt_data_len < sizeof(struct udphdr)) {
				pkt_data_len = 0;
			} else {
				pkt_data_len -= sizeof(struct udphdr);
			}
			break;
		}
		case IPPROTO_ESP: {
			/*
			 * Let the ESP layer handle the wake packet
			 */
			if_ports_used_stats.ifpu_esp_wake_pkt += 1;
			npi.npi_flags |= NPIF_ESP;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ESP matching", &npi);
			}
			return;
		}
		default:
			if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;

			os_log(OS_LOG_DEFAULT, "%s: unexpected IPv6 protocol %u from %s",
			    __func__, ip6_hdr.ip6_nxt, IF_XNAME(ifp));
			goto failed;
		}
	} else {
		if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;
		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol family %d from %s",
		    __func__, proto_family, IF_XNAME(ifp));
		goto failed;
	}
	/* Cannot attribute without a receive interface */
	if (ifp == NULL) {
		goto failed;
	}

	found = net_port_info_find_match(&npi);
	if (found) {
		if_notify_wake_packet(ifp, &npi,
		    pkt_total_len, pkt_data_len, pkt_control_flags);
	} else {
		if_notify_unattributed_wake_mbuf(ifp, m, &npi,
		    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
	}
	return;
failed:
	if_notify_unattributed_wake_mbuf(ifp, m, &npi,
	    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
}
1671
1672 #if SKYWALK
1673
1674 static void
if_notify_unattributed_wake_pkt(struct ifnet * ifp,struct __kern_packet * pkt,struct net_port_info * npi,uint32_t pkt_total_len,uint32_t pkt_data_len,uint16_t pkt_control_flags,uint16_t proto)1675 if_notify_unattributed_wake_pkt(struct ifnet *ifp, struct __kern_packet *pkt,
1676 struct net_port_info *npi, uint32_t pkt_total_len, uint32_t pkt_data_len,
1677 uint16_t pkt_control_flags, uint16_t proto)
1678 {
1679 struct kev_msg ev_msg = {};
1680
1681 LCK_MTX_ASSERT(&net_port_entry_head_lock, LCK_MTX_ASSERT_NOTOWNED);
1682
1683 lck_mtx_lock(&net_port_entry_head_lock);
1684 if (has_notified_unattributed_wake) {
1685 lck_mtx_unlock(&net_port_entry_head_lock);
1686 if_ports_used_stats.ifpu_dup_unattributed_wake_event += 1;
1687
1688 if (__improbable(net_wake_pkt_debug > 0)) {
1689 net_port_info_log_npi("already notified unattributed wake packet", npi);
1690 }
1691 return;
1692 }
1693 has_notified_unattributed_wake = true;
1694 lck_mtx_unlock(&net_port_entry_head_lock);
1695
1696 if_ports_used_stats.ifpu_unattributed_wake_event += 1;
1697
1698 if (ifp == NULL) {
1699 os_log(OS_LOG_DEFAULT, "%s: receive interface is NULL",
1700 __func__);
1701 if_ports_used_stats.ifpu_unattributed_null_recvif += 1;
1702 }
1703
1704 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1705 ev_msg.kev_class = KEV_NETWORK_CLASS;
1706 ev_msg.kev_subclass = KEV_POWER_SUBCLASS;
1707 ev_msg.event_code = KEV_POWER_UNATTRIBUTED_WAKE;
1708
1709 struct net_port_info_una_wake_event event_data = {};
1710 uuid_copy(event_data.una_wake_uuid, current_wakeuuid);
1711 event_data.una_wake_pkt_if_index = ifp != NULL ? ifp->if_index : 0;
1712 event_data.una_wake_pkt_flags = npi->npi_flags;
1713
1714 uint16_t offset = kern_packet_get_network_header_offset(SK_PKT2PH(pkt));
1715 event_data.una_wake_ptk_len =
1716 pkt->pkt_length - offset > NPI_MAX_UNA_WAKE_PKT_LEN ?
1717 NPI_MAX_UNA_WAKE_PKT_LEN : (u_int16_t) pkt->pkt_length - offset;
1718
1719 kern_packet_copy_bytes(SK_PKT2PH(pkt), offset, event_data.una_wake_ptk_len,
1720 event_data.una_wake_pkt);
1721
1722 event_data.una_wake_pkt_local_port = npi->npi_local_port;
1723 event_data.una_wake_pkt_foreign_port = npi->npi_foreign_port;
1724 event_data.una_wake_pkt_local_addr_ = npi->npi_local_addr_;
1725 event_data.una_wake_pkt_foreign_addr_ = npi->npi_foreign_addr_;
1726 if (ifp != NULL) {
1727 strlcpy(event_data.una_wake_pkt_ifname, IF_XNAME(ifp),
1728 sizeof(event_data.una_wake_pkt_ifname));
1729 }
1730
1731 event_data.una_wake_pkt_total_len = pkt_total_len;
1732 event_data.una_wake_pkt_data_len = pkt_data_len;
1733 event_data.una_wake_pkt_control_flags = pkt_control_flags;
1734 event_data.una_wake_pkt_proto = proto;
1735
1736 ev_msg.dv[0].data_ptr = &event_data;
1737 ev_msg.dv[0].data_length = sizeof(event_data);
1738
1739 int result = kev_post_msg(&ev_msg);
1740 if (result != 0) {
1741 uuid_string_t wake_uuid_str;
1742
1743 uuid_unparse(event_data.una_wake_uuid, wake_uuid_str);
1744 os_log_error(OS_LOG_DEFAULT,
1745 "%s: kev_post_msg() failed with error %d for wake uuid %s",
1746 __func__, result, wake_uuid_str);
1747
1748 if_ports_used_stats.ifpu_unattributed_wake_event_error += 1;
1749 }
1750 #if (DEBUG || DEVELOPMENT)
1751 net_port_info_log_una_wake_event("unattributed wake packet event", &event_data);
1752 #endif /* (DEBUG || DEVELOPMENT) */
1753 }
1754
/*
 * Attribute a wake packet received as a Skywalk __kern_packet (channel
 * path) to a process.
 *
 * Unlike the mbuf path, the packet's flow metadata (pkt_flow_*) is already
 * parsed, so the headers are read from there.  On a match, an attributed
 * wake event is posted; on any failure or missing match, an unattributed
 * wake event is posted.  ESP and ISAKMP NAT-T packets are deferred to the
 * ESP layer / ip_input and generate no event here.
 */
void
if_ports_used_match_pkt(struct ifnet *ifp, struct __kern_packet *pkt)
{
	struct net_port_info npi = {};
	bool found = false;
	uint32_t pkt_total_len = 0;
	uint32_t pkt_data_len = 0;
	uint16_t pkt_control_flags = 0;
	uint16_t pkt_proto = 0;

	/* Callers must only hand us packets marked as wake packets */
	if ((pkt->pkt_pflags & PKT_F_WAKE_PKT) == 0) {
		if_ports_used_stats.ifpu_match_wake_pkt_no_flag += 1;
		os_log_error(OS_LOG_DEFAULT, "%s: called PKT_F_WAKE_PKT not set from %s",
		    __func__, IF_XNAME(ifp));
		return;
	}

	if_ports_used_stats.ifpu_ch_match_wake_pkt += 1;
	npi.npi_flags |= NPIF_CHANNEL; /* For logging */
	pkt_total_len = pkt->pkt_flow_ip_hlen +
	    pkt->pkt_flow_tcp_hlen + pkt->pkt_flow_ulen;
	pkt_data_len = pkt->pkt_flow_ulen;

	if (ifp != NULL) {
		npi.npi_if_index = ifp->if_index;
		if (IFNET_IS_COMPANION_LINK(ifp)) {
			npi.npi_flags |= NPIF_COMPLINK;
		}
		if_set_wake_physical_interface(ifp);
	}

	/* Pick up the addresses from the pre-parsed flow metadata */
	switch (pkt->pkt_flow_ip_ver) {
	case IPVERSION:
		if_ports_used_stats.ifpu_ipv4_wake_pkt += 1;

		npi.npi_flags |= NPIF_IPV4;
		npi.npi_local_addr_in = pkt->pkt_flow_ipv4_dst;
		npi.npi_foreign_addr_in = pkt->pkt_flow_ipv4_src;
		break;
	case IPV6_VERSION:
		if_ports_used_stats.ifpu_ipv6_wake_pkt += 1;

		npi.npi_flags |= NPIF_IPV6;
		memcpy(&npi.npi_local_addr_in6, &pkt->pkt_flow_ipv6_dst,
		    sizeof(struct in6_addr));
		memcpy(&npi.npi_foreign_addr_in6, &pkt->pkt_flow_ipv6_src,
		    sizeof(struct in6_addr));
		break;
	default:
		if_ports_used_stats.ifpu_bad_family_wake_pkt += 1;

		os_log(OS_LOG_DEFAULT, "%s: unexpected protocol family %u from %s",
		    __func__, pkt->pkt_flow_ip_ver, IF_XNAME(ifp));
		goto failed;
	}
	pkt_proto = pkt->pkt_flow_ip_ver;

	/*
	 * Check if this is a fragment that is not the first fragment
	 */
	if (pkt->pkt_flow_ip_is_frag && !pkt->pkt_flow_ip_is_first_frag) {
		os_log(OS_LOG_DEFAULT, "%s: unexpected wake fragment from %s",
		    __func__, IF_XNAME(ifp));
		npi.npi_flags |= NPIF_FRAG;
		if_ports_used_stats.ifpu_frag_wake_pkt += 1;
	}

	switch (pkt->pkt_flow_ip_proto) {
	case IPPROTO_TCP: {
		if_ports_used_stats.ifpu_tcp_wake_pkt += 1;
		npi.npi_flags |= NPIF_TCP;

		/*
		 * Cannot attribute a fragment that is not the first fragment
		 * as it does not have the TCP header
		 */
		if (npi.npi_flags & NPIF_FRAG) {
			goto failed;
		}
		struct tcphdr *tcp = (struct tcphdr *)pkt->pkt_flow_tcp_hdr;
		if (tcp == NULL) {
			os_log(OS_LOG_DEFAULT, "%s: pkt with unassigned TCP header from %s",
			    __func__, IF_XNAME(ifp));
			if_ports_used_stats.ifpu_incomplete_tcp_hdr_pkt += 1;
			goto failed;
		}
		npi.npi_local_port = tcp->th_dport;
		npi.npi_foreign_port = tcp->th_sport;
		pkt_control_flags = tcp->th_flags;
		break;
	}
	case IPPROTO_UDP: {
		if_ports_used_stats.ifpu_udp_wake_pkt += 1;
		npi.npi_flags |= NPIF_UDP;

		/*
		 * Cannot attribute a fragment that is not the first fragment
		 * as it does not have the UDP header
		 */
		if (npi.npi_flags & NPIF_FRAG) {
			goto failed;
		}
		struct udphdr *uh = (struct udphdr *)pkt->pkt_flow_udp_hdr;
		if (uh == NULL) {
			os_log(OS_LOG_DEFAULT, "%s: pkt with unassigned UDP header from %s",
			    __func__, IF_XNAME(ifp));
			if_ports_used_stats.ifpu_incomplete_udp_hdr_pkt += 1;
			goto failed;
		}
		npi.npi_local_port = uh->uh_dport;
		npi.npi_foreign_port = uh->uh_sport;

		/*
		 * Defer matching of UDP NAT traversal to ip_input
		 * (assumes IKE uses sockets)
		 */
		if (ntohs(npi.npi_local_port) == PORT_ISAKMP_NATT ||
		    ntohs(npi.npi_foreign_port) == PORT_ISAKMP_NATT) {
			if_ports_used_stats.ifpu_deferred_isakmp_natt_wake_pkt += 1;
			if (net_wake_pkt_debug > 0) {
				net_port_info_log_npi("defer ISAKMP_NATT matching", &npi);
			}
			return;
		}
		break;
	}
	case IPPROTO_ESP: {
		/*
		 * Let the ESP layer handle the wake packet
		 */
		if_ports_used_stats.ifpu_esp_wake_pkt += 1;
		npi.npi_flags |= NPIF_ESP;
		if (net_wake_pkt_debug > 0) {
			net_port_info_log_npi("defer ESP matching", &npi);
		}
		return;
	}
	default:
		if_ports_used_stats.ifpu_bad_proto_wake_pkt += 1;

		os_log(OS_LOG_DEFAULT, "%s: unexpected IP protocol %u from %s",
		    __func__, pkt->pkt_flow_ip_proto, IF_XNAME(ifp));
		goto failed;
	}

	/* Cannot attribute without a receive interface */
	if (ifp == NULL) {
		goto failed;
	}

	found = net_port_info_find_match(&npi);
	if (found) {
		if_notify_wake_packet(ifp, &npi,
		    pkt_total_len, pkt_data_len, pkt_control_flags);
	} else {
		if_notify_unattributed_wake_pkt(ifp, pkt, &npi,
		    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
	}
	return;
failed:
	if_notify_unattributed_wake_pkt(ifp, pkt, &npi,
	    pkt_total_len, pkt_data_len, pkt_control_flags, pkt_proto);
}
1917 #endif /* SKYWALK */
1918
1919 int
1920 sysctl_last_attributed_wake_event SYSCTL_HANDLER_ARGS
1921 {
1922 #pragma unused(oidp, arg1, arg2)
1923 size_t len = sizeof(struct net_port_info_wake_event);
1924
1925 if (req->oldptr != 0) {
1926 len = MIN(req->oldlen, len);
1927 }
1928 lck_mtx_lock(&net_port_entry_head_lock);
1929 int error = SYSCTL_OUT(req, &last_attributed_wake_event, len);
1930 lck_mtx_unlock(&net_port_entry_head_lock);
1931
1932 return error;
1933 }
1934