1 /*
2 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31
32 /*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83 #include <os/log.h>
84
85 #include <mach/vm_param.h>
86
87 #include <net/dlil.h>
88 #include <net/droptap.h>
89 #include <net/if.h>
90 #include <net/if_types.h>
91 #include <net/net_api_stats.h>
92 #include <net/route.h>
93 #if SKYWALK
94 #include <skywalk/lib/net_filter_event.h>
95 #endif
96
97 #include <netinet/in.h>
98 #include <netinet/in_var.h>
99 #include <netinet/in_systm.h>
100 #include <netinet/ip.h>
101 #include <netinet/ip_var.h>
102 #include <netinet/ip_icmp.h>
103 #include <netinet/if_ether.h>
104
105 #if DUMMYNET
106 #include <netinet/ip_dummynet.h>
107 #else
108 struct ip_fw_args;
109 #endif /* DUMMYNET */
110
111 #include <libkern/crypto/md5.h>
112
113 #include <machine/machine_routines.h>
114
115 #include <miscfs/devfs/devfs.h>
116
117 #include <net/pfvar.h>
118
119 #if NPFSYNC
120 #include <net/if_pfsync.h>
121 #endif /* NPFSYNC */
122
123 #if PFLOG
124 #include <net/if_pflog.h>
125 #endif /* PFLOG */
126
127 #include <netinet/ip6.h>
128 #include <netinet/in_pcb.h>
129
130 #include <dev/random/randomdev.h>
131
#if 0
static void pfdetach(void);	/* compiled out along with its definition below */
#endif
/* /dev/pf character-device entry points. */
static int pfopen(dev_t, int, int, struct proc *);
static int pfclose(dev_t, int, int, struct proc *);
static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
/*
 * Per-ioctl helpers.  Commands whose user structure differs between
 * 32-bit and 64-bit processes take both variants (_32/_64).
 */
static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
    struct pfioc_table_64 *, struct proc *);
static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
    struct pfioc_tokens_64 *, struct proc *);
static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
    struct proc *);
static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
    struct pfioc_states_64 *, struct proc *);
static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
    struct pfioc_trans_64 *, struct proc *);
static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
    struct pfioc_src_nodes_64 *, struct proc *);
static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
    struct proc *);
static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
    struct pfioc_iface_64 *, struct proc *);
/* Rule/pool/ruleset manipulation helpers shared by the ioctl handlers. */
static struct pf_pool *pf_get_pool(char const *, u_int32_t, u_int8_t, u_int32_t,
    u_int8_t, u_int8_t, u_int8_t);
static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
static void pf_empty_pool(struct pf_palist *);
static int pf_begin_rules(u_int32_t *, int, const char *);
static int pf_rollback_rules(u_int32_t, int, char const *);
static int pf_setup_pfsync_matching(struct pf_ruleset *);
static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
static int pf_commit_rules(u_int32_t, int, char const *);
static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
    int);
static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
    struct pf_state *);
static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
    struct pf_state *);
static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
static void pf_expire_states_and_src_nodes(struct pf_rule *);
static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
    int, struct pf_rule *);
static void pf_addrwrap_setup(struct pf_addr_wrap *);
static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
    struct pf_ruleset *);
static void pf_delete_rule_by_owner(char const *, u_int32_t);
static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
static void pf_ruleset_cleanup(struct pf_ruleset *, int);
static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
    int, struct pf_rule **);
#if SKYWALK
static void pf_process_compatibilities(void);
#endif // SKYWALK
194
#define PF_CDEV_MAJOR (-1)	/* -1: let cdevsw_add() pick a free major number */

/*
 * Character-device switch for the pf control devices.  Only open, close
 * and ioctl are implemented; all other operations are error stubs.
 */
static const struct cdevsw pf_cdevsw = {
	.d_open = pfopen,
	.d_close = pfclose,
	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_ioctl = pfioctl,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_ttys = NULL,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
	.d_type = 0
};
213
static void pf_attach_hooks(void);
#if 0
/* currently unused along with pfdetach() */
static void pf_detach_hooks(void);
#endif

/*
 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
 * and used in pf_af_hook() for performance optimization, such that packets
 * will enter pf_test() or pf_test6() only when PF is running.
 */
int pf_is_enabled = 0;

u_int32_t pf_hash_seed;		/* initialized from RandomULong() in pfinit() */
int16_t pf_nat64_configured = 0;

/*
 * These are the pf enabled reference counting variables
 */
/* Hard cap on outstanding enable tokens (bounds token_list_head growth). */
#define NR_TOKENS_LIMIT (INT_MAX / sizeof(struct pfioc_token))

static u_int64_t pf_enabled_ref_count;
static u_int32_t nr_tokens = 0;		/* protected by pf_lock (see asserts below) */
static u_int32_t pffwrules;
static u_int32_t pfdevcnt;		/* /dev/pfm open count; pf_lock protected */

/* List of outstanding pf-enable tokens; pf_lock protects all mutations. */
SLIST_HEAD(list_head, pfioc_kernel_token);
static struct list_head token_list_head;

struct pf_rule pf_default_rule;

/* Fixed name->ID mapping for tags reserved by the system. */
typedef struct {
	char tag_name[PF_TAG_NAME_SIZE];
	uint16_t tag_id;
} pf_reserved_tag_table_t;

#define NUM_RESERVED_TAGS 2
static pf_reserved_tag_table_t pf_reserved_tag_table[NUM_RESERVED_TAGS] = {
	{ PF_TAG_NAME_SYSTEM_SERVICE, PF_TAG_ID_SYSTEM_SERVICE},
	{ PF_TAG_NAME_STACK_DROP, PF_TAG_ID_STACK_DROP},
};
#define RESERVED_TAG_ID_MIN PF_TAG_ID_SYSTEM_SERVICE

/* Dynamically allocated tag IDs live in [1, DYNAMIC_TAG_ID_MAX]. */
#define DYNAMIC_TAG_ID_MAX 50000
static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
    TAILQ_HEAD_INITIALIZER(pf_tags);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t tagname2tag(struct pf_tags *, char const *);
static void tag_unref(struct pf_tags *, u_int16_t);
static int pf_rtlabel_add(struct pf_addr_wrap *);
static void pf_rtlabel_remove(struct pf_addr_wrap *);
static void pf_rtlabel_copyout(struct pf_addr_wrap *);

#if INET
static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
    struct ip_fw_args *);
#endif /* INET */
static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
    struct ip_fw_args *);

/* Debug printf gated on the current pf debug level. */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

/*
 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
 */
/*
 * Declare a heap-backed union holding either the 32-bit or 64-bit layout
 * of ioctl structure `s'.  BEGIN copies the user-supplied structure (via
 * the caller's `p64' flag) into the union; END copies it back out and
 * frees the union.  ADDR32/ADDR64 yield pointers to the two layouts.
 */
#define PFIOCX_STRUCT_DECL(s) \
	struct { \
		union { \
			struct s##_32 _s##_32; \
			struct s##_64 _s##_64; \
		} _u; \
	} *__single s##_un = NULL \

#define PFIOCX_STRUCT_BEGIN(a, s) { \
	VERIFY(s##_un == NULL); \
	s##_un = kalloc_type(typeof(*s##_un), Z_WAITOK_ZERO_NOFAIL); \
	if (p64) \
		bcopy((struct s##_64 *)(void *) (a), &s##_un->_u._s##_64, \
		    sizeof (struct s##_64)); \
	else \
		bcopy((struct s##_32 *)(void *) (a), &s##_un->_u._s##_32, \
		    sizeof (struct s##_32)); \
}

#define PFIOCX_STRUCT_END(s, a) { \
	VERIFY(s##_un != NULL); \
	if (p64) \
		bcopy(&s##_un->_u._s##_64, (struct s##_64 *)(void *) (a), sizeof (struct s##_64)); \
	else \
		bcopy(&s##_un->_u._s##_32, (struct s##_32 *)(void *) (a), sizeof (struct s##_32)); \
	kfree_type(typeof(*s##_un), s##_un); \
}

#define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
#define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)

/*
 * Helper macros for regular ioctl structures.
 */
/* Same idea for size-invariant ioctl structures: copy in/out via a heap copy. */
#define PFIOC_STRUCT_BEGIN(a, v) { \
	VERIFY((v) == NULL); \
	(v) = kalloc_type(typeof(*(v)), Z_WAITOK_ZERO_NOFAIL); \
	bcopy((typeof(v))(void *) a, v, sizeof (*(v))); \
}

#define PFIOC_STRUCT_END(v, a) { \
	VERIFY((v) != NULL); \
	bcopy(v, (typeof(v))(void *) a, sizeof (*(v))); \
	kfree_type(typeof(*(v)), v); \
}

#define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
#define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)

struct thread *pf_purge_thread;

extern void pfi_kifaddr_update(void *);

/* pf enable ref-counting helper functions */
static u_int64_t generate_token(struct proc *);
static int remove_token(struct pfioc_remove_token *);
static void invalidate_all_tokens(void);
339
/*
 * Allocate a new pf-enable token on behalf of process `p' and link it
 * onto token_list_head.  Returns the token value handed back to user
 * space, or 0 when NR_TOKENS_LIMIT has been reached.  Caller must hold
 * pf_lock (asserted below), which protects both the list and nr_tokens.
 */
static u_int64_t
generate_token(struct proc *p)
{
	u_int64_t token_value;
	struct pfioc_kernel_token *new_token;

	/* Refuse to grow the token list without bound. */
	if (nr_tokens + 1 > NR_TOKENS_LIMIT) {
		os_log_error(OS_LOG_DEFAULT, "%s: NR_TOKENS_LIMIT reached", __func__);
		return 0;
	}

	new_token = kalloc_type(struct pfioc_kernel_token,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	/* Hash the allocation address so the raw kernel pointer never
	 * leaks to user space. */
	token_value = VM_KERNEL_ADDRHASH((u_int64_t)(uintptr_t)new_token);

	new_token->token.token_value = token_value;
	new_token->token.pid = proc_pid(p);
	proc_name(new_token->token.pid, new_token->token.proc_name,
	    sizeof(new_token->token.proc_name));
	new_token->token.timestamp = pf_calendar_time_second();

	SLIST_INSERT_HEAD(&token_list_head, new_token, next);
	nr_tokens++;

	return token_value;
}
369
370 static int
remove_token(struct pfioc_remove_token * tok)371 remove_token(struct pfioc_remove_token *tok)
372 {
373 struct pfioc_kernel_token *__single entry, *__single tmp;
374
375 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
376
377 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
378 if (tok->token_value == entry->token.token_value) {
379 SLIST_REMOVE(&token_list_head, entry,
380 pfioc_kernel_token, next);
381 kfree_type(struct pfioc_kernel_token, entry);
382 nr_tokens--;
383 return 0; /* success */
384 }
385 }
386
387 printf("pf : remove failure\n");
388 return ESRCH; /* failure */
389 }
390
391 static void
invalidate_all_tokens(void)392 invalidate_all_tokens(void)
393 {
394 struct pfioc_kernel_token *__single entry, *__single tmp;
395
396 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
397
398 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
399 SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
400 kfree_type(struct pfioc_kernel_token, entry);
401 }
402
403 nr_tokens = 0;
404 }
405
/*
 * Single allocation backing a PF reassembly m_tag: the m_tag header and
 * its struct pf_fragment_tag payload live side by side so one
 * kalloc/kfree covers both (see m_tag_kalloc_pf_reass below).
 */
struct pf_reass_tag_container {
	struct m_tag pf_reass_m_tag;	/* must stay the first member */
	struct pf_fragment_tag pf_reass_fragment_tag;
};
410
/*
 * m_tag allocator for KERNEL_TAG_TYPE_PF_REASS: carves the m_tag and its
 * pf_fragment_tag payload out of one container allocation.  Returns NULL
 * if `len' is not the expected payload size or the allocation fails.
 */
static struct m_tag *
m_tag_kalloc_pf_reass(u_int32_t id, u_int16_t type, uint16_t len, int wait)
{
	struct pf_reass_tag_container *tag_container;
	struct m_tag *tag = NULL;

	assert3u(id, ==, KERNEL_MODULE_TAG_ID);
	assert3u(type, ==, KERNEL_TAG_TYPE_PF_REASS);
	assert3u(len, ==, sizeof(struct pf_fragment_tag));

	/* Release-build guard for the size assert above. */
	if (len != sizeof(struct pf_fragment_tag)) {
		return NULL;
	}

	tag_container = kalloc_type(struct pf_reass_tag_container, wait | M_ZERO);
	if (tag_container != NULL) {
		tag = &tag_container->pf_reass_m_tag;

		/* m_tag must be the container's first member for the
		 * cast in m_tag_kfree_pf_reass() to be valid. */
		assert3p(tag, ==, tag_container);

		M_TAG_INIT(tag, id, type, len, &tag_container->pf_reass_fragment_tag, NULL);
	}

	return tag;
}
436
/*
 * Free callback paired with m_tag_kalloc_pf_reass(): recover the
 * container from the embedded m_tag (the tag is the first member, so
 * the cast is valid) and release the whole allocation.
 */
static void
m_tag_kfree_pf_reass(struct m_tag *tag)
{
	struct pf_reass_tag_container *__single tag_container = (struct pf_reass_tag_container *)tag;

	assert3u(tag->m_tag_len, ==, sizeof(struct pf_fragment_tag));

	kfree_type(struct pf_reass_tag_container, tag_container);
}
446
/*
 * Register the custom alloc/free callbacks for KERNEL_TAG_TYPE_PF_REASS
 * mbuf tags with the mbuf tag subsystem.
 */
void
pf_register_m_tag(void)
{
	int error;

	error = m_register_internal_tag_type(KERNEL_TAG_TYPE_PF_REASS, sizeof(struct pf_fragment_tag),
	    m_tag_kalloc_pf_reass, m_tag_kfree_pf_reass);

	assert3u(error, ==, 0);
}
457
/*
 * One-time PF initialization: memory pools, sub-modules (tables,
 * interfaces, OS fingerprinting), global trees, the default pass rule
 * and its timeouts, the purge thread, and the /dev/pf and /dev/pfm
 * device nodes.  Finally attaches the packet hooks.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* Fixed-size object pools used throughout pf. */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof(struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* Shrink the table-entry limit on small-memory (<=256MB) systems. */
	if (max_mem <= 256 * 1024 * 1024) {
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;
	}

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	/* Service-class constants must agree with their index encoding. */
	static_assert((SC_BE & SCIDX_MASK) == SCIDX_BE);
	static_assert((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	static_assert((SC_BK & SCIDX_MASK) == SCIDX_BK);
	static_assert((SC_RD & SCIDX_MASK) == SCIDX_RD);
	static_assert((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	static_assert((SC_AV & SCIDX_MASK) == SCIDX_AV);
	static_assert((SC_RV & SCIDX_MASK) == SCIDX_RV);
	static_assert((SC_VI & SCIDX_MASK) == SCIDX_VI);
	static_assert((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
	static_assert((SC_VO & SCIDX_MASK) == SCIDX_VO);
	static_assert((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	/* Control nodes: /dev/pf (regular) and /dev/pfm (exclusive-open). */
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf");

	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm");

	pf_attach_hooks();
#if DUMMYNET
	dummynet_init();
#endif
}
570
#if 0
/*
 * Full teardown of PF — currently compiled out (nothing unloads pf on
 * this platform).  Kept as reference for the reverse of pfinit():
 * detach hooks, flush rules/states/source nodes/tables, destroy
 * anchors, the main ruleset, the pools and all sub-modules.
 */
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++) {
		if (pf_begin_rules(&ticket, i, &r) == 0) {
			/* committing an empty inactive list flushes the rules */
			pf_commit_rules(ticket, i, &r);
		}
	}

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++) {
			if (pf_begin_rules(&ticket, i, anchor->name) == 0) {
				pf_commit_rules(ticket, i, anchor->name);
			}
		}
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
648
649 static int
pfopen(dev_t dev,int flags,int fmt,struct proc * p)650 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
651 {
652 #pragma unused(flags, fmt, p)
653 if (minor(dev) >= PFDEV_MAX) {
654 return ENXIO;
655 }
656
657 if (minor(dev) == PFDEV_PFM) {
658 lck_mtx_lock(&pf_lock);
659 if (pfdevcnt != 0) {
660 lck_mtx_unlock(&pf_lock);
661 return EBUSY;
662 }
663 pfdevcnt++;
664 lck_mtx_unlock(&pf_lock);
665 }
666 return 0;
667 }
668
669 static int
pfclose(dev_t dev,int flags,int fmt,struct proc * p)670 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
671 {
672 #pragma unused(flags, fmt, p)
673 if (minor(dev) >= PFDEV_MAX) {
674 return ENXIO;
675 }
676
677 if (minor(dev) == PFDEV_PFM) {
678 lck_mtx_lock(&pf_lock);
679 VERIFY(pfdevcnt > 0);
680 pfdevcnt--;
681 lck_mtx_unlock(&pf_lock);
682 }
683 return 0;
684 }
685
/*
 * Locate the address pool (rpool) of one rule.
 *
 * anchor:       anchor path of the ruleset to search
 * ticket:       transaction ticket, validated only when check_ticket is set
 * rule_action:  mapped to a ruleset number via pf_get_ruleset_number()
 * rule_number:  rule to find by linear scan (ignored when r_last is set)
 * r_last:       take the last rule of the queue instead of searching
 * active:       select the active (vs. inactive) rule queue
 *
 * Returns a pointer into the matched rule, or NULL on any failure.
 * The ruleset reference from pf_find_ruleset() is dropped before
 * returning; the returned pool is not separately reference counted.
 */
static struct pf_pool *
pf_get_pool(char const *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset *__single ruleset;
	struct pf_rule *__single rule;
	int rs_num;
	struct pf_pool *__single p = NULL;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL) {
		goto done;
	}
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX) {
		goto done;
	}
	/* Pick the starting rule from the requested queue. */
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket) {
			goto done;
		}
		if (r_last) {
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		} else {
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		}
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket) {
			goto done;
		}
		if (r_last) {
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		} else {
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
		}
	}
	/* Unless the caller wants the last rule, scan for rule_number. */
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		goto done;
	}

	p = &rule->rpool;
done:

	if (ruleset) {
		pf_release_ruleset(ruleset);
		ruleset = NULL;
	}

	return p;
}
746
747 static void
pf_mv_pool(struct pf_palist * poola,struct pf_palist * poolb)748 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
749 {
750 struct pf_pooladdr *mv_pool_pa;
751
752 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
753 TAILQ_REMOVE(poola, mv_pool_pa, entries);
754 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
755 }
756 }
757
758 static void
pf_empty_pool(struct pf_palist * poola)759 pf_empty_pool(struct pf_palist *poola)
760 {
761 struct pf_pooladdr *empty_pool_pa;
762
763 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
764 pfi_dynaddr_remove(&empty_pool_pa->addr);
765 pf_tbladdr_remove(&empty_pool_pa->addr);
766 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
767 TAILQ_REMOVE(poola, empty_pool_pa, entries);
768 pool_put(&pf_pooladdr_pl, empty_pool_pa);
769 }
770 }
771
/*
 * Unlink `rule' from `rulequeue' (when non-NULL) and free it once it is
 * no longer referenced.  If states or source nodes still reference the
 * rule, only the unlink happens here; the rule is freed by a later call
 * (presumably with rulequeue == NULL, once the last reference drops —
 * see the tqe_prev/states checks below).
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl) {
				pfr_detach_table(rule->overload_tbl);
			}
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as unlinked. */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* Still referenced (or only unlinking): defer the actual free. */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL) {
		return;
	}
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* Deferred-free path: tables were not detached above. */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl) {
			pfr_detach_table(rule->overload_tbl);
		}
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
815
/*
 * Return the numeric ID for `tagname', allocating a new entry if the
 * name is unknown.  Existing tags just gain a reference.  Reserved
 * names map to fixed IDs (>= RESERVED_TAG_ID_MIN) and sit at the head
 * of the list; dynamic tags get the lowest free ID in
 * [1, DYNAMIC_TAG_ID_MAX], the tail of the list being kept in
 * ascending ID order.  Returns 0 when the dynamic ID space is
 * exhausted.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char const *tagname)
{
	struct pf_tagname *__single tag, *__single p = NULL;
	uint16_t new_tagid = 1;
	bool reserved_tag = false;

	/* Existing tag: bump the refcount and reuse its ID. */
	TAILQ_FOREACH(tag, head, entries)
	if (strlcmp(tag->name, tagname, sizeof(tag->name)) == 0) {
		tag->ref++;
		return tag->tag;
	}

	/*
	 * check if it is a reserved tag.
	 */
	static_assert(RESERVED_TAG_ID_MIN > DYNAMIC_TAG_ID_MAX);
	for (int i = 0; i < NUM_RESERVED_TAGS; i++) {
		if (strlcmp(pf_reserved_tag_table[i].tag_name, tagname, PF_TAG_NAME_SIZE) == 0) {
			new_tagid = pf_reserved_tag_table[i].tag_id;
			reserved_tag = true;
			goto skip_dynamic_tag_alloc;
		}
	}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head)) {
		/* skip reserved tags */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag >= RESERVED_TAG_ID_MIN;
		    p = TAILQ_NEXT(p, entries)) {
			;
		}

		/* Walk the ascending tail until a gap in the IDs appears. */
		for (; p != NULL && p->tag == new_tagid;
		    p = TAILQ_NEXT(p, entries)) {
			new_tagid = p->tag + 1;
		}
	}

	if (new_tagid > DYNAMIC_TAG_ID_MAX) {
		return 0;
	}

skip_dynamic_tag_alloc:
	/* allocate and fill new struct pf_tagname */
	tag = kalloc_type(struct pf_tagname, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (reserved_tag) {     /* insert reserved tag at the head */
		TAILQ_INSERT_HEAD(head, tag, entries);
	} else if (p != NULL) { /* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	} else {                /* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);
	}

	return tag->tag;
}
883
884 static void
tag_unref(struct pf_tags * head,u_int16_t tag)885 tag_unref(struct pf_tags *head, u_int16_t tag)
886 {
887 struct pf_tagname *__single p, *__single next;
888
889 if (tag == 0) {
890 return;
891 }
892
893 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
894 next = TAILQ_NEXT(p, entries);
895 if (tag == p->tag) {
896 if (--p->ref == 0) {
897 TAILQ_REMOVE(head, p, entries);
898 kfree_type(struct pf_tagname, p);
899 }
900 break;
901 }
902 }
903 }
904
/*
 * Public wrapper: allocate/reference a tag ID in the global pf_tags
 * table.  Caller is expected to hold pf_lock (see pf_tagname2tag_ext).
 */
u_int16_t
pf_tagname2tag(char const *tagname)
{
	return tagname2tag(&pf_tags, tagname);
}
910
/*
 * Locked variant of pf_tagname2tag() for external callers: takes the
 * perimeter lock (exclusive) and pf_lock around the lookup.
 */
u_int16_t
pf_tagname2tag_ext(char const *tagname)
{
	u_int16_t tag;

	lck_rw_lock_exclusive(&pf_perim_lock);
	lck_mtx_lock(&pf_lock);
	tag = pf_tagname2tag(tagname);
	lck_mtx_unlock(&pf_lock);
	lck_rw_done(&pf_perim_lock);
	return tag;
}
923
924 void
pf_tag_ref(u_int16_t tag)925 pf_tag_ref(u_int16_t tag)
926 {
927 struct pf_tagname *t;
928
929 TAILQ_FOREACH(t, &pf_tags, entries)
930 if (t->tag == tag) {
931 break;
932 }
933 if (t != NULL) {
934 t->ref++;
935 }
936 }
937
/*
 * Drop one reference on tag ID `tag' in the global pf_tags table.
 */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
943
/*
 * Route labels are not supported on this platform; stub kept so the
 * shared pf call sites compile unchanged.  Always succeeds.
 */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return 0;
}
950
/* Route-label removal stub (labels unsupported on this platform). */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
956
/* Route-label copyout stub (labels unsupported on this platform). */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
962
963 static int
pf_begin_rules(u_int32_t * ticket,int rs_num,const char * anchor)964 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
965 {
966 struct pf_ruleset *rs;
967 struct pf_rule *rule;
968
969 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
970 return EINVAL;
971 }
972 rs = pf_find_or_create_ruleset(anchor);
973 if (rs == NULL) {
974 return EINVAL;
975 }
976 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
977 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
978 rs->rules[rs_num].inactive.rcount--;
979 }
980 *ticket = ++rs->rules[rs_num].inactive.ticket;
981 rs->rules[rs_num].inactive.open = 1;
982 pf_release_ruleset(rs);
983 rs = NULL;
984 return 0;
985 }
986
/*
 * Abort an open rule transaction: drop everything staged on the
 * inactive queue of `anchor'/`rs_num' and close the transaction.
 * Note: a missing ruleset, an unopened transaction or a stale ticket
 * is treated as success (err stays 0); only an out-of-range rs_num
 * yields EINVAL.
 */
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char const *anchor)
{
	struct pf_ruleset *__single rs = NULL;
	struct pf_rule *__single rule;
	int err = 0;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		err = EINVAL;
		goto done;
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket) {
		goto done;
	}
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;

done:
	/* Drop the reference taken by pf_find_ruleset(), if any. */
	if (rs) {
		pf_release_ruleset(rs);
		rs = NULL;
	}
	return err;
}
1016
/*
 * Helpers used by pf_hash_rule*() to fold individual rule fields into the
 * running MD5 context `ctx' (an MD5_CTX * that must be in scope at the
 * expansion site).  The resulting digest becomes the ruleset checksum, so
 * the order and width of every update matters.
 */

/* Hash a fixed-width field in host byte order. */
#define PF_MD5_UPD(st, elm) \
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof((st)->elm))

/*
 * Hash only the used portion of a fixed-size string buffer.
 * Note: `sizeof(st)->elm' parses as sizeof((st)->elm) — sizeof on an
 * expression needs no parentheses — so the full buffer size is passed.
 */
#define PF_MD5_UPD_STRBUF(st, elm) \
	MD5Update(ctx, (u_int8_t *)(st)->elm, (unsigned int) strbuflen((st)->elm, sizeof(st)->elm))

/* Hash a 32-bit field in network byte order, via caller-supplied storage. */
#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
	(stor) = htonl((st)->elm); \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
} while (0)

/* Hash a 16-bit field in network byte order, via caller-supplied storage. */
#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
	(stor) = htons((st)->elm); \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
} while (0)
1032
/*
 * Fold one rule address (src or dst side) into the ruleset MD5 digest.
 * Only the fields relevant to the address type are hashed, plus the port
 * range for TCP/UDP.  The sequence of PF_MD5_UPD calls defines the
 * checksum wire format — do not reorder (the digest is later published in
 * pf_status.pf_chksum; presumably compared by pfsync peers — NOTE(review):
 * confirm against pfsync usage).
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/* Ports only exist for TCP/UDP rules. */
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
1069
/*
 * Fold one pf rule into the ruleset MD5 digest: both address sides, the
 * identifying strings, and every match-relevant scalar.  Multi-byte
 * numeric fields are hashed in network byte order (via the x/y temps) so
 * the digest is endian-independent.  The field order defines the checksum
 * format — do not reorder.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;	/* scratch for 16-bit network-order conversions */
	u_int32_t y;	/* scratch for 32-bit network-order conversions */

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STRBUF(rule, label);
	PF_MD5_UPD_STRBUF(rule, ifname);
	PF_MD5_UPD_STRBUF(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1108
/*
 * Commit the staging transaction identified by `ticket' on ruleset index
 * `rs_num' under `anchor': swap the inactive (staged) rule list into the
 * active slot, then destroy the previously-active rules.  For the main
 * ruleset, the pfsync checksum is recomputed first so a checksum failure
 * aborts the commit before any state changes.
 *
 * Must be called with pf_lock held.  Returns 0 on success, EINVAL for a
 * bad ruleset index, or EBUSY when no matching open transaction exists.
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char const *anchor)
{
	struct pf_ruleset *rs = NULL;
	struct pf_rule *rule, **old_array, *r;
	struct pf_rulequeue *old_rules;
	int error = 0;
	u_int32_t old_rcount;
	u_int32_t old_rsize;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		error = EINVAL;
		goto done;
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket) {
		error = EBUSY;
		goto done;
	}

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0) {
			goto done;
		}
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;
	old_rsize = rs->rules[rs_num].active.rsize;

	/* The outgoing rules no longer count toward the PFM rule total. */
	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM) {
				pffwrules--;
			}
			r = TAILQ_NEXT(r, entries);
		}
	}


	/* Promote the staged list/array/counters to active... */
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rsize =
	    rs->rules[rs_num].inactive.rsize;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	/* ...and park the old list in the inactive slot for teardown. */
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rsize = old_rsize;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
		pf_rm_rule(old_rules, rule);
	}
	kfree_type_counted_by(struct pf_rule *, rs->rules[rs_num].inactive.rsize,
	    rs->rules[rs_num].inactive.ptr_array);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rsize = 0;
	rs->rules[rs_num].inactive.rcount = 0;
	/* Transaction finished: the inactive list is closed again. */
	rs->rules[rs_num].inactive.open = 0;

done:
	if (rs) {
		pf_release_ruleset(rs);
	}
	return error;
}
1192
1193 static void
pf_rule_copyin(struct pf_rule * src,struct pf_rule * dst,struct proc * p,int minordev)1194 pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
1195 int minordev)
1196 {
1197 bcopy(src, dst, sizeof(struct pf_rule));
1198
1199 dst->label[sizeof(dst->label) - 1] = '\0';
1200 dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1201 dst->qname[sizeof(dst->qname) - 1] = '\0';
1202 dst->pqname[sizeof(dst->pqname) - 1] = '\0';
1203 dst->tagname[sizeof(dst->tagname) - 1] = '\0';
1204 dst->match_tagname[sizeof(dst->match_tagname) - 1] = '\0';
1205 dst->overload_tblname[sizeof(dst->overload_tblname) - 1] = '\0';
1206 dst->owner[sizeof(dst->owner) - 1] = '\0';
1207
1208 dst->cuid = kauth_cred_getuid(kauth_cred_get());
1209 dst->cpid = proc_getpid(p);
1210
1211 dst->anchor = NULL;
1212 dst->kif = NULL;
1213 dst->overload_tbl = NULL;
1214
1215 TAILQ_INIT(&dst->rpool.list);
1216 dst->rpool.cur = NULL;
1217
1218 /* initialize refcounting */
1219 dst->states = 0;
1220 dst->src_nodes = 0;
1221
1222 dst->entries.tqe_prev = NULL;
1223 dst->entries.tqe_next = NULL;
1224 if ((uint8_t)minordev == PFDEV_PFM) {
1225 dst->rule_flag |= PFRULE_PFM;
1226 }
1227
1228 /*
1229 * userland should not pass any skip pointers to us
1230 */
1231 for (uint32_t i = 0; i < PF_SKIP_COUNT; ++i) {
1232 dst->skip[i].ptr = 0;
1233 }
1234 }
1235
1236 static void
pf_rule_copyout(struct pf_rule * src,struct pf_rule * dst)1237 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1238 {
1239 bcopy(src, dst, sizeof(struct pf_rule));
1240
1241 dst->anchor = NULL;
1242 dst->kif = NULL;
1243 dst->overload_tbl = NULL;
1244
1245 dst->rpool.list.tqh_first = NULL;
1246 dst->rpool.list.tqh_last = NULL;
1247 dst->rpool.cur = NULL;
1248
1249 dst->entries.tqe_prev = NULL;
1250 dst->entries.tqe_next = NULL;
1251
1252 /*
1253 * redact skip pointers for security
1254 */
1255 for (uint32_t i = 0; i < PF_SKIP_COUNT; ++i) {
1256 dst->skip[i].ptr = 0;
1257 }
1258 }
1259
1260 static void
pf_state_export(struct pfsync_state * sp,struct pf_state_key * sk,struct pf_state * s)1261 pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
1262 struct pf_state *s)
1263 {
1264 uint64_t secs = pf_time_second();
1265 bzero(sp, sizeof(struct pfsync_state));
1266
1267 /* copy from state key */
1268 sp->lan.addr = sk->lan.addr;
1269 sp->lan.xport = sk->lan.xport;
1270 sp->gwy.addr = sk->gwy.addr;
1271 sp->gwy.xport = sk->gwy.xport;
1272 sp->ext_lan.addr = sk->ext_lan.addr;
1273 sp->ext_lan.xport = sk->ext_lan.xport;
1274 sp->ext_gwy.addr = sk->ext_gwy.addr;
1275 sp->ext_gwy.xport = sk->ext_gwy.xport;
1276 sp->proto_variant = sk->proto_variant;
1277 sp->tag = s->tag;
1278 sp->proto = sk->proto;
1279 sp->af_lan = sk->af_lan;
1280 sp->af_gwy = sk->af_gwy;
1281 sp->direction = sk->direction;
1282 sp->flowhash = sk->flowhash;
1283
1284 /* copy from state */
1285 memcpy(&sp->id, &s->id, sizeof(sp->id));
1286 sp->creatorid = s->creatorid;
1287 strbufcpy(sp->ifname, s->kif->pfik_name);
1288 pf_state_peer_to_pfsync(&s->src, &sp->src);
1289 pf_state_peer_to_pfsync(&s->dst, &sp->dst);
1290
1291 sp->rule = s->rule.ptr->nr;
1292 sp->nat_rule = (s->nat_rule.ptr == NULL) ?
1293 (unsigned)-1 : s->nat_rule.ptr->nr;
1294 sp->anchor = (s->anchor.ptr == NULL) ?
1295 (unsigned)-1 : s->anchor.ptr->nr;
1296
1297 pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
1298 pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
1299 pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
1300 pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
1301 sp->creation = secs - s->creation;
1302 sp->expire = pf_state_expires(s);
1303 sp->log = s->log;
1304 sp->allow_opts = s->allow_opts;
1305 sp->timeout = s->timeout;
1306
1307 if (s->src_node) {
1308 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
1309 }
1310 if (s->nat_src_node) {
1311 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
1312 }
1313
1314 if (sp->expire > secs) {
1315 sp->expire -= secs;
1316 } else {
1317 sp->expire = 0;
1318 }
1319 }
1320
/*
 * Deserialize an imported pfsync state `sp' into a fresh kernel state
 * (`s') and state key (`sk').  Rule pointers are reset to the default
 * rule (the originating peer's rule numbers are meaningless here), and
 * the relative expire age is converted back to a local absolute time
 * based on the default-rule timeout for sp->timeout.
 */
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext_lan.addr = sp->ext_lan.addr;
	sk->ext_lan.xport = sp->ext_lan.xport;
	sk->ext_gwy.addr = sp->ext_gwy.addr;
	sk->ext_gwy.xport = sp->ext_gwy.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af_lan = sp->af_lan;
	sk->af_gwy = sp->af_gwy;
	sk->direction = sp->direction;
	/* The caller is expected to have assigned a local pf flowhash. */
	ASSERT(sk->flowsrc == FLOWSRC_PF);
	ASSERT(sk->flowhash != 0);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	/* Peer rule numbers do not map locally; fall back to the default rule. */
	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = pf_time_second();
	s->expire = pf_time_second();
	/* Back-date expiry by the time the state has already been alive. */
	if (sp->expire > 0) {
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	}
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
1362
1363 static void
pf_pooladdr_copyin(struct pf_pooladdr * src,struct pf_pooladdr * dst)1364 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1365 {
1366 bcopy(src, dst, sizeof(struct pf_pooladdr));
1367
1368 dst->entries.tqe_prev = NULL;
1369 dst->entries.tqe_next = NULL;
1370 dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1371 dst->kif = NULL;
1372 }
1373
1374 static void
pf_pooladdr_copyout(struct pf_pooladdr * src,struct pf_pooladdr * dst)1375 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1376 {
1377 bcopy(src, dst, sizeof(struct pf_pooladdr));
1378
1379 dst->entries.tqe_prev = NULL;
1380 dst->entries.tqe_next = NULL;
1381 dst->kif = NULL;
1382 }
1383
1384 static int
pf_setup_pfsync_matching(struct pf_ruleset * rs)1385 pf_setup_pfsync_matching(struct pf_ruleset *rs)
1386 {
1387 MD5_CTX ctx;
1388 struct pf_rule *rule;
1389 int rs_cnt;
1390 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
1391
1392 MD5Init(&ctx);
1393 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1394 /* XXX PF_RULESET_SCRUB as well? */
1395 if (rs_cnt == PF_RULESET_SCRUB) {
1396 continue;
1397 }
1398
1399 rs->rules[rs_cnt].inactive.ptr_array = krealloc_type(struct pf_rule *,
1400 rs->rules[rs_cnt].inactive.rsize, rs->rules[rs_cnt].inactive.rcount,
1401 rs->rules[rs_cnt].inactive.ptr_array, Z_WAITOK | Z_REALLOCF);
1402 rs->rules[rs_cnt].inactive.rsize =
1403 rs->rules[rs_cnt].inactive.rcount;
1404
1405 if (rs->rules[rs_cnt].inactive.rcount &&
1406 !rs->rules[rs_cnt].inactive.ptr_array) {
1407 rs->rules[rs_cnt].inactive.ptr_array = rs->rules[rs_cnt].inactive.ptr_array;
1408 rs->rules[rs_cnt].inactive.rsize = 0;
1409 return ENOMEM;
1410 }
1411
1412 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1413 entries) {
1414 pf_hash_rule(&ctx, rule);
1415 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1416 }
1417 }
1418
1419 MD5Final(digest, &ctx);
1420 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
1421 return 0;
1422 }
1423
/*
 * Turn the packet filter on: mark pf enabled/running, stamp the start
 * time, lazily seed the state-ID generator, and kick the purge thread.
 * Must be called with pf_lock held and pf currently disabled.
 */
static void
pf_start(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	/* Seed state IDs once: seconds-since-boot in the high 32 bits. */
	if (pf_status.stateid == 0) {
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	/* Wake the purge thread so it starts its periodic scan. */
	wakeup(pf_purge_thread_fn);
#if SKYWALK
	pf_process_compatibilities();
#endif // SKYWALK
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
1444
/*
 * Turn the packet filter off: clear the running/enabled flags, stamp the
 * stop time, and wake the purge thread so it notices the state change.
 * Must be called with pf_lock held and pf currently enabled.
 */
static void
pf_stop(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
#if SKYWALK
	pf_process_compatibilities();
#endif // SKYWALK
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1461
/*
 * ioctl entry point for the /dev/pf control device.
 *
 * Access control happens in three layers before any command runs:
 *   1. the caller must be superuser (EPERM otherwise);
 *   2. at securelevel > 1 only a read-mostly subset of commands is allowed
 *      (EPERM otherwise);
 *   3. commands that mutate state require the descriptor to be open for
 *      writing (EACCES otherwise), except "dummy" table operations which
 *      are promoted to FWRITE so they take the exclusive lock.
 * The perimeter rwlock is then taken exclusive for FWRITE callers and
 * shared otherwise, with pf_lock held across the whole dispatch.
 *
 * Most commands delegate to pfioctl_ioc_*() helpers via the PFIOC(X)
 * copy-in/copy-out macro pairs, which bounce the user structure through
 * kernel storage (the X variants handle 32/64-bit user layouts).
 *
 * Returns 0 on success or an errno (EPERM/EACCES for permission failures,
 * ENODEV for unknown commands, or whatever the helper reports).
 */
static int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
#pragma unused(dev)
	/*
	 * NOTE(review): the pragma above looks stale — `dev' is in fact
	 * consumed by minor(dev) below.
	 * `p64' appears unreferenced here but is presumably expanded into by
	 * the PFIOCX_STRUCT_* macros — confirm against their definitions.
	 */
	int p64 = proc_is64bit(p);
	int error = 0;
	int minordev = minor(dev);

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}

	/* XXX keep in sync with switch() below */
	if (securelevel > 1) {
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			struct pfioc_table *__single table = (struct pfioc_table *)(void *) addr;

			/* bcopy tolerates an unaligned user structure */
			bcopy(&table->pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				break; /* dummy operation ok */
			}
			return EPERM;
		}
		default:
			return EPERM;
		}
	}

	if (!(flags & FWRITE)) {
		switch (cmd) {
		case DIOCSTART:
		case DIOCSTARTREF:
		case DIOCSTOP:
		case DIOCSTOPREF:
		case DIOCGETSTARTERS:
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return EACCES;
		}
		case DIOCGETRULE: {
			u_int32_t action;

			bcopy(&((struct pfioc_rule *)(void *)addr)->action,
			    &action, sizeof(action));

			/* PF_GET_CLR_CNTR resets counters, i.e. it mutates */
			if (action == PF_GET_CLR_CNTR) {
				return EACCES;
			}
			break;
		}
		default:
			return EACCES;
		}
	}

	/* Writers take the perimeter exclusively; readers share it. */
	if (flags & FWRITE) {
		lck_rw_lock_exclusive(&pf_perim_lock);
	} else {
		lck_rw_lock_shared(&pf_perim_lock);
	}

	lck_mtx_lock(&pf_lock);

	switch (cmd) {
	case DIOCSTART:
		if (pf_status.running) {
			/*
			 * Increment the reference for a simple -e enable, so
			 * that even if other processes drop their references,
			 * pf will still be available to processes that turned
			 * it on without taking a reference
			 */
			if (nr_tokens == pf_enabled_ref_count) {
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			}
			error = EEXIST;
		} else if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			pf_start();
			pf_enabled_ref_count++;
			VERIFY(pf_enabled_ref_count != 0);
		}
		break;

	case DIOCSTARTREF:              /* u_int64_t */
		if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			u_int64_t token;

			/* small enough to be on stack */
			if ((token = generate_token(p)) != 0) {
				if (pf_is_enabled == 0) {
					pf_start();
				}
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			} else {
				error = ENOMEM;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: unable to generate token\n"));
			}
			/* hand the token (or 0 on failure) back to the caller */
			bcopy(&token, (uint64_t *)(void *)addr, sizeof(token));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			/* unconditional stop drops every outstanding reference */
			pf_stop();
			pf_enabled_ref_count = 0;
			invalidate_all_tokens();
		}
		break;

	case DIOCSTOPREF:               /* struct pfioc_remove_token */
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			struct pfioc_remove_token pfrt;

			/* small enough to be on stack */
			bcopy((struct pfioc_remove_token *)(void *)addr, &pfrt, sizeof(pfrt));
			if ((error = remove_token(&pfrt)) == 0) {
				VERIFY(pf_enabled_ref_count != 0);
				pf_enabled_ref_count--;
				/* return currently held references */
				pfrt.refcount = pf_enabled_ref_count;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: enabled refcount decremented\n"));
			} else {
				error = EINVAL;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: token mismatch\n"));
			}
			bcopy(&pfrt, (struct pfioc_remove_token *)(void *)addr, sizeof(pfrt));

			/* last reference gone: shut pf down */
			if (error == 0 && pf_enabled_ref_count == 0) {
				pf_stop();
			}
		}
		break;

	case DIOCGETSTARTERS: {         /* struct pfioc_tokens */
		PFIOCX_STRUCT_DECL(pfioc_tokens);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens);
		error = pfioctl_ioc_tokens(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_tokens),
		    PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
		PFIOCX_STRUCT_END(pfioc_tokens, addr);
		break;
	}

	case DIOCADDRULE:               /* struct pfioc_rule */
	case DIOCGETRULES:              /* struct pfioc_rule */
	case DIOCGETRULE:               /* struct pfioc_rule */
	case DIOCCHANGERULE:            /* struct pfioc_rule */
	case DIOCINSERTRULE:            /* struct pfioc_rule */
	case DIOCDELETERULE: {          /* struct pfioc_rule */
		struct pfioc_rule *__single pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr);
		error = pfioctl_ioc_rule(cmd, minordev, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCCLRSTATES:             /* struct pfioc_state_kill */
	case DIOCKILLSTATES: {          /* struct pfioc_state_kill */
		struct pfioc_state_kill *__single psk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psk);
		error = pfioctl_ioc_state_kill(cmd, psk, p);
		PFIOC_STRUCT_END(psk, addr);
		break;
	}

	case DIOCADDSTATE:              /* struct pfioc_state */
	case DIOCGETSTATE: {            /* struct pfioc_state */
		struct pfioc_state *__single ps = NULL;

		PFIOC_STRUCT_BEGIN(addr, ps);
		error = pfioctl_ioc_state(cmd, ps, p);
		PFIOC_STRUCT_END(ps, addr);
		break;
	}

	case DIOCGETSTATES: {           /* struct pfioc_states */
		PFIOCX_STRUCT_DECL(pfioc_states);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_states);
		error = pfioctl_ioc_states(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_states),
		    PFIOCX_STRUCT_ADDR64(pfioc_states), p);
		PFIOCX_STRUCT_END(pfioc_states, addr);
		break;
	}

	case DIOCGETSTATUS: {           /* struct pf_status */
		struct pf_status *__single s = NULL;

		/* note: source here is the global pf_status, not `addr' */
		PFIOC_STRUCT_BEGIN(&pf_status, s);
		pfi_update_status(__unsafe_null_terminated_from_indexable(s->ifname), s);
		PFIOC_STRUCT_END(s, addr);
		break;
	}

	case DIOCSETSTATUSIF: {         /* struct pfioc_if */
		struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;

		/* OK for unaligned accesses */
		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strbufcpy(pf_status.ifname, pi->ifname);
		break;
	}

	case DIOCCLRSTATUS: {
		/* reset all counters but keep the status interface binding */
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = pf_calendar_time_second();
		if (*pf_status.ifname) {
			pfi_update_status(__unsafe_null_terminated_from_indexable(pf_status.ifname), NULL);
		}
		break;
	}

	case DIOCNATLOOK: {             /* struct pfioc_natlook */
		struct pfioc_natlook *__single pnl = NULL;

		PFIOC_STRUCT_BEGIN(addr, pnl);
		error = pfioctl_ioc_natlook(cmd, pnl, p);
		PFIOC_STRUCT_END(pnl, addr);
		break;
	}

	case DIOCSETTIMEOUT:            /* struct pfioc_tm */
	case DIOCGETTIMEOUT: {          /* struct pfioc_tm */
		struct pfioc_tm pt;

		/* small enough to be on stack */
		bcopy((struct pfioc_tm *)(void *) addr, &pt, sizeof(pt));
		error = pfioctl_ioc_tm(cmd, &pt, p);
		bcopy(&pt, (struct pfioc_tm *)(void *) addr, sizeof(pt));
		break;
	}

	case DIOCGETLIMIT:              /* struct pfioc_limit */
	case DIOCSETLIMIT: {            /* struct pfioc_limit */
		struct pfioc_limit pl;

		/* small enough to be on stack */
		bcopy((struct pfioc_limit *)(void *) addr, &pl, sizeof(pl));
		error = pfioctl_ioc_limit(cmd, &pl, p);
		bcopy(&pl, (struct pfioc_limit *)(void *) addr, sizeof(pl));
		break;
	}

	case DIOCSETDEBUG: {            /* u_int32_t */
		bcopy((uint32_t *)(void *) addr, &pf_status.debug, sizeof(u_int32_t));
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset *ruleset = &pf_main_ruleset;
		struct pf_rule *rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCGIFSPEED: {
		struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
		struct pf_ifspeed ps;
		struct ifnet *ifp;
		u_int64_t baudrate;

		if (psp->ifname[0] != '\0') {
			/* Can't use ifunit() with unsafe user pointer; copy first. */
			strbufcpy(ps.ifname, psp->ifname);
			ifp = ifunit(__unsafe_null_terminated_from_indexable(ps.ifname));
			if (ifp != NULL) {
				baudrate = ifp->if_output_bw.max_bw;
				bcopy(&baudrate, &psp->baudrate,
				    sizeof(baudrate));
			} else {
				error = EINVAL;
			}
		} else {
			error = EINVAL;
		}
		break;
	}

	case DIOCBEGINADDRS:            /* struct pfioc_pooladdr */
	case DIOCADDADDR:               /* struct pfioc_pooladdr */
	case DIOCGETADDRS:              /* struct pfioc_pooladdr */
	case DIOCGETADDR:               /* struct pfioc_pooladdr */
	case DIOCCHANGEADDR: {          /* struct pfioc_pooladdr */
		struct pfioc_pooladdr *__single pp = NULL;

		PFIOC_STRUCT_BEGIN(addr, pp);
		error = pfioctl_ioc_pooladdr(cmd, pp, p);
		PFIOC_STRUCT_END(pp, addr);
		break;
	}

	case DIOCGETRULESETS:           /* struct pfioc_ruleset */
	case DIOCGETRULESET: {          /* struct pfioc_ruleset */
		struct pfioc_ruleset *__single pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr);
		error = pfioctl_ioc_ruleset(cmd, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCRCLRTABLES:            /* struct pfioc_table */
	case DIOCRADDTABLES:            /* struct pfioc_table */
	case DIOCRDELTABLES:            /* struct pfioc_table */
	case DIOCRGETTABLES:            /* struct pfioc_table */
	case DIOCRGETTSTATS:            /* struct pfioc_table */
	case DIOCRCLRTSTATS:            /* struct pfioc_table */
	case DIOCRSETTFLAGS:            /* struct pfioc_table */
	case DIOCRCLRADDRS:             /* struct pfioc_table */
	case DIOCRADDADDRS:             /* struct pfioc_table */
	case DIOCRDELADDRS:             /* struct pfioc_table */
	case DIOCRSETADDRS:             /* struct pfioc_table */
	case DIOCRGETADDRS:             /* struct pfioc_table */
	case DIOCRGETASTATS:            /* struct pfioc_table */
	case DIOCRCLRASTATS:            /* struct pfioc_table */
	case DIOCRTSTADDRS:             /* struct pfioc_table */
	case DIOCRINADEFINE: {          /* struct pfioc_table */
		PFIOCX_STRUCT_DECL(pfioc_table);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_table);
		error = pfioctl_ioc_table(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_table),
		    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
		PFIOCX_STRUCT_END(pfioc_table, addr);
		break;
	}

	case DIOCOSFPADD:               /* struct pf_osfp_ioctl */
	case DIOCOSFPGET: {             /* struct pf_osfp_ioctl */
		struct pf_osfp_ioctl *__single io = NULL;

		PFIOC_STRUCT_BEGIN(addr, io);
		if (cmd == DIOCOSFPADD) {
			error = pf_osfp_add(io);
		} else {
			VERIFY(cmd == DIOCOSFPGET);
			error = pf_osfp_get(io);
		}
		PFIOC_STRUCT_END(io, addr);
		break;
	}

	case DIOCXBEGIN:                /* struct pfioc_trans */
	case DIOCXROLLBACK:             /* struct pfioc_trans */
	case DIOCXCOMMIT: {             /* struct pfioc_trans */
		PFIOCX_STRUCT_DECL(pfioc_trans);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_trans);
		error = pfioctl_ioc_trans(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_trans),
		    PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
		PFIOCX_STRUCT_END(pfioc_trans, addr);
		break;
	}

	case DIOCGETSRCNODES: {         /* struct pfioc_src_nodes */
		PFIOCX_STRUCT_DECL(pfioc_src_nodes);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes);
		error = pfioctl_ioc_src_nodes(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
		    PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
		PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
		break;
	}

	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		/* detach every state from its source nodes... */
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		/* ...then mark all nodes expired and purge them */
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {        /* struct pfioc_src_node_kill */
		struct pfioc_src_node_kill *__single psnk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psnk);
		error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
		PFIOC_STRUCT_END(psnk, addr);
		break;
	}

	case DIOCSETHOSTID: {           /* u_int32_t */
		u_int32_t hid;

		/* small enough to be on stack */
		bcopy((u_int32_t * __single)(void *__single)addr, &hid, sizeof(hid));
		/* 0 means "pick one for me" */
		if (hid == 0) {
			pf_status.hostid = random();
		} else {
			pf_status.hostid = hid;
		}
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES:            /* struct pfioc_iface */
	case DIOCSETIFFLAG:             /* struct pfioc_iface */
	case DIOCCLRIFFLAG: {           /* struct pfioc_iface */
		PFIOCX_STRUCT_DECL(pfioc_iface);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_iface);
		error = pfioctl_ioc_iface(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_iface),
		    PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
		PFIOCX_STRUCT_END(pfioc_iface, addr);
		break;
	}

	default:
		error = ENODEV;
		break;
	}

	lck_mtx_unlock(&pf_lock);
	lck_rw_done(&pf_perim_lock);

	return error;
}
2011
2012 static int
pfioctl_ioc_table(u_long cmd,struct pfioc_table_32 * io32,struct pfioc_table_64 * io64,struct proc * p)2013 pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
2014 struct pfioc_table_64 *io64, struct proc *p)
2015 {
2016 int p64 = proc_is64bit(p);
2017 int error = 0;
2018
2019 if (!p64) {
2020 goto struct32;
2021 }
2022
2023 #ifdef __LP64__
2024 /*
2025 * 64-bit structure processing
2026 */
2027 switch (cmd) {
2028 case DIOCRCLRTABLES:
2029 if (io64->pfrio_esize != 0) {
2030 error = ENODEV;
2031 break;
2032 }
2033 pfr_table_copyin_cleanup(&io64->pfrio_table);
2034 error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
2035 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2036 break;
2037
2038 case DIOCRADDTABLES:
2039 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
2040 error = ENODEV;
2041 break;
2042 }
2043 error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
2044 &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2045 break;
2046
2047 case DIOCRDELTABLES:
2048 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
2049 error = ENODEV;
2050 break;
2051 }
2052 error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
2053 &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2054 break;
2055
2056 case DIOCRGETTABLES:
2057 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
2058 error = ENODEV;
2059 break;
2060 }
2061 pfr_table_copyin_cleanup(&io64->pfrio_table);
2062 error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
2063 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2064 break;
2065
2066 case DIOCRGETTSTATS:
2067 if (io64->pfrio_esize != sizeof(struct pfr_tstats)) {
2068 error = ENODEV;
2069 break;
2070 }
2071 pfr_table_copyin_cleanup(&io64->pfrio_table);
2072 error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
2073 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2074 break;
2075
2076 case DIOCRCLRTSTATS:
2077 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
2078 error = ENODEV;
2079 break;
2080 }
2081 error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
2082 &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2083 break;
2084
2085 case DIOCRSETTFLAGS:
2086 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
2087 error = ENODEV;
2088 break;
2089 }
2090 error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
2091 io64->pfrio_setflag, io64->pfrio_clrflag,
2092 &io64->pfrio_nchange, &io64->pfrio_ndel,
2093 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2094 break;
2095
2096 case DIOCRCLRADDRS:
2097 if (io64->pfrio_esize != 0) {
2098 error = ENODEV;
2099 break;
2100 }
2101 pfr_table_copyin_cleanup(&io64->pfrio_table);
2102 error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
2103 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2104 break;
2105
2106 case DIOCRADDADDRS:
2107 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2108 error = ENODEV;
2109 break;
2110 }
2111 pfr_table_copyin_cleanup(&io64->pfrio_table);
2112 error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2113 io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
2114 PFR_FLAG_USERIOCTL);
2115 break;
2116
2117 case DIOCRDELADDRS:
2118 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2119 error = ENODEV;
2120 break;
2121 }
2122 pfr_table_copyin_cleanup(&io64->pfrio_table);
2123 error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2124 io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
2125 PFR_FLAG_USERIOCTL);
2126 break;
2127
2128 case DIOCRSETADDRS:
2129 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2130 error = ENODEV;
2131 break;
2132 }
2133 pfr_table_copyin_cleanup(&io64->pfrio_table);
2134 error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2135 io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
2136 &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
2137 PFR_FLAG_USERIOCTL, 0);
2138 break;
2139
2140 case DIOCRGETADDRS:
2141 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2142 error = ENODEV;
2143 break;
2144 }
2145 pfr_table_copyin_cleanup(&io64->pfrio_table);
2146 error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2147 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2148 break;
2149
2150 case DIOCRGETASTATS:
2151 if (io64->pfrio_esize != sizeof(struct pfr_astats)) {
2152 error = ENODEV;
2153 break;
2154 }
2155 pfr_table_copyin_cleanup(&io64->pfrio_table);
2156 error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
2157 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2158 break;
2159
2160 case DIOCRCLRASTATS:
2161 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2162 error = ENODEV;
2163 break;
2164 }
2165 pfr_table_copyin_cleanup(&io64->pfrio_table);
2166 error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
2167 io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
2168 PFR_FLAG_USERIOCTL);
2169 break;
2170
2171 case DIOCRTSTADDRS:
2172 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2173 error = ENODEV;
2174 break;
2175 }
2176 pfr_table_copyin_cleanup(&io64->pfrio_table);
2177 error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2178 io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
2179 PFR_FLAG_USERIOCTL);
2180 break;
2181
2182 case DIOCRINADEFINE:
2183 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2184 error = ENODEV;
2185 break;
2186 }
2187 pfr_table_copyin_cleanup(&io64->pfrio_table);
2188 error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
2189 io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
2190 io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2191 break;
2192
2193 default:
2194 VERIFY(0);
2195 /* NOTREACHED */
2196 }
2197 goto done;
2198 #else
2199 #pragma unused(io64)
2200 #endif /* __LP64__ */
2201
2202 struct32:
2203 /*
2204 * 32-bit structure processing
2205 */
2206 switch (cmd) {
2207 case DIOCRCLRTABLES:
2208 if (io32->pfrio_esize != 0) {
2209 error = ENODEV;
2210 break;
2211 }
2212 pfr_table_copyin_cleanup(&io32->pfrio_table);
2213 error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
2214 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2215 break;
2216
2217 case DIOCRADDTABLES:
2218 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2219 error = ENODEV;
2220 break;
2221 }
2222 error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
2223 &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2224 break;
2225
2226 case DIOCRDELTABLES:
2227 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2228 error = ENODEV;
2229 break;
2230 }
2231 error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
2232 &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2233 break;
2234
2235 case DIOCRGETTABLES:
2236 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2237 error = ENODEV;
2238 break;
2239 }
2240 pfr_table_copyin_cleanup(&io32->pfrio_table);
2241 error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
2242 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2243 break;
2244
2245 case DIOCRGETTSTATS:
2246 if (io32->pfrio_esize != sizeof(struct pfr_tstats)) {
2247 error = ENODEV;
2248 break;
2249 }
2250 pfr_table_copyin_cleanup(&io32->pfrio_table);
2251 error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
2252 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2253 break;
2254
2255 case DIOCRCLRTSTATS:
2256 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2257 error = ENODEV;
2258 break;
2259 }
2260 error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
2261 &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2262 break;
2263
2264 case DIOCRSETTFLAGS:
2265 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2266 error = ENODEV;
2267 break;
2268 }
2269 error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
2270 io32->pfrio_setflag, io32->pfrio_clrflag,
2271 &io32->pfrio_nchange, &io32->pfrio_ndel,
2272 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2273 break;
2274
2275 case DIOCRCLRADDRS:
2276 if (io32->pfrio_esize != 0) {
2277 error = ENODEV;
2278 break;
2279 }
2280 pfr_table_copyin_cleanup(&io32->pfrio_table);
2281 error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
2282 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2283 break;
2284
2285 case DIOCRADDADDRS:
2286 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2287 error = ENODEV;
2288 break;
2289 }
2290 pfr_table_copyin_cleanup(&io32->pfrio_table);
2291 error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2292 io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
2293 PFR_FLAG_USERIOCTL);
2294 break;
2295
2296 case DIOCRDELADDRS:
2297 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2298 error = ENODEV;
2299 break;
2300 }
2301 pfr_table_copyin_cleanup(&io32->pfrio_table);
2302 error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2303 io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
2304 PFR_FLAG_USERIOCTL);
2305 break;
2306
2307 case DIOCRSETADDRS:
2308 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2309 error = ENODEV;
2310 break;
2311 }
2312 pfr_table_copyin_cleanup(&io32->pfrio_table);
2313 error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2314 io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
2315 &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
2316 PFR_FLAG_USERIOCTL, 0);
2317 break;
2318
2319 case DIOCRGETADDRS:
2320 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2321 error = ENODEV;
2322 break;
2323 }
2324 pfr_table_copyin_cleanup(&io32->pfrio_table);
2325 error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2326 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2327 break;
2328
2329 case DIOCRGETASTATS:
2330 if (io32->pfrio_esize != sizeof(struct pfr_astats)) {
2331 error = ENODEV;
2332 break;
2333 }
2334 pfr_table_copyin_cleanup(&io32->pfrio_table);
2335 error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
2336 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2337 break;
2338
2339 case DIOCRCLRASTATS:
2340 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2341 error = ENODEV;
2342 break;
2343 }
2344 pfr_table_copyin_cleanup(&io32->pfrio_table);
2345 error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
2346 io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
2347 PFR_FLAG_USERIOCTL);
2348 break;
2349
2350 case DIOCRTSTADDRS:
2351 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2352 error = ENODEV;
2353 break;
2354 }
2355 pfr_table_copyin_cleanup(&io32->pfrio_table);
2356 error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2357 io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
2358 PFR_FLAG_USERIOCTL);
2359 break;
2360
2361 case DIOCRINADEFINE:
2362 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2363 error = ENODEV;
2364 break;
2365 }
2366 pfr_table_copyin_cleanup(&io32->pfrio_table);
2367 error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
2368 io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
2369 io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2370 break;
2371
2372 default:
2373 VERIFY(0);
2374 /* NOTREACHED */
2375 }
2376 #ifdef __LP64__
2377 done:
2378 #endif
2379 return error;
2380 }
2381
/*
 * Handle token-related PF ioctls.  Only DIOCGETSTARTERS is supported: it
 * copies the list of "starter" tokens (one pfioc_token per client tracked
 * in token_list_head) out to user space.
 *
 * tok32/tok64 are the 32-bit and 64-bit user views of the same request;
 * which one is live depends on the calling process ABI (p64).
 * Two-pass protocol: a caller passing size == 0 is told the buffer size
 * required for all nr_tokens entries; otherwise up to 'size' bytes of
 * tokens are copied out and 'size' is set to the bytes actually produced.
 *
 * Returns 0 on success, or ENOENT (no tokens), ERANGE (size computation
 * overflow), ENOMEM, or a copyout(9) error.
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	int token_size = 0, token_alloc_size = 0;
	struct pfioc_token *__sized_by(token_size) tokens = NULL;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}
		/* total bytes needed for every token; guard the multiply */
		if (os_mul_overflow(sizeof(struct pfioc_token), nr_tokens, &token_alloc_size)) {
			os_log_error(OS_LOG_DEFAULT, "%s: size overflows", __func__);
			error = ERANGE;
			break;
		}
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			/* size-probe pass: report required size, copy nothing */
			if (p64) {
				tok64->size = token_alloc_size;
			} else {
				tok32->size = token_alloc_size;
			}
			break;
		}

#ifdef __LP64__
		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
#else
		token_buf = tok32->pgt_buf;
#endif
		/* snapshot the token list into a kernel buffer before copyout */
		tokens = (struct pfioc_token *)kalloc_data(token_alloc_size, Z_WAITOK | Z_ZERO);
		token_size = token_alloc_size;
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof(*tokens)) {
				break; /* no more buffer space left */
			}
			t = (struct pfioc_token *)(void *)ptr;
			t->token_value = entry->token.token_value;
			t->timestamp = entry->token.timestamp;
			t->pid = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof(struct pfioc_token);

			cnt -= sizeof(struct pfioc_token);
		}

		/* ocnt - cnt == bytes actually filled in; copy only those */
		if (cnt < ocnt) {
			error = copyout(tokens, token_buf, ocnt - cnt);
		}

		if (p64) {
			tok64->size = ocnt - cnt;
		} else {
			tok32->size = ocnt - cnt;
		}

		kfree_data_sized_by(tokens, token_size);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
2465
/*
 * Tear down all connection state created by 'rule': flag every state
 * attached to the rule for immediate purge, then detach and expire any
 * source-tracking nodes that reference it.  Called while a rule is being
 * deleted so that no live state keeps pointing at freed rule memory.
 */
static void
pf_expire_states_and_src_nodes(struct pf_rule *rule)
{
	struct pf_state *state;
	struct pf_src_node *sn;
	int killed = 0;

	/* expire the states */
	state = TAILQ_FIRST(&state_list);
	while (state) {
		if (state->rule.ptr == rule) {
			/* mark for collection by the purge pass below */
			state->timeout = PFTM_PURGE;
		}
		state = TAILQ_NEXT(state, entry_list);
	}
	pf_purge_expired_states(pf_status.states);

	/* expire the src_nodes */
	RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
		if (sn->rule.ptr != rule) {
			continue;
		}
		if (sn->states != 0) {
			/*
			 * Drop back-pointers from any remaining states so
			 * they cannot dereference the node after it is freed.
			 */
			RB_FOREACH(state, pf_state_tree_id,
			    &tree_id) {
				if (state->src_node == sn) {
					state->src_node = NULL;
				}
				if (state->nat_src_node == sn) {
					state->nat_src_node = NULL;
				}
			}
			sn->states = 0;
		}
		/* make the node look long-expired so the purge reclaims it */
		sn->expire = 1;
		killed++;
	}
	if (killed) {
		pf_purge_expired_src_nodes();
	}
}
2507
/*
 * Unlink 'rule' from the active queue of ruleset slot 'rs_num', after
 * first expiring every state and source node it created, then renumber
 * the remaining rules so rule->nr stays dense.  Ticket/skip-step
 * maintenance is the caller's job (see pf_ruleset_cleanup()).
 */
static void
pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
    struct pf_rule *rule)
{
	struct pf_rule *r;
	int nr = 0;

	pf_expire_states_and_src_nodes(rule);

	pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
	/* post-decrement: an old value of 0 means the counter underflowed */
	if (ruleset->rules[rs_num].active.rcount-- == 0) {
		panic("%s: rcount value broken!", __func__);
	}
	r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);

	/* renumber the survivors so nr is contiguous from 0 */
	while (r) {
		r->nr = nr++;
		r = TAILQ_NEXT(r, entries);
	}
}
2528
2529
2530 static void
pf_ruleset_cleanup(struct pf_ruleset * ruleset,int rs)2531 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2532 {
2533 pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2534 ruleset->rules[rs].active.ticket =
2535 ++ruleset->rules[rs].inactive.ticket;
2536 }
2537
2538 /*
2539 * req_dev encodes the PF interface. Currently, possible values are
2540 * 0 or PFRULE_PFM
2541 */
/*
 * Delete the single rule identified by pr->rule.ticket from the ruleset
 * named by pr->anchor, provided the requesting owner and device match.
 * After removing a rule that was the last one inside an anchor, the
 * now-empty anchor's own rule in the parent ruleset is deleted too,
 * repeating upward (see the delete_rule: loop).
 *
 * Returns 0 on success, ENOENT if no rule carries the ticket, EACCES on
 * an owner or device mismatch, or the lookup error from
 * pf_find_ruleset_with_owner().
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule = NULL;
	int is_anchor;
	int error = 0;
	int i;

	is_anchor = (pr->anchor_call[0] != '\0');
	/* lookup takes a reference on the ruleset; dropped at done: */
	if ((ruleset = pf_find_ruleset_with_owner(__unsafe_null_terminated_from_indexable(pr->anchor),
	    __unsafe_null_terminated_from_indexable(pr->rule.owner), is_anchor, &error)) == NULL) {
		goto done;
	}

	/* scan every ruleset slot for the rule carrying this ticket */
	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		error = ENOENT;
		goto done;
	} else {
		/* the for loop overshot by one; i is the matching slot */
		i--;
	}

	/* only the rule's owner may delete it */
	if (strbufcmp(rule->owner, pr->rule.owner)) {
		error = EACCES;
		goto done;
	}

delete_rule:
	/*
	 * If this rule is an ownerless anchor in a non-main ruleset and it
	 * is the last rule there, deleting it empties the anchor: remove
	 * the rule, then repeat against the anchor's own rule one level up.
	 */
	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
	    ((strbufcmp(ruleset->anchor->owner, "")) == 0) &&
	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
		/* set rule & ruleset to parent and repeat */
		struct pf_rule *delete_rule = rule;
		struct pf_ruleset *delete_ruleset = ruleset;

#define parent_ruleset          ruleset->anchor->parent->ruleset
		if (ruleset->anchor->parent == NULL) {
			ruleset = &pf_main_ruleset;
		} else {
			ruleset = &parent_ruleset;
		}

		/* find the parent-level rule that owns the emptied anchor */
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule &&
		    (rule->anchor != delete_ruleset->anchor)) {
			rule = TAILQ_NEXT(rule, entries);
		}
		if (rule == NULL) {
			panic("%s: rule not found!", __func__);
		}

		/*
		 * if reqest device != rule's device, bail :
		 * with error if ticket matches;
		 * without error if ticket doesn't match (i.e. its just cleanup)
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			if (rule->ticket != pr->rule.ticket) {
				goto done;
			} else {
				error = EACCES;
				goto done;
			}
		}

		if (delete_rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}

		pf_delete_rule_from_ruleset(delete_ruleset,
		    i, delete_rule);
		delete_ruleset->rules[i].active.ticket =
		    ++delete_ruleset->rules[i].inactive.ticket;
		goto delete_rule;
	} else {
		/*
		 * process deleting rule only if device that added the
		 * rule matches device that issued the request
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			error = EACCES;
			goto done;
		}
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}
		pf_delete_rule_from_ruleset(ruleset, i,
		    rule);
		pf_ruleset_cleanup(ruleset, i);
	}

done:
	if (ruleset) {
		pf_release_ruleset(ruleset);
		ruleset = NULL;
	}
	return error;
}
2646
2647 /*
2648 * req_dev encodes the PF interface. Currently, possible values are
2649 * 0 or PFRULE_PFM
2650 */
/*
 * Delete every rule belonging to 'owner' that was installed through the
 * device encoded in req_dev, walking each ruleset slot of the main
 * ruleset and descending iteratively into anchors.  Anchor rules whose
 * owner is empty are traversed too, so an owner's rules placed inside
 * shared anchors are found.  pf_ruleset_cleanup() is run once per
 * ruleset in which anything was deleted.
 */
static void
pf_delete_rule_by_owner(char const *owner, u_int32_t req_dev)
{
	struct pf_ruleset *__single ruleset;
	struct pf_rule *__single rule, *__single next;
	int deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			/* grab the successor now; 'rule' may be deleted */
			next = TAILQ_NEXT(rule, entries);
			/*
			 * process deleting rule only if device that added the
			 * rule matches device that issued the request
			 */
			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
				rule = next;
			} else if (rule->anchor) {
				/* anchor rule: descend if owned by us or ownerless */
				if (((strlcmp(rule->owner, owner, sizeof(rule->owner))) == 0) ||
				    ((strbufcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						/* finish pending cleanup before changing ruleset */
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						/* empty anchor: delete its rule outright */
						if (rule->rule_flag &
						    PFRULE_PFM) {
							pffwrules--;
						}
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else {
					rule = next;
				}
			} else {
				/* plain rule: delete when the owner matches */
				if (((strlcmp(rule->owner, owner, sizeof(rule->owner))) == 0)) {
					/* delete rule */
					if (rule->rule_flag & PFRULE_PFM) {
						pffwrules--;
					}
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			if (rule == NULL) {
				/* end of this queue: clean up, then pop out of anchor */
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset) {
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
				}
			}
		}
	}
}
2719
2720 static void
pf_deleterule_anchor_step_out(struct pf_ruleset ** ruleset_ptr,int rs,struct pf_rule ** rule_ptr)2721 pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2722 int rs, struct pf_rule **rule_ptr)
2723 {
2724 struct pf_ruleset *ruleset = *ruleset_ptr;
2725 struct pf_rule *rule = *rule_ptr;
2726
2727 /* step out of anchor */
2728 struct pf_ruleset *rs_copy = ruleset;
2729 ruleset = ruleset->anchor->parent?
2730 &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2731
2732 rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2733 while (rule && (rule->anchor != rs_copy->anchor)) {
2734 rule = TAILQ_NEXT(rule, entries);
2735 }
2736 if (rule == NULL) {
2737 panic("%s: parent rule of anchor not found!", __func__);
2738 }
2739 if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2740 rule = TAILQ_NEXT(rule, entries);
2741 }
2742
2743 *ruleset_ptr = ruleset;
2744 *rule_ptr = rule;
2745 }
2746
2747 static void
pf_addrwrap_setup(struct pf_addr_wrap * aw)2748 pf_addrwrap_setup(struct pf_addr_wrap *aw)
2749 {
2750 VERIFY(aw);
2751 bzero(&aw->p, sizeof aw->p);
2752 }
2753
/*
 * Finish initializing a freshly copied-in rule before it is inserted
 * into 'ruleset': resolve the interface, tags, route labels, dynamic
 * addresses, table addresses, anchor linkage and overload table, and
 * move the staged pool addresses (pf_pabuf) into the rule's own pool.
 *
 * Error handling is accumulative: each failed setup step records an
 * errno in 'error' and the checks continue, so the rule is torn down
 * exactly once at the end.  On failure the rule is freed here (via
 * pool_put or pf_rm_rule) — the caller must NOT free it again.
 * Returns 0 on success.
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset)
{
	struct pf_pooladdr *__single apa;
	int error = 0;

	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(__unsafe_null_terminated_from_indexable(rule->ifname));
		if (rule->kif == NULL) {
			/* early out: nothing else set up yet, plain free */
			pool_put(&pf_rule_pl, rule);
			return EINVAL;
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
	if (rule->tagname[0]) {
		if ((rule->tag = pf_tagname2tag(__unsafe_null_terminated_from_indexable(rule->tagname))) == 0) {
			error = EBUSY;
		}
	}
	if (rule->match_tagname[0]) {
		if ((rule->match_tag =
		    pf_tagname2tag(__unsafe_null_terminated_from_indexable(rule->match_tagname))) == 0) {
			error = EBUSY;
		}
	}
	/* route-to targets only make sense with an explicit direction */
	if (rule->rt && !rule->direction) {
		error = EINVAL;
	}
#if PFLOG
	if (!rule->log) {
		rule->logif = 0;
	}
	if (rule->logif >= PFLOGIFS_MAX) {
		error = EINVAL;
	}
#endif /* PFLOG */
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr)) {
		error = EBUSY;
	}
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) {
		error = EINVAL;
	}
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->src.addr)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) {
		error = EINVAL;
	}
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call, sizeof(pr->anchor_call))) {
		error = EINVAL;
	}
	/* validate the staged pool addresses against this ruleset's tables */
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
	if (pf_tbladdr_setup(ruleset, &apa->addr)) {
		error = EINVAL;
	}

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    __unsafe_null_terminated_from_indexable(rule->overload_tblname))) == NULL) {
			error = EINVAL;
		} else {
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
		}
	}

	/* take ownership of the staged pool addresses */
	pf_mv_pool(&pf_pabuf, &rule->rpool.list);

	/* translation rules (outside anchors) and routed rules need a pool */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
	    rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL)) {
		error = EINVAL;
	}

	if (error) {
		/* full teardown: releases kif/tags/tables acquired above */
		pf_rm_rule(NULL, rule);
		return error;
	}
	/* For a NAT64 rule the rule's address family is AF_INET6 whereas
	 * the address pool's family will be AF_INET
	 */
	rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET: rule->af;
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return 0;
}
2851
2852 static int
pfioctl_ioc_rule(u_long cmd,int minordev,struct pfioc_rule * pr,struct proc * p)2853 pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
2854 {
2855 int error = 0;
2856 u_int32_t req_dev = 0;
2857 struct pf_ruleset *__single ruleset = NULL;
2858
2859 switch (cmd) {
2860 case DIOCADDRULE: {
2861 struct pf_rule *__single rule, *__single tail;
2862 int rs_num;
2863
2864 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2865 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2866 ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pr->anchor));
2867 if (ruleset == NULL) {
2868 error = EINVAL;
2869 break;
2870 }
2871 rs_num = pf_get_ruleset_number(pr->rule.action);
2872 if (rs_num >= PF_RULESET_MAX) {
2873 error = EINVAL;
2874 break;
2875 }
2876 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2877 error = EINVAL;
2878 break;
2879 }
2880 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
2881 error = EBUSY;
2882 break;
2883 }
2884 if (pr->pool_ticket != ticket_pabuf) {
2885 error = EBUSY;
2886 break;
2887 }
2888 rule = pool_get(&pf_rule_pl, PR_WAITOK);
2889 if (rule == NULL) {
2890 error = ENOMEM;
2891 break;
2892 }
2893 pf_rule_copyin(&pr->rule, rule, p, minordev);
2894 #if !INET
2895 if (rule->af == AF_INET) {
2896 pool_put(&pf_rule_pl, rule);
2897 error = EAFNOSUPPORT;
2898 break;
2899 }
2900 #endif /* INET */
2901 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2902 pf_rulequeue);
2903 if (tail) {
2904 rule->nr = tail->nr + 1;
2905 } else {
2906 rule->nr = 0;
2907 }
2908
2909 if ((error = pf_rule_setup(pr, rule, ruleset))) {
2910 break;
2911 }
2912 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2913 rule, entries);
2914 ruleset->rules[rs_num].inactive.rcount++;
2915 if (rule->rule_flag & PFRULE_PFM) {
2916 pffwrules++;
2917 }
2918
2919 if (rule->action == PF_NAT64) {
2920 os_atomic_inc(&pf_nat64_configured, relaxed);
2921 }
2922
2923 if (pr->anchor_call[0] == '\0') {
2924 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
2925 if (rule->rule_flag & PFRULE_PFM) {
2926 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
2927 }
2928 }
2929
2930 #if DUMMYNET
2931 if (rule->action == PF_DUMMYNET) {
2932 struct dummynet_event dn_event;
2933 uint32_t direction = DN_INOUT;
2934 bzero(&dn_event, sizeof(dn_event));
2935
2936 dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;
2937
2938 if (rule->direction == PF_IN) {
2939 direction = DN_IN;
2940 } else if (rule->direction == PF_OUT) {
2941 direction = DN_OUT;
2942 }
2943
2944 dn_event.dn_event_rule_config.dir = direction;
2945 dn_event.dn_event_rule_config.af = rule->af;
2946 dn_event.dn_event_rule_config.proto = rule->proto;
2947 dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
2948 dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
2949 strbufcpy(dn_event.dn_event_rule_config.ifname, rule->ifname);
2950
2951 dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2952 }
2953 #endif
2954 break;
2955 }
2956
2957 case DIOCGETRULES: {
2958 struct pf_rule *__single tail;
2959 int rs_num;
2960
2961 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2962 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2963 ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pr->anchor));
2964 if (ruleset == NULL) {
2965 error = EINVAL;
2966 break;
2967 }
2968 rs_num = pf_get_ruleset_number(pr->rule.action);
2969 if (rs_num >= PF_RULESET_MAX) {
2970 error = EINVAL;
2971 break;
2972 }
2973 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2974 pf_rulequeue);
2975 if (tail) {
2976 pr->nr = tail->nr + 1;
2977 } else {
2978 pr->nr = 0;
2979 }
2980 pr->ticket = ruleset->rules[rs_num].active.ticket;
2981 break;
2982 }
2983
2984 case DIOCGETRULE: {
2985 struct pf_rule *__single rule;
2986 int rs_num, i;
2987
2988 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2989 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2990 ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pr->anchor));
2991 if (ruleset == NULL) {
2992 error = EINVAL;
2993 break;
2994 }
2995 rs_num = pf_get_ruleset_number(pr->rule.action);
2996 if (rs_num >= PF_RULESET_MAX) {
2997 error = EINVAL;
2998 break;
2999 }
3000 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
3001 error = EBUSY;
3002 break;
3003 }
3004 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3005 while ((rule != NULL) && (rule->nr != pr->nr)) {
3006 rule = TAILQ_NEXT(rule, entries);
3007 }
3008 if (rule == NULL) {
3009 error = EBUSY;
3010 break;
3011 }
3012 pf_rule_copyout(rule, &pr->rule);
3013 if (pf_anchor_copyout(ruleset, rule, pr)) {
3014 error = EBUSY;
3015 break;
3016 }
3017 pfi_dynaddr_copyout(&pr->rule.src.addr);
3018 pfi_dynaddr_copyout(&pr->rule.dst.addr);
3019 pf_tbladdr_copyout(&pr->rule.src.addr);
3020 pf_tbladdr_copyout(&pr->rule.dst.addr);
3021 pf_rtlabel_copyout(&pr->rule.src.addr);
3022 pf_rtlabel_copyout(&pr->rule.dst.addr);
3023 for (i = 0; i < PF_SKIP_COUNT; ++i) {
3024 if (rule->skip[i].ptr == NULL) {
3025 pr->rule.skip[i].nr = -1;
3026 } else {
3027 pr->rule.skip[i].nr =
3028 rule->skip[i].ptr->nr;
3029 }
3030 }
3031
3032 if (pr->action == PF_GET_CLR_CNTR) {
3033 rule->evaluations = 0;
3034 rule->packets[0] = rule->packets[1] = 0;
3035 rule->bytes[0] = rule->bytes[1] = 0;
3036 }
3037 break;
3038 }
3039
3040 case DIOCCHANGERULE: {
3041 struct pfioc_rule *__single pcr = pr;
3042 struct pf_rule *__single oldrule = NULL, *__single newrule = NULL;
3043 struct pf_pooladdr *__single pa;
3044 u_int32_t nr = 0;
3045 int rs_num;
3046
3047 if (!(pcr->action == PF_CHANGE_REMOVE ||
3048 pcr->action == PF_CHANGE_GET_TICKET) &&
3049 pcr->pool_ticket != ticket_pabuf) {
3050 error = EBUSY;
3051 break;
3052 }
3053
3054 if (pcr->action < PF_CHANGE_ADD_HEAD ||
3055 pcr->action > PF_CHANGE_GET_TICKET) {
3056 error = EINVAL;
3057 break;
3058 }
3059 pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
3060 pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0';
3061 ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pcr->anchor));
3062 if (ruleset == NULL) {
3063 error = EINVAL;
3064 break;
3065 }
3066 rs_num = pf_get_ruleset_number(pcr->rule.action);
3067 if (rs_num >= PF_RULESET_MAX) {
3068 error = EINVAL;
3069 break;
3070 }
3071
3072 if (pcr->action == PF_CHANGE_GET_TICKET) {
3073 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3074 break;
3075 } else {
3076 if (pcr->ticket !=
3077 ruleset->rules[rs_num].active.ticket) {
3078 error = EINVAL;
3079 break;
3080 }
3081 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3082 error = EINVAL;
3083 break;
3084 }
3085 }
3086
3087 if (pcr->action != PF_CHANGE_REMOVE) {
3088 newrule = pool_get(&pf_rule_pl, PR_WAITOK);
3089 if (newrule == NULL) {
3090 error = ENOMEM;
3091 break;
3092 }
3093 pf_rule_copyin(&pcr->rule, newrule, p, minordev);
3094 #if !INET
3095 if (newrule->af == AF_INET) {
3096 pool_put(&pf_rule_pl, newrule);
3097 error = EAFNOSUPPORT;
3098 break;
3099 }
3100 #endif /* INET */
3101 if (newrule->ifname[0]) {
3102 newrule->kif = pfi_kif_get(__unsafe_null_terminated_from_indexable(newrule->ifname));
3103 if (newrule->kif == NULL) {
3104 pool_put(&pf_rule_pl, newrule);
3105 error = EINVAL;
3106 break;
3107 }
3108 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
3109 } else {
3110 newrule->kif = NULL;
3111 }
3112
3113 if (newrule->tagname[0]) {
3114 if ((newrule->tag =
3115 pf_tagname2tag(__unsafe_null_terminated_from_indexable(newrule->tagname))) == 0) {
3116 error = EBUSY;
3117 }
3118 }
3119 if (newrule->match_tagname[0]) {
3120 if ((newrule->match_tag = pf_tagname2tag(
3121 __unsafe_null_terminated_from_indexable(newrule->match_tagname))) == 0) {
3122 error = EBUSY;
3123 }
3124 }
3125 if (newrule->rt && !newrule->direction) {
3126 error = EINVAL;
3127 }
3128 #if PFLOG
3129 if (!newrule->log) {
3130 newrule->logif = 0;
3131 }
3132 if (newrule->logif >= PFLOGIFS_MAX) {
3133 error = EINVAL;
3134 }
3135 #endif /* PFLOG */
3136 pf_addrwrap_setup(&newrule->src.addr);
3137 pf_addrwrap_setup(&newrule->dst.addr);
3138 if (pf_rtlabel_add(&newrule->src.addr) ||
3139 pf_rtlabel_add(&newrule->dst.addr)) {
3140 error = EBUSY;
3141 }
3142 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) {
3143 error = EINVAL;
3144 }
3145 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) {
3146 error = EINVAL;
3147 }
3148 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) {
3149 error = EINVAL;
3150 }
3151 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) {
3152 error = EINVAL;
3153 }
3154 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call, sizeof(pcr->anchor_call))) {
3155 error = EINVAL;
3156 }
3157 TAILQ_FOREACH(pa, &pf_pabuf, entries)
3158 if (pf_tbladdr_setup(ruleset, &pa->addr)) {
3159 error = EINVAL;
3160 }
3161
3162 if (newrule->overload_tblname[0]) {
3163 if ((newrule->overload_tbl = pfr_attach_table(
3164 ruleset, __unsafe_null_terminated_from_indexable(newrule->overload_tblname))) ==
3165 NULL) {
3166 error = EINVAL;
3167 } else {
3168 newrule->overload_tbl->pfrkt_flags |=
3169 PFR_TFLAG_ACTIVE;
3170 }
3171 }
3172
3173 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
3174 if (((((newrule->action == PF_NAT) ||
3175 (newrule->action == PF_RDR) ||
3176 (newrule->action == PF_BINAT) ||
3177 (newrule->rt > PF_FASTROUTE)) &&
3178 !newrule->anchor)) &&
3179 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) {
3180 error = EINVAL;
3181 }
3182
3183 if (error) {
3184 pf_rm_rule(NULL, newrule);
3185 break;
3186 }
3187 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3188 newrule->evaluations = 0;
3189 newrule->packets[0] = newrule->packets[1] = 0;
3190 newrule->bytes[0] = newrule->bytes[1] = 0;
3191 }
3192 pf_empty_pool(&pf_pabuf);
3193
3194 if (pcr->action == PF_CHANGE_ADD_HEAD) {
3195 oldrule = TAILQ_FIRST(
3196 ruleset->rules[rs_num].active.ptr);
3197 } else if (pcr->action == PF_CHANGE_ADD_TAIL) {
3198 oldrule = TAILQ_LAST(
3199 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
3200 } else {
3201 oldrule = TAILQ_FIRST(
3202 ruleset->rules[rs_num].active.ptr);
3203 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) {
3204 oldrule = TAILQ_NEXT(oldrule, entries);
3205 }
3206 if (oldrule == NULL) {
3207 if (newrule != NULL) {
3208 pf_rm_rule(NULL, newrule);
3209 }
3210 error = EINVAL;
3211 break;
3212 }
3213 }
3214
3215 if (pcr->action == PF_CHANGE_REMOVE) {
3216 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
3217 ruleset->rules[rs_num].active.rcount--;
3218 } else {
3219 if (oldrule == NULL) {
3220 TAILQ_INSERT_TAIL(
3221 ruleset->rules[rs_num].active.ptr,
3222 newrule, entries);
3223 } else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3224 pcr->action == PF_CHANGE_ADD_BEFORE) {
3225 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3226 } else {
3227 TAILQ_INSERT_AFTER(
3228 ruleset->rules[rs_num].active.ptr,
3229 oldrule, newrule, entries);
3230 }
3231 ruleset->rules[rs_num].active.rcount++;
3232 }
3233
3234 nr = 0;
3235 TAILQ_FOREACH(oldrule,
3236 ruleset->rules[rs_num].active.ptr, entries)
3237 oldrule->nr = nr++;
3238
3239 ruleset->rules[rs_num].active.ticket++;
3240
3241 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3242 #if SKYWALK
3243 pf_process_compatibilities();
3244 #endif // SKYWALK
3245 break;
3246 }
3247
3248 case DIOCINSERTRULE: {
3249 struct pf_rule *__single rule, *__single tail, *__single r;
3250 int rs_num;
3251 int is_anchor;
3252
3253 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3254 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3255 is_anchor = (pr->anchor_call[0] != '\0');
3256
3257 if ((ruleset = pf_find_ruleset_with_owner(__unsafe_null_terminated_from_indexable(pr->anchor),
3258 __unsafe_null_terminated_from_indexable(pr->rule.owner), is_anchor, &error)) == NULL) {
3259 break;
3260 }
3261
3262 rs_num = pf_get_ruleset_number(pr->rule.action);
3263 if (rs_num >= PF_RULESET_MAX) {
3264 error = EINVAL;
3265 break;
3266 }
3267 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3268 error = EINVAL;
3269 break;
3270 }
3271
3272 /* make sure this anchor rule doesn't exist already */
3273 if (is_anchor) {
3274 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3275 while (r) {
3276 if (r->anchor &&
3277 ((strbufcmp(r->anchor->name,
3278 pr->anchor_call)) == 0)) {
3279 if (((strbufcmp(pr->rule.owner,
3280 r->owner)) == 0) ||
3281 ((strbufcmp(r->owner, "")) == 0)) {
3282 error = EEXIST;
3283 } else {
3284 error = EPERM;
3285 }
3286 break;
3287 }
3288 r = TAILQ_NEXT(r, entries);
3289 }
3290 if (error != 0) {
3291 break;
3292 }
3293 }
3294
3295 rule = pool_get(&pf_rule_pl, PR_WAITOK);
3296 if (rule == NULL) {
3297 error = ENOMEM;
3298 break;
3299 }
3300 pf_rule_copyin(&pr->rule, rule, p, minordev);
3301 #if !INET
3302 if (rule->af == AF_INET) {
3303 pool_put(&pf_rule_pl, rule);
3304 error = EAFNOSUPPORT;
3305 break;
3306 }
3307 #endif /* INET */
3308 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3309 while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) {
3310 r = TAILQ_NEXT(r, entries);
3311 }
3312 if (r == NULL) {
3313 if ((tail =
3314 TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3315 pf_rulequeue)) != NULL) {
3316 rule->nr = tail->nr + 1;
3317 } else {
3318 rule->nr = 0;
3319 }
3320 } else {
3321 rule->nr = r->nr;
3322 }
3323
3324 if ((error = pf_rule_setup(pr, rule, ruleset))) {
3325 break;
3326 }
3327
3328 if (rule->anchor != NULL) {
3329 strbufcpy(rule->anchor->owner, rule->owner);
3330 }
3331
3332 if (r) {
3333 TAILQ_INSERT_BEFORE(r, rule, entries);
3334 while (r && ++r->nr) {
3335 r = TAILQ_NEXT(r, entries);
3336 }
3337 } else {
3338 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3339 rule, entries);
3340 }
3341 ruleset->rules[rs_num].active.rcount++;
3342
3343 /* Calculate checksum for the main ruleset */
3344 if (ruleset == &pf_main_ruleset) {
3345 error = pf_setup_pfsync_matching(ruleset);
3346 }
3347
3348 pf_ruleset_cleanup(ruleset, rs_num);
3349 rule->ticket = VM_KERNEL_ADDRHASH((u_int64_t)(uintptr_t)rule);
3350
3351 pr->rule.ticket = rule->ticket;
3352 pf_rule_copyout(rule, &pr->rule);
3353 if (rule->rule_flag & PFRULE_PFM) {
3354 pffwrules++;
3355 }
3356 if (rule->action == PF_NAT64) {
3357 os_atomic_inc(&pf_nat64_configured, relaxed);
3358 }
3359
3360 if (pr->anchor_call[0] == '\0') {
3361 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
3362 if (rule->rule_flag & PFRULE_PFM) {
3363 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
3364 }
3365 }
3366 #if SKYWALK
3367 pf_process_compatibilities();
3368 #endif // SKYWALK
3369 break;
3370 }
3371
3372 case DIOCDELETERULE: {
3373 ASSERT(ruleset == NULL);
3374 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3375 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3376
3377 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3378 error = EINVAL;
3379 break;
3380 }
3381
3382 /* get device through which request is made */
3383 if ((uint8_t)minordev == PFDEV_PFM) {
3384 req_dev |= PFRULE_PFM;
3385 }
3386
3387 if (pr->rule.ticket) {
3388 if ((error = pf_delete_rule_by_ticket(pr, req_dev))) {
3389 break;
3390 }
3391 } else {
3392 pf_delete_rule_by_owner(__unsafe_null_terminated_from_indexable(pr->rule.owner), req_dev);
3393 }
3394 pr->nr = pffwrules;
3395 if (pr->rule.action == PF_NAT64) {
3396 os_atomic_dec(&pf_nat64_configured, relaxed);
3397 }
3398 #if SKYWALK
3399 pf_process_compatibilities();
3400 #endif // SKYWALK
3401 break;
3402 }
3403
3404 default:
3405 VERIFY(0);
3406 /* NOTREACHED */
3407 }
3408 if (ruleset != NULL) {
3409 pf_release_ruleset(ruleset);
3410 ruleset = NULL;
3411 }
3412
3413 return error;
3414 }
3415
/*
 * pfioctl_ioc_state_kill: handle DIOCCLRSTATES and DIOCKILLSTATES.
 *
 * Removes entries from the global state table.  Both commands honour
 * optional filters: psk_ifname (state's interface name) and
 * psk_ownername (owner of the rule that created the state).
 * DIOCKILLSTATES additionally matches on address family, protocol,
 * src/dst address masks and ports.  On return, psk->psk_af is
 * overloaded to report the number of states removed.
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	/* force NUL-termination of the user-supplied names */
	psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state *s, *nexts;
		int killed = 0;

		/* fetch the successor up front: pf_unlink_state() frees s */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strbufcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strbufcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the killed-state count on return */
		psk->psk_af = (sa_family_t)killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state *s, *nexts;
		struct pf_state_key *sk;
		struct pf_state_host *src, *dst;
		int killed = 0;

		/* fetch the successor up front: pf_unlink_state() frees s */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strbufcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strbufcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			/*
			 * Orient src/dst to the caller's view of the
			 * connection: outbound states are keyed lan->ext.
			 */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			/* af/proto of 0 act as wildcards */
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the killed-state count on return */
		psk->psk_af = (sa_family_t)killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3542
/*
 * pfioctl_ioc_state: handle DIOCADDSTATE and DIOCGETSTATE.
 *
 * DIOCADDSTATE imports a single pfsync-format state from userland into
 * the state table; DIOCGETSTATE exports the state identified by
 * (id, creatorid) back to userland in the same format.
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state *__single sp = &ps->state;
		struct pf_state *__single s;
		struct pf_state_key *__single sk;
		struct pfi_kif *__single kif;

		/* reject out-of-range timeout classes from userland */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		/* attach a fresh state key; on failure give back the state */
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(__unsafe_null_terminated_from_indexable(sp->ifname));
		if (kif == NULL) {
			/* unwind: detach the key before freeing the state */
			pf_detach_state(s, 0);
			pool_put(&pf_state_pl, s);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state *s;
		struct pf_state_cmp id_key;

		/* look the state up by its (id, creatorid) pair */
		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3616
/*
 * pfioctl_ioc_states: handle DIOCGETSTATES.
 *
 * Exports the whole state list to a user buffer, one pfsync_state per
 * entry, honouring the 32-bit or 64-bit variant of the ioctl struct
 * depending on the calling process.  A zero ps_len is a size query:
 * only the required buffer length is written back.
 */
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: { /* struct pfioc_states */
		struct pf_state *__single state;
		struct pfsync_state *__single pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* size query: report bytes needed for all states */
			size = sizeof(struct pfsync_state) * pf_status.states;
			if (p64) {
				ps64->ps_len = size;
			} else {
				ps32->ps_len = size;
			}
			break;
		}

		/* single reusable staging record for copyout */
		pstore = kalloc_type(struct pfsync_state,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
#ifdef __LP64__
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);
#else
		buf = ps32->ps_buf;
#endif

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* skip states already unlinked but not yet purged */
			if (state->timeout != PFTM_UNLINKED) {
				/* stop when the user buffer is full */
				if ((nr + 1) * sizeof(*pstore) > (unsigned)len) {
					break;
				}

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof(*pstore));
				if (error) {
					kfree_type(struct pfsync_state, pstore);
					goto fail;
				}
				buf += sizeof(*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* report the number of bytes actually written */
		size = sizeof(struct pfsync_state) * nr;
		if (p64) {
			ps64->ps_len = size;
		} else {
			ps32->ps_len = size;
		}

		kfree_type(struct pfsync_state, pstore);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
3689
/*
 * pfioctl_ioc_natlook: handle DIOCNATLOOK.
 *
 * Given a connection's protocol, addresses and ports, find the matching
 * translated state and report the rewritten (NAT'd) addresses/ports back
 * in pnl->rsaddr/rsxport and pnl->rdaddr/rdxport.  Returns EINVAL on an
 * under-specified query, E2BIG when more than one state matches, and
 * ENOENT when none does.
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key *sk;
		struct pf_state *state;
		struct pf_state_key_cmp key;
		int m = 0, direction = pnl->direction;

		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/* proto, both addresses and (for TCP/UDP) both ports required */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port))) {
			error = EINVAL;
		} else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				key.af_gwy = pnl->af;
				PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
				    pnl->af);
				memcpy(&key.ext_gwy.xport, &pnl->dxport,
				    sizeof(key.ext_gwy.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof(key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				key.af_lan = pnl->af;
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof(key.lan.xport));
				PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
				    pnl->af);
				memcpy(&key.ext_lan.xport, &pnl->sxport,
				    sizeof(key.ext_lan.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1) {
				error = E2BIG;	/* more than one state */
			} else if (state != NULL) {
				sk = state->state_key;
				/*
				 * Report the translated half of the mapping;
				 * the untranslated half echoes the query.
				 */
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af_lan);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof(pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof(pnl->rdxport));
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af_gwy);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof(pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof(pnl->rsxport));
				}
			} else {
				error = ENOENT;
			}
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3778
3779 static int
pfioctl_ioc_tm(u_long cmd,struct pfioc_tm * pt,struct proc * p)3780 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3781 {
3782 #pragma unused(p)
3783 int error = 0;
3784
3785 switch (cmd) {
3786 case DIOCSETTIMEOUT: {
3787 int old;
3788
3789 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3790 pt->seconds < 0) {
3791 error = EINVAL;
3792 goto fail;
3793 }
3794 old = pf_default_rule.timeout[pt->timeout];
3795 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) {
3796 pt->seconds = 1;
3797 }
3798 pf_default_rule.timeout[pt->timeout] = pt->seconds;
3799 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) {
3800 wakeup(pf_purge_thread_fn);
3801 }
3802 pt->seconds = old;
3803 break;
3804 }
3805
3806 case DIOCGETTIMEOUT: {
3807 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3808 error = EINVAL;
3809 goto fail;
3810 }
3811 pt->seconds = pf_default_rule.timeout[pt->timeout];
3812 break;
3813 }
3814
3815 default:
3816 VERIFY(0);
3817 /* NOTREACHED */
3818 }
3819 fail:
3820 return error;
3821 }
3822
3823 static int
pfioctl_ioc_limit(u_long cmd,struct pfioc_limit * pl,struct proc * p)3824 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3825 {
3826 #pragma unused(p)
3827 int error = 0;
3828
3829 switch (cmd) {
3830 case DIOCGETLIMIT: {
3831 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3832 error = EINVAL;
3833 goto fail;
3834 }
3835 pl->limit = pf_pool_limits[pl->index].limit;
3836 break;
3837 }
3838
3839 case DIOCSETLIMIT: {
3840 int old_limit;
3841
3842 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3843 pf_pool_limits[pl->index].pp == NULL) {
3844 error = EINVAL;
3845 goto fail;
3846 }
3847 pool_sethardlimit(pf_pool_limits[pl->index].pp,
3848 pl->limit, NULL, 0);
3849 old_limit = pf_pool_limits[pl->index].limit;
3850 pf_pool_limits[pl->index].limit = pl->limit;
3851 pl->limit = old_limit;
3852 break;
3853 }
3854
3855 default:
3856 VERIFY(0);
3857 /* NOTREACHED */
3858 }
3859 fail:
3860 return error;
3861 }
3862
/*
 * pfioctl_ioc_pooladdr: handle the address-pool ioctls
 * (DIOCBEGINADDRS, DIOCADDADDR, DIOCGETADDRS, DIOCGETADDR,
 * DIOCCHANGEADDR).
 *
 * DIOCBEGINADDRS/DIOCADDADDR stage pool addresses into the global
 * pf_pabuf under a ticket; DIOCGETADDRS/DIOCGETADDR enumerate the
 * addresses of an installed rule's pool; DIOCCHANGEADDR edits an
 * installed pool in place.
 */
static int
pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
{
#pragma unused(p)
	struct pf_pooladdr *__single pa = NULL;
	struct pf_pool *__single pool = NULL;
	int error = 0;
	struct pf_ruleset *__single ruleset = NULL;

	switch (cmd) {
	case DIOCBEGINADDRS: {
		/* discard any previously staged addresses, issue new ticket */
		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		/* stale ticket: another DIOCBEGINADDRS happened in between */
		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#if !INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		pf_pooladdr_copyin(&pp->addr, pa);
		/* resolve and reference the interface, if one was named */
		if (pa->ifname[0]) {
			pa->kif = pfi_kif_get(__unsafe_null_terminated_from_indexable(pa->ifname));
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		pf_addrwrap_setup(&pa->addr);
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			/* unwind in reverse order of acquisition */
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		/* count the addresses in the requested rule's pool */
		pp->nr = 0;
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(__unsafe_null_terminated_from_indexable(pp->anchor), pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
		pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		u_int32_t nr = 0;

		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(__unsafe_null_terminated_from_indexable(pp->anchor), pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		/* walk to the pp->nr'th entry */
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		pf_pooladdr_copyout(pa, &pp->addr);
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *__single pca = pp;
		struct pf_pooladdr *__single oldpa = NULL, *__single newpa = NULL;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
		/* ruleset reference is dropped at the bottom of the function */
		ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pca->anchor));
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(__unsafe_null_terminated_from_indexable(pca->anchor), pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		/* build the replacement entry, unless we are only removing */
		if (pca->action != PF_CHANGE_REMOVE) {
			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			pf_pooladdr_copyin(&pca->addr, newpa);
#if !INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(__unsafe_null_terminated_from_indexable(newpa->ifname));
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else {
				newpa->kif = NULL;
			}
			pf_addrwrap_setup(&newpa->addr);
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				/* unwind in reverse order of acquisition */
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		/* locate the anchor entry for the requested position */
		if (pca->action == PF_CHANGE_ADD_HEAD) {
			oldpa = TAILQ_FIRST(&pool->list);
		} else if (pca->action == PF_CHANGE_ADD_TAIL) {
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		} else {
			int i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < (int)pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL) {
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			} else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE) {
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			} else {
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
			}
		}

		pool->cur = TAILQ_FIRST(&pool->list);
		/*
		 * NOTE(review): if PF_CHANGE_REMOVE deleted the last entry,
		 * pool->cur is NULL here and the dereference below would
		 * fault — confirm whether userland can reach this with an
		 * emptied pool.
		 */
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	if (ruleset) {
		pf_release_ruleset(ruleset);
		ruleset = NULL;
	}

	return error;
}
4082
/*
 * pfioctl_ioc_ruleset: handle DIOCGETRULESETS and DIOCGETRULESET.
 *
 * DIOCGETRULESETS counts the child anchors directly under pr->path;
 * DIOCGETRULESET returns the name of the pr->nr'th such child.  The
 * main ruleset (anchor == NULL) is special-cased: its "children" are
 * the parentless anchors of the global tree.
 */
static int
pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
{
#pragma unused(p)
	int error = 0;
	struct pf_ruleset *ruleset = NULL;

	switch (cmd) {
	case DIOCGETRULESETS: {
		struct pf_anchor *anchor;

		pr->path[sizeof(pr->path) - 1] = '\0';
		pr->name[sizeof(pr->name) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pr->path))) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL) {
				pr->nr++;
			}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		struct pf_anchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pr->path))) == NULL) {
			error = EINVAL;
			break;
		}
		/* empty name signals "index out of range" below */
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL && nr++ == pr->nr) {
				strbufcpy(pr->name, anchor->name);
				break;
			}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			if (nr++ == pr->nr) {
				strbufcpy(pr->name, anchor->name);
				break;
			}
		}
		if (!pr->name[0]) {
			error = EBUSY;
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	/* drop the reference taken by pf_find_ruleset() */
	if (ruleset) {
		pf_release_ruleset(ruleset);
		ruleset = NULL;
	}
	return error;
}
4157
/*
 * pfioctl_ioc_trans: handle the transaction ioctls DIOCXBEGIN,
 * DIOCXROLLBACK and DIOCXCOMMIT.
 *
 * The user buffer is an array of `size` pfioc_trans_e records (element
 * size must match exactly, else ENODEV).  BEGIN opens an inactive
 * ruleset/table per element and returns its ticket; ROLLBACK discards
 * those; COMMIT first validates every ticket and only then atomically
 * swaps the inactive sets in.  32/64-bit user layouts are both handled.
 */
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int error = 0, esize, size;
	user_addr_t buf;
	struct pf_ruleset *rs = NULL;

#ifdef __LP64__
	int p64 = proc_is64bit(p);

	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);
#else
#pragma unused(io64, p)
	esize = io32->esize;
	size = io32->size;
	buf = io32->array;
#endif

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e *__single ioe;
		struct pfr_table *__single table;
		int i;

		/* element size mismatch means userland/kernel ABI skew */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				/* ALTQ is not supported; silently accepted */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strbufcpy(table->pfrt_anchor, ioe->anchor);
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, __unsafe_null_terminated_from_indexable(ioe->anchor)))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			}
			/* hand the freshly issued ticket back to userland */
			if (copyout(ioe, buf, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e *__single ioe;
		struct pfr_table *__single table;
		int i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strbufcpy(table->pfrt_anchor, ioe->anchor);
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;	/* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, __unsafe_null_terminated_from_indexable(ioe->anchor)))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;	/* really bad */
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e *__single ioe;
		struct pfr_table *__single table;
		user_addr_t _buf = buf;
		int i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				/*
				 * NOTE(review): rs is overwritten on each
				 * iteration but only the final value is
				 * released at "fail" — confirm whether
				 * pf_find_ruleset() takes a per-call
				 * reference (possible refcount leak).
				 */
				rs = pf_find_ruleset(__unsafe_null_terminated_from_indexable(ioe->anchor));
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(__unsafe_null_terminated_from_indexable(ioe->anchor));
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* rewind to re-read the array for the commit pass */
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strbufcpy(table->pfrt_anchor, ioe->anchor);
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, __unsafe_null_terminated_from_indexable(ioe->anchor)))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
#if SKYWALK
		pf_process_compatibilities();
#endif // SKYWALK
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	if (rs) {
		pf_release_ruleset(rs);
		rs = NULL;
	}
	return error;
}
4387
/*
 * pfioctl_ioc_src_nodes: handle DIOCGETSRCNODES.
 *
 * Exports the source-tracking tree to a user buffer, one pf_src_node
 * per entry, with absolute timestamps converted to relative ages and
 * all kernel pointers scrubbed.  A zero psn_len is a size query: only
 * the required buffer length is written back.  32/64-bit user layouts
 * are both handled.
 */
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node *__single n, *__single pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			/* size query: count nodes, report bytes needed */
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			nr++;

			size = sizeof(struct pf_src_node) * nr;
			if (p64) {
				psn64->psn_len = size;
			} else {
				psn32->psn_len = size;
			}
			break;
		}

		/* single reusable staging record for copyout */
		pstore = kalloc_type(struct pf_src_node, Z_WAITOK | Z_NOFAIL);
#ifdef __LP64__
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);
#else
		buf = psn32->psn_buf;
#endif

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* stop when the user buffer is full */
			if ((nr + 1) * sizeof(*pstore) > (unsigned)space) {
				break;
			}

			bcopy(n, pstore, sizeof(*pstore));
			if (n->rule.ptr != NULL) {
				pstore->rule.nr = n->rule.ptr->nr;
			}
			/* convert absolute timestamps to relative ages */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs) {
				pstore->expire -= secs;
			} else {
				pstore->expire = 0;
			}

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds) {
				pstore->conn_rate.count = 0;
			} else {
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;
			}

			/* scrub kernel pointers before crossing to userland */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof(*pstore));
			if (error) {
				kfree_type(struct pf_src_node, pstore);
				goto fail;
			}
			buf += sizeof(*pstore);
			nr++;
		}

		/* report the number of bytes actually written */
		size = sizeof(struct pf_src_node) * nr;
		if (p64) {
			psn64->psn_len = size;
		} else {
			psn32->psn_len = size;
		}

		kfree_type(struct pf_src_node, pstore);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4482
4483 static int
pfioctl_ioc_src_node_kill(u_long cmd,struct pfioc_src_node_kill * psnk,struct proc * p)4484 pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
4485 struct proc *p)
4486 {
4487 #pragma unused(p)
4488 int error = 0;
4489
4490 switch (cmd) {
4491 case DIOCKILLSRCNODES: {
4492 struct pf_src_node *sn;
4493 struct pf_state *s;
4494 int killed = 0;
4495
4496 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
4497 if (PF_MATCHA(psnk->psnk_src.neg,
4498 &psnk->psnk_src.addr.v.a.addr,
4499 &psnk->psnk_src.addr.v.a.mask,
4500 &sn->addr, sn->af) &&
4501 PF_MATCHA(psnk->psnk_dst.neg,
4502 &psnk->psnk_dst.addr.v.a.addr,
4503 &psnk->psnk_dst.addr.v.a.mask,
4504 &sn->raddr, sn->af)) {
4505 /* Handle state to src_node linkage */
4506 if (sn->states != 0) {
4507 RB_FOREACH(s, pf_state_tree_id,
4508 &tree_id) {
4509 if (s->src_node == sn) {
4510 s->src_node = NULL;
4511 }
4512 if (s->nat_src_node == sn) {
4513 s->nat_src_node = NULL;
4514 }
4515 }
4516 sn->states = 0;
4517 }
4518 sn->expire = 1;
4519 killed++;
4520 }
4521 }
4522
4523 if (killed > 0) {
4524 pf_purge_expired_src_nodes();
4525 }
4526
4527 psnk->psnk_af = (sa_family_t)killed;
4528 break;
4529 }
4530
4531 default:
4532 VERIFY(0);
4533 /* NOTREACHED */
4534 }
4535
4536 return error;
4537 }
4538
/*
 * Interface-related PF ioctls (DIOCIGETIFACES, DIOCSETIFFLAG,
 * DIOCCLRIFFLAG), handling both the 32-bit and 64-bit user structure
 * variants; p64 selects which one is live.
 */
static int
pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
    struct pfioc_iface_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCIGETIFACES: {
		user_addr_t buf;
		int esize;

#ifdef __LP64__
		buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
		esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
#else
		buf = io32->pfiio_buffer;
		esize = io32->pfiio_esize;
#endif

		/* esize must be that of the user space version of pfi_kif */
		if (esize != sizeof(struct pfi_uif)) {
			error = ENODEV;
			break;
		}
		/* Force NUL termination on the user-supplied name. */
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}
		error = pfi_get_ifaces(
			p64 ? __unsafe_null_terminated_from_indexable(io64->pfiio_name) :
			__unsafe_null_terminated_from_indexable(io32->pfiio_name), buf,
			p64 ? &io64->pfiio_size : &io32->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		/* Force NUL termination on the user-supplied name. */
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}

		error = pfi_set_flags(
			p64 ? __unsafe_null_terminated_from_indexable(io64->pfiio_name) :
			__unsafe_null_terminated_from_indexable(io32->pfiio_name),
			p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		/* Force NUL termination on the user-supplied name. */
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}

		error = pfi_clear_flags(
			p64 ? __unsafe_null_terminated_from_indexable(io64->pfiio_name) :
			__unsafe_null_terminated_from_indexable(io32->pfiio_name),
			p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
4611
/*
 * Common PF entry point for the IPv4/IPv6 filter hooks.  Runs the single
 * packet at *mp through pf_test{,6}_mbuf under the PF locks and fixes up
 * the mbuf packet-chain linkage around the (possibly consumed or
 * replaced) mbuf.  Returns 0 on pass, or the error from the AF-specific
 * hook on drop/consume.
 */
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *__single nextpkt;
	net_thread_marks_t __single marks;
	struct ifnet *__single pf_ifp = ifp;

	/* Always allow traffic on co-processor and management interfaces. */
	if (ifp != NULL &&
	    ((!intcoproc_unrestricted && IFNET_IS_INTCOPROC(ifp)) ||
	    (!management_data_unrestricted && IFNET_IS_MANAGEMENT(ifp)))) {
		return 0;
	}

	/*
	 * Mark this thread as holding PF.  When the push returns
	 * net_thread_marks_none the locks below are deliberately not
	 * taken -- NOTE(review): inferred from the mark/lock pairing
	 * here; confirm against net_thread_marks_push() semantics.
	 */
	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(&pf_perim_lock);
		if (!pf_is_enabled) {
			goto done;	/* pf_lock not yet taken */
		}
		lck_mtx_lock(&pf_lock);
	}

	if (mppn != NULL && *mppn != NULL) {
		VERIFY(*mppn == *mp);
	}
	/* Detach the rest of the chain; only *mp is filtered here. */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL) {
		(*mp)->m_nextpkt = NULL;
	}

	/*
	 * For packets destined to locally hosted IP address
	 * ip_output_list sets Mbuf's pkt header's rcvif to
	 * the interface hosting the IP address.
	 * While on the output path ifp passed to pf_af_hook
	 * to such local communication is the loopback interface,
	 * the input path derives ifp from mbuf packet header's
	 * rcvif.
	 * This asymmetry caues issues with PF.
	 * To handle that case, we have a limited change here to
	 * pass interface as loopback if packets are looped in.
	 */
	if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		pf_ifp = lo_ifp;
	}

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(pf_ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
	case AF_INET6:
		error = pf_inet6_hook(pf_ifp, mp, input, fwa);
		break;
	default:
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		while (m->m_nextpkt != NULL) {
			m = m->m_nextpkt;
		}
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL) {
			*mppn = *mp;
		} else {
			*mppn = nextpkt;
		}
	}

	if (marks != net_thread_marks_none) {
		lck_mtx_unlock(&pf_lock);
	}

done:
	if (marks != net_thread_marks_none) {
		lck_rw_done(&pf_perim_lock);
	}

	net_thread_marks_pop(marks);
	return error;
}
4704
4705
4706 #if INET
/*
 * IPv4 PF hook: run the packet through pf_test_mbuf, finalizing any
 * deferred transport checksum first and converting ip_len/ip_off to the
 * byte order PF expects.  Returns 0 on pass, EHOSTUNREACH when the
 * packet was blocked (and dropped here), or EJUSTRETURN when PF already
 * consumed the mbuf.
 */
static __attribute__((noinline)) int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculated the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

	/* PF expects ip_len/ip_off in network byte order. */
#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* Blocked: drop the packet here. */
			m_drop(*mp, input ? DROPTAP_FLAG_DIR_IN : DROPTAP_FLAG_DIR_OUT,
			    DROP_REASON_PF_NO_ROUTE, NULL, 0);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* PF consumed the mbuf; caller must not touch it. */
			error = EJUSTRETURN;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/* Restore host byte order for the rest of the stack. */
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return error;
}
4760 #endif /* INET */
4761
4762 int __attribute__((noinline))
pf_inet6_hook(struct ifnet * ifp,struct mbuf ** mp,int input,struct ip_fw_args * fwa)4763 pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
4764 struct ip_fw_args *fwa)
4765 {
4766 int error = 0;
4767
4768 /*
4769 * If the packet is outbound, is originated locally, is flagged for
4770 * delayed UDP/TCP checksum calculation, and is about to be processed
4771 * for an interface that doesn't support the appropriate checksum
4772 * offloading, then calculated the checksum here so that PF can adjust
4773 * it properly.
4774 */
4775 if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
4776 static const int mask = CSUM_DELAY_IPV6_DATA;
4777 const int flags = (*mp)->m_pkthdr.csum_flags &
4778 ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
4779
4780 if (flags & mask) {
4781 /*
4782 * Checksum offload should not have been enabled
4783 * when extension headers exist, thus 0 for optlen.
4784 */
4785 in6_delayed_cksum(*mp);
4786 (*mp)->m_pkthdr.csum_flags &= ~mask;
4787 }
4788 }
4789
4790 if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
4791 if (*mp != NULL) {
4792 m_drop(*mp, input ? DROPTAP_FLAG_DIR_IN : DROPTAP_FLAG_DIR_OUT,
4793 DROP_REASON_PF_NO_ROUTE, NULL, 0);
4794 *mp = NULL;
4795 error = EHOSTUNREACH;
4796 } else {
4797 error = EJUSTRETURN;
4798 }
4799 }
4800 return error;
4801 }
4802
4803 int
pf_ifaddr_hook(struct ifnet * ifp)4804 pf_ifaddr_hook(struct ifnet *ifp)
4805 {
4806 struct pfi_kif *kif = ifp->if_pf_kif;
4807
4808 if (kif != NULL) {
4809 lck_rw_lock_shared(&pf_perim_lock);
4810 lck_mtx_lock(&pf_lock);
4811
4812 pfi_kifaddr_update(kif);
4813
4814 lck_mtx_unlock(&pf_lock);
4815 lck_rw_done(&pf_perim_lock);
4816 }
4817 return 0;
4818 }
4819
4820 /*
4821 * Caller acquires dlil lock as writer (exclusive)
4822 */
4823 void
pf_ifnet_hook(struct ifnet * ifp,int attach)4824 pf_ifnet_hook(struct ifnet *ifp, int attach)
4825 {
4826 lck_rw_lock_shared(&pf_perim_lock);
4827 lck_mtx_lock(&pf_lock);
4828 if (attach) {
4829 pfi_attach_ifnet(ifp);
4830 } else {
4831 pfi_detach_ifnet(ifp);
4832 }
4833 lck_mtx_unlock(&pf_lock);
4834 lck_rw_done(&pf_perim_lock);
4835 }
4836
4837 static void
pf_attach_hooks(void)4838 pf_attach_hooks(void)
4839 {
4840 ifnet_head_lock_shared();
4841 /*
4842 * Check against ifnet_addrs[] before proceeding, in case this
4843 * is called very early on, e.g. during dlil_init() before any
4844 * network interface is attached.
4845 */
4846 if (ifnet_addrs != NULL) {
4847 int i;
4848
4849 for (i = 0; i <= if_index; i++) {
4850 struct ifnet *ifp = ifindex2ifnet[i];
4851 if (ifp != NULL) {
4852 pfi_attach_ifnet(ifp);
4853 }
4854 }
4855 }
4856 ifnet_head_done();
4857 }
4858
4859 #if 0
4860 /* currently unused along with pfdetach() */
4861 static void
4862 pf_detach_hooks(void)
4863 {
4864 ifnet_head_lock_shared();
4865 if (ifnet_addrs != NULL) {
4866 for (i = 0; i <= if_index; i++) {
4867 int i;
4868
4869 struct ifnet *ifp = ifindex2ifnet[i];
4870 if (ifp != NULL && ifp->if_pf_kif != NULL) {
4871 pfi_detach_ifnet(ifp);
4872 }
4873 }
4874 }
4875 ifnet_head_done();
4876 }
4877 #endif
4878
4879 /*
4880 * 'D' group ioctls.
4881 *
4882 * The switch statement below does nothing at runtime, as it serves as a
4883 * compile time check to ensure that all of the socket 'D' ioctls (those
4884 * in the 'D' group going thru soo_ioctl) that are made available by the
4885 * networking stack is unique. This works as long as this routine gets
4886 * updated each time a new interface ioctl gets added.
4887 *
4888 * Any failures at compile time indicates duplicated ioctl values.
4889 */
/*
 * One case label per socket 'D' ioctl.  If two ioctls ever share a
 * value, the duplicate case labels collide and the build fails -- no
 * code is generated at runtime.
 */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to static_assert() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES:
	case DIOCGIFSPEED:
		;
	}
}
4974
4975 #if SKYWALK
4976 static void
pf_process_compatibilities(void)4977 pf_process_compatibilities(void)
4978 {
4979 uint32_t compat_bitmap;
4980
4981 if (!kernel_is_macos_or_server()) {
4982 return;
4983 }
4984
4985 compat_bitmap = pf_check_compatible_rules();
4986
4987 net_filter_event_mark(NET_FILTER_EVENT_PF,
4988 (compat_bitmap &
4989 (PF_COMPATIBLE_FLAGS_CUSTOM_ANCHORS_PRESENT |
4990 PF_COMPATIBLE_FLAGS_CUSTOM_RULES_PRESENT)) == 0);
4991
4992 net_filter_event_mark(NET_FILTER_EVENT_PF_PRIVATE_PROXY,
4993 ((compat_bitmap & PF_COMPATIBLE_FLAGS_PF_ENABLED) == 0) ||
4994 (compat_bitmap & PF_COMPATIBLE_FLAGS_CUSTOM_RULES_PRESENT) == 0);
4995 }
4996 #endif // SKYWALK
4997