1 /*
2 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31
32 /*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83 #include <os/log.h>
84
85 #include <mach/vm_param.h>
86
87 #include <net/dlil.h>
88 #include <net/if.h>
89 #include <net/if_types.h>
90 #include <net/net_api_stats.h>
91 #include <net/route.h>
92 #if SKYWALK
93 #include <skywalk/lib/net_filter_event.h>
94 #endif
95
96 #include <netinet/in.h>
97 #include <netinet/in_var.h>
98 #include <netinet/in_systm.h>
99 #include <netinet/ip.h>
100 #include <netinet/ip_var.h>
101 #include <netinet/ip_icmp.h>
102 #include <netinet/if_ether.h>
103
104 #if DUMMYNET
105 #include <netinet/ip_dummynet.h>
106 #else
107 struct ip_fw_args;
108 #endif /* DUMMYNET */
109
110 #include <libkern/crypto/md5.h>
111
112 #include <machine/machine_routines.h>
113
114 #include <miscfs/devfs/devfs.h>
115
116 #include <net/pfvar.h>
117
118 #if NPFSYNC
119 #include <net/if_pfsync.h>
120 #endif /* NPFSYNC */
121
122 #if PFLOG
123 #include <net/if_pflog.h>
124 #endif /* PFLOG */
125
126 #include <netinet/ip6.h>
127 #include <netinet/in_pcb.h>
128
129 #include <dev/random/randomdev.h>
130
131 #if 0
132 static void pfdetach(void);
133 #endif
134 static int pfopen(dev_t, int, int, struct proc *);
135 static int pfclose(dev_t, int, int, struct proc *);
136 static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
137 static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
138 struct pfioc_table_64 *, struct proc *);
139 static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
140 struct pfioc_tokens_64 *, struct proc *);
141 static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
142 static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
143 struct proc *);
144 static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
145 static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
146 struct pfioc_states_64 *, struct proc *);
147 static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
148 static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
149 static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
150 static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
151 static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
152 static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
153 struct pfioc_trans_64 *, struct proc *);
154 static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
155 struct pfioc_src_nodes_64 *, struct proc *);
156 static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
157 struct proc *);
158 static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
159 struct pfioc_iface_64 *, struct proc *);
160 static struct pf_pool *pf_get_pool(char const *, u_int32_t, u_int8_t, u_int32_t,
161 u_int8_t, u_int8_t, u_int8_t);
162 static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
163 static void pf_empty_pool(struct pf_palist *);
164 static int pf_begin_rules(u_int32_t *, int, const char *);
165 static int pf_rollback_rules(u_int32_t, int, char const *);
166 static int pf_setup_pfsync_matching(struct pf_ruleset *);
167 static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
168 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
169 static int pf_commit_rules(u_int32_t, int, char const *);
170 static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
171 int);
172 static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
173 static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
174 struct pf_state *);
175 static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
176 struct pf_state *);
177 static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
178 static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
179 static void pf_expire_states_and_src_nodes(struct pf_rule *);
180 static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
181 int, struct pf_rule *);
182 static void pf_addrwrap_setup(struct pf_addr_wrap *);
183 static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
184 struct pf_ruleset *);
185 static void pf_delete_rule_by_owner(char const *, u_int32_t);
186 static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
187 static void pf_ruleset_cleanup(struct pf_ruleset *, int);
188 static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
189 int, struct pf_rule **);
190 #if SKYWALK
191 static void pf_process_compatibilities(void);
192 #endif // SKYWALK
193
194 #define PF_CDEV_MAJOR (-1)
195
196 static const struct cdevsw pf_cdevsw = {
197 .d_open = pfopen,
198 .d_close = pfclose,
199 .d_read = eno_rdwrt,
200 .d_write = eno_rdwrt,
201 .d_ioctl = pfioctl,
202 .d_stop = eno_stop,
203 .d_reset = eno_reset,
204 .d_ttys = NULL,
205 .d_select = eno_select,
206 .d_mmap = eno_mmap,
207 .d_strategy = eno_strat,
208 .d_reserved_1 = eno_getc,
209 .d_reserved_2 = eno_putc,
210 .d_type = 0
211 };
212
213 static void pf_attach_hooks(void);
214 #if 0
215 /* currently unused along with pfdetach() */
216 static void pf_detach_hooks(void);
217 #endif
218
219 /*
220 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
221 * and used in pf_af_hook() for performance optimization, such that packets
222 * will enter pf_test() or pf_test6() only when PF is running.
223 */
224 int pf_is_enabled = 0;
225
226 u_int32_t pf_hash_seed;
227 int16_t pf_nat64_configured = 0;
228
229 /*
230 * These are the pf enabled reference counting variables
231 */
232 #define NR_TOKENS_LIMIT (INT_MAX / sizeof(struct pfioc_token))
233
234 static u_int64_t pf_enabled_ref_count;
235 static u_int32_t nr_tokens = 0;
236 static u_int32_t pffwrules;
237 static u_int32_t pfdevcnt;
238
239 SLIST_HEAD(list_head, pfioc_kernel_token);
240 static struct list_head token_list_head;
241
242 struct pf_rule pf_default_rule;
243
244 typedef struct {
245 char tag_name[PF_TAG_NAME_SIZE];
246 uint16_t tag_id;
247 } pf_reserved_tag_table_t;
248
249 #define NUM_RESERVED_TAGS 2
250 static pf_reserved_tag_table_t pf_reserved_tag_table[NUM_RESERVED_TAGS] = {
251 { PF_TAG_NAME_SYSTEM_SERVICE, PF_TAG_ID_SYSTEM_SERVICE},
252 { PF_TAG_NAME_STACK_DROP, PF_TAG_ID_STACK_DROP},
253 };
254 #define RESERVED_TAG_ID_MIN PF_TAG_ID_SYSTEM_SERVICE
255
256 #define DYNAMIC_TAG_ID_MAX 50000
257 static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
258 TAILQ_HEAD_INITIALIZER(pf_tags);
259
260 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
261 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
262 #endif
263 static u_int16_t tagname2tag(struct pf_tags *, char const *);
264 static void tag_unref(struct pf_tags *, u_int16_t);
265 static int pf_rtlabel_add(struct pf_addr_wrap *);
266 static void pf_rtlabel_remove(struct pf_addr_wrap *);
267 static void pf_rtlabel_copyout(struct pf_addr_wrap *);
268
269 #if INET
270 static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
271 struct ip_fw_args *);
272 #endif /* INET */
273 static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
274 struct ip_fw_args *);
275
276 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
277
278 /*
279 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
280 */
281 #define PFIOCX_STRUCT_DECL(s) \
282 struct { \
283 union { \
284 struct s##_32 _s##_32; \
285 struct s##_64 _s##_64; \
286 } _u; \
287 } *__single s##_un = NULL \
288
289 #define PFIOCX_STRUCT_BEGIN(a, s) { \
290 VERIFY(s##_un == NULL); \
291 s##_un = kalloc_type(typeof(*s##_un), Z_WAITOK_ZERO_NOFAIL); \
292 if (p64) \
293 bcopy((struct s##_64 *)(void *) (a), &s##_un->_u._s##_64, \
294 sizeof (struct s##_64)); \
295 else \
296 bcopy((struct s##_32 *)(void *) (a), &s##_un->_u._s##_32, \
297 sizeof (struct s##_32)); \
298 }
299
300 #define PFIOCX_STRUCT_END(s, a) { \
301 VERIFY(s##_un != NULL); \
302 if (p64) \
303 bcopy(&s##_un->_u._s##_64, (struct s##_64 *)(void *) (a), sizeof (struct s##_64)); \
304 else \
305 bcopy(&s##_un->_u._s##_32, (struct s##_32 *)(void *) (a), sizeof (struct s##_32)); \
306 kfree_type(typeof(*s##_un), s##_un); \
307 }
308
309 #define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
310 #define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
311
312 /*
313 * Helper macros for regular ioctl structures.
314 */
315 #define PFIOC_STRUCT_BEGIN(a, v) { \
316 VERIFY((v) == NULL); \
317 (v) = kalloc_type(typeof(*(v)), Z_WAITOK_ZERO_NOFAIL); \
318 bcopy((typeof(v))(void *) a, v, sizeof (*(v))); \
319 }
320
321 #define PFIOC_STRUCT_END(v, a) { \
322 VERIFY((v) != NULL); \
323 bcopy(v, (typeof(v))(void *) a, sizeof (*(v))); \
324 kfree_type(typeof(*(v)), v); \
325 }
326
327 #define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
328 #define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
329
330 struct thread *pf_purge_thread;
331
332 extern void pfi_kifaddr_update(void *);
333
334 /* pf enable ref-counting helper functions */
335 static u_int64_t generate_token(struct proc *);
336 static int remove_token(struct pfioc_remove_token *);
337 static void invalidate_all_tokens(void);
338
/*
 * Allocate a PF-enable token for process p and link it onto
 * token_list_head.  The returned value is a hashed kernel address
 * (VM_KERNEL_ADDRHASH) used as an opaque handle; 0 is the failure
 * sentinel, returned when the token limit has been reached.
 * Called with pf_lock held (asserted below).
 */
static u_int64_t
generate_token(struct proc *p)
{
	u_int64_t token_value;
	struct pfioc_kernel_token *new_token;

	/* cap the number of outstanding tokens (NR_TOKENS_LIMIT) */
	if (nr_tokens + 1 > NR_TOKENS_LIMIT) {
		os_log_error(OS_LOG_DEFAULT, "%s: NR_TOKENS_LIMIT reached", __func__);
		return 0;
	}

	new_token = kalloc_type(struct pfioc_kernel_token,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	/* hash the allocation address so the raw pointer is not exposed */
	token_value = VM_KERNEL_ADDRHASH((u_int64_t)(uintptr_t)new_token);

	new_token->token.token_value = token_value;
	new_token->token.pid = proc_pid(p);
	proc_name(new_token->token.pid, new_token->token.proc_name,
	    sizeof(new_token->token.proc_name));
	new_token->token.timestamp = pf_calendar_time_second();

	SLIST_INSERT_HEAD(&token_list_head, new_token, next);
	nr_tokens++;

	return token_value;
}
368
369 static int
remove_token(struct pfioc_remove_token * tok)370 remove_token(struct pfioc_remove_token *tok)
371 {
372 struct pfioc_kernel_token *__single entry, *__single tmp;
373
374 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
375
376 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
377 if (tok->token_value == entry->token.token_value) {
378 SLIST_REMOVE(&token_list_head, entry,
379 pfioc_kernel_token, next);
380 kfree_type(struct pfioc_kernel_token, entry);
381 nr_tokens--;
382 return 0; /* success */
383 }
384 }
385
386 printf("pf : remove failure\n");
387 return ESRCH; /* failure */
388 }
389
390 static void
invalidate_all_tokens(void)391 invalidate_all_tokens(void)
392 {
393 struct pfioc_kernel_token *__single entry, *__single tmp;
394
395 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
396
397 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
398 SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
399 kfree_type(struct pfioc_kernel_token, entry);
400 }
401
402 nr_tokens = 0;
403 }
404
/*
 * Backing storage for a PF reassembly mbuf tag: the m_tag header and
 * its struct pf_fragment_tag payload are allocated as one unit, so the
 * tag's data pointer can reference pf_reass_fragment_tag directly
 * (see m_tag_kalloc_pf_reass()).
 */
struct pf_reass_tag_container {
	struct m_tag pf_reass_m_tag;
	struct pf_fragment_tag pf_reass_fragment_tag;
};
409
/*
 * Allocator for KERNEL_TAG_TYPE_PF_REASS mbuf tags.  Allocates the
 * m_tag header and its pf_fragment_tag payload as a single
 * pf_reass_tag_container, avoiding a separate payload allocation.
 * Returns NULL when len does not match the payload size or the
 * allocation fails.
 */
static struct m_tag *
m_tag_kalloc_pf_reass(u_int32_t id, u_int16_t type, uint16_t len, int wait)
{
	struct pf_reass_tag_container *tag_container;
	struct m_tag *tag = NULL;

	assert3u(id, ==, KERNEL_MODULE_TAG_ID);
	assert3u(type, ==, KERNEL_TAG_TYPE_PF_REASS);
	assert3u(len, ==, sizeof(struct pf_fragment_tag));

	/* reject mismatched payload sizes on non-assert builds too */
	if (len != sizeof(struct pf_fragment_tag)) {
		return NULL;
	}

	/*
	 * NOTE(review): `wait` is the caller-supplied allocation flag,
	 * combined here with M_ZERO — confirm these map correctly onto
	 * the kalloc_type flag space.
	 */
	tag_container = kalloc_type(struct pf_reass_tag_container, wait | M_ZERO);
	if (tag_container != NULL) {
		tag = &tag_container->pf_reass_m_tag;

		/* m_tag must be the first member of the container */
		assert3p(tag, ==, tag_container);

		M_TAG_INIT(tag, id, type, len, &tag_container->pf_reass_fragment_tag, NULL);
	}

	return tag;
}
435
/*
 * Deallocator counterpart of m_tag_kalloc_pf_reass(): the m_tag is the
 * first member of its pf_reass_tag_container, so the tag pointer is
 * also the container pointer and is freed as one unit.
 */
static void
m_tag_kfree_pf_reass(struct m_tag *tag)
{
	struct pf_reass_tag_container *__single tag_container = (struct pf_reass_tag_container *)tag;

	assert3u(tag->m_tag_len, ==, sizeof(struct pf_fragment_tag));

	kfree_type(struct pf_reass_tag_container, tag_container);
}
445
/*
 * Register the allocator/deallocator pair for PF reassembly mbuf tags
 * with the mbuf tag subsystem.  Registration failure is fatal on
 * assert builds.
 */
void
pf_register_m_tag(void)
{
	int error;

	error = m_register_internal_tag_type(KERNEL_TAG_TYPE_PF_REASS, sizeof(struct pf_fragment_tag),
	    m_tag_kalloc_pf_reass, m_tag_kfree_pf_reass);

	assert3u(error, ==, 0);
}
456
/*
 * One-time PF initialization: memory pools, subsystems (pfr, pfi,
 * osfp, normalize), the default rule and its timeouts, the purge
 * thread, and the /dev/pf and /dev/pfm control device nodes.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* backing pools for the main PF objects */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof(struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* shrink the table-entry limit on small-memory (<= 256 MB) systems */
	if (max_mem <= 256 * 1024 * 1024) {
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;
	}

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	/* service-class encodings must agree with their index mask */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	/* without the purge thread PF cannot run; bail before device setup */
	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	/* PF_CDEV_MAJOR is -1: ask for a dynamically assigned major number */
	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf");

	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm");

	pf_attach_hooks();
#if DUMMYNET
	dummynet_init();
#endif
}
569
#if 0
/*
 * Full PF teardown: stop the purge thread, flush all rulesets, states,
 * source nodes, tables and anchors, then destroy the pools and
 * subsystems.  Currently compiled out.
 */
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++) {
		/* begin+commit with nothing staged empties the active set */
		if (pf_begin_rules(&ticket, i, &r) == 0) {
			pf_commit_rules(ticket, i, &r);
		}
	}

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++) {
			if (pf_begin_rules(&ticket, i, anchor->name) == 0) {
				pf_commit_rules(ticket, i, anchor->name);
			}
		}
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/*
	 * destroy the pools
	 * NOTE(review): pf_state_key_pl and pf_app_state_pl created in
	 * pfinit() are not destroyed here — confirm whether intentional.
	 */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
647
648 static int
pfopen(dev_t dev,int flags,int fmt,struct proc * p)649 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
650 {
651 #pragma unused(flags, fmt, p)
652 if (minor(dev) >= PFDEV_MAX) {
653 return ENXIO;
654 }
655
656 if (minor(dev) == PFDEV_PFM) {
657 lck_mtx_lock(&pf_lock);
658 if (pfdevcnt != 0) {
659 lck_mtx_unlock(&pf_lock);
660 return EBUSY;
661 }
662 pfdevcnt++;
663 lck_mtx_unlock(&pf_lock);
664 }
665 return 0;
666 }
667
668 static int
pfclose(dev_t dev,int flags,int fmt,struct proc * p)669 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
670 {
671 #pragma unused(flags, fmt, p)
672 if (minor(dev) >= PFDEV_MAX) {
673 return ENXIO;
674 }
675
676 if (minor(dev) == PFDEV_PFM) {
677 lck_mtx_lock(&pf_lock);
678 VERIFY(pfdevcnt > 0);
679 pfdevcnt--;
680 lck_mtx_unlock(&pf_lock);
681 }
682 return 0;
683 }
684
/*
 * Look up the address pool of a rule identified by anchor path, action
 * (mapped to a ruleset number), rule number, and active/inactive queue.
 * When check_ticket is set, the caller's ticket must match the queue's
 * current ticket or NULL is returned.  With r_last set, the last rule
 * of the queue is used regardless of rule_number.  Returns NULL when
 * the anchor, ruleset, or rule cannot be found.
 */
static struct pf_pool *
pf_get_pool(char const *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset *__single ruleset;
	struct pf_rule *__single rule;
	int rs_num;
	struct pf_pool *__single p = NULL;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL) {
		goto done;
	}
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX) {
		goto done;
	}
	if (active) {
		/* active queue: honor the ticket check, then pick first/last */
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket) {
			goto done;
		}
		if (r_last) {
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		} else {
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		}
	} else {
		/* inactive (staged) queue: same logic against its own ticket */
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket) {
			goto done;
		}
		if (r_last) {
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		} else {
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
		}
	}
	if (!r_last) {
		/* walk forward to the rule with the requested number */
		while ((rule != NULL) && (rule->nr != rule_number)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		goto done;
	}

	p = &rule->rpool;
done:

	/* drop the reference taken by pf_find_ruleset() */
	if (ruleset) {
		pf_release_ruleset(ruleset);
		ruleset = NULL;
	}

	return p;
}
745
746 static void
pf_mv_pool(struct pf_palist * poola,struct pf_palist * poolb)747 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
748 {
749 struct pf_pooladdr *mv_pool_pa;
750
751 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
752 TAILQ_REMOVE(poola, mv_pool_pa, entries);
753 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
754 }
755 }
756
757 static void
pf_empty_pool(struct pf_palist * poola)758 pf_empty_pool(struct pf_palist *poola)
759 {
760 struct pf_pooladdr *empty_pool_pa;
761
762 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
763 pfi_dynaddr_remove(&empty_pool_pa->addr);
764 pf_tbladdr_remove(&empty_pool_pa->addr);
765 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
766 TAILQ_REMOVE(poola, empty_pool_pa, entries);
767 pool_put(&pf_pooladdr_pl, empty_pool_pa);
768 }
769 }
770
/*
 * Detach a rule from rulequeue (if non-NULL) and free it, unless it is
 * still referenced by states or source nodes, or still linked into a
 * queue.  A still-referenced rule is only unlinked here; the final
 * teardown presumably happens on a later call (with rulequeue == NULL)
 * once the references are gone — see the rulequeue == NULL branch.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl) {
				pfr_detach_table(rule->overload_tbl);
			}
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* mark as unlinked; tested below before the final free */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* still referenced or still queued: defer the actual free */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL) {
		return;
	}
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* tables were not removed in the branch above; do it now */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl) {
			pfr_detach_table(rule->overload_tbl);
		}
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
814
/*
 * Return the tag id for tagname, allocating one if needed.  An existing
 * entry just gains a reference.  Reserved names map to fixed ids above
 * DYNAMIC_TAG_ID_MAX and are kept at the head of the list; dynamic ids
 * are assigned from the lowest free value, keeping the dynamic portion
 * of the list sorted ascending.  Returns 0 when the dynamic id space
 * is exhausted.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char const *tagname)
{
	struct pf_tagname *__single tag, *__single p = NULL;
	uint16_t new_tagid = 1;
	bool reserved_tag = false;

	/* existing entry: take another reference and reuse its id */
	TAILQ_FOREACH(tag, head, entries)
	if (strlcmp(tag->name, tagname, sizeof(tag->name)) == 0) {
		tag->ref++;
		return tag->tag;
	}

	/*
	 * check if it is a reserved tag.
	 */
	_CASSERT(RESERVED_TAG_ID_MIN > DYNAMIC_TAG_ID_MAX);
	for (int i = 0; i < NUM_RESERVED_TAGS; i++) {
		if (strlcmp(pf_reserved_tag_table[i].tag_name, tagname, PF_TAG_NAME_SIZE) == 0) {
			new_tagid = pf_reserved_tag_table[i].tag_id;
			reserved_tag = true;
			goto skip_dynamic_tag_alloc;
		}
	}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head)) {
		/* skip reserved tags */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag >= RESERVED_TAG_ID_MIN;
		    p = TAILQ_NEXT(p, entries)) {
			;
		}

		/* advance past consecutive ids until the first gap (or end) */
		for (; p != NULL && p->tag == new_tagid;
		    p = TAILQ_NEXT(p, entries)) {
			new_tagid = p->tag + 1;
		}
	}

	if (new_tagid > DYNAMIC_TAG_ID_MAX) {
		return 0;
	}

skip_dynamic_tag_alloc:
	/* allocate and fill new struct pf_tagname */
	tag = kalloc_type(struct pf_tagname, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (reserved_tag) {     /* insert reserved tag at the head */
		TAILQ_INSERT_HEAD(head, tag, entries);
	} else if (p != NULL) { /* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	} else {                /* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);
	}

	return tag->tag;
}
882
883 static void
tag_unref(struct pf_tags * head,u_int16_t tag)884 tag_unref(struct pf_tags *head, u_int16_t tag)
885 {
886 struct pf_tagname *__single p, *__single next;
887
888 if (tag == 0) {
889 return;
890 }
891
892 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
893 next = TAILQ_NEXT(p, entries);
894 if (tag == p->tag) {
895 if (--p->ref == 0) {
896 TAILQ_REMOVE(head, p, entries);
897 kfree_type(struct pf_tagname, p);
898 }
899 break;
900 }
901 }
902 }
903
/*
 * Allocate/reference a tag id for tagname in the global pf_tags list.
 * Callers are expected to hold pf_lock — see pf_tagname2tag_ext().
 */
u_int16_t
pf_tagname2tag(char const *tagname)
{
	return tagname2tag(&pf_tags, tagname);
}
909
/*
 * Locked wrapper around pf_tagname2tag() for external callers: takes
 * pf_perim_lock (exclusive) then pf_lock, in that order, around the
 * tag allocation.
 */
u_int16_t
pf_tagname2tag_ext(char const *tagname)
{
	u_int16_t tag;

	lck_rw_lock_exclusive(&pf_perim_lock);
	lck_mtx_lock(&pf_lock);
	tag = pf_tagname2tag(tagname);
	lck_mtx_unlock(&pf_lock);
	lck_rw_done(&pf_perim_lock);
	return tag;
}
922
923 void
pf_tag_ref(u_int16_t tag)924 pf_tag_ref(u_int16_t tag)
925 {
926 struct pf_tagname *t;
927
928 TAILQ_FOREACH(t, &pf_tags, entries)
929 if (t->tag == tag) {
930 break;
931 }
932 if (t != NULL) {
933 t->ref++;
934 }
935 }
936
/*
 * Drop a reference on tag in the global pf_tags list; a tag id of 0
 * is a no-op (see tag_unref()).
 */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
942
/*
 * Route labels are not supported in this port; stub that always
 * succeeds.
 */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return 0;
}
949
/* Route labels are not supported in this port; no-op stub. */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
955
/* Route labels are not supported in this port; no-op stub. */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
961
962 static int
pf_begin_rules(u_int32_t * ticket,int rs_num,const char * anchor)963 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
964 {
965 struct pf_ruleset *rs;
966 struct pf_rule *rule;
967
968 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
969 return EINVAL;
970 }
971 rs = pf_find_or_create_ruleset(anchor);
972 if (rs == NULL) {
973 return EINVAL;
974 }
975 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
976 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
977 rs->rules[rs_num].inactive.rcount--;
978 }
979 *ticket = ++rs->rules[rs_num].inactive.ticket;
980 rs->rules[rs_num].inactive.open = 1;
981 pf_release_ruleset(rs);
982 rs = NULL;
983 return 0;
984 }
985
/*
 * Abort a rule transaction: discard all rules staged on the inactive
 * queue and close it.  Returns EINVAL for an out-of-range rs_num.  A
 * missing ruleset, a queue that is not open, or a stale ticket is not
 * treated as an error — there is simply nothing to roll back, and 0
 * is returned.
 */
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char const *anchor)
{
	struct pf_ruleset *__single rs = NULL;
	struct pf_rule *__single rule;
	int err = 0;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		err = EINVAL;
		goto done;
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket) {
		goto done;
	}
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;

done:
	/* drop the reference taken by pf_find_ruleset() */
	if (rs) {
		pf_release_ruleset(rs);
		rs = NULL;
	}
	return err;
}
1015
/*
 * Helpers for folding rule fields into the MD5 ruleset checksum.
 * All of them expect a local `MD5_CTX *ctx` to be in scope at the
 * expansion site. PF_MD5_UPD hashes a fixed-size member as raw bytes;
 * PF_MD5_UPD_STRBUF hashes only the used portion of a string buffer;
 * the HTONL/HTONS variants hash the value in network byte order (via a
 * caller-supplied scratch variable) so the checksum does not depend on
 * host endianness.
 */
#define PF_MD5_UPD(st, elm) \
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STRBUF(st, elm) \
	MD5Update(ctx, (u_int8_t *)(st)->elm, (unsigned int) strbuflen((st)->elm, sizeof(st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
	(stor) = htonl((st)->elm); \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
	(stor) = htons((st)->elm); \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
} while (0)
1031
/*
 * Fold the identifying parts of a rule address into the running MD5
 * ruleset checksum: the address type plus its type-specific payload,
 * the TCP/UDP port range/operator where applicable, and the negation
 * flag. Address types not listed in the switch contribute only their
 * type byte.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		/* dynamic interface address: identified by ifname + flags */
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/* only TCP/UDP rules carry a meaningful port range */
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
1068
/*
 * Fold all checksum-relevant fields of one rule into the MD5 context.
 * Multi-byte numeric fields are hashed in network byte order (via the
 * x/y scratch variables) so the resulting ruleset checksum is
 * comparable across hosts with different endianness.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;	/* scratch for 16-bit network-order values */
	u_int32_t y;	/* scratch for 32-bit network-order values */

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STRBUF(rule, label);
	PF_MD5_UPD_STRBUF(rule, ifname);
	PF_MD5_UPD_STRBUF(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1107
/*
 * Commit an open rule transaction: after validating the caller's
 * ticket, swap the staged (inactive) rule list into the active slot
 * and purge the previously active rules. For the main ruleset the
 * pfsync checksum is recomputed first. Returns EINVAL for a bad
 * ruleset number and EBUSY when no transaction is open or the ticket
 * does not match. Caller must hold pf_lock.
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char const *anchor)
{
	struct pf_ruleset *rs = NULL;
	struct pf_rule *rule, **old_array, *r;
	struct pf_rulequeue *old_rules;
	int error = 0;
	u_int32_t old_rcount;
	u_int32_t old_rsize;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		error = EINVAL;
		goto done;
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket) {
		error = EBUSY;
		goto done;
	}

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0) {
			goto done;
		}
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;
	old_rsize = rs->rules[rs_num].active.rsize;

	/* outgoing PFRULE_PFM rules leave the firewall-rule count */
	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM) {
				pffwrules--;
			}
			r = TAILQ_NEXT(r, entries);
		}
	}


	/* Promote the staged list; park the old list in the inactive slot
	 * so it can be purged below. */
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rsize =
	    rs->rules[rs_num].inactive.rsize;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rsize = old_rsize;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
		pf_rm_rule(old_rules, rule);
	}
	kfree_type_counted_by(struct pf_rule *, rs->rules[rs_num].inactive.rsize,
	    rs->rules[rs_num].inactive.ptr_array);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rsize = 0;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;

done:
	if (rs) {
		pf_release_ruleset(rs);
	}
	return error;
}
1191
/*
 * Sanitize a rule copied in from userland: force NUL termination on
 * every string field, clear all kernel pointers and list linkage the
 * caller must not seed, reset refcounts, stamp the rule with the
 * creating credentials, and zero the skip pointers. Rules arriving
 * through the PFDEV_PFM minor device are tagged PFRULE_PFM.
 */
static void
pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
    int minordev)
{
	bcopy(src, dst, sizeof(struct pf_rule));

	/* userland strings are untrusted: force NUL termination */
	dst->label[sizeof(dst->label) - 1] = '\0';
	dst->ifname[sizeof(dst->ifname) - 1] = '\0';
	dst->qname[sizeof(dst->qname) - 1] = '\0';
	dst->pqname[sizeof(dst->pqname) - 1] = '\0';
	dst->tagname[sizeof(dst->tagname) - 1] = '\0';
	dst->match_tagname[sizeof(dst->match_tagname) - 1] = '\0';
	dst->overload_tblname[sizeof(dst->overload_tblname) - 1] = '\0';
	dst->owner[sizeof(dst->owner) - 1] = '\0';

	/* record who created this rule */
	dst->cuid = kauth_cred_getuid(kauth_cred_get());
	dst->cpid = proc_getpid(p);

	/* kernel pointers are resolved later, never taken from userland */
	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	/* initialize refcounting */
	dst->states = 0;
	dst->src_nodes = 0;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	if ((uint8_t)minordev == PFDEV_PFM) {
		dst->rule_flag |= PFRULE_PFM;
	}

	/*
	 * userland should not pass any skip pointers to us
	 */
	for (uint32_t i = 0; i < PF_SKIP_COUNT; ++i) {
		dst->skip[i].ptr = 0;
	}
}
1234
1235 static void
pf_rule_copyout(struct pf_rule * src,struct pf_rule * dst)1236 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1237 {
1238 bcopy(src, dst, sizeof(struct pf_rule));
1239
1240 dst->anchor = NULL;
1241 dst->kif = NULL;
1242 dst->overload_tbl = NULL;
1243
1244 dst->rpool.list.tqh_first = NULL;
1245 dst->rpool.list.tqh_last = NULL;
1246 dst->rpool.cur = NULL;
1247
1248 dst->entries.tqe_prev = NULL;
1249 dst->entries.tqe_next = NULL;
1250
1251 /*
1252 * redact skip pointers for security
1253 */
1254 for (uint32_t i = 0; i < PF_SKIP_COUNT; ++i) {
1255 dst->skip[i].ptr = 0;
1256 }
1257 }
1258
/*
 * Export a kernel state entry (pf_state plus its pf_state_key) into
 * the flat pfsync_state representation handed to userland/pfsync.
 * Absolute kernel times are converted to relative values: creation
 * becomes "seconds since created" and expire becomes "seconds until
 * expiry" (clamped to 0 if already past).
 */
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext_lan.addr = sk->ext_lan.addr;
	sp->ext_lan.xport = sk->ext_lan.xport;
	sp->ext_gwy.addr = sk->ext_gwy.addr;
	sp->ext_gwy.xport = sk->ext_gwy.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af_lan = sk->af_lan;
	sp->af_gwy = sk->af_gwy;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strbufcpy(sp->ifname, s->kif->pfik_name);
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	/* (unsigned)-1 marks "no rule" for the optional rule references */
	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node) {
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	}
	if (s->nat_src_node) {
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
	}

	/* rebase expiry to a relative countdown, clamped at 0 */
	if (sp->expire > secs) {
		sp->expire -= secs;
	} else {
		sp->expire = 0;
	}
}
1319
/*
 * Inverse of pf_state_export(): populate a kernel pf_state/pf_state_key
 * pair from a pfsync_state received from userland. Rule references are
 * reset (rule -> pf_default_rule, nat/anchor -> NULL), counters are
 * zeroed, and the imported relative expire value is rebased against the
 * current time using the default timeout for the state's timeout class.
 */
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext_lan.addr = sp->ext_lan.addr;
	sk->ext_lan.xport = sp->ext_lan.xport;
	sk->ext_gwy.addr = sp->ext_gwy.addr;
	sk->ext_gwy.xport = sp->ext_gwy.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af_lan = sp->af_lan;
	sk->af_gwy = sp->af_gwy;
	sk->direction = sp->direction;
	/* flow identity must already have been assigned by the caller */
	ASSERT(sk->flowsrc == FLOWSRC_PF);
	ASSERT(sk->flowhash != 0);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = pf_time_second();
	s->expire = pf_time_second();
	if (sp->expire > 0) {
		/* back-date so the remaining lifetime equals sp->expire */
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	}
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
1361
1362 static void
pf_pooladdr_copyin(struct pf_pooladdr * src,struct pf_pooladdr * dst)1363 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1364 {
1365 bcopy(src, dst, sizeof(struct pf_pooladdr));
1366
1367 dst->entries.tqe_prev = NULL;
1368 dst->entries.tqe_next = NULL;
1369 dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1370 dst->kif = NULL;
1371 }
1372
1373 static void
pf_pooladdr_copyout(struct pf_pooladdr * src,struct pf_pooladdr * dst)1374 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1375 {
1376 bcopy(src, dst, sizeof(struct pf_pooladdr));
1377
1378 dst->entries.tqe_prev = NULL;
1379 dst->entries.tqe_next = NULL;
1380 dst->kif = NULL;
1381 }
1382
1383 static int
pf_setup_pfsync_matching(struct pf_ruleset * rs)1384 pf_setup_pfsync_matching(struct pf_ruleset *rs)
1385 {
1386 MD5_CTX ctx;
1387 struct pf_rule *rule;
1388 int rs_cnt;
1389 u_int8_t digest[PF_MD5_DIGEST_LENGTH];
1390
1391 MD5Init(&ctx);
1392 for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1393 /* XXX PF_RULESET_SCRUB as well? */
1394 if (rs_cnt == PF_RULESET_SCRUB) {
1395 continue;
1396 }
1397
1398 rs->rules[rs_cnt].inactive.ptr_array = krealloc_type(struct pf_rule *,
1399 rs->rules[rs_cnt].inactive.rsize, rs->rules[rs_cnt].inactive.rcount,
1400 rs->rules[rs_cnt].inactive.ptr_array, Z_WAITOK | Z_REALLOCF);
1401 rs->rules[rs_cnt].inactive.rsize =
1402 rs->rules[rs_cnt].inactive.rcount;
1403
1404 if (rs->rules[rs_cnt].inactive.rcount &&
1405 !rs->rules[rs_cnt].inactive.ptr_array) {
1406 rs->rules[rs_cnt].inactive.ptr_array = rs->rules[rs_cnt].inactive.ptr_array;
1407 rs->rules[rs_cnt].inactive.rsize = 0;
1408 return ENOMEM;
1409 }
1410
1411 TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1412 entries) {
1413 pf_hash_rule(&ctx, rule);
1414 (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1415 }
1416 }
1417
1418 MD5Final(digest, &ctx);
1419 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
1420 return 0;
1421 }
1422
/*
 * Mark pf enabled and running. Caller must hold pf_lock and pf must
 * currently be disabled. Seeds the state-id generator on first start
 * and wakes the purge thread.
 */
static void
pf_start(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	if (pf_status.stateid == 0) {
		/* seed state ids from the clock, shifted to leave room
		 * for a 32-bit per-state counter in the low bits */
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
#if SKYWALK
	pf_process_compatibilities();
#endif // SKYWALK
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
1443
/*
 * Mark pf disabled. Caller must hold pf_lock and pf must currently be
 * enabled. Wakes the purge thread so it can notice the state change.
 */
static void
pf_stop(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
#if SKYWALK
	pf_process_compatibilities();
#endif // SKYWALK
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1460
/*
 * ioctl entry point for the pf pseudo-device.
 *
 * Three gatekeeping passes run before the command dispatch:
 *   1. the caller must hold superuser credentials (EPERM otherwise);
 *   2. at securelevel > 1 only a read-mostly subset of commands is
 *      permitted (table-mutating commands pass only as DUMMY ops);
 *   3. without FWRITE on the descriptor only non-mutating commands are
 *      permitted (DUMMY table ops are upgraded to FWRITE; GETRULE with
 *      PF_GET_CLR_CNTR is rejected since it clears counters).
 * The command then executes under pf_perim_lock (exclusive for
 * writers, shared for readers) plus pf_lock.
 */
static int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
#pragma unused(dev)
	/* NOTE(review): dev IS read via minor(dev) below; the unused
	 * pragma looks stale — confirm. */
	int p64 = proc_is64bit(p);	/* NOTE(review): appears unused here but
					 * is presumably referenced inside the
					 * PFIOCX_STRUCT_* macros — confirm */
	int error = 0;
	int minordev = minor(dev);

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}

	/* XXX keep in sync with switch() below */
	if (securelevel > 1) {
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			struct pfioc_table *__single table = (struct pfioc_table *)(void *) addr;

			bcopy(&table->pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				break; /* dummy operation ok */
			}
			return EPERM;
		}
		default:
			return EPERM;
		}
	}

	if (!(flags & FWRITE)) {
		switch (cmd) {
		case DIOCSTART:
		case DIOCSTARTREF:
		case DIOCSTOP:
		case DIOCSTOPREF:
		case DIOCGETSTARTERS:
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return EACCES;
		}
		case DIOCGETRULE: {
			u_int32_t action;

			bcopy(&((struct pfioc_rule *)(void *)addr)->action,
			    &action, sizeof(action));

			/* clearing counters is a write operation */
			if (action == PF_GET_CLR_CNTR) {
				return EACCES;
			}
			break;
		}
		default:
			return EACCES;
		}
	}

	/* writers take the perimeter exclusively, readers share it */
	if (flags & FWRITE) {
		lck_rw_lock_exclusive(&pf_perim_lock);
	} else {
		lck_rw_lock_shared(&pf_perim_lock);
	}

	lck_mtx_lock(&pf_lock);

	switch (cmd) {
	case DIOCSTART:
		if (pf_status.running) {
			/*
			 * Increment the reference for a simple -e enable, so
			 * that even if other processes drop their references,
			 * pf will still be available to processes that turned
			 * it on without taking a reference
			 */
			if (nr_tokens == pf_enabled_ref_count) {
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			}
			error = EEXIST;
		} else if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			pf_start();
			pf_enabled_ref_count++;
			VERIFY(pf_enabled_ref_count != 0);
		}
		break;

	case DIOCSTARTREF:              /* u_int64_t */
		if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			u_int64_t token;

			/* small enough to be on stack */
			if ((token = generate_token(p)) != 0) {
				if (pf_is_enabled == 0) {
					pf_start();
				}
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			} else {
				error = ENOMEM;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: unable to generate token\n"));
			}
			bcopy(&token, (uint64_t *)(void *)addr, sizeof(token));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			/* a hard stop drops every outstanding reference */
			pf_stop();
			pf_enabled_ref_count = 0;
			invalidate_all_tokens();
		}
		break;

	case DIOCSTOPREF:               /* struct pfioc_remove_token */
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			struct pfioc_remove_token pfrt;

			/* small enough to be on stack */
			bcopy((struct pfioc_remove_token *)(void *)addr, &pfrt, sizeof(pfrt));
			if ((error = remove_token(&pfrt)) == 0) {
				VERIFY(pf_enabled_ref_count != 0);
				pf_enabled_ref_count--;
				/* return currently held references */
				pfrt.refcount = pf_enabled_ref_count;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: enabled refcount decremented\n"));
			} else {
				error = EINVAL;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: token mismatch\n"));
			}
			bcopy(&pfrt, (struct pfioc_remove_token *)(void *)addr, sizeof(pfrt));

			/* last reference gone: shut pf down */
			if (error == 0 && pf_enabled_ref_count == 0) {
				pf_stop();
			}
		}
		break;

	case DIOCGETSTARTERS: {         /* struct pfioc_tokens */
		PFIOCX_STRUCT_DECL(pfioc_tokens);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens);
		error = pfioctl_ioc_tokens(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_tokens),
		    PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
		PFIOCX_STRUCT_END(pfioc_tokens, addr);
		break;
	}

	case DIOCADDRULE:               /* struct pfioc_rule */
	case DIOCGETRULES:              /* struct pfioc_rule */
	case DIOCGETRULE:               /* struct pfioc_rule */
	case DIOCCHANGERULE:            /* struct pfioc_rule */
	case DIOCINSERTRULE:            /* struct pfioc_rule */
	case DIOCDELETERULE: {          /* struct pfioc_rule */
		struct pfioc_rule *__single pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr);
		error = pfioctl_ioc_rule(cmd, minordev, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCCLRSTATES:             /* struct pfioc_state_kill */
	case DIOCKILLSTATES: {          /* struct pfioc_state_kill */
		struct pfioc_state_kill *__single psk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psk);
		error = pfioctl_ioc_state_kill(cmd, psk, p);
		PFIOC_STRUCT_END(psk, addr);
		break;
	}

	case DIOCADDSTATE:              /* struct pfioc_state */
	case DIOCGETSTATE: {            /* struct pfioc_state */
		struct pfioc_state *__single ps = NULL;

		PFIOC_STRUCT_BEGIN(addr, ps);
		error = pfioctl_ioc_state(cmd, ps, p);
		PFIOC_STRUCT_END(ps, addr);
		break;
	}

	case DIOCGETSTATES: {           /* struct pfioc_states */
		PFIOCX_STRUCT_DECL(pfioc_states);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_states);
		error = pfioctl_ioc_states(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_states),
		    PFIOCX_STRUCT_ADDR64(pfioc_states), p);
		PFIOCX_STRUCT_END(pfioc_states, addr);
		break;
	}

	case DIOCGETSTATUS: {           /* struct pf_status */
		struct pf_status *__single s = NULL;

		/* note: source is the global pf_status, destination is addr */
		PFIOC_STRUCT_BEGIN(&pf_status, s);
		pfi_update_status(__unsafe_null_terminated_from_indexable(s->ifname), s);
		PFIOC_STRUCT_END(s, addr);
		break;
	}

	case DIOCSETSTATUSIF: {         /* struct pfioc_if */
		struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;

		/* OK for unaligned accesses */
		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strbufcpy(pf_status.ifname, pi->ifname);
		break;
	}

	case DIOCCLRSTATUS: {
		/* reset counters but keep the status interface binding */
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = pf_calendar_time_second();
		if (*pf_status.ifname) {
			pfi_update_status(__unsafe_null_terminated_from_indexable(pf_status.ifname), NULL);
		}
		break;
	}

	case DIOCNATLOOK: {             /* struct pfioc_natlook */
		struct pfioc_natlook *__single pnl = NULL;

		PFIOC_STRUCT_BEGIN(addr, pnl);
		error = pfioctl_ioc_natlook(cmd, pnl, p);
		PFIOC_STRUCT_END(pnl, addr);
		break;
	}

	case DIOCSETTIMEOUT:            /* struct pfioc_tm */
	case DIOCGETTIMEOUT: {          /* struct pfioc_tm */
		struct pfioc_tm pt;

		/* small enough to be on stack */
		bcopy((struct pfioc_tm *)(void *) addr, &pt, sizeof(pt));
		error = pfioctl_ioc_tm(cmd, &pt, p);
		bcopy(&pt, (struct pfioc_tm *)(void *) addr, sizeof(pt));
		break;
	}

	case DIOCGETLIMIT:              /* struct pfioc_limit */
	case DIOCSETLIMIT: {            /* struct pfioc_limit */
		struct pfioc_limit pl;

		/* small enough to be on stack */
		bcopy((struct pfioc_limit *)(void *) addr, &pl, sizeof(pl));
		error = pfioctl_ioc_limit(cmd, &pl, p);
		bcopy(&pl, (struct pfioc_limit *)(void *) addr, sizeof(pl));
		break;
	}

	case DIOCSETDEBUG: {            /* u_int32_t */
		bcopy((uint32_t *)(void *) addr, &pf_status.debug, sizeof(u_int32_t));
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset *ruleset = &pf_main_ruleset;
		struct pf_rule *rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCGIFSPEED: {
		struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
		struct pf_ifspeed ps;
		struct ifnet *ifp;
		u_int64_t baudrate;

		if (psp->ifname[0] != '\0') {
			strbufcpy(ps.ifname, psp->ifname);
			ifp = ifunit(__unsafe_null_terminated_from_indexable(ps.ifname));
			if (ifp != NULL) {
				/* report the interface's maximum output bandwidth */
				baudrate = ifp->if_output_bw.max_bw;
				bcopy(&baudrate, &psp->baudrate,
				    sizeof(baudrate));
			} else {
				error = EINVAL;
			}
		} else {
			error = EINVAL;
		}
		break;
	}

	case DIOCBEGINADDRS:            /* struct pfioc_pooladdr */
	case DIOCADDADDR:               /* struct pfioc_pooladdr */
	case DIOCGETADDRS:              /* struct pfioc_pooladdr */
	case DIOCGETADDR:               /* struct pfioc_pooladdr */
	case DIOCCHANGEADDR: {          /* struct pfioc_pooladdr */
		struct pfioc_pooladdr *__single pp = NULL;

		PFIOC_STRUCT_BEGIN(addr, pp);
		error = pfioctl_ioc_pooladdr(cmd, pp, p);
		PFIOC_STRUCT_END(pp, addr);
		break;
	}

	case DIOCGETRULESETS:           /* struct pfioc_ruleset */
	case DIOCGETRULESET: {          /* struct pfioc_ruleset */
		struct pfioc_ruleset *__single pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr);
		error = pfioctl_ioc_ruleset(cmd, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCRCLRTABLES:            /* struct pfioc_table */
	case DIOCRADDTABLES:            /* struct pfioc_table */
	case DIOCRDELTABLES:            /* struct pfioc_table */
	case DIOCRGETTABLES:            /* struct pfioc_table */
	case DIOCRGETTSTATS:            /* struct pfioc_table */
	case DIOCRCLRTSTATS:            /* struct pfioc_table */
	case DIOCRSETTFLAGS:            /* struct pfioc_table */
	case DIOCRCLRADDRS:             /* struct pfioc_table */
	case DIOCRADDADDRS:             /* struct pfioc_table */
	case DIOCRDELADDRS:             /* struct pfioc_table */
	case DIOCRSETADDRS:             /* struct pfioc_table */
	case DIOCRGETADDRS:             /* struct pfioc_table */
	case DIOCRGETASTATS:            /* struct pfioc_table */
	case DIOCRCLRASTATS:            /* struct pfioc_table */
	case DIOCRTSTADDRS:             /* struct pfioc_table */
	case DIOCRINADEFINE: {          /* struct pfioc_table */
		PFIOCX_STRUCT_DECL(pfioc_table);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_table);
		error = pfioctl_ioc_table(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_table),
		    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
		PFIOCX_STRUCT_END(pfioc_table, addr);
		break;
	}

	case DIOCOSFPADD:               /* struct pf_osfp_ioctl */
	case DIOCOSFPGET: {             /* struct pf_osfp_ioctl */
		struct pf_osfp_ioctl *__single io = NULL;

		PFIOC_STRUCT_BEGIN(addr, io);
		if (cmd == DIOCOSFPADD) {
			error = pf_osfp_add(io);
		} else {
			VERIFY(cmd == DIOCOSFPGET);
			error = pf_osfp_get(io);
		}
		PFIOC_STRUCT_END(io, addr);
		break;
	}

	case DIOCXBEGIN:                /* struct pfioc_trans */
	case DIOCXROLLBACK:             /* struct pfioc_trans */
	case DIOCXCOMMIT: {             /* struct pfioc_trans */
		PFIOCX_STRUCT_DECL(pfioc_trans);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_trans);
		error = pfioctl_ioc_trans(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_trans),
		    PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
		PFIOCX_STRUCT_END(pfioc_trans, addr);
		break;
	}

	case DIOCGETSRCNODES: {         /* struct pfioc_src_nodes */
		PFIOCX_STRUCT_DECL(pfioc_src_nodes);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes);
		error = pfioctl_ioc_src_nodes(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
		    PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
		PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
		break;
	}

	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		/* detach every state from its source nodes ... */
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		/* ... then mark every source node expired and purge */
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {        /* struct pfioc_src_node_kill */
		struct pfioc_src_node_kill *__single psnk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psnk);
		error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
		PFIOC_STRUCT_END(psnk, addr);
		break;
	}

	case DIOCSETHOSTID: {           /* u_int32_t */
		u_int32_t hid;

		/* small enough to be on stack */
		bcopy((u_int32_t * __single)(void *__single)addr, &hid, sizeof(hid));
		if (hid == 0) {
			/* zero means "pick one for me" */
			pf_status.hostid = random();
		} else {
			pf_status.hostid = hid;
		}
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES:            /* struct pfioc_iface */
	case DIOCSETIFFLAG:             /* struct pfioc_iface */
	case DIOCCLRIFFLAG: {           /* struct pfioc_iface */
		PFIOCX_STRUCT_DECL(pfioc_iface);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_iface);
		error = pfioctl_ioc_iface(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_iface),
		    PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
		PFIOCX_STRUCT_END(pfioc_iface, addr);
		break;
	}

	default:
		error = ENODEV;
		break;
	}

	lck_mtx_unlock(&pf_lock);
	lck_rw_done(&pf_perim_lock);

	return error;
}
2010
2011 static int
pfioctl_ioc_table(u_long cmd,struct pfioc_table_32 * io32,struct pfioc_table_64 * io64,struct proc * p)2012 pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
2013 struct pfioc_table_64 *io64, struct proc *p)
2014 {
2015 int p64 = proc_is64bit(p);
2016 int error = 0;
2017
2018 if (!p64) {
2019 goto struct32;
2020 }
2021
2022 #ifdef __LP64__
2023 /*
2024 * 64-bit structure processing
2025 */
2026 switch (cmd) {
2027 case DIOCRCLRTABLES:
2028 if (io64->pfrio_esize != 0) {
2029 error = ENODEV;
2030 break;
2031 }
2032 pfr_table_copyin_cleanup(&io64->pfrio_table);
2033 error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
2034 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2035 break;
2036
2037 case DIOCRADDTABLES:
2038 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
2039 error = ENODEV;
2040 break;
2041 }
2042 error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
2043 &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2044 break;
2045
2046 case DIOCRDELTABLES:
2047 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
2048 error = ENODEV;
2049 break;
2050 }
2051 error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
2052 &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2053 break;
2054
2055 case DIOCRGETTABLES:
2056 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
2057 error = ENODEV;
2058 break;
2059 }
2060 pfr_table_copyin_cleanup(&io64->pfrio_table);
2061 error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
2062 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2063 break;
2064
2065 case DIOCRGETTSTATS:
2066 if (io64->pfrio_esize != sizeof(struct pfr_tstats)) {
2067 error = ENODEV;
2068 break;
2069 }
2070 pfr_table_copyin_cleanup(&io64->pfrio_table);
2071 error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
2072 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2073 break;
2074
2075 case DIOCRCLRTSTATS:
2076 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
2077 error = ENODEV;
2078 break;
2079 }
2080 error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
2081 &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2082 break;
2083
2084 case DIOCRSETTFLAGS:
2085 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
2086 error = ENODEV;
2087 break;
2088 }
2089 error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
2090 io64->pfrio_setflag, io64->pfrio_clrflag,
2091 &io64->pfrio_nchange, &io64->pfrio_ndel,
2092 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2093 break;
2094
2095 case DIOCRCLRADDRS:
2096 if (io64->pfrio_esize != 0) {
2097 error = ENODEV;
2098 break;
2099 }
2100 pfr_table_copyin_cleanup(&io64->pfrio_table);
2101 error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
2102 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2103 break;
2104
2105 case DIOCRADDADDRS:
2106 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2107 error = ENODEV;
2108 break;
2109 }
2110 pfr_table_copyin_cleanup(&io64->pfrio_table);
2111 error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2112 io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
2113 PFR_FLAG_USERIOCTL);
2114 break;
2115
2116 case DIOCRDELADDRS:
2117 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2118 error = ENODEV;
2119 break;
2120 }
2121 pfr_table_copyin_cleanup(&io64->pfrio_table);
2122 error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2123 io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
2124 PFR_FLAG_USERIOCTL);
2125 break;
2126
2127 case DIOCRSETADDRS:
2128 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2129 error = ENODEV;
2130 break;
2131 }
2132 pfr_table_copyin_cleanup(&io64->pfrio_table);
2133 error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2134 io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
2135 &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
2136 PFR_FLAG_USERIOCTL, 0);
2137 break;
2138
2139 case DIOCRGETADDRS:
2140 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2141 error = ENODEV;
2142 break;
2143 }
2144 pfr_table_copyin_cleanup(&io64->pfrio_table);
2145 error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2146 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2147 break;
2148
2149 case DIOCRGETASTATS:
2150 if (io64->pfrio_esize != sizeof(struct pfr_astats)) {
2151 error = ENODEV;
2152 break;
2153 }
2154 pfr_table_copyin_cleanup(&io64->pfrio_table);
2155 error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
2156 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2157 break;
2158
2159 case DIOCRCLRASTATS:
2160 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2161 error = ENODEV;
2162 break;
2163 }
2164 pfr_table_copyin_cleanup(&io64->pfrio_table);
2165 error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
2166 io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
2167 PFR_FLAG_USERIOCTL);
2168 break;
2169
2170 case DIOCRTSTADDRS:
2171 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2172 error = ENODEV;
2173 break;
2174 }
2175 pfr_table_copyin_cleanup(&io64->pfrio_table);
2176 error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2177 io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
2178 PFR_FLAG_USERIOCTL);
2179 break;
2180
2181 case DIOCRINADEFINE:
2182 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2183 error = ENODEV;
2184 break;
2185 }
2186 pfr_table_copyin_cleanup(&io64->pfrio_table);
2187 error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
2188 io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
2189 io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2190 break;
2191
2192 default:
2193 VERIFY(0);
2194 /* NOTREACHED */
2195 }
2196 goto done;
2197 #else
2198 #pragma unused(io64)
2199 #endif /* __LP64__ */
2200
2201 struct32:
2202 /*
2203 * 32-bit structure processing
2204 */
2205 switch (cmd) {
2206 case DIOCRCLRTABLES:
2207 if (io32->pfrio_esize != 0) {
2208 error = ENODEV;
2209 break;
2210 }
2211 pfr_table_copyin_cleanup(&io32->pfrio_table);
2212 error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
2213 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2214 break;
2215
2216 case DIOCRADDTABLES:
2217 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2218 error = ENODEV;
2219 break;
2220 }
2221 error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
2222 &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2223 break;
2224
2225 case DIOCRDELTABLES:
2226 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2227 error = ENODEV;
2228 break;
2229 }
2230 error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
2231 &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2232 break;
2233
2234 case DIOCRGETTABLES:
2235 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2236 error = ENODEV;
2237 break;
2238 }
2239 pfr_table_copyin_cleanup(&io32->pfrio_table);
2240 error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
2241 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2242 break;
2243
2244 case DIOCRGETTSTATS:
2245 if (io32->pfrio_esize != sizeof(struct pfr_tstats)) {
2246 error = ENODEV;
2247 break;
2248 }
2249 pfr_table_copyin_cleanup(&io32->pfrio_table);
2250 error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
2251 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2252 break;
2253
2254 case DIOCRCLRTSTATS:
2255 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2256 error = ENODEV;
2257 break;
2258 }
2259 error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
2260 &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2261 break;
2262
2263 case DIOCRSETTFLAGS:
2264 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2265 error = ENODEV;
2266 break;
2267 }
2268 error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
2269 io32->pfrio_setflag, io32->pfrio_clrflag,
2270 &io32->pfrio_nchange, &io32->pfrio_ndel,
2271 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2272 break;
2273
2274 case DIOCRCLRADDRS:
2275 if (io32->pfrio_esize != 0) {
2276 error = ENODEV;
2277 break;
2278 }
2279 pfr_table_copyin_cleanup(&io32->pfrio_table);
2280 error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
2281 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2282 break;
2283
2284 case DIOCRADDADDRS:
2285 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2286 error = ENODEV;
2287 break;
2288 }
2289 pfr_table_copyin_cleanup(&io32->pfrio_table);
2290 error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2291 io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
2292 PFR_FLAG_USERIOCTL);
2293 break;
2294
2295 case DIOCRDELADDRS:
2296 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2297 error = ENODEV;
2298 break;
2299 }
2300 pfr_table_copyin_cleanup(&io32->pfrio_table);
2301 error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2302 io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
2303 PFR_FLAG_USERIOCTL);
2304 break;
2305
2306 case DIOCRSETADDRS:
2307 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2308 error = ENODEV;
2309 break;
2310 }
2311 pfr_table_copyin_cleanup(&io32->pfrio_table);
2312 error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2313 io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
2314 &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
2315 PFR_FLAG_USERIOCTL, 0);
2316 break;
2317
2318 case DIOCRGETADDRS:
2319 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2320 error = ENODEV;
2321 break;
2322 }
2323 pfr_table_copyin_cleanup(&io32->pfrio_table);
2324 error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2325 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2326 break;
2327
2328 case DIOCRGETASTATS:
2329 if (io32->pfrio_esize != sizeof(struct pfr_astats)) {
2330 error = ENODEV;
2331 break;
2332 }
2333 pfr_table_copyin_cleanup(&io32->pfrio_table);
2334 error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
2335 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2336 break;
2337
2338 case DIOCRCLRASTATS:
2339 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2340 error = ENODEV;
2341 break;
2342 }
2343 pfr_table_copyin_cleanup(&io32->pfrio_table);
2344 error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
2345 io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
2346 PFR_FLAG_USERIOCTL);
2347 break;
2348
2349 case DIOCRTSTADDRS:
2350 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2351 error = ENODEV;
2352 break;
2353 }
2354 pfr_table_copyin_cleanup(&io32->pfrio_table);
2355 error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2356 io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
2357 PFR_FLAG_USERIOCTL);
2358 break;
2359
2360 case DIOCRINADEFINE:
2361 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2362 error = ENODEV;
2363 break;
2364 }
2365 pfr_table_copyin_cleanup(&io32->pfrio_table);
2366 error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
2367 io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
2368 io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2369 break;
2370
2371 default:
2372 VERIFY(0);
2373 /* NOTREACHED */
2374 }
2375 #ifdef __LP64__
2376 done:
2377 #endif
2378 return error;
2379 }
2380
/*
 * Handle the DIOCGETSTARTERS ioctl: copy the kernel's list of PF
 * "starter" tokens out to user space.  Exactly one of tok32/tok64 is
 * consulted, selected by the calling process's ABI (proc_is64bit(p)).
 *
 * Contract (visible from the code below):
 *  - tok{32,64}->size == 0 on entry is a size probe: the required
 *    buffer size is written back and no copyout happens.
 *  - Otherwise up to tok->size bytes of struct pfioc_token entries are
 *    copied out, and tok->size is updated to the bytes actually used.
 *
 * Returns 0 or an errno value (ENOENT, ERANGE, ENOMEM, or a copyout
 * error).
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	int token_size = 0, token_alloc_size = 0;
	struct pfioc_token *__sized_by(token_size) tokens = NULL;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}
		/* Guard nr_tokens * sizeof(token) against integer overflow. */
		if (os_mul_overflow(sizeof(struct pfioc_token), nr_tokens, &token_alloc_size)) {
			os_log_error(OS_LOG_DEFAULT, "%s: size overflows", __func__);
			error = ERANGE;
			break;
		}
		/* ocnt remembers the caller's buffer size; cnt counts down. */
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			/* Size probe: report how many bytes are needed. */
			if (p64) {
				tok64->size = token_alloc_size;
			} else {
				tok32->size = token_alloc_size;
			}
			break;
		}

#ifdef __LP64__
		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
#else
		token_buf = tok32->pgt_buf;
#endif
		/* Stage the tokens in a zeroed kernel buffer before copyout. */
		tokens = (struct pfioc_token *)kalloc_data(token_alloc_size, Z_WAITOK | Z_ZERO);
		token_size = token_alloc_size;
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		/* Flatten the token list into the staging buffer, stopping
		 * when the caller's buffer space (cnt) runs out. */
		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof(*tokens)) {
				break; /* no more buffer space left */
			}
			t = (struct pfioc_token *)(void *)ptr;
			t->token_value = entry->token.token_value;
			t->timestamp = entry->token.timestamp;
			t->pid = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof(struct pfioc_token);

			cnt -= sizeof(struct pfioc_token);
		}

		/* ocnt - cnt == bytes actually staged; copy out only those. */
		if (cnt < ocnt) {
			error = copyout(tokens, token_buf, ocnt - cnt);
		}

		/* Report the number of bytes written back to the caller. */
		if (p64) {
			tok64->size = ocnt - cnt;
		} else {
			tok32->size = ocnt - cnt;
		}

		kfree_data_sized_by(tokens, token_size);
		break;
	}

	default:
		/* Caller dispatches only DIOCGETSTARTERS here. */
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
2464
2465 static void
pf_expire_states_and_src_nodes(struct pf_rule * rule)2466 pf_expire_states_and_src_nodes(struct pf_rule *rule)
2467 {
2468 struct pf_state *state;
2469 struct pf_src_node *sn;
2470 int killed = 0;
2471
2472 /* expire the states */
2473 state = TAILQ_FIRST(&state_list);
2474 while (state) {
2475 if (state->rule.ptr == rule) {
2476 state->timeout = PFTM_PURGE;
2477 }
2478 state = TAILQ_NEXT(state, entry_list);
2479 }
2480 pf_purge_expired_states(pf_status.states);
2481
2482 /* expire the src_nodes */
2483 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2484 if (sn->rule.ptr != rule) {
2485 continue;
2486 }
2487 if (sn->states != 0) {
2488 RB_FOREACH(state, pf_state_tree_id,
2489 &tree_id) {
2490 if (state->src_node == sn) {
2491 state->src_node = NULL;
2492 }
2493 if (state->nat_src_node == sn) {
2494 state->nat_src_node = NULL;
2495 }
2496 }
2497 sn->states = 0;
2498 }
2499 sn->expire = 1;
2500 killed++;
2501 }
2502 if (killed) {
2503 pf_purge_expired_src_nodes();
2504 }
2505 }
2506
2507 static void
pf_delete_rule_from_ruleset(struct pf_ruleset * ruleset,int rs_num,struct pf_rule * rule)2508 pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2509 struct pf_rule *rule)
2510 {
2511 struct pf_rule *r;
2512 int nr = 0;
2513
2514 pf_expire_states_and_src_nodes(rule);
2515
2516 pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2517 if (ruleset->rules[rs_num].active.rcount-- == 0) {
2518 panic("%s: rcount value broken!", __func__);
2519 }
2520 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2521
2522 while (r) {
2523 r->nr = nr++;
2524 r = TAILQ_NEXT(r, entries);
2525 }
2526 }
2527
2528
2529 static void
pf_ruleset_cleanup(struct pf_ruleset * ruleset,int rs)2530 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2531 {
2532 pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2533 ruleset->rules[rs].active.ticket =
2534 ++ruleset->rules[rs].inactive.ticket;
2535 }
2536
2537 /*
2538 * req_dev encodes the PF interface. Currently, possible values are
2539 * 0 or PFRULE_PFM
2540 */
/*
 * Delete the rule whose ticket matches pr->rule.ticket, provided the
 * caller's owner string and requesting device (req_dev: 0 or
 * PFRULE_PFM) match the rule's.  If deleting the rule empties an
 * ownerless anchor, walk up and delete the now-empty anchor container
 * rules as well (the goto delete_rule loop).  Returns 0 or an errno.
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule = NULL;
	int is_anchor;
	int error = 0;
	int i;

	is_anchor = (pr->anchor_call[0] != '\0');
	if ((ruleset = pf_find_ruleset_with_owner(__unsafe_null_terminated_from_indexable(pr->anchor),
	    __unsafe_null_terminated_from_indexable(pr->rule.owner), is_anchor, &error)) == NULL) {
		goto done;
	}

	/* Scan every ruleset type for the rule carrying the ticket. */
	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		error = ENOENT;
		goto done;
	} else {
		/* Undo the loop's final increment: i is the matching type. */
		i--;
	}

	/* Only the rule's owner may delete it. */
	if (strbufcmp(rule->owner, pr->rule.owner)) {
		error = EACCES;
		goto done;
	}

	/*
	 * NOTE: the label below shares its name with the local variable
	 * `delete_rule` declared inside the if-body; they are unrelated.
	 */
delete_rule:
	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
	    ((strbufcmp(ruleset->anchor->owner, "")) == 0) &&
	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
		/* set rule & ruleset to parent and repeat */
		struct pf_rule *delete_rule = rule;
		struct pf_ruleset *delete_ruleset = ruleset;

		/* NOTE(review): this macro is never #undef'd and so leaks
		 * past this function in the preprocessor — confirm intended. */
#define parent_ruleset ruleset->anchor->parent->ruleset
		if (ruleset->anchor->parent == NULL) {
			ruleset = &pf_main_ruleset;
		} else {
			ruleset = &parent_ruleset;
		}

		/* Find the parent-side container rule for this anchor. */
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule &&
		    (rule->anchor != delete_ruleset->anchor)) {
			rule = TAILQ_NEXT(rule, entries);
		}
		if (rule == NULL) {
			panic("%s: rule not found!", __func__);
		}

		/*
		 * if request device != rule's device, bail :
		 * with error if ticket matches;
		 * without error if ticket doesn't match (i.e. its just cleanup)
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			if (rule->ticket != pr->rule.ticket) {
				goto done;
			} else {
				error = EACCES;
				goto done;
			}
		}

		if (delete_rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}

		/* Delete the child rule, bump its ruleset's tickets, then
		 * loop to consider deleting the (now possibly empty) parent. */
		pf_delete_rule_from_ruleset(delete_ruleset,
		    i, delete_rule);
		delete_ruleset->rules[i].active.ticket =
		    ++delete_ruleset->rules[i].inactive.ticket;
		goto delete_rule;
	} else {
		/*
		 * process deleting rule only if device that added the
		 * rule matches device that issued the request
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			error = EACCES;
			goto done;
		}
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}
		pf_delete_rule_from_ruleset(ruleset, i,
		    rule);
		pf_ruleset_cleanup(ruleset, i);
	}

done:
	if (ruleset) {
		pf_release_ruleset(ruleset);
		ruleset = NULL;
	}
	return error;
}
2645
2646 /*
2647 * req_dev encodes the PF interface. Currently, possible values are
2648 * 0 or PFRULE_PFM
2649 */
/*
 * Delete every rule whose owner matches `owner` and whose originating
 * device matches req_dev (0 or PFRULE_PFM), across all ruleset types.
 * Anchors owned by `owner` (or ownerless anchors) are descended into;
 * when an anchor's queue is exhausted the walk steps back out via
 * pf_deleterule_anchor_step_out().  `deleted` batches the per-ruleset
 * cleanup (skip-step recalc + ticket bump) until a queue is finished.
 */
static void
pf_delete_rule_by_owner(char const *owner, u_int32_t req_dev)
{
	struct pf_ruleset *__single ruleset;
	struct pf_rule *__single rule, *__single next;
	int deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			/* Grab the successor first: `rule` may be freed below. */
			next = TAILQ_NEXT(rule, entries);
			/*
			 * process deleting rule only if device that added the
			 * rule matches device that issued the request
			 */
			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
				rule = next;
			} else if (rule->anchor) {
				/* Anchor rule: descend if we own it (or no one does). */
				if (((strlcmp(rule->owner, owner, sizeof(rule->owner))) == 0) ||
				    ((strbufcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						/* Finish pending cleanup before leaving this queue. */
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						/* Empty anchor: delete its container rule. */
						if (rule->rule_flag &
						    PFRULE_PFM) {
							pffwrules--;
						}
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else {
					rule = next;
				}
			} else {
				/* Plain rule: delete it iff the owner matches. */
				if (((strlcmp(rule->owner, owner, sizeof(rule->owner))) == 0)) {
					/* delete rule */
					if (rule->rule_flag & PFRULE_PFM) {
						pffwrules--;
					}
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			/* End of this queue: clean up, then pop out of any anchor. */
			if (rule == NULL) {
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset) {
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
				}
			}
		}
	}
}
2718
2719 static void
pf_deleterule_anchor_step_out(struct pf_ruleset ** ruleset_ptr,int rs,struct pf_rule ** rule_ptr)2720 pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2721 int rs, struct pf_rule **rule_ptr)
2722 {
2723 struct pf_ruleset *ruleset = *ruleset_ptr;
2724 struct pf_rule *rule = *rule_ptr;
2725
2726 /* step out of anchor */
2727 struct pf_ruleset *rs_copy = ruleset;
2728 ruleset = ruleset->anchor->parent?
2729 &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2730
2731 rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2732 while (rule && (rule->anchor != rs_copy->anchor)) {
2733 rule = TAILQ_NEXT(rule, entries);
2734 }
2735 if (rule == NULL) {
2736 panic("%s: parent rule of anchor not found!", __func__);
2737 }
2738 if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2739 rule = TAILQ_NEXT(rule, entries);
2740 }
2741
2742 *ruleset_ptr = ruleset;
2743 *rule_ptr = rule;
2744 }
2745
2746 static void
pf_addrwrap_setup(struct pf_addr_wrap * aw)2747 pf_addrwrap_setup(struct pf_addr_wrap *aw)
2748 {
2749 VERIFY(aw);
2750 bzero(&aw->p, sizeof aw->p);
2751 }
2752
/*
 * Resolve and validate all external references of a freshly copied-in
 * rule: bound interface, tags, route labels, dynamic/table addresses,
 * anchor call, overload table, and the translation address pool.
 * Validation errors are accumulated in `error` so every reference is
 * attempted; on failure the rule is freed here (pf_rm_rule / pool_put)
 * and an errno value is returned.  On success the rule's pool is
 * attached, its counters are zeroed, and 0 is returned — ownership of
 * `rule` stays with the caller.
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset)
{
	struct pf_pooladdr *__single apa;
	int error = 0;

	/* Bind the rule to its interface; this failure path frees the
	 * rule directly since nothing else has been attached yet. */
	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(__unsafe_null_terminated_from_indexable(rule->ifname));
		if (rule->kif == NULL) {
			pool_put(&pf_rule_pl, rule);
			return EINVAL;
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
	/* Map tag names to tag ids (0 means the tag table is full). */
	if (rule->tagname[0]) {
		if ((rule->tag = pf_tagname2tag(__unsafe_null_terminated_from_indexable(rule->tagname))) == 0) {
			error = EBUSY;
		}
	}
	if (rule->match_tagname[0]) {
		if ((rule->match_tag =
		    pf_tagname2tag(__unsafe_null_terminated_from_indexable(rule->match_tagname))) == 0) {
			error = EBUSY;
		}
	}
	/* Route-to options require an explicit direction. */
	if (rule->rt && !rule->direction) {
		error = EINVAL;
	}
#if PFLOG
	if (!rule->log) {
		rule->logif = 0;
	}
	if (rule->logif >= PFLOGIFS_MAX) {
		error = EINVAL;
	}
#endif /* PFLOG */
	/* Clear, then resolve, the src/dst address wraps. */
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr)) {
		error = EBUSY;
	}
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) {
		error = EINVAL;
	}
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->src.addr)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) {
		error = EINVAL;
	}
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call, sizeof(pr->anchor_call))) {
		error = EINVAL;
	}
	/* Resolve table addresses in the staged pool-address buffer. */
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
	if (pf_tbladdr_setup(ruleset, &apa->addr)) {
		error = EINVAL;
	}

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    __unsafe_null_terminated_from_indexable(rule->overload_tblname))) == NULL) {
			error = EINVAL;
		} else {
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
		}
	}

	/* Move the staged pool addresses into the rule's own pool. */
	pf_mv_pool(&pf_pabuf, &rule->rpool.list);

	/* Translation rules (outside anchors) and route-to rules must
	 * have at least one pool address. */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
	    rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL)) {
		error = EINVAL;
	}

	if (error) {
		/* pf_rm_rule releases everything attached above. */
		pf_rm_rule(NULL, rule);
		return error;
	}
	/* For a NAT64 rule the rule's address family is AF_INET6 whereas
	 * the address pool's family will be AF_INET
	 */
	rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET: rule->af;
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return 0;
}
2850
2851 static int
pfioctl_ioc_rule(u_long cmd,int minordev,struct pfioc_rule * pr,struct proc * p)2852 pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
2853 {
2854 int error = 0;
2855 u_int32_t req_dev = 0;
2856 struct pf_ruleset *__single ruleset = NULL;
2857
2858 switch (cmd) {
2859 case DIOCADDRULE: {
2860 struct pf_rule *__single rule, *__single tail;
2861 int rs_num;
2862
2863 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2864 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2865 ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pr->anchor));
2866 if (ruleset == NULL) {
2867 error = EINVAL;
2868 break;
2869 }
2870 rs_num = pf_get_ruleset_number(pr->rule.action);
2871 if (rs_num >= PF_RULESET_MAX) {
2872 error = EINVAL;
2873 break;
2874 }
2875 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2876 error = EINVAL;
2877 break;
2878 }
2879 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
2880 error = EBUSY;
2881 break;
2882 }
2883 if (pr->pool_ticket != ticket_pabuf) {
2884 error = EBUSY;
2885 break;
2886 }
2887 rule = pool_get(&pf_rule_pl, PR_WAITOK);
2888 if (rule == NULL) {
2889 error = ENOMEM;
2890 break;
2891 }
2892 pf_rule_copyin(&pr->rule, rule, p, minordev);
2893 #if !INET
2894 if (rule->af == AF_INET) {
2895 pool_put(&pf_rule_pl, rule);
2896 error = EAFNOSUPPORT;
2897 break;
2898 }
2899 #endif /* INET */
2900 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2901 pf_rulequeue);
2902 if (tail) {
2903 rule->nr = tail->nr + 1;
2904 } else {
2905 rule->nr = 0;
2906 }
2907
2908 if ((error = pf_rule_setup(pr, rule, ruleset))) {
2909 break;
2910 }
2911 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2912 rule, entries);
2913 ruleset->rules[rs_num].inactive.rcount++;
2914 if (rule->rule_flag & PFRULE_PFM) {
2915 pffwrules++;
2916 }
2917
2918 if (rule->action == PF_NAT64) {
2919 os_atomic_inc(&pf_nat64_configured, relaxed);
2920 }
2921
2922 if (pr->anchor_call[0] == '\0') {
2923 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
2924 if (rule->rule_flag & PFRULE_PFM) {
2925 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
2926 }
2927 }
2928
2929 #if DUMMYNET
2930 if (rule->action == PF_DUMMYNET) {
2931 struct dummynet_event dn_event;
2932 uint32_t direction = DN_INOUT;
2933 bzero(&dn_event, sizeof(dn_event));
2934
2935 dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;
2936
2937 if (rule->direction == PF_IN) {
2938 direction = DN_IN;
2939 } else if (rule->direction == PF_OUT) {
2940 direction = DN_OUT;
2941 }
2942
2943 dn_event.dn_event_rule_config.dir = direction;
2944 dn_event.dn_event_rule_config.af = rule->af;
2945 dn_event.dn_event_rule_config.proto = rule->proto;
2946 dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
2947 dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
2948 strbufcpy(dn_event.dn_event_rule_config.ifname, rule->ifname);
2949
2950 dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2951 }
2952 #endif
2953 break;
2954 }
2955
2956 case DIOCGETRULES: {
2957 struct pf_rule *__single tail;
2958 int rs_num;
2959
2960 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2961 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2962 ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pr->anchor));
2963 if (ruleset == NULL) {
2964 error = EINVAL;
2965 break;
2966 }
2967 rs_num = pf_get_ruleset_number(pr->rule.action);
2968 if (rs_num >= PF_RULESET_MAX) {
2969 error = EINVAL;
2970 break;
2971 }
2972 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2973 pf_rulequeue);
2974 if (tail) {
2975 pr->nr = tail->nr + 1;
2976 } else {
2977 pr->nr = 0;
2978 }
2979 pr->ticket = ruleset->rules[rs_num].active.ticket;
2980 break;
2981 }
2982
2983 case DIOCGETRULE: {
2984 struct pf_rule *__single rule;
2985 int rs_num, i;
2986
2987 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2988 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2989 ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pr->anchor));
2990 if (ruleset == NULL) {
2991 error = EINVAL;
2992 break;
2993 }
2994 rs_num = pf_get_ruleset_number(pr->rule.action);
2995 if (rs_num >= PF_RULESET_MAX) {
2996 error = EINVAL;
2997 break;
2998 }
2999 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
3000 error = EBUSY;
3001 break;
3002 }
3003 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3004 while ((rule != NULL) && (rule->nr != pr->nr)) {
3005 rule = TAILQ_NEXT(rule, entries);
3006 }
3007 if (rule == NULL) {
3008 error = EBUSY;
3009 break;
3010 }
3011 pf_rule_copyout(rule, &pr->rule);
3012 if (pf_anchor_copyout(ruleset, rule, pr)) {
3013 error = EBUSY;
3014 break;
3015 }
3016 pfi_dynaddr_copyout(&pr->rule.src.addr);
3017 pfi_dynaddr_copyout(&pr->rule.dst.addr);
3018 pf_tbladdr_copyout(&pr->rule.src.addr);
3019 pf_tbladdr_copyout(&pr->rule.dst.addr);
3020 pf_rtlabel_copyout(&pr->rule.src.addr);
3021 pf_rtlabel_copyout(&pr->rule.dst.addr);
3022 for (i = 0; i < PF_SKIP_COUNT; ++i) {
3023 if (rule->skip[i].ptr == NULL) {
3024 pr->rule.skip[i].nr = -1;
3025 } else {
3026 pr->rule.skip[i].nr =
3027 rule->skip[i].ptr->nr;
3028 }
3029 }
3030
3031 if (pr->action == PF_GET_CLR_CNTR) {
3032 rule->evaluations = 0;
3033 rule->packets[0] = rule->packets[1] = 0;
3034 rule->bytes[0] = rule->bytes[1] = 0;
3035 }
3036 break;
3037 }
3038
3039 case DIOCCHANGERULE: {
3040 struct pfioc_rule *__single pcr = pr;
3041 struct pf_rule *__single oldrule = NULL, *__single newrule = NULL;
3042 struct pf_pooladdr *__single pa;
3043 u_int32_t nr = 0;
3044 int rs_num;
3045
3046 if (!(pcr->action == PF_CHANGE_REMOVE ||
3047 pcr->action == PF_CHANGE_GET_TICKET) &&
3048 pcr->pool_ticket != ticket_pabuf) {
3049 error = EBUSY;
3050 break;
3051 }
3052
3053 if (pcr->action < PF_CHANGE_ADD_HEAD ||
3054 pcr->action > PF_CHANGE_GET_TICKET) {
3055 error = EINVAL;
3056 break;
3057 }
3058 pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
3059 pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0';
3060 ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pcr->anchor));
3061 if (ruleset == NULL) {
3062 error = EINVAL;
3063 break;
3064 }
3065 rs_num = pf_get_ruleset_number(pcr->rule.action);
3066 if (rs_num >= PF_RULESET_MAX) {
3067 error = EINVAL;
3068 break;
3069 }
3070
3071 if (pcr->action == PF_CHANGE_GET_TICKET) {
3072 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3073 break;
3074 } else {
3075 if (pcr->ticket !=
3076 ruleset->rules[rs_num].active.ticket) {
3077 error = EINVAL;
3078 break;
3079 }
3080 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3081 error = EINVAL;
3082 break;
3083 }
3084 }
3085
3086 if (pcr->action != PF_CHANGE_REMOVE) {
3087 newrule = pool_get(&pf_rule_pl, PR_WAITOK);
3088 if (newrule == NULL) {
3089 error = ENOMEM;
3090 break;
3091 }
3092 pf_rule_copyin(&pcr->rule, newrule, p, minordev);
3093 #if !INET
3094 if (newrule->af == AF_INET) {
3095 pool_put(&pf_rule_pl, newrule);
3096 error = EAFNOSUPPORT;
3097 break;
3098 }
3099 #endif /* INET */
3100 if (newrule->ifname[0]) {
3101 newrule->kif = pfi_kif_get(__unsafe_null_terminated_from_indexable(newrule->ifname));
3102 if (newrule->kif == NULL) {
3103 pool_put(&pf_rule_pl, newrule);
3104 error = EINVAL;
3105 break;
3106 }
3107 pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
3108 } else {
3109 newrule->kif = NULL;
3110 }
3111
3112 if (newrule->tagname[0]) {
3113 if ((newrule->tag =
3114 pf_tagname2tag(__unsafe_null_terminated_from_indexable(newrule->tagname))) == 0) {
3115 error = EBUSY;
3116 }
3117 }
3118 if (newrule->match_tagname[0]) {
3119 if ((newrule->match_tag = pf_tagname2tag(
3120 __unsafe_null_terminated_from_indexable(newrule->match_tagname))) == 0) {
3121 error = EBUSY;
3122 }
3123 }
3124 if (newrule->rt && !newrule->direction) {
3125 error = EINVAL;
3126 }
3127 #if PFLOG
3128 if (!newrule->log) {
3129 newrule->logif = 0;
3130 }
3131 if (newrule->logif >= PFLOGIFS_MAX) {
3132 error = EINVAL;
3133 }
3134 #endif /* PFLOG */
3135 pf_addrwrap_setup(&newrule->src.addr);
3136 pf_addrwrap_setup(&newrule->dst.addr);
3137 if (pf_rtlabel_add(&newrule->src.addr) ||
3138 pf_rtlabel_add(&newrule->dst.addr)) {
3139 error = EBUSY;
3140 }
3141 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) {
3142 error = EINVAL;
3143 }
3144 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) {
3145 error = EINVAL;
3146 }
3147 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) {
3148 error = EINVAL;
3149 }
3150 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) {
3151 error = EINVAL;
3152 }
3153 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call, sizeof(pcr->anchor_call))) {
3154 error = EINVAL;
3155 }
3156 TAILQ_FOREACH(pa, &pf_pabuf, entries)
3157 if (pf_tbladdr_setup(ruleset, &pa->addr)) {
3158 error = EINVAL;
3159 }
3160
3161 if (newrule->overload_tblname[0]) {
3162 if ((newrule->overload_tbl = pfr_attach_table(
3163 ruleset, __unsafe_null_terminated_from_indexable(newrule->overload_tblname))) ==
3164 NULL) {
3165 error = EINVAL;
3166 } else {
3167 newrule->overload_tbl->pfrkt_flags |=
3168 PFR_TFLAG_ACTIVE;
3169 }
3170 }
3171
3172 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
3173 if (((((newrule->action == PF_NAT) ||
3174 (newrule->action == PF_RDR) ||
3175 (newrule->action == PF_BINAT) ||
3176 (newrule->rt > PF_FASTROUTE)) &&
3177 !newrule->anchor)) &&
3178 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) {
3179 error = EINVAL;
3180 }
3181
3182 if (error) {
3183 pf_rm_rule(NULL, newrule);
3184 break;
3185 }
3186 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3187 newrule->evaluations = 0;
3188 newrule->packets[0] = newrule->packets[1] = 0;
3189 newrule->bytes[0] = newrule->bytes[1] = 0;
3190 }
3191 pf_empty_pool(&pf_pabuf);
3192
3193 if (pcr->action == PF_CHANGE_ADD_HEAD) {
3194 oldrule = TAILQ_FIRST(
3195 ruleset->rules[rs_num].active.ptr);
3196 } else if (pcr->action == PF_CHANGE_ADD_TAIL) {
3197 oldrule = TAILQ_LAST(
3198 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
3199 } else {
3200 oldrule = TAILQ_FIRST(
3201 ruleset->rules[rs_num].active.ptr);
3202 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) {
3203 oldrule = TAILQ_NEXT(oldrule, entries);
3204 }
3205 if (oldrule == NULL) {
3206 if (newrule != NULL) {
3207 pf_rm_rule(NULL, newrule);
3208 }
3209 error = EINVAL;
3210 break;
3211 }
3212 }
3213
3214 if (pcr->action == PF_CHANGE_REMOVE) {
3215 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
3216 ruleset->rules[rs_num].active.rcount--;
3217 } else {
3218 if (oldrule == NULL) {
3219 TAILQ_INSERT_TAIL(
3220 ruleset->rules[rs_num].active.ptr,
3221 newrule, entries);
3222 } else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3223 pcr->action == PF_CHANGE_ADD_BEFORE) {
3224 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3225 } else {
3226 TAILQ_INSERT_AFTER(
3227 ruleset->rules[rs_num].active.ptr,
3228 oldrule, newrule, entries);
3229 }
3230 ruleset->rules[rs_num].active.rcount++;
3231 }
3232
3233 nr = 0;
3234 TAILQ_FOREACH(oldrule,
3235 ruleset->rules[rs_num].active.ptr, entries)
3236 oldrule->nr = nr++;
3237
3238 ruleset->rules[rs_num].active.ticket++;
3239
3240 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3241 #if SKYWALK
3242 pf_process_compatibilities();
3243 #endif // SKYWALK
3244 break;
3245 }
3246
3247 case DIOCINSERTRULE: {
3248 struct pf_rule *__single rule, *__single tail, *__single r;
3249 int rs_num;
3250 int is_anchor;
3251
3252 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3253 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3254 is_anchor = (pr->anchor_call[0] != '\0');
3255
3256 if ((ruleset = pf_find_ruleset_with_owner(__unsafe_null_terminated_from_indexable(pr->anchor),
3257 __unsafe_null_terminated_from_indexable(pr->rule.owner), is_anchor, &error)) == NULL) {
3258 break;
3259 }
3260
3261 rs_num = pf_get_ruleset_number(pr->rule.action);
3262 if (rs_num >= PF_RULESET_MAX) {
3263 error = EINVAL;
3264 break;
3265 }
3266 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3267 error = EINVAL;
3268 break;
3269 }
3270
3271 /* make sure this anchor rule doesn't exist already */
3272 if (is_anchor) {
3273 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3274 while (r) {
3275 if (r->anchor &&
3276 ((strbufcmp(r->anchor->name,
3277 pr->anchor_call)) == 0)) {
3278 if (((strbufcmp(pr->rule.owner,
3279 r->owner)) == 0) ||
3280 ((strbufcmp(r->owner, "")) == 0)) {
3281 error = EEXIST;
3282 } else {
3283 error = EPERM;
3284 }
3285 break;
3286 }
3287 r = TAILQ_NEXT(r, entries);
3288 }
3289 if (error != 0) {
3290 break;
3291 }
3292 }
3293
3294 rule = pool_get(&pf_rule_pl, PR_WAITOK);
3295 if (rule == NULL) {
3296 error = ENOMEM;
3297 break;
3298 }
3299 pf_rule_copyin(&pr->rule, rule, p, minordev);
3300 #if !INET
3301 if (rule->af == AF_INET) {
3302 pool_put(&pf_rule_pl, rule);
3303 error = EAFNOSUPPORT;
3304 break;
3305 }
3306 #endif /* INET */
3307 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3308 while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) {
3309 r = TAILQ_NEXT(r, entries);
3310 }
3311 if (r == NULL) {
3312 if ((tail =
3313 TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3314 pf_rulequeue)) != NULL) {
3315 rule->nr = tail->nr + 1;
3316 } else {
3317 rule->nr = 0;
3318 }
3319 } else {
3320 rule->nr = r->nr;
3321 }
3322
3323 if ((error = pf_rule_setup(pr, rule, ruleset))) {
3324 break;
3325 }
3326
3327 if (rule->anchor != NULL) {
3328 strbufcpy(rule->anchor->owner, rule->owner);
3329 }
3330
3331 if (r) {
3332 TAILQ_INSERT_BEFORE(r, rule, entries);
3333 while (r && ++r->nr) {
3334 r = TAILQ_NEXT(r, entries);
3335 }
3336 } else {
3337 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3338 rule, entries);
3339 }
3340 ruleset->rules[rs_num].active.rcount++;
3341
3342 /* Calculate checksum for the main ruleset */
3343 if (ruleset == &pf_main_ruleset) {
3344 error = pf_setup_pfsync_matching(ruleset);
3345 }
3346
3347 pf_ruleset_cleanup(ruleset, rs_num);
3348 rule->ticket = VM_KERNEL_ADDRHASH((u_int64_t)(uintptr_t)rule);
3349
3350 pr->rule.ticket = rule->ticket;
3351 pf_rule_copyout(rule, &pr->rule);
3352 if (rule->rule_flag & PFRULE_PFM) {
3353 pffwrules++;
3354 }
3355 if (rule->action == PF_NAT64) {
3356 os_atomic_inc(&pf_nat64_configured, relaxed);
3357 }
3358
3359 if (pr->anchor_call[0] == '\0') {
3360 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
3361 if (rule->rule_flag & PFRULE_PFM) {
3362 INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
3363 }
3364 }
3365 #if SKYWALK
3366 pf_process_compatibilities();
3367 #endif // SKYWALK
3368 break;
3369 }
3370
3371 case DIOCDELETERULE: {
3372 ASSERT(ruleset == NULL);
3373 pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3374 pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3375
3376 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3377 error = EINVAL;
3378 break;
3379 }
3380
3381 /* get device through which request is made */
3382 if ((uint8_t)minordev == PFDEV_PFM) {
3383 req_dev |= PFRULE_PFM;
3384 }
3385
3386 if (pr->rule.ticket) {
3387 if ((error = pf_delete_rule_by_ticket(pr, req_dev))) {
3388 break;
3389 }
3390 } else {
3391 pf_delete_rule_by_owner(__unsafe_null_terminated_from_indexable(pr->rule.owner), req_dev);
3392 }
3393 pr->nr = pffwrules;
3394 if (pr->rule.action == PF_NAT64) {
3395 os_atomic_dec(&pf_nat64_configured, relaxed);
3396 }
3397 #if SKYWALK
3398 pf_process_compatibilities();
3399 #endif // SKYWALK
3400 break;
3401 }
3402
3403 default:
3404 VERIFY(0);
3405 /* NOTREACHED */
3406 }
3407 if (ruleset != NULL) {
3408 pf_release_ruleset(ruleset);
3409 ruleset = NULL;
3410 }
3411
3412 return error;
3413 }
3414
/*
 * Handle the state-killing ioctls.
 *
 * DIOCCLRSTATES kills every state matching the optional interface-name
 * and owner filters; DIOCKILLSTATES additionally filters on address
 * family, protocol, source/destination address masks and ports.
 *
 * cmd: ioctl command; anything other than the two handled cases trips
 *      VERIFY(0).
 * psk: in/out request.  psk_ifname/psk_ownername are NUL-terminated
 *      here before use.  On return psk->psk_af is overloaded to carry
 *      the number of states killed.
 * p:   calling process (unused).
 *
 * Always returns 0; the kill count is reported via psk->psk_af.
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	/* Defensively NUL-terminate user-supplied filter strings. */
	psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state *s, *nexts;
		int killed = 0;

		/*
		 * Walk the id tree; the successor is fetched before any
		 * unlink because pf_unlink_state() removes s from the tree.
		 */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strbufcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			/* A state with no rule cannot satisfy an owner filter. */
			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strbufcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the kill-count return field. */
		psk->psk_af = (sa_family_t)killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state *s, *nexts;
		struct pf_state_key *sk;
		struct pf_state_host *src, *dst;
		int killed = 0;

		/* Same unlink-safe iteration as DIOCCLRSTATES above. */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strbufcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strbufcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			/*
			 * Map the state-key endpoints to the caller's notion
			 * of src/dst based on the state's direction.
			 */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			/*
			 * Zero af/proto act as wildcards; addresses are
			 * matched against the caller's mask with optional
			 * negation, ports via pf_match_xport().
			 */
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the kill-count return field. */
		psk->psk_af = (sa_family_t)killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3541
/*
 * Handle single-state ioctls.
 *
 * DIOCADDSTATE imports a user-supplied pfsync state image and inserts
 * it into the state table; DIOCGETSTATE exports one state looked up by
 * (id, creatorid).
 *
 * cmd: ioctl command (VERIFY(0) on anything else).
 * ps:  in/out state container.
 * p:   calling process (unused).
 *
 * Returns 0 on success, or EINVAL/ENOMEM/ENOENT/EEXIST on failure.
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state *__single sp = &ps->state;
		struct pf_state *__single s;
		struct pf_state_key *__single sk;
		struct pfi_kif *__single kif;

		/* Reject user-supplied timeout indices outside the table. */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		/* Allocate and attach a state key; on failure free the state. */
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(__unsafe_null_terminated_from_indexable(sp->ifname));
		if (kif == NULL) {
			/* Detach the key attached above before freeing. */
			pf_detach_state(s, 0);
			pool_put(&pf_state_pl, s);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			/* Insert failed (duplicate); drop the kif reference. */
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state *s;
		struct pf_state_cmp id_key;

		/* Look up by (id, creatorid) copied from the user request. */
		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3615
/*
 * DIOCGETSTATES: export all (non-unlinked) states to a user buffer.
 *
 * Supports both 32- and 64-bit callers via the two request layouts;
 * proc_is64bit() selects which one is live.  When the caller passes
 * ps_len == 0 the handler only reports the space needed for the whole
 * state list; otherwise it copies out as many pfsync_state records as
 * fit and writes back the number of bytes actually used.
 *
 * Returns 0 on success or the copyout() error.
 */
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: {   /* struct pfioc_states */
		struct pf_state *__single state;
		struct pfsync_state *__single pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* Size probe: report required length, copy nothing. */
			size = sizeof(struct pfsync_state) * pf_status.states;
			if (p64) {
				ps64->ps_len = size;
			} else {
				ps32->ps_len = size;
			}
			break;
		}

		/* Single scratch record reused for each export/copyout. */
		pstore = kalloc_type(struct pfsync_state,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
#ifdef __LP64__
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);
#else
		buf = ps32->ps_buf;
#endif

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* Skip states already pulled out of the tables. */
			if (state->timeout != PFTM_UNLINKED) {
				/* Stop once the next record would overflow
				 * the caller's buffer. */
				if ((nr + 1) * sizeof(*pstore) > (unsigned)len) {
					break;
				}

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof(*pstore));
				if (error) {
					kfree_type(struct pfsync_state, pstore);
					goto fail;
				}
				buf += sizeof(*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* Report the number of bytes actually written. */
		size = sizeof(struct pfsync_state) * nr;
		if (p64) {
			ps64->ps_len = size;
		} else {
			ps32->ps_len = size;
		}

		kfree_type(struct pfsync_state, pstore);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
3688
/*
 * DIOCNATLOOK: look up the NAT translation for a connection.
 *
 * The caller supplies the connection 5-tuple as seen from its side; the
 * handler performs a reversed-direction lookup in the state tree and
 * reports the translated addresses/ports in pnl->rsaddr/rdaddr and
 * pnl->rsxport/rdxport.
 *
 * Returns 0 on success, EINVAL for an under-specified query, E2BIG when
 * more than one state matches, or ENOENT when none does.
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key *sk;
		struct pf_state *state;
		struct pf_state_key_cmp key;
		int m = 0, direction = pnl->direction;

		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/*
		 * Require protocol and both addresses; TCP/UDP queries
		 * additionally need both ports to be non-zero.
		 */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port))) {
			error = EINVAL;
		} else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				/* Inbound: build a gateway-side key. */
				key.af_gwy = pnl->af;
				PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
				    pnl->af);
				memcpy(&key.ext_gwy.xport, &pnl->dxport,
				    sizeof(key.ext_gwy.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof(key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				/* Outbound: build a LAN-side key. */
				key.af_lan = pnl->af;
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof(key.lan.xport));
				PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
				    pnl->af);
				memcpy(&key.ext_lan.xport, &pnl->sxport,
				    sizeof(key.ext_lan.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1) {
				error = E2BIG;  /* more than one state */
			} else if (state != NULL) {
				sk = state->state_key;
				/*
				 * Report the translated endpoint; the other
				 * endpoint is echoed back unchanged.
				 */
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af_lan);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof(pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof(pnl->rdxport));
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af_gwy);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof(pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof(pnl->rsxport));
				}
			} else {
				error = ENOENT;
			}
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3777
3778 static int
pfioctl_ioc_tm(u_long cmd,struct pfioc_tm * pt,struct proc * p)3779 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3780 {
3781 #pragma unused(p)
3782 int error = 0;
3783
3784 switch (cmd) {
3785 case DIOCSETTIMEOUT: {
3786 int old;
3787
3788 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3789 pt->seconds < 0) {
3790 error = EINVAL;
3791 goto fail;
3792 }
3793 old = pf_default_rule.timeout[pt->timeout];
3794 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) {
3795 pt->seconds = 1;
3796 }
3797 pf_default_rule.timeout[pt->timeout] = pt->seconds;
3798 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) {
3799 wakeup(pf_purge_thread_fn);
3800 }
3801 pt->seconds = old;
3802 break;
3803 }
3804
3805 case DIOCGETTIMEOUT: {
3806 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3807 error = EINVAL;
3808 goto fail;
3809 }
3810 pt->seconds = pf_default_rule.timeout[pt->timeout];
3811 break;
3812 }
3813
3814 default:
3815 VERIFY(0);
3816 /* NOTREACHED */
3817 }
3818 fail:
3819 return error;
3820 }
3821
3822 static int
pfioctl_ioc_limit(u_long cmd,struct pfioc_limit * pl,struct proc * p)3823 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3824 {
3825 #pragma unused(p)
3826 int error = 0;
3827
3828 switch (cmd) {
3829 case DIOCGETLIMIT: {
3830 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3831 error = EINVAL;
3832 goto fail;
3833 }
3834 pl->limit = pf_pool_limits[pl->index].limit;
3835 break;
3836 }
3837
3838 case DIOCSETLIMIT: {
3839 int old_limit;
3840
3841 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3842 pf_pool_limits[pl->index].pp == NULL) {
3843 error = EINVAL;
3844 goto fail;
3845 }
3846 pool_sethardlimit(pf_pool_limits[pl->index].pp,
3847 pl->limit, NULL, 0);
3848 old_limit = pf_pool_limits[pl->index].limit;
3849 pf_pool_limits[pl->index].limit = pl->limit;
3850 pl->limit = old_limit;
3851 break;
3852 }
3853
3854 default:
3855 VERIFY(0);
3856 /* NOTREACHED */
3857 }
3858 fail:
3859 return error;
3860 }
3861
/*
 * Handle the pool-address ioctls: begin a new staging buffer
 * (DIOCBEGINADDRS), append an address to it (DIOCADDADDR), enumerate or
 * fetch addresses of an existing pool (DIOCGETADDRS / DIOCGETADDR), and
 * edit an active pool in place (DIOCCHANGEADDR).
 *
 * pp/pca tickets guard against concurrent transactions; anchor strings
 * from userland are NUL-terminated before use.
 *
 * Returns 0 on success or EINVAL/EBUSY/ENOMEM/EAFNOSUPPORT on failure.
 * Any ruleset obtained via pf_find_ruleset() is released on the common
 * exit path.
 */
static int
pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
{
#pragma unused(p)
	struct pf_pooladdr *__single pa = NULL;
	struct pf_pool *__single pool = NULL;
	int error = 0;
	struct pf_ruleset *__single ruleset = NULL;

	switch (cmd) {
	case DIOCBEGINADDRS: {
		/* Start a fresh staging buffer under a new ticket. */
		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		/* Only the holder of the current ticket may append. */
		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#if !INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		pf_pooladdr_copyin(&pp->addr, pa);
		if (pa->ifname[0]) {
			/* Resolve and take a rule reference on the kif. */
			pa->kif = pfi_kif_get(__unsafe_null_terminated_from_indexable(pa->ifname));
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		pf_addrwrap_setup(&pa->addr);
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			/* Undo partial setup before freeing. */
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		/* Count the addresses in the selected pool. */
		pp->nr = 0;
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(__unsafe_null_terminated_from_indexable(pp->anchor), pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
		pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		u_int32_t nr = 0;

		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(__unsafe_null_terminated_from_indexable(pp->anchor), pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		/* Linear walk to the pp->nr'th entry. */
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		pf_pooladdr_copyout(pa, &pp->addr);
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *__single pca = pp;
		struct pf_pooladdr *__single oldpa = NULL, *__single newpa = NULL;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pca->anchor));
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(__unsafe_null_terminated_from_indexable(pca->anchor), pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			/* Build and validate the replacement entry first. */
			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			pf_pooladdr_copyin(&pca->addr, newpa);
#if !INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(__unsafe_null_terminated_from_indexable(newpa->ifname));
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else {
				newpa->kif = NULL;
			}
			pf_addrwrap_setup(&newpa->addr);
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		/* Locate the anchor point for the requested action. */
		if (pca->action == PF_CHANGE_ADD_HEAD) {
			oldpa = TAILQ_FIRST(&pool->list);
		} else if (pca->action == PF_CHANGE_ADD_TAIL) {
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		} else {
			int i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < (int)pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			/* Unlink and tear down the old entry. */
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL) {
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			} else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE) {
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			} else {
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
			}
		}

		/*
		 * NOTE(review): if PF_CHANGE_REMOVE deleted the last entry,
		 * TAILQ_FIRST() is NULL here and the PF_ACPY below would
		 * dereference pool->cur — confirm whether an empty pool can
		 * reach this path.
		 */
		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	/* Drop the ruleset reference taken by DIOCCHANGEADDR, if any. */
	if (ruleset) {
		pf_release_ruleset(ruleset);
		ruleset = NULL;
	}

	return error;
}
4081
/*
 * Handle the ruleset-enumeration ioctls.
 *
 * DIOCGETRULESETS counts the child anchors of the ruleset named by
 * pr->path; DIOCGETRULESET returns the name of the pr->nr'th child.
 * For the main ruleset (anchor == NULL) the children are the top-level
 * anchors in the global tree.
 *
 * Returns 0 on success, EINVAL when the path does not resolve, or
 * EBUSY when the requested index is out of range.
 */
static int
pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
{
#pragma unused(p)
	int error = 0;
	struct pf_ruleset *ruleset = NULL;

	switch (cmd) {
	case DIOCGETRULESETS: {
		struct pf_anchor *anchor;

		/* NUL-terminate user-supplied strings before use. */
		pr->path[sizeof(pr->path) - 1] = '\0';
		pr->name[sizeof(pr->name) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pr->path))) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL) {
				/* count only top-level anchors */
				pr->nr++;
			}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		struct pf_anchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(__unsafe_null_terminated_from_indexable(pr->path))) == NULL) {
			error = EINVAL;
			break;
		}
		/* Empty name doubles as the "not found" sentinel below. */
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL && nr++ == pr->nr) {
				strbufcpy(pr->name, anchor->name);
				break;
			}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			if (nr++ == pr->nr) {
				strbufcpy(pr->name, anchor->name);
				break;
			}
		}
		if (!pr->name[0]) {
			/* Index pr->nr was past the last child. */
			error = EBUSY;
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	/* Drop the reference from pf_find_ruleset(), if any. */
	if (ruleset) {
		pf_release_ruleset(ruleset);
		ruleset = NULL;
	}
	return error;
}
4156
/*
 * Handle the transaction ioctls (DIOCXBEGIN / DIOCXROLLBACK /
 * DIOCXCOMMIT) over an array of pfioc_trans_e elements supplied by
 * userland.  Supports 32- and 64-bit callers; esize must match the
 * kernel's element size or ENODEV is returned.
 *
 * DIOCXBEGIN opens an inactive ruleset/table per element and writes
 * the resulting tickets back to the user array.  DIOCXROLLBACK discards
 * them.  DIOCXCOMMIT first validates every element's ticket in a
 * read-only pass, then commits all elements in a second pass.
 *
 * Returns 0 on success or ENODEV/EFAULT/EBUSY/EINVAL (or an error from
 * the pfr_*/pf_*_rules helpers) on failure.
 */
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int error = 0, esize, size;
	user_addr_t buf;
	struct pf_ruleset *rs = NULL;

#ifdef __LP64__
	int p64 = proc_is64bit(p);

	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);
#else
#pragma unused(io64, p)
	esize = io32->esize;
	size = io32->size;
	buf = io32->array;
#endif

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e *__single ioe;
		struct pfr_table *__single table;
		int i;

		/* Reject callers built against a different element layout. */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				/* ALTQ is not handled here; accept as no-op. */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strbufcpy(table->pfrt_anchor, ioe->anchor);
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, __unsafe_null_terminated_from_indexable(ioe->anchor)))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			}
			/* Write the newly issued ticket back to userland. */
			if (copyout(ioe, buf, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e *__single ioe;
		struct pfr_table *__single table;
		int i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strbufcpy(table->pfrt_anchor, ioe->anchor);
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, __unsafe_null_terminated_from_indexable(ioe->anchor)))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e *__single ioe;
		struct pfr_table *__single table;
		user_addr_t _buf = buf;  /* saved so the commit pass can rescan */
		int i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			/*
			 * NOTE(review): rs is reassigned on every iteration
			 * below but only the final value is released at the
			 * fail: label — confirm whether pf_find_ruleset()
			 * references from earlier iterations are dropped
			 * elsewhere.
			 */
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				/* Validate the table transaction ticket. */
				rs = pf_find_ruleset(__unsafe_null_terminated_from_indexable(ioe->anchor));
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EINVAL;
					goto fail;
				}
				/* Validate the inactive-ruleset ticket. */
				rs = pf_find_ruleset(__unsafe_null_terminated_from_indexable(ioe->anchor));
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strbufcpy(table->pfrt_anchor, ioe->anchor);
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, __unsafe_null_terminated_from_indexable(ioe->anchor)))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
#if SKYWALK
		pf_process_compatibilities();
#endif // SKYWALK
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	/* Drop the last ruleset reference taken during validation. */
	if (rs) {
		pf_release_ruleset(rs);
		rs = NULL;
	}
	return error;
}
4386
/*
 * DIOCGETSRCNODES: export the source-tracking tree to a user buffer.
 *
 * Supports 32- and 64-bit callers.  When psn_len == 0 the handler only
 * reports the space required for all nodes; otherwise it copies out as
 * many pf_src_node records as fit, with kernel pointers scrubbed and
 * timestamps converted to relative seconds, then writes back the byte
 * count actually used.
 *
 * Returns 0 on success or the copyout() error.
 */
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node *__single n, *__single pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			/* Size probe: count nodes, report required length. */
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			nr++;

			size = sizeof(struct pf_src_node) * nr;
			if (p64) {
				psn64->psn_len = size;
			} else {
				psn32->psn_len = size;
			}
			break;
		}

		/* Single scratch record reused for each copyout. */
		pstore = kalloc_type(struct pf_src_node, Z_WAITOK | Z_NOFAIL);
#ifdef __LP64__
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);
#else
		buf = psn32->psn_buf;
#endif

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* Stop once the next record would overflow. */
			if ((nr + 1) * sizeof(*pstore) > (unsigned)space) {
				break;
			}

			bcopy(n, pstore, sizeof(*pstore));
			if (n->rule.ptr != NULL) {
				pstore->rule.nr = n->rule.ptr->nr;
			}
			/* Convert absolute timestamps to relative seconds. */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs) {
				pstore->expire -= secs;
			} else {
				pstore->expire = 0;
			}

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds) {
				pstore->conn_rate.count = 0;
			} else {
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;
			}

			/* Scrub kernel pointers before exporting to userland. */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof(*pstore));
			if (error) {
				kfree_type(struct pf_src_node, pstore);
				goto fail;
			}
			buf += sizeof(*pstore);
			nr++;
		}

		/* Report the number of bytes actually written. */
		size = sizeof(struct pf_src_node) * nr;
		if (p64) {
			psn64->psn_len = size;
		} else {
			psn32->psn_len = size;
		}

		kfree_type(struct pf_src_node, pstore);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4481
4482 static int
pfioctl_ioc_src_node_kill(u_long cmd,struct pfioc_src_node_kill * psnk,struct proc * p)4483 pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
4484 struct proc *p)
4485 {
4486 #pragma unused(p)
4487 int error = 0;
4488
4489 switch (cmd) {
4490 case DIOCKILLSRCNODES: {
4491 struct pf_src_node *sn;
4492 struct pf_state *s;
4493 int killed = 0;
4494
4495 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
4496 if (PF_MATCHA(psnk->psnk_src.neg,
4497 &psnk->psnk_src.addr.v.a.addr,
4498 &psnk->psnk_src.addr.v.a.mask,
4499 &sn->addr, sn->af) &&
4500 PF_MATCHA(psnk->psnk_dst.neg,
4501 &psnk->psnk_dst.addr.v.a.addr,
4502 &psnk->psnk_dst.addr.v.a.mask,
4503 &sn->raddr, sn->af)) {
4504 /* Handle state to src_node linkage */
4505 if (sn->states != 0) {
4506 RB_FOREACH(s, pf_state_tree_id,
4507 &tree_id) {
4508 if (s->src_node == sn) {
4509 s->src_node = NULL;
4510 }
4511 if (s->nat_src_node == sn) {
4512 s->nat_src_node = NULL;
4513 }
4514 }
4515 sn->states = 0;
4516 }
4517 sn->expire = 1;
4518 killed++;
4519 }
4520 }
4521
4522 if (killed > 0) {
4523 pf_purge_expired_src_nodes();
4524 }
4525
4526 psnk->psnk_af = (sa_family_t)killed;
4527 break;
4528 }
4529
4530 default:
4531 VERIFY(0);
4532 /* NOTREACHED */
4533 }
4534
4535 return error;
4536 }
4537
4538 static int
pfioctl_ioc_iface(u_long cmd,struct pfioc_iface_32 * io32,struct pfioc_iface_64 * io64,struct proc * p)4539 pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
4540 struct pfioc_iface_64 *io64, struct proc *p)
4541 {
4542 int p64 = proc_is64bit(p);
4543 int error = 0;
4544
4545 switch (cmd) {
4546 case DIOCIGETIFACES: {
4547 user_addr_t buf;
4548 int esize;
4549
4550 #ifdef __LP64__
4551 buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
4552 esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
4553 #else
4554 buf = io32->pfiio_buffer;
4555 esize = io32->pfiio_esize;
4556 #endif
4557
4558 /* esize must be that of the user space version of pfi_kif */
4559 if (esize != sizeof(struct pfi_uif)) {
4560 error = ENODEV;
4561 break;
4562 }
4563 if (p64) {
4564 io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4565 } else {
4566 io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4567 }
4568 error = pfi_get_ifaces(
4569 p64 ? __unsafe_null_terminated_from_indexable(io64->pfiio_name) :
4570 __unsafe_null_terminated_from_indexable(io32->pfiio_name), buf,
4571 p64 ? &io64->pfiio_size : &io32->pfiio_size);
4572 break;
4573 }
4574
4575 case DIOCSETIFFLAG: {
4576 if (p64) {
4577 io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4578 } else {
4579 io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4580 }
4581
4582 error = pfi_set_flags(
4583 p64 ? __unsafe_null_terminated_from_indexable(io64->pfiio_name) :
4584 __unsafe_null_terminated_from_indexable(io32->pfiio_name),
4585 p64 ? io64->pfiio_flags : io32->pfiio_flags);
4586 break;
4587 }
4588
4589 case DIOCCLRIFFLAG: {
4590 if (p64) {
4591 io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4592 } else {
4593 io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4594 }
4595
4596 error = pfi_clear_flags(
4597 p64 ? __unsafe_null_terminated_from_indexable(io64->pfiio_name) :
4598 __unsafe_null_terminated_from_indexable(io32->pfiio_name),
4599 p64 ? io64->pfiio_flags : io32->pfiio_flags);
4600 break;
4601 }
4602
4603 default:
4604 VERIFY(0);
4605 /* NOTREACHED */
4606 }
4607
4608 return error;
4609 }
4610
4611 int
pf_af_hook(struct ifnet * ifp,struct mbuf ** mppn,struct mbuf ** mp,unsigned int af,int input,struct ip_fw_args * fwa)4612 pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
4613 unsigned int af, int input, struct ip_fw_args *fwa)
4614 {
4615 int error = 0;
4616 struct mbuf *__single nextpkt;
4617 net_thread_marks_t __single marks;
4618 struct ifnet *__single pf_ifp = ifp;
4619
4620 /* Always allow traffic on co-processor and management interfaces. */
4621 if (ifp != NULL &&
4622 ((!intcoproc_unrestricted && IFNET_IS_INTCOPROC(ifp)) ||
4623 (!management_data_unrestricted && IFNET_IS_MANAGEMENT(ifp)))) {
4624 return 0;
4625 }
4626
4627 marks = net_thread_marks_push(NET_THREAD_HELD_PF);
4628
4629 if (marks != net_thread_marks_none) {
4630 lck_rw_lock_shared(&pf_perim_lock);
4631 if (!pf_is_enabled) {
4632 goto done;
4633 }
4634 lck_mtx_lock(&pf_lock);
4635 }
4636
4637 if (mppn != NULL && *mppn != NULL) {
4638 VERIFY(*mppn == *mp);
4639 }
4640 if ((nextpkt = (*mp)->m_nextpkt) != NULL) {
4641 (*mp)->m_nextpkt = NULL;
4642 }
4643
4644 /*
4645 * For packets destined to locally hosted IP address
4646 * ip_output_list sets Mbuf's pkt header's rcvif to
4647 * the interface hosting the IP address.
4648 * While on the output path ifp passed to pf_af_hook
4649 * to such local communication is the loopback interface,
4650 * the input path derives ifp from mbuf packet header's
4651 * rcvif.
4652 * This asymmetry caues issues with PF.
4653 * To handle that case, we have a limited change here to
4654 * pass interface as loopback if packets are looped in.
4655 */
4656 if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
4657 pf_ifp = lo_ifp;
4658 }
4659
4660 switch (af) {
4661 #if INET
4662 case AF_INET: {
4663 error = pf_inet_hook(pf_ifp, mp, input, fwa);
4664 break;
4665 }
4666 #endif /* INET */
4667 case AF_INET6:
4668 error = pf_inet6_hook(pf_ifp, mp, input, fwa);
4669 break;
4670 default:
4671 break;
4672 }
4673
4674 /* When packet valid, link to the next packet */
4675 if (*mp != NULL && nextpkt != NULL) {
4676 struct mbuf *m = *mp;
4677 while (m->m_nextpkt != NULL) {
4678 m = m->m_nextpkt;
4679 }
4680 m->m_nextpkt = nextpkt;
4681 }
4682 /* Fix up linkage of previous packet in the chain */
4683 if (mppn != NULL) {
4684 if (*mp != NULL) {
4685 *mppn = *mp;
4686 } else {
4687 *mppn = nextpkt;
4688 }
4689 }
4690
4691 if (marks != net_thread_marks_none) {
4692 lck_mtx_unlock(&pf_lock);
4693 }
4694
4695 done:
4696 if (marks != net_thread_marks_none) {
4697 lck_rw_done(&pf_perim_lock);
4698 }
4699
4700 net_thread_marks_pop(marks);
4701 return error;
4702 }
4703
4704
#if INET
/*
 * Run one IPv4 packet through pf_test_mbuf, finalizing any deferred
 * transport checksum first and handling the byte-order expectations of
 * the PF core.  Returns 0 on pass, EHOSTUNREACH on drop, EJUSTRETURN
 * when PF consumed the mbuf.
 */
static __attribute__((noinline)) int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

	/* PF expects ip_len/ip_off in network byte order. */
#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* PF rejected the packet; drop it here. */
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* PF took ownership of the mbuf; nothing left to free. */
			error = EJUSTRETURN;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/* Restore host byte order on the surviving packet. */
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return error;
}
#endif /* INET */
4759
4760 int __attribute__((noinline))
pf_inet6_hook(struct ifnet * ifp,struct mbuf ** mp,int input,struct ip_fw_args * fwa)4761 pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
4762 struct ip_fw_args *fwa)
4763 {
4764 int error = 0;
4765
4766 /*
4767 * If the packet is outbound, is originated locally, is flagged for
4768 * delayed UDP/TCP checksum calculation, and is about to be processed
4769 * for an interface that doesn't support the appropriate checksum
4770 * offloading, then calculated the checksum here so that PF can adjust
4771 * it properly.
4772 */
4773 if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
4774 static const int mask = CSUM_DELAY_IPV6_DATA;
4775 const int flags = (*mp)->m_pkthdr.csum_flags &
4776 ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
4777
4778 if (flags & mask) {
4779 /*
4780 * Checksum offload should not have been enabled
4781 * when extension headers exist, thus 0 for optlen.
4782 */
4783 in6_delayed_cksum(*mp);
4784 (*mp)->m_pkthdr.csum_flags &= ~mask;
4785 }
4786 }
4787
4788 if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
4789 if (*mp != NULL) {
4790 m_freem(*mp);
4791 *mp = NULL;
4792 error = EHOSTUNREACH;
4793 } else {
4794 error = EJUSTRETURN;
4795 }
4796 }
4797 return error;
4798 }
4799
4800 int
pf_ifaddr_hook(struct ifnet * ifp)4801 pf_ifaddr_hook(struct ifnet *ifp)
4802 {
4803 struct pfi_kif *kif = ifp->if_pf_kif;
4804
4805 if (kif != NULL) {
4806 lck_rw_lock_shared(&pf_perim_lock);
4807 lck_mtx_lock(&pf_lock);
4808
4809 pfi_kifaddr_update(kif);
4810
4811 lck_mtx_unlock(&pf_lock);
4812 lck_rw_done(&pf_perim_lock);
4813 }
4814 return 0;
4815 }
4816
4817 /*
4818 * Caller acquires dlil lock as writer (exclusive)
4819 */
4820 void
pf_ifnet_hook(struct ifnet * ifp,int attach)4821 pf_ifnet_hook(struct ifnet *ifp, int attach)
4822 {
4823 lck_rw_lock_shared(&pf_perim_lock);
4824 lck_mtx_lock(&pf_lock);
4825 if (attach) {
4826 pfi_attach_ifnet(ifp);
4827 } else {
4828 pfi_detach_ifnet(ifp);
4829 }
4830 lck_mtx_unlock(&pf_lock);
4831 lck_rw_done(&pf_perim_lock);
4832 }
4833
4834 static void
pf_attach_hooks(void)4835 pf_attach_hooks(void)
4836 {
4837 ifnet_head_lock_shared();
4838 /*
4839 * Check against ifnet_addrs[] before proceeding, in case this
4840 * is called very early on, e.g. during dlil_init() before any
4841 * network interface is attached.
4842 */
4843 if (ifnet_addrs != NULL) {
4844 int i;
4845
4846 for (i = 0; i <= if_index; i++) {
4847 struct ifnet *ifp = ifindex2ifnet[i];
4848 if (ifp != NULL) {
4849 pfi_attach_ifnet(ifp);
4850 }
4851 }
4852 }
4853 ifnet_head_done();
4854 }
4855
#if 0
/* currently unused along with pfdetach() */
/*
 * Detach PF from every interface it is currently attached to.
 *
 * Fix: the loop index 'i' was declared inside the loop body, after the
 * for-clause had already referenced it — this would not compile if the
 * surrounding #if 0 were ever removed.  Declare it before the loop.
 */
static void
pf_detach_hooks(void)
{
	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		int i;

		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];
			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}
#endif
4875
4876 /*
4877 * 'D' group ioctls.
4878 *
4879 * The switch statement below does nothing at runtime, as it serves as a
 * compile time check to ensure that all of the socket 'D' ioctls (those
 * in the 'D' group going thru soo_ioctl) that are made available by the
 * networking stack are unique. This works as long as this routine gets
4883 * updated each time a new interface ioctl gets added.
4884 *
4885 * Any failures at compile time indicates duplicated ioctl values.
4886 */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to _CASSERT() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 * A duplicated ioctl value produces a "duplicate case value"
	 * compile error here.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES:
	case DIOCGIFSPEED:
		;
	}
}
4971
#if SKYWALK
/*
 * Recompute the PF/Skywalk compatibility markers after a ruleset change.
 * Only runs on macOS or server kernels.
 */
static void
pf_process_compatibilities(void)
{
	uint32_t flags;
	int no_custom_config;
	int proxy_ok;

	if (!kernel_is_macos_or_server()) {
		return;
	}

	flags = pf_check_compatible_rules();

	/* Compatible only when no custom anchors or rules are installed. */
	no_custom_config = (flags &
	    (PF_COMPATIBLE_FLAGS_CUSTOM_ANCHORS_PRESENT |
	    PF_COMPATIBLE_FLAGS_CUSTOM_RULES_PRESENT)) == 0;
	net_filter_event_mark(NET_FILTER_EVENT_PF, no_custom_config);

	/* Proxy path is fine when PF is off, or when no custom rules exist. */
	proxy_ok = ((flags & PF_COMPATIBLE_FLAGS_PF_ENABLED) == 0) ||
	    (flags & PF_COMPATIBLE_FLAGS_CUSTOM_RULES_PRESENT) == 0;
	net_filter_event_mark(NET_FILTER_EVENT_PF_PRIVATE_PROXY, proxy_ok);
}
#endif // SKYWALK
4994