1 /*
2 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /* $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31
32 /*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83 #include <os/log.h>
84
85 #include <mach/vm_param.h>
86
87 #include <net/dlil.h>
88 #include <net/if.h>
89 #include <net/if_types.h>
90 #include <net/net_api_stats.h>
91 #include <net/route.h>
92 #if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
93 #include <skywalk/lib/net_filter_event.h>
94 #endif
95
96 #include <netinet/in.h>
97 #include <netinet/in_var.h>
98 #include <netinet/in_systm.h>
99 #include <netinet/ip.h>
100 #include <netinet/ip_var.h>
101 #include <netinet/ip_icmp.h>
102 #include <netinet/if_ether.h>
103
104 #if DUMMYNET
105 #include <netinet/ip_dummynet.h>
106 #else
107 struct ip_fw_args;
108 #endif /* DUMMYNET */
109
110 #include <libkern/crypto/md5.h>
111
112 #include <machine/machine_routines.h>
113
114 #include <miscfs/devfs/devfs.h>
115
116 #include <net/pfvar.h>
117
118 #if NPFSYNC
119 #include <net/if_pfsync.h>
120 #endif /* NPFSYNC */
121
122 #if PFLOG
123 #include <net/if_pflog.h>
124 #endif /* PFLOG */
125
126 #include <netinet/ip6.h>
127 #include <netinet/in_pcb.h>
128
129 #include <dev/random/randomdev.h>
130
131 #if 0
132 static void pfdetach(void);
133 #endif
134 static int pfopen(dev_t, int, int, struct proc *);
135 static int pfclose(dev_t, int, int, struct proc *);
136 static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
137 static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
138 struct pfioc_table_64 *, struct proc *);
139 static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
140 struct pfioc_tokens_64 *, struct proc *);
141 static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
142 static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
143 struct proc *);
144 static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
145 static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
146 struct pfioc_states_64 *, struct proc *);
147 static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
148 static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
149 static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
150 static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
151 static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
152 static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
153 struct pfioc_trans_64 *, struct proc *);
154 static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
155 struct pfioc_src_nodes_64 *, struct proc *);
156 static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
157 struct proc *);
158 static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
159 struct pfioc_iface_64 *, struct proc *);
160 static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
161 u_int8_t, u_int8_t, u_int8_t);
162 static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
163 static void pf_empty_pool(struct pf_palist *);
164 static int pf_begin_rules(u_int32_t *, int, const char *);
165 static int pf_rollback_rules(u_int32_t, int, char *);
166 static int pf_setup_pfsync_matching(struct pf_ruleset *);
167 static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
168 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
169 static int pf_commit_rules(u_int32_t, int, char *);
170 static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
171 int);
172 static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
173 static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
174 struct pf_state *);
175 static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
176 struct pf_state *);
177 static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
178 static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
179 static void pf_expire_states_and_src_nodes(struct pf_rule *);
180 static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
181 int, struct pf_rule *);
182 static void pf_addrwrap_setup(struct pf_addr_wrap *);
183 static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
184 struct pf_ruleset *);
185 static void pf_delete_rule_by_owner(char *, u_int32_t);
186 static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
187 static void pf_ruleset_cleanup(struct pf_ruleset *, int);
188 static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
189 int, struct pf_rule **);
190
191 #define PF_CDEV_MAJOR (-1)
192
/*
 * Character-device switch for the pf device nodes (/dev/pf, /dev/pfm).
 * Only open/close/ioctl are implemented; all other entry points are
 * wired to the eno_* / NULL stubs.
 */
static const struct cdevsw pf_cdevsw = {
	.d_open = pfopen,
	.d_close = pfclose,
	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_ioctl = pfioctl,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_ttys = NULL,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
	.d_type = 0
};
209
210 static void pf_attach_hooks(void);
211 #if 0
212 /* currently unused along with pfdetach() */
213 static void pf_detach_hooks(void);
214 #endif
215
216 /*
217 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
218 * and used in pf_af_hook() for performance optimization, such that packets
219 * will enter pf_test() or pf_test6() only when PF is running.
220 */
221 int pf_is_enabled = 0;
222
223 u_int32_t pf_hash_seed;
224 int16_t pf_nat64_configured = 0;
225
226 /*
227 * These are the pf enabled reference counting variables
228 */
229 #define NR_TOKENS_LIMIT (INT_MAX / sizeof(struct pfioc_token))
230
231 static u_int64_t pf_enabled_ref_count;
232 static u_int32_t nr_tokens = 0;
233 static u_int32_t pffwrules;
234 static u_int32_t pfdevcnt;
235
236 SLIST_HEAD(list_head, pfioc_kernel_token);
237 static struct list_head token_list_head;
238
239 struct pf_rule pf_default_rule;
240
241 typedef struct {
242 char tag_name[PF_TAG_NAME_SIZE];
243 uint16_t tag_id;
244 } pf_reserved_tag_table_t;
245
246 #define NUM_RESERVED_TAGS 2
247 static pf_reserved_tag_table_t pf_reserved_tag_table[NUM_RESERVED_TAGS] = {
248 { PF_TAG_NAME_SYSTEM_SERVICE, PF_TAG_ID_SYSTEM_SERVICE},
249 { PF_TAG_NAME_STACK_DROP, PF_TAG_ID_STACK_DROP},
250 };
251 #define RESERVED_TAG_ID_MIN PF_TAG_ID_SYSTEM_SERVICE
252
253 #define DYNAMIC_TAG_ID_MAX 50000
254 static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags =
255 TAILQ_HEAD_INITIALIZER(pf_tags);
256
257 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
258 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
259 #endif
260 static u_int16_t tagname2tag(struct pf_tags *, char *);
261 static void tag_unref(struct pf_tags *, u_int16_t);
262 static int pf_rtlabel_add(struct pf_addr_wrap *);
263 static void pf_rtlabel_remove(struct pf_addr_wrap *);
264 static void pf_rtlabel_copyout(struct pf_addr_wrap *);
265
266 #if INET
267 static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
268 struct ip_fw_args *);
269 #endif /* INET */
270 static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
271 struct ip_fw_args *);
272
/*
 * Debug logging gated on the current pf debug level.  The second
 * argument must be a parenthesized printf argument list.  Wrapped in
 * do/while (0) so the macro expands to a single statement and cannot
 * capture a following `else` (dangling-else hazard of a bare `if`).
 */
#define DPFPRINTF(n, x) do { \
	if (pf_status.debug >= (n)) \
	        printf x; \
} while (0)
274
275 /*
276 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
277 */
278 #define PFIOCX_STRUCT_DECL(s) \
279 struct { \
280 union { \
281 struct s##_32 _s##_32; \
282 struct s##_64 _s##_64; \
283 } _u; \
284 } *s##_un = NULL \
285
286 #define PFIOCX_STRUCT_BEGIN(a, s) { \
287 VERIFY(s##_un == NULL); \
288 s##_un = kalloc_type(typeof(*s##_un), Z_WAITOK_ZERO_NOFAIL); \
289 if (p64) \
290 bcopy(a, &s##_un->_u._s##_64, \
291 sizeof (struct s##_64)); \
292 else \
293 bcopy(a, &s##_un->_u._s##_32, \
294 sizeof (struct s##_32)); \
295 }
296
297 #define PFIOCX_STRUCT_END(s, a) { \
298 VERIFY(s##_un != NULL); \
299 if (p64) \
300 bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64)); \
301 else \
302 bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32)); \
303 kfree_type(typeof(*s##_un), s##_un); \
304 }
305
306 #define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
307 #define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
308
309 /*
310 * Helper macros for regular ioctl structures.
311 */
312 #define PFIOC_STRUCT_BEGIN(a, v) { \
313 VERIFY((v) == NULL); \
314 (v) = kalloc_type(typeof(*(v)), Z_WAITOK_ZERO_NOFAIL); \
315 bcopy(a, v, sizeof (*(v))); \
316 }
317
318 #define PFIOC_STRUCT_END(v, a) { \
319 VERIFY((v) != NULL); \
320 bcopy(v, a, sizeof (*(v))); \
321 kfree_type(typeof(*(v)), v); \
322 }
323
324 #define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32)
325 #define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64)
326
327 struct thread *pf_purge_thread;
328
329 extern void pfi_kifaddr_update(void *);
330
331 /* pf enable ref-counting helper functions */
332 static u_int64_t generate_token(struct proc *);
333 static int remove_token(struct pfioc_remove_token *);
334 static void invalidate_all_tokens(void);
335
336 static u_int64_t
generate_token(struct proc * p)337 generate_token(struct proc *p)
338 {
339 u_int64_t token_value;
340 struct pfioc_kernel_token *new_token;
341
342 if (nr_tokens + 1 > NR_TOKENS_LIMIT) {
343 os_log_error(OS_LOG_DEFAULT, "%s: NR_TOKENS_LIMIT reached", __func__);
344 return 0;
345 }
346
347 new_token = kalloc_type(struct pfioc_kernel_token,
348 Z_WAITOK | Z_ZERO | Z_NOFAIL);
349
350 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
351
352 token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);
353
354 new_token->token.token_value = token_value;
355 new_token->token.pid = proc_pid(p);
356 proc_name(new_token->token.pid, new_token->token.proc_name,
357 sizeof(new_token->token.proc_name));
358 new_token->token.timestamp = pf_calendar_time_second();
359
360 SLIST_INSERT_HEAD(&token_list_head, new_token, next);
361 nr_tokens++;
362
363 return token_value;
364 }
365
366 static int
remove_token(struct pfioc_remove_token * tok)367 remove_token(struct pfioc_remove_token *tok)
368 {
369 struct pfioc_kernel_token *entry, *tmp;
370
371 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
372
373 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
374 if (tok->token_value == entry->token.token_value) {
375 SLIST_REMOVE(&token_list_head, entry,
376 pfioc_kernel_token, next);
377 kfree_type(struct pfioc_kernel_token, entry);
378 nr_tokens--;
379 return 0; /* success */
380 }
381 }
382
383 printf("pf : remove failure\n");
384 return ESRCH; /* failure */
385 }
386
387 static void
invalidate_all_tokens(void)388 invalidate_all_tokens(void)
389 {
390 struct pfioc_kernel_token *entry, *tmp;
391
392 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
393
394 SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
395 SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
396 kfree_type(struct pfioc_kernel_token, entry);
397 }
398
399 nr_tokens = 0;
400 }
401
/*
 * One-time PF subsystem initialization at boot: create the memory
 * pools, apply configured state/table hard limits, initialize the
 * rule/state/source-tracking structures and the default pass rule with
 * its timeout table, start the purge thread, and publish the /dev/pf
 * and /dev/pfm device nodes.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof(struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* shrink the table-entry limit on small-memory (<= 256MB) systems */
	if (max_mem <= 256 * 1024 * 1024) {
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;
	}

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	/* service-class constants must encode their index under SCIDX_MASK */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf");

	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm");

	pf_attach_hooks();
#if DUMMYNET
	dummynet_init();
#endif
}
514
#if 0
/*
 * Full PF teardown — the reverse of pfinit(): flush rulesets, states,
 * source nodes, tables and anchors, then destroy the pools and
 * subsystems.  Currently compiled out (kept for reference along with
 * pf_detach_hooks()).
 */
static void
pfdetach(void)
{
	struct pf_anchor *anchor;
	struct pf_state *state;
	struct pf_src_node *node;
	struct pfioc_table pt;
	u_int32_t ticket;
	int i;
	char r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++) {
		if (pf_begin_rules(&ticket, i, &r) == 0) {
			pf_commit_rules(ticket, i, &r);
		}
	}

	/* clear states: mark everything for purge, then sweep once */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes: drop state back-references first */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors: committing an empty transaction empties each */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++) {
			if (pf_begin_rules(&ticket, i, anchor->name) == 0) {
				pf_commit_rules(ticket, i, anchor->name);
			}
		}
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
592
593 static int
pfopen(dev_t dev,int flags,int fmt,struct proc * p)594 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
595 {
596 #pragma unused(flags, fmt, p)
597 if (minor(dev) >= PFDEV_MAX) {
598 return ENXIO;
599 }
600
601 if (minor(dev) == PFDEV_PFM) {
602 lck_mtx_lock(&pf_lock);
603 if (pfdevcnt != 0) {
604 lck_mtx_unlock(&pf_lock);
605 return EBUSY;
606 }
607 pfdevcnt++;
608 lck_mtx_unlock(&pf_lock);
609 }
610 return 0;
611 }
612
613 static int
pfclose(dev_t dev,int flags,int fmt,struct proc * p)614 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
615 {
616 #pragma unused(flags, fmt, p)
617 if (minor(dev) >= PFDEV_MAX) {
618 return ENXIO;
619 }
620
621 if (minor(dev) == PFDEV_PFM) {
622 lck_mtx_lock(&pf_lock);
623 VERIFY(pfdevcnt > 0);
624 pfdevcnt--;
625 lck_mtx_unlock(&pf_lock);
626 }
627 return 0;
628 }
629
/*
 * Resolve the address pool (rpool) of a single rule.
 *
 * anchor and rule_action select the ruleset and rule queue; `active`
 * chooses between the active queue and the inactive (transaction)
 * queue.  When check_ticket is set, the caller-supplied ticket must
 * match that queue's current ticket.  With r_last set, the last rule
 * in the queue is used; otherwise the queue is walked forward to the
 * rule whose number equals rule_number.  Returns NULL when the
 * ruleset, ticket, or rule cannot be matched.
 */
static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule;
	int rs_num;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL) {
		return NULL;
	}
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX) {
		return NULL;
	}
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket) {
			return NULL;
		}
		if (r_last) {
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		} else {
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		}
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket) {
			return NULL;
		}
		if (r_last) {
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		} else {
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
		}
	}
	if (!r_last) {
		/* advance to the rule carrying the requested rule number */
		while ((rule != NULL) && (rule->nr != rule_number)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		return NULL;
	}

	return &rule->rpool;
}
681
682 static void
pf_mv_pool(struct pf_palist * poola,struct pf_palist * poolb)683 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
684 {
685 struct pf_pooladdr *mv_pool_pa;
686
687 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
688 TAILQ_REMOVE(poola, mv_pool_pa, entries);
689 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
690 }
691 }
692
693 static void
pf_empty_pool(struct pf_palist * poola)694 pf_empty_pool(struct pf_palist *poola)
695 {
696 struct pf_pooladdr *empty_pool_pa;
697
698 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
699 pfi_dynaddr_remove(&empty_pool_pa->addr);
700 pf_tbladdr_remove(&empty_pool_pa->addr);
701 pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
702 TAILQ_REMOVE(poola, empty_pool_pa, entries);
703 pool_put(&pf_pooladdr_pl, empty_pool_pa);
704 }
705 }
706
/*
 * Unlink rule from rulequeue (when given) and destroy it once nothing
 * references it any more.  A rule still referenced by states or source
 * nodes is only detached here (tqe_prev cleared); final destruction
 * happens on a later call with rulequeue == NULL once the references
 * are gone.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl) {
				pfr_detach_table(rule->overload_tbl);
			}
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as detached from any queue */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* defer destruction while states or source nodes still use the rule */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL) {
		return;
	}
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* tables were not removed on the earlier detach path */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl) {
			pfr_detach_table(rule->overload_tbl);
		}
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
750
/*
 * Return the numeric id for tagname, allocating a new entry when the
 * name is unknown; returns 0 when the dynamic id space is exhausted.
 *
 * Reserved names map to fixed ids from pf_reserved_tag_table (all
 * above DYNAMIC_TAG_ID_MAX) and are kept at the head of the list.
 * Dynamic ids are assigned from the lowest free slot, which keeps the
 * dynamic portion of the list sorted by id.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname *tag, *p = NULL;
	uint16_t new_tagid = 1;
	bool reserved_tag = false;

	/* existing tag name: take another reference and reuse the id */
	TAILQ_FOREACH(tag, head, entries)
	if (strcmp(tagname, tag->name) == 0) {
		tag->ref++;
		return tag->tag;
	}

	/*
	 * check if it is a reserved tag.
	 */
	_CASSERT(RESERVED_TAG_ID_MIN > DYNAMIC_TAG_ID_MAX);
	for (int i = 0; i < NUM_RESERVED_TAGS; i++) {
		if (strncmp(tagname, pf_reserved_tag_table[i].tag_name,
		    PF_TAG_NAME_SIZE) == 0) {
			new_tagid = pf_reserved_tag_table[i].tag_id;
			reserved_tag = true;
			goto skip_dynamic_tag_alloc;
		}
	}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head)) {
		/* skip reserved tags */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag >= RESERVED_TAG_ID_MIN;
		    p = TAILQ_NEXT(p, entries)) {
			;
		}

		/* advance past consecutive ids until the first gap */
		for (; p != NULL && p->tag == new_tagid;
		    p = TAILQ_NEXT(p, entries)) {
			new_tagid = p->tag + 1;
		}
	}

	if (new_tagid > DYNAMIC_TAG_ID_MAX) {
		return 0;
	}

skip_dynamic_tag_alloc:
	/* allocate and fill new struct pf_tagname; zeroed, so ref++ yields 1 */
	tag = kalloc_type(struct pf_tagname, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (reserved_tag) {     /* insert reserved tag at the head */
		TAILQ_INSERT_HEAD(head, tag, entries);
	} else if (p != NULL) { /* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	} else {                /* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);
	}

	return tag->tag;
}
819
820 static void
tag_unref(struct pf_tags * head,u_int16_t tag)821 tag_unref(struct pf_tags *head, u_int16_t tag)
822 {
823 struct pf_tagname *p, *next;
824
825 if (tag == 0) {
826 return;
827 }
828
829 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
830 next = TAILQ_NEXT(p, entries);
831 if (tag == p->tag) {
832 if (--p->ref == 0) {
833 TAILQ_REMOVE(head, p, entries);
834 kfree_type(struct pf_tagname, p);
835 }
836 break;
837 }
838 }
839 }
840
841 u_int16_t
pf_tagname2tag(char * tagname)842 pf_tagname2tag(char *tagname)
843 {
844 return tagname2tag(&pf_tags, tagname);
845 }
846
847 u_int16_t
pf_tagname2tag_ext(char * tagname)848 pf_tagname2tag_ext(char *tagname)
849 {
850 u_int16_t tag;
851
852 lck_rw_lock_exclusive(&pf_perim_lock);
853 lck_mtx_lock(&pf_lock);
854 tag = pf_tagname2tag(tagname);
855 lck_mtx_unlock(&pf_lock);
856 lck_rw_done(&pf_perim_lock);
857 return tag;
858 }
859
860 void
pf_tag_ref(u_int16_t tag)861 pf_tag_ref(u_int16_t tag)
862 {
863 struct pf_tagname *t;
864
865 TAILQ_FOREACH(t, &pf_tags, entries)
866 if (t->tag == tag) {
867 break;
868 }
869 if (t != NULL) {
870 t->ref++;
871 }
872 }
873
/*
 * Drop one reference on tag in the global pf_tags list; the entry is
 * freed once unreferenced (see tag_unref()).
 */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
879
/*
 * No-op stub: route labels are not supported here; always succeeds so
 * rule-processing call sites need no special-casing.
 */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return 0;
}
886
/* No-op stub: route labels are not supported here. */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
892
/* No-op stub: route labels are not supported here; nothing to copy out. */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
898
899 static int
pf_begin_rules(u_int32_t * ticket,int rs_num,const char * anchor)900 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
901 {
902 struct pf_ruleset *rs;
903 struct pf_rule *rule;
904
905 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
906 return EINVAL;
907 }
908 rs = pf_find_or_create_ruleset(anchor);
909 if (rs == NULL) {
910 return EINVAL;
911 }
912 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
913 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
914 rs->rules[rs_num].inactive.rcount--;
915 }
916 *ticket = ++rs->rules[rs_num].inactive.ticket;
917 rs->rules[rs_num].inactive.open = 1;
918 return 0;
919 }
920
921 static int
pf_rollback_rules(u_int32_t ticket,int rs_num,char * anchor)922 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
923 {
924 struct pf_ruleset *rs;
925 struct pf_rule *rule;
926
927 if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
928 return EINVAL;
929 }
930 rs = pf_find_ruleset(anchor);
931 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
932 rs->rules[rs_num].inactive.ticket != ticket) {
933 return 0;
934 }
935 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
936 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
937 rs->rules[rs_num].inactive.rcount--;
938 }
939 rs->rules[rs_num].inactive.open = 0;
940 return 0;
941 }
942
/*
 * MD5-accumulation helpers for the ruleset checksum: PF_MD5_UPD hashes
 * a fixed-size member, PF_MD5_UPD_STR a NUL-terminated string member,
 * and the HTONL/HTONS variants first convert the value to network byte
 * order (via the caller-supplied scratch variable `stor`) so the digest
 * is independent of host endianness.
 */
#define PF_MD5_UPD(st, elm) \
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

#define PF_MD5_UPD_STR(st, elm) \
	MD5Update(ctx, (u_int8_t *)(st)->elm, (unsigned int)strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do { \
	(stor) = htonl((st)->elm); \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do { \
	(stor) = htons((st)->elm); \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \
} while (0)
958
/*
 * Fold the address half of a rule (and, for TCP/UDP, its port range)
 * into the ruleset MD5 checksum.  Only the variant of the address
 * union selected by addr.type is hashed; the update order is part of
 * the checksum definition and must not change.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/* ports only matter for port-carrying protocols */
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
995
/*
 * Fold all match-relevant fields of one rule into the ruleset MD5
 * checksum (computed for the main ruleset in pf_setup_pfsync_matching()
 * via pf_commit_rules()).  Multi-byte integers go through the HTON*
 * helpers so the digest is byte-order independent; the field order
 * below defines the checksum and must not be reordered.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;    /* scratch for 16-bit network-order conversion */
	u_int32_t y;    /* scratch for 32-bit network-order conversion */

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1034
/*
 * Commit a staged (inactive) ruleset: atomically swap it in as the
 * active ruleset for category rs_num under `anchor', recompute the
 * skip steps, and free the rules of the previously-active set.  For
 * the main ruleset, the pfsync matching checksum is recomputed first.
 *
 * Caller must hold pf_lock.  `ticket' must match the ticket issued
 * when the inactive ruleset was opened.
 *
 * Returns 0 on success, EINVAL for a bad rs_num, EBUSY for a stale
 * ticket or unopened inactive set, or an error from
 * pf_setup_pfsync_matching() (ENOMEM).
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset *rs;
	struct pf_rule *rule, **old_array, *r;
	struct pf_rulequeue *old_rules;
	int error;
	u_int32_t old_rcount;
	u_int32_t old_rsize;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		return EINVAL;
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket) {
		return EBUSY;
	}

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0) {
			return error;
		}
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_rsize = rs->rules[rs_num].active.rsize;
	old_array = rs->rules[rs_num].active.ptr_array;

	/*
	 * The outgoing active rules are about to be destroyed; keep the
	 * global pffwrules counter in step with the PFRULE_PFM-flagged
	 * rules among them.
	 */
	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM) {
				pffwrules--;
			}
			r = TAILQ_NEXT(r, entries);
		}
	}

	/* Inactive becomes active; the old active is parked in inactive. */
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rsize =
	    rs->rules[rs_num].inactive.rsize;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;
	rs->rules[rs_num].inactive.rsize = old_rsize;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
		pf_rm_rule(old_rules, rule);
	}
	kfree_type(struct pf_rule *, rs->rules[rs_num].inactive.rsize,
	    rs->rules[rs_num].inactive.ptr_array);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.rsize = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return 0;
}
1112
1113 static void
pf_rule_copyin(struct pf_rule * src,struct pf_rule * dst,struct proc * p,int minordev)1114 pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
1115 int minordev)
1116 {
1117 bcopy(src, dst, sizeof(struct pf_rule));
1118
1119 dst->label[sizeof(dst->label) - 1] = '\0';
1120 dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1121 dst->qname[sizeof(dst->qname) - 1] = '\0';
1122 dst->pqname[sizeof(dst->pqname) - 1] = '\0';
1123 dst->tagname[sizeof(dst->tagname) - 1] = '\0';
1124 dst->match_tagname[sizeof(dst->match_tagname) - 1] = '\0';
1125 dst->overload_tblname[sizeof(dst->overload_tblname) - 1] = '\0';
1126 dst->owner[sizeof(dst->owner) - 1] = '\0';
1127
1128 dst->cuid = kauth_cred_getuid(kauth_cred_get());
1129 dst->cpid = proc_getpid(p);
1130
1131 dst->anchor = NULL;
1132 dst->kif = NULL;
1133 dst->overload_tbl = NULL;
1134
1135 TAILQ_INIT(&dst->rpool.list);
1136 dst->rpool.cur = NULL;
1137
1138 /* initialize refcounting */
1139 dst->states = 0;
1140 dst->src_nodes = 0;
1141
1142 dst->entries.tqe_prev = NULL;
1143 dst->entries.tqe_next = NULL;
1144 if ((uint8_t)minordev == PFDEV_PFM) {
1145 dst->rule_flag |= PFRULE_PFM;
1146 }
1147 }
1148
1149 static void
pf_rule_copyout(struct pf_rule * src,struct pf_rule * dst)1150 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1151 {
1152 bcopy(src, dst, sizeof(struct pf_rule));
1153
1154 dst->anchor = NULL;
1155 dst->kif = NULL;
1156 dst->overload_tbl = NULL;
1157
1158 dst->rpool.list.tqh_first = NULL;
1159 dst->rpool.list.tqh_last = NULL;
1160 dst->rpool.cur = NULL;
1161
1162 dst->entries.tqe_prev = NULL;
1163 dst->entries.tqe_next = NULL;
1164 }
1165
/*
 * Serialize kernel state `s' (with its state key `sk') into the
 * export/wire format struct pfsync_state, as used by DIOCGETSTATES and
 * pfsync.  Absolute times are converted to relative ones: creation
 * becomes the state's age in seconds and expire the remaining lifetime
 * (clamped at 0).  Rule references are exported as rule numbers, with
 * (unsigned)-1 meaning "no rule".
 */
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext_lan.addr = sk->ext_lan.addr;
	sp->ext_lan.xport = sk->ext_lan.xport;
	sp->ext_gwy.addr = sk->ext_gwy.addr;
	sp->ext_gwy.xport = sk->ext_gwy.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af_lan = sk->af_lan;
	sp->af_gwy = sk->af_gwy;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	/* rule references travel as rule numbers; -1 == none */
	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;	/* age in seconds */
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node) {
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	}
	if (s->nat_src_node) {
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
	}

	/* convert absolute expiry to seconds remaining, clamped at 0 */
	if (sp->expire > secs) {
		sp->expire -= secs;
	} else {
		sp->expire = 0;
	}
}
1226
1227 static void
pf_state_import(struct pfsync_state * sp,struct pf_state_key * sk,struct pf_state * s)1228 pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
1229 struct pf_state *s)
1230 {
1231 /* copy to state key */
1232 sk->lan.addr = sp->lan.addr;
1233 sk->lan.xport = sp->lan.xport;
1234 sk->gwy.addr = sp->gwy.addr;
1235 sk->gwy.xport = sp->gwy.xport;
1236 sk->ext_lan.addr = sp->ext_lan.addr;
1237 sk->ext_lan.xport = sp->ext_lan.xport;
1238 sk->ext_gwy.addr = sp->ext_gwy.addr;
1239 sk->ext_gwy.xport = sp->ext_gwy.xport;
1240 sk->proto_variant = sp->proto_variant;
1241 s->tag = sp->tag;
1242 sk->proto = sp->proto;
1243 sk->af_lan = sp->af_lan;
1244 sk->af_gwy = sp->af_gwy;
1245 sk->direction = sp->direction;
1246 sk->flowhash = pf_calc_state_key_flowhash(sk);
1247
1248 /* copy to state */
1249 memcpy(&s->id, &sp->id, sizeof(sp->id));
1250 s->creatorid = sp->creatorid;
1251 pf_state_peer_from_pfsync(&sp->src, &s->src);
1252 pf_state_peer_from_pfsync(&sp->dst, &s->dst);
1253
1254 s->rule.ptr = &pf_default_rule;
1255 s->nat_rule.ptr = NULL;
1256 s->anchor.ptr = NULL;
1257 s->rt_kif = NULL;
1258 s->creation = pf_time_second();
1259 s->expire = pf_time_second();
1260 if (sp->expire > 0) {
1261 s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
1262 }
1263 s->pfsync_time = 0;
1264 s->packets[0] = s->packets[1] = 0;
1265 s->bytes[0] = s->bytes[1] = 0;
1266 }
1267
1268 static void
pf_pooladdr_copyin(struct pf_pooladdr * src,struct pf_pooladdr * dst)1269 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1270 {
1271 bcopy(src, dst, sizeof(struct pf_pooladdr));
1272
1273 dst->entries.tqe_prev = NULL;
1274 dst->entries.tqe_next = NULL;
1275 dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1276 dst->kif = NULL;
1277 }
1278
1279 static void
pf_pooladdr_copyout(struct pf_pooladdr * src,struct pf_pooladdr * dst)1280 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1281 {
1282 bcopy(src, dst, sizeof(struct pf_pooladdr));
1283
1284 dst->entries.tqe_prev = NULL;
1285 dst->entries.tqe_next = NULL;
1286 dst->kif = NULL;
1287 }
1288
/*
 * Compute the MD5 checksum over all inactive rulesets (the scrub
 * ruleset is skipped) and store it in pf_status.pf_chksum, so pfsync
 * peers can verify they run identical rulesets.  As a side effect,
 * each ruleset's ptr_array is (re)sized to rcount entries and filled
 * so rules can be looked up by number in O(1).
 *
 * Returns 0 on success, ENOMEM if a ptr_array reallocation fails (the
 * array is freed by Z_REALLOCF in that case and rsize is reset to 0).
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX ctx;
	struct pf_rule *rule;
	int rs_cnt;
	u_int8_t digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB) {
			continue;
		}

		/* Z_REALLOCF frees the old array if the resize fails */
		rs->rules[rs_cnt].inactive.ptr_array = krealloc_type(struct pf_rule *,
		    rs->rules[rs_cnt].inactive.rsize, rs->rules[rs_cnt].inactive.rcount,
		    rs->rules[rs_cnt].inactive.ptr_array, Z_WAITOK | Z_REALLOCF);

		if (rs->rules[rs_cnt].inactive.rcount &&
		    !rs->rules[rs_cnt].inactive.ptr_array) {
			rs->rules[rs_cnt].inactive.rsize = 0;
			return ENOMEM;
		}
		rs->rules[rs_cnt].inactive.rsize =
		    rs->rules[rs_cnt].inactive.rcount;

		/* hash every rule and index it by rule number */
		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return 0;
}
1327
/*
 * Enable pf: mark it running, stamp the start time, lazily seed the
 * state-id generator (high 32 bits from the boot-relative clock), and
 * wake the purge thread.  Caller must hold pf_lock and pf must
 * currently be disabled (VERIFY).
 */
static void
pf_start(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	if (pf_status.stateid == 0) {
		/* seed the per-boot unique state-id space once */
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
	net_filter_event_mark(NET_FILTER_EVENT_PF,
	    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
1349
/*
 * Disable pf: clear the running flags, stamp the stop time, and wake
 * the purge thread so it can notice the state change.  Caller must
 * hold pf_lock and pf must currently be enabled (VERIFY).
 */
static void
pf_stop(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
	net_filter_event_mark(NET_FILTER_EVENT_PF,
	    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1367
/*
 * ioctl entry point for /dev/pf.
 *
 * Access control happens in three stages before any command runs:
 *   1. the caller must be superuser (EPERM otherwise);
 *   2. at securelevel > 1 only a whitelist of commands is allowed
 *      (table-modifying commands pass only as PFR_FLAG_DUMMY no-ops);
 *   3. without FWRITE on the descriptor only read-only commands are
 *      allowed (EACCES otherwise; dummy table ops upgrade to FWRITE).
 * The command then executes with pf_lock held, under pf_perim_lock
 * taken exclusive for writers and shared for readers.
 *
 * The PFIOC*_STRUCT_BEGIN/END macros copy the user structure in/out
 * (handling 32-/64-bit user processes where the layouts differ).
 */
static int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
#pragma unused(dev)
	/* NOTE(review): dev is in fact read via minor() below; the pragma
	 * appears to predate that use — confirm before removing it. */
	int p64 = proc_is64bit(p);	/* consumed by PFIOCX_* macros */
	int error = 0;
	int minordev = minor(dev);

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}

	/* XXX keep in sync with switch() below */
	if (securelevel > 1) {
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			/* bcopy: addr may be unaligned */
			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				break; /* dummy operation ok */
			}
			return EPERM;
		}
		default:
			return EPERM;
		}
	}

	/* read-only descriptors may only run read-only commands */
	if (!(flags & FWRITE)) {
		switch (cmd) {
		case DIOCSTART:
		case DIOCSTARTREF:
		case DIOCSTOP:
		case DIOCSTOPREF:
		case DIOCGETSTARTERS:
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return EACCES;
		}
		case DIOCGETRULE: {
			u_int32_t action;

			bcopy(&((struct pfioc_rule *)(void *)addr)->action,
			    &action, sizeof(action));

			/* PF_GET_CLR_CNTR clears counters: a write */
			if (action == PF_GET_CLR_CNTR) {
				return EACCES;
			}
			break;
		}
		default:
			return EACCES;
		}
	}

	if (flags & FWRITE) {
		lck_rw_lock_exclusive(&pf_perim_lock);
	} else {
		lck_rw_lock_shared(&pf_perim_lock);
	}

	lck_mtx_lock(&pf_lock);

	switch (cmd) {
	case DIOCSTART:
		if (pf_status.running) {
			/*
			 * Increment the reference for a simple -e enable, so
			 * that even if other processes drop their references,
			 * pf will still be available to processes that turned
			 * it on without taking a reference
			 */
			if (nr_tokens == pf_enabled_ref_count) {
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			}
			error = EEXIST;
		} else if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			pf_start();
			pf_enabled_ref_count++;
			VERIFY(pf_enabled_ref_count != 0);
		}
		break;

	case DIOCSTARTREF:              /* u_int64_t */
		if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			u_int64_t token;

			/* small enough to be on stack */
			if ((token = generate_token(p)) != 0) {
				if (pf_is_enabled == 0) {
					pf_start();
				}
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			} else {
				error = ENOMEM;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: unable to generate token\n"));
			}
			bcopy(&token, addr, sizeof(token));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			/* unconditional stop drops all references */
			pf_stop();
			pf_enabled_ref_count = 0;
			invalidate_all_tokens();
		}
		break;

	case DIOCSTOPREF:               /* struct pfioc_remove_token */
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			struct pfioc_remove_token pfrt;

			/* small enough to be on stack */
			bcopy(addr, &pfrt, sizeof(pfrt));
			if ((error = remove_token(&pfrt)) == 0) {
				VERIFY(pf_enabled_ref_count != 0);
				pf_enabled_ref_count--;
				/* return currently held references */
				pfrt.refcount = pf_enabled_ref_count;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: enabled refcount decremented\n"));
			} else {
				error = EINVAL;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: token mismatch\n"));
			}
			bcopy(&pfrt, addr, sizeof(pfrt));

			/* last reference gone: actually stop pf */
			if (error == 0 && pf_enabled_ref_count == 0) {
				pf_stop();
			}
		}
		break;

	case DIOCGETSTARTERS: {         /* struct pfioc_tokens */
		PFIOCX_STRUCT_DECL(pfioc_tokens);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens);
		error = pfioctl_ioc_tokens(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_tokens),
		    PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
		PFIOCX_STRUCT_END(pfioc_tokens, addr);
		break;
	}

	case DIOCADDRULE:               /* struct pfioc_rule */
	case DIOCGETRULES:              /* struct pfioc_rule */
	case DIOCGETRULE:               /* struct pfioc_rule */
	case DIOCCHANGERULE:            /* struct pfioc_rule */
	case DIOCINSERTRULE:            /* struct pfioc_rule */
	case DIOCDELETERULE: {          /* struct pfioc_rule */
		struct pfioc_rule *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr);
		error = pfioctl_ioc_rule(cmd, minordev, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCCLRSTATES:             /* struct pfioc_state_kill */
	case DIOCKILLSTATES: {          /* struct pfioc_state_kill */
		struct pfioc_state_kill *psk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psk);
		error = pfioctl_ioc_state_kill(cmd, psk, p);
		PFIOC_STRUCT_END(psk, addr);
		break;
	}

	case DIOCADDSTATE:              /* struct pfioc_state */
	case DIOCGETSTATE: {            /* struct pfioc_state */
		struct pfioc_state *ps = NULL;

		PFIOC_STRUCT_BEGIN(addr, ps);
		error = pfioctl_ioc_state(cmd, ps, p);
		PFIOC_STRUCT_END(ps, addr);
		break;
	}

	case DIOCGETSTATES: {           /* struct pfioc_states */
		PFIOCX_STRUCT_DECL(pfioc_states);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_states);
		error = pfioctl_ioc_states(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_states),
		    PFIOCX_STRUCT_ADDR64(pfioc_states), p);
		PFIOCX_STRUCT_END(pfioc_states, addr);
		break;
	}

	case DIOCGETSTATUS: {           /* struct pf_status */
		struct pf_status *s = NULL;

		/* source is kernel pf_status, not the user buffer */
		PFIOC_STRUCT_BEGIN(&pf_status, s);
		pfi_update_status(s->ifname, s);
		PFIOC_STRUCT_END(s, addr);
		break;
	}

	case DIOCSETSTATUSIF: {         /* struct pfioc_if */
		struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;

		/* OK for unaligned accesses */
		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		/* reset counters but keep the status interface binding */
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = pf_calendar_time_second();
		if (*pf_status.ifname) {
			pfi_update_status(pf_status.ifname, NULL);
		}
		break;
	}

	case DIOCNATLOOK: {             /* struct pfioc_natlook */
		struct pfioc_natlook *pnl = NULL;

		PFIOC_STRUCT_BEGIN(addr, pnl);
		error = pfioctl_ioc_natlook(cmd, pnl, p);
		PFIOC_STRUCT_END(pnl, addr);
		break;
	}

	case DIOCSETTIMEOUT:            /* struct pfioc_tm */
	case DIOCGETTIMEOUT: {          /* struct pfioc_tm */
		struct pfioc_tm pt;

		/* small enough to be on stack */
		bcopy(addr, &pt, sizeof(pt));
		error = pfioctl_ioc_tm(cmd, &pt, p);
		bcopy(&pt, addr, sizeof(pt));
		break;
	}

	case DIOCGETLIMIT:              /* struct pfioc_limit */
	case DIOCSETLIMIT: {            /* struct pfioc_limit */
		struct pfioc_limit pl;

		/* small enough to be on stack */
		bcopy(addr, &pl, sizeof(pl));
		error = pfioctl_ioc_limit(cmd, &pl, p);
		bcopy(&pl, addr, sizeof(pl));
		break;
	}

	case DIOCSETDEBUG: {            /* u_int32_t */
		bcopy(addr, &pf_status.debug, sizeof(u_int32_t));
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset *ruleset = &pf_main_ruleset;
		struct pf_rule *rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCGIFSPEED: {
		struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
		struct pf_ifspeed ps;
		struct ifnet *ifp;
		u_int64_t baudrate;

		if (psp->ifname[0] != '\0') {
			/* Can we completely trust user-land? */
			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
			ps.ifname[IFNAMSIZ - 1] = '\0';
			ifp = ifunit(ps.ifname);
			if (ifp != NULL) {
				/* report the max output bandwidth */
				baudrate = ifp->if_output_bw.max_bw;
				bcopy(&baudrate, &psp->baudrate,
				    sizeof(baudrate));
			} else {
				error = EINVAL;
			}
		} else {
			error = EINVAL;
		}
		break;
	}

	case DIOCBEGINADDRS:            /* struct pfioc_pooladdr */
	case DIOCADDADDR:               /* struct pfioc_pooladdr */
	case DIOCGETADDRS:              /* struct pfioc_pooladdr */
	case DIOCGETADDR:               /* struct pfioc_pooladdr */
	case DIOCCHANGEADDR: {          /* struct pfioc_pooladdr */
		struct pfioc_pooladdr *pp = NULL;

		PFIOC_STRUCT_BEGIN(addr, pp);
		error = pfioctl_ioc_pooladdr(cmd, pp, p);
		PFIOC_STRUCT_END(pp, addr);
		break;
	}

	case DIOCGETRULESETS:           /* struct pfioc_ruleset */
	case DIOCGETRULESET: {          /* struct pfioc_ruleset */
		struct pfioc_ruleset *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr);
		error = pfioctl_ioc_ruleset(cmd, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCRCLRTABLES:            /* struct pfioc_table */
	case DIOCRADDTABLES:            /* struct pfioc_table */
	case DIOCRDELTABLES:            /* struct pfioc_table */
	case DIOCRGETTABLES:            /* struct pfioc_table */
	case DIOCRGETTSTATS:            /* struct pfioc_table */
	case DIOCRCLRTSTATS:            /* struct pfioc_table */
	case DIOCRSETTFLAGS:            /* struct pfioc_table */
	case DIOCRCLRADDRS:             /* struct pfioc_table */
	case DIOCRADDADDRS:             /* struct pfioc_table */
	case DIOCRDELADDRS:             /* struct pfioc_table */
	case DIOCRSETADDRS:             /* struct pfioc_table */
	case DIOCRGETADDRS:             /* struct pfioc_table */
	case DIOCRGETASTATS:            /* struct pfioc_table */
	case DIOCRCLRASTATS:            /* struct pfioc_table */
	case DIOCRTSTADDRS:             /* struct pfioc_table */
	case DIOCRINADEFINE: {          /* struct pfioc_table */
		PFIOCX_STRUCT_DECL(pfioc_table);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_table);
		error = pfioctl_ioc_table(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_table),
		    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
		PFIOCX_STRUCT_END(pfioc_table, addr);
		break;
	}

	case DIOCOSFPADD:               /* struct pf_osfp_ioctl */
	case DIOCOSFPGET: {             /* struct pf_osfp_ioctl */
		struct pf_osfp_ioctl *io = NULL;

		PFIOC_STRUCT_BEGIN(addr, io);
		if (cmd == DIOCOSFPADD) {
			error = pf_osfp_add(io);
		} else {
			VERIFY(cmd == DIOCOSFPGET);
			error = pf_osfp_get(io);
		}
		PFIOC_STRUCT_END(io, addr);
		break;
	}

	case DIOCXBEGIN:                /* struct pfioc_trans */
	case DIOCXROLLBACK:             /* struct pfioc_trans */
	case DIOCXCOMMIT: {             /* struct pfioc_trans */
		PFIOCX_STRUCT_DECL(pfioc_trans);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_trans);
		error = pfioctl_ioc_trans(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_trans),
		    PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
		PFIOCX_STRUCT_END(pfioc_trans, addr);
		break;
	}

	case DIOCGETSRCNODES: {         /* struct pfioc_src_nodes */
		PFIOCX_STRUCT_DECL(pfioc_src_nodes);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes);
		error = pfioctl_ioc_src_nodes(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
		    PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
		PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
		break;
	}

	case DIOCCLRSRCNODES: {
		struct pf_src_node *n;
		struct pf_state *state;

		/* detach every state from its source nodes ... */
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		/* ... then mark all source nodes expired and purge them */
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {        /* struct pfioc_src_node_kill */
		struct pfioc_src_node_kill *psnk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psnk);
		error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
		PFIOC_STRUCT_END(psnk, addr);
		break;
	}

	case DIOCSETHOSTID: {           /* u_int32_t */
		u_int32_t hid;

		/* small enough to be on stack */
		bcopy(addr, &hid, sizeof(hid));
		if (hid == 0) {
			/* 0 means "pick one for me" */
			pf_status.hostid = random();
		} else {
			pf_status.hostid = hid;
		}
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES:            /* struct pfioc_iface */
	case DIOCSETIFFLAG:             /* struct pfioc_iface */
	case DIOCCLRIFFLAG: {           /* struct pfioc_iface */
		PFIOCX_STRUCT_DECL(pfioc_iface);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_iface);
		error = pfioctl_ioc_iface(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_iface),
		    PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
		PFIOCX_STRUCT_END(pfioc_iface, addr);
		break;
	}

	default:
		error = ENODEV;
		break;
	}

	lck_mtx_unlock(&pf_lock);
	lck_rw_done(&pf_perim_lock);

	return error;
}
1918
1919 static int
pfioctl_ioc_table(u_long cmd,struct pfioc_table_32 * io32,struct pfioc_table_64 * io64,struct proc * p)1920 pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
1921 struct pfioc_table_64 *io64, struct proc *p)
1922 {
1923 int p64 = proc_is64bit(p);
1924 int error = 0;
1925
1926 if (!p64) {
1927 goto struct32;
1928 }
1929
1930 #ifdef __LP64__
1931 /*
1932 * 64-bit structure processing
1933 */
1934 switch (cmd) {
1935 case DIOCRCLRTABLES:
1936 if (io64->pfrio_esize != 0) {
1937 error = ENODEV;
1938 break;
1939 }
1940 pfr_table_copyin_cleanup(&io64->pfrio_table);
1941 error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
1942 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1943 break;
1944
1945 case DIOCRADDTABLES:
1946 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1947 error = ENODEV;
1948 break;
1949 }
1950 error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
1951 &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1952 break;
1953
1954 case DIOCRDELTABLES:
1955 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1956 error = ENODEV;
1957 break;
1958 }
1959 error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
1960 &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1961 break;
1962
1963 case DIOCRGETTABLES:
1964 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1965 error = ENODEV;
1966 break;
1967 }
1968 pfr_table_copyin_cleanup(&io64->pfrio_table);
1969 error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
1970 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1971 break;
1972
1973 case DIOCRGETTSTATS:
1974 if (io64->pfrio_esize != sizeof(struct pfr_tstats)) {
1975 error = ENODEV;
1976 break;
1977 }
1978 pfr_table_copyin_cleanup(&io64->pfrio_table);
1979 error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
1980 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1981 break;
1982
1983 case DIOCRCLRTSTATS:
1984 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1985 error = ENODEV;
1986 break;
1987 }
1988 error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
1989 &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
1990 break;
1991
1992 case DIOCRSETTFLAGS:
1993 if (io64->pfrio_esize != sizeof(struct pfr_table)) {
1994 error = ENODEV;
1995 break;
1996 }
1997 error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
1998 io64->pfrio_setflag, io64->pfrio_clrflag,
1999 &io64->pfrio_nchange, &io64->pfrio_ndel,
2000 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2001 break;
2002
2003 case DIOCRCLRADDRS:
2004 if (io64->pfrio_esize != 0) {
2005 error = ENODEV;
2006 break;
2007 }
2008 pfr_table_copyin_cleanup(&io64->pfrio_table);
2009 error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
2010 io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2011 break;
2012
2013 case DIOCRADDADDRS:
2014 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2015 error = ENODEV;
2016 break;
2017 }
2018 pfr_table_copyin_cleanup(&io64->pfrio_table);
2019 error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2020 io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
2021 PFR_FLAG_USERIOCTL);
2022 break;
2023
2024 case DIOCRDELADDRS:
2025 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2026 error = ENODEV;
2027 break;
2028 }
2029 pfr_table_copyin_cleanup(&io64->pfrio_table);
2030 error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2031 io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
2032 PFR_FLAG_USERIOCTL);
2033 break;
2034
2035 case DIOCRSETADDRS:
2036 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2037 error = ENODEV;
2038 break;
2039 }
2040 pfr_table_copyin_cleanup(&io64->pfrio_table);
2041 error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2042 io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
2043 &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
2044 PFR_FLAG_USERIOCTL, 0);
2045 break;
2046
2047 case DIOCRGETADDRS:
2048 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2049 error = ENODEV;
2050 break;
2051 }
2052 pfr_table_copyin_cleanup(&io64->pfrio_table);
2053 error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2054 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2055 break;
2056
2057 case DIOCRGETASTATS:
2058 if (io64->pfrio_esize != sizeof(struct pfr_astats)) {
2059 error = ENODEV;
2060 break;
2061 }
2062 pfr_table_copyin_cleanup(&io64->pfrio_table);
2063 error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
2064 &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2065 break;
2066
2067 case DIOCRCLRASTATS:
2068 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2069 error = ENODEV;
2070 break;
2071 }
2072 pfr_table_copyin_cleanup(&io64->pfrio_table);
2073 error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
2074 io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
2075 PFR_FLAG_USERIOCTL);
2076 break;
2077
2078 case DIOCRTSTADDRS:
2079 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2080 error = ENODEV;
2081 break;
2082 }
2083 pfr_table_copyin_cleanup(&io64->pfrio_table);
2084 error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
2085 io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
2086 PFR_FLAG_USERIOCTL);
2087 break;
2088
2089 case DIOCRINADEFINE:
2090 if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
2091 error = ENODEV;
2092 break;
2093 }
2094 pfr_table_copyin_cleanup(&io64->pfrio_table);
2095 error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
2096 io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
2097 io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
2098 break;
2099
2100 default:
2101 VERIFY(0);
2102 /* NOTREACHED */
2103 }
2104 goto done;
2105 #else
2106 #pragma unused(io64)
2107 #endif /* __LP64__ */
2108
2109 struct32:
2110 /*
2111 * 32-bit structure processing
2112 */
2113 switch (cmd) {
2114 case DIOCRCLRTABLES:
2115 if (io32->pfrio_esize != 0) {
2116 error = ENODEV;
2117 break;
2118 }
2119 pfr_table_copyin_cleanup(&io32->pfrio_table);
2120 error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
2121 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2122 break;
2123
2124 case DIOCRADDTABLES:
2125 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2126 error = ENODEV;
2127 break;
2128 }
2129 error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
2130 &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2131 break;
2132
2133 case DIOCRDELTABLES:
2134 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2135 error = ENODEV;
2136 break;
2137 }
2138 error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
2139 &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2140 break;
2141
2142 case DIOCRGETTABLES:
2143 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2144 error = ENODEV;
2145 break;
2146 }
2147 pfr_table_copyin_cleanup(&io32->pfrio_table);
2148 error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
2149 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2150 break;
2151
2152 case DIOCRGETTSTATS:
2153 if (io32->pfrio_esize != sizeof(struct pfr_tstats)) {
2154 error = ENODEV;
2155 break;
2156 }
2157 pfr_table_copyin_cleanup(&io32->pfrio_table);
2158 error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
2159 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2160 break;
2161
2162 case DIOCRCLRTSTATS:
2163 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2164 error = ENODEV;
2165 break;
2166 }
2167 error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
2168 &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2169 break;
2170
2171 case DIOCRSETTFLAGS:
2172 if (io32->pfrio_esize != sizeof(struct pfr_table)) {
2173 error = ENODEV;
2174 break;
2175 }
2176 error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
2177 io32->pfrio_setflag, io32->pfrio_clrflag,
2178 &io32->pfrio_nchange, &io32->pfrio_ndel,
2179 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2180 break;
2181
2182 case DIOCRCLRADDRS:
2183 if (io32->pfrio_esize != 0) {
2184 error = ENODEV;
2185 break;
2186 }
2187 pfr_table_copyin_cleanup(&io32->pfrio_table);
2188 error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
2189 io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2190 break;
2191
2192 case DIOCRADDADDRS:
2193 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2194 error = ENODEV;
2195 break;
2196 }
2197 pfr_table_copyin_cleanup(&io32->pfrio_table);
2198 error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2199 io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
2200 PFR_FLAG_USERIOCTL);
2201 break;
2202
2203 case DIOCRDELADDRS:
2204 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2205 error = ENODEV;
2206 break;
2207 }
2208 pfr_table_copyin_cleanup(&io32->pfrio_table);
2209 error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2210 io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
2211 PFR_FLAG_USERIOCTL);
2212 break;
2213
2214 case DIOCRSETADDRS:
2215 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2216 error = ENODEV;
2217 break;
2218 }
2219 pfr_table_copyin_cleanup(&io32->pfrio_table);
2220 error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2221 io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
2222 &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
2223 PFR_FLAG_USERIOCTL, 0);
2224 break;
2225
2226 case DIOCRGETADDRS:
2227 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2228 error = ENODEV;
2229 break;
2230 }
2231 pfr_table_copyin_cleanup(&io32->pfrio_table);
2232 error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2233 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2234 break;
2235
2236 case DIOCRGETASTATS:
2237 if (io32->pfrio_esize != sizeof(struct pfr_astats)) {
2238 error = ENODEV;
2239 break;
2240 }
2241 pfr_table_copyin_cleanup(&io32->pfrio_table);
2242 error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
2243 &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2244 break;
2245
2246 case DIOCRCLRASTATS:
2247 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2248 error = ENODEV;
2249 break;
2250 }
2251 pfr_table_copyin_cleanup(&io32->pfrio_table);
2252 error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
2253 io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
2254 PFR_FLAG_USERIOCTL);
2255 break;
2256
2257 case DIOCRTSTADDRS:
2258 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2259 error = ENODEV;
2260 break;
2261 }
2262 pfr_table_copyin_cleanup(&io32->pfrio_table);
2263 error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
2264 io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
2265 PFR_FLAG_USERIOCTL);
2266 break;
2267
2268 case DIOCRINADEFINE:
2269 if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
2270 error = ENODEV;
2271 break;
2272 }
2273 pfr_table_copyin_cleanup(&io32->pfrio_table);
2274 error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
2275 io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
2276 io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
2277 break;
2278
2279 default:
2280 VERIFY(0);
2281 /* NOTREACHED */
2282 }
2283 #ifdef __LP64__
2284 done:
2285 #endif
2286 return error;
2287 }
2288
/*
 * Handle PF token ioctls.  Currently the only supported command is
 * DIOCGETSTARTERS, which copies the list of kernel-held "starter"
 * tokens out to user space.  Both the 32-bit and 64-bit views of the
 * ioctl argument are passed in; proc_is64bit(p) selects the live one.
 *
 * Returns 0 on success, or ENOENT (no tokens), ERANGE (size overflow),
 * ENOMEM (allocation failure), or the result of copyout().
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		size = sizeof(struct pfioc_token) * nr_tokens;
		/* divide back to detect multiplication overflow */
		if (size / nr_tokens != sizeof(struct pfioc_token)) {
			os_log_error(OS_LOG_DEFAULT, "%s: size overflows", __func__);
			error = ERANGE;
			break;
		}
		/* ocnt = bytes offered by caller; cnt counts down as we fill */
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			/* size probe: report the required buffer size only */
			if (p64) {
				tok64->size = size;
			} else {
				tok32->size = size;
			}
			break;
		}

#ifdef __LP64__
		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
#else
		token_buf = tok32->pgt_buf;
#endif
		tokens = (struct pfioc_token *)kalloc_data(size, Z_WAITOK | Z_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		/* flatten the kernel token list into the scratch buffer */
		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof(*tokens)) {
				break; /* no more buffer space left */
			}
			t = (struct pfioc_token *)(void *)ptr;
			t->token_value = entry->token.token_value;
			t->timestamp = entry->token.timestamp;
			t->pid = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof(struct pfioc_token);

			cnt -= sizeof(struct pfioc_token);
		}

		/* copy out only the bytes actually filled in */
		if (cnt < ocnt) {
			error = copyout(tokens, token_buf, ocnt - cnt);
		}

		/* report the number of bytes transferred back to the caller */
		if (p64) {
			tok64->size = ocnt - cnt;
		} else {
			tok32->size = ocnt - cnt;
		}

		kfree_data(tokens, size);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
2374
2375 static void
pf_expire_states_and_src_nodes(struct pf_rule * rule)2376 pf_expire_states_and_src_nodes(struct pf_rule *rule)
2377 {
2378 struct pf_state *state;
2379 struct pf_src_node *sn;
2380 int killed = 0;
2381
2382 /* expire the states */
2383 state = TAILQ_FIRST(&state_list);
2384 while (state) {
2385 if (state->rule.ptr == rule) {
2386 state->timeout = PFTM_PURGE;
2387 }
2388 state = TAILQ_NEXT(state, entry_list);
2389 }
2390 pf_purge_expired_states(pf_status.states);
2391
2392 /* expire the src_nodes */
2393 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2394 if (sn->rule.ptr != rule) {
2395 continue;
2396 }
2397 if (sn->states != 0) {
2398 RB_FOREACH(state, pf_state_tree_id,
2399 &tree_id) {
2400 if (state->src_node == sn) {
2401 state->src_node = NULL;
2402 }
2403 if (state->nat_src_node == sn) {
2404 state->nat_src_node = NULL;
2405 }
2406 }
2407 sn->states = 0;
2408 }
2409 sn->expire = 1;
2410 killed++;
2411 }
2412 if (killed) {
2413 pf_purge_expired_src_nodes();
2414 }
2415 }
2416
2417 static void
pf_delete_rule_from_ruleset(struct pf_ruleset * ruleset,int rs_num,struct pf_rule * rule)2418 pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2419 struct pf_rule *rule)
2420 {
2421 struct pf_rule *r;
2422 int nr = 0;
2423
2424 pf_expire_states_and_src_nodes(rule);
2425
2426 pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2427 if (ruleset->rules[rs_num].active.rcount-- == 0) {
2428 panic("%s: rcount value broken!", __func__);
2429 }
2430 r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2431
2432 while (r) {
2433 r->nr = nr++;
2434 r = TAILQ_NEXT(r, entries);
2435 }
2436 }
2437
2438
2439 static void
pf_ruleset_cleanup(struct pf_ruleset * ruleset,int rs)2440 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2441 {
2442 pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2443 ruleset->rules[rs].active.ticket =
2444 ++ruleset->rules[rs].inactive.ticket;
2445 }
2446
/*
 * Delete the rule identified by pr->rule.ticket from the ruleset named
 * by pr->anchor, then repeatedly collapse any enclosing anchor whose
 * ruleset has become empty.  req_dev encodes the PF interface the
 * request came in on; currently the possible values are 0 or PFRULE_PFM,
 * and a rule may only be deleted through the same kind of device that
 * installed it.
 *
 * Returns 0 on success, or the error from ruleset lookup, ENOENT if no
 * rule carries the ticket, or EACCES on an owner/device mismatch.
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule = NULL;
	int is_anchor;
	int error;
	int i;

	is_anchor = (pr->anchor_call[0] != '\0');
	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
	    pr->rule.owner, is_anchor, &error)) == NULL) {
		return error;
	}

	/* scan every ruleset category for the rule with this ticket */
	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		return ENOENT;
	} else {
		/* undo the loop's final increment: i is the matching category */
		i--;
	}

	if (strcmp(rule->owner, pr->rule.owner)) {
		return EACCES;
	}

delete_rule:
	/*
	 * If the rule to delete is the last one inside an unowned,
	 * non-main anchor, remove it and then repeat the deletion for
	 * the anchor rule itself in the parent ruleset.
	 */
	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
		/* set rule & ruleset to parent and repeat */
		struct pf_rule *delete_rule = rule;
		struct pf_ruleset *delete_ruleset = ruleset;

#define parent_ruleset ruleset->anchor->parent->ruleset
		if (ruleset->anchor->parent == NULL) {
			ruleset = &pf_main_ruleset;
		} else {
			ruleset = &parent_ruleset;
		}

		/* find the anchor rule in the parent that owns the child */
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule &&
		    (rule->anchor != delete_ruleset->anchor)) {
			rule = TAILQ_NEXT(rule, entries);
		}
		if (rule == NULL) {
			panic("%s: rule not found!", __func__);
		}

		/*
		 * if request device != rule's device, bail:
		 * with error if ticket matches;
		 * without error if ticket doesn't match (i.e. it's just cleanup)
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			if (rule->ticket != pr->rule.ticket) {
				return 0;
			} else {
				return EACCES;
			}
		}

		if (delete_rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}

		pf_delete_rule_from_ruleset(delete_ruleset,
		    i, delete_rule);
		delete_ruleset->rules[i].active.ticket =
		    ++delete_ruleset->rules[i].inactive.ticket;
		goto delete_rule;
	} else {
		/*
		 * process deleting rule only if device that added the
		 * rule matches device that issued the request
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			return EACCES;
		}
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}
		pf_delete_rule_from_ruleset(ruleset, i,
		    rule);
		pf_ruleset_cleanup(ruleset, i);
	}

	return 0;
}
2546
/*
 * Delete every rule belonging to `owner` across all ruleset categories,
 * descending into anchors whose owner matches (or is empty) and stepping
 * back out when an anchor's list is exhausted.  Only rules whose
 * PFRULE_PFM flag matches req_dev are touched; req_dev encodes the PF
 * interface and is currently either 0 or PFRULE_PFM.
 */
static void
pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
{
	struct pf_ruleset *ruleset;
	struct pf_rule *rule, *next;
	int deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			next = TAILQ_NEXT(rule, entries);
			/*
			 * process deleting rule only if device that added the
			 * rule matches device that issued the request
			 */
			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
				rule = next;
				continue;
			}
			if (rule->anchor) {
				if (((strcmp(rule->owner, owner)) == 0) ||
				    ((strcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						/* flush pending cleanup before changing ruleset */
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						/* empty anchor: delete the anchor rule itself */
						if (rule->rule_flag &
						    PFRULE_PFM) {
							pffwrules--;
						}
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else {
					rule = next;
				}
			} else {
				if (((strcmp(rule->owner, owner)) == 0)) {
					/* delete rule */
					if (rule->rule_flag & PFRULE_PFM) {
						pffwrules--;
					}
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			/* end of list: clean up, then pop out of any anchor */
			if (rule == NULL) {
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset) {
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
				}
			}
		}
	}
}
2621
2622 static void
pf_deleterule_anchor_step_out(struct pf_ruleset ** ruleset_ptr,int rs,struct pf_rule ** rule_ptr)2623 pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2624 int rs, struct pf_rule **rule_ptr)
2625 {
2626 struct pf_ruleset *ruleset = *ruleset_ptr;
2627 struct pf_rule *rule = *rule_ptr;
2628
2629 /* step out of anchor */
2630 struct pf_ruleset *rs_copy = ruleset;
2631 ruleset = ruleset->anchor->parent?
2632 &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2633
2634 rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2635 while (rule && (rule->anchor != rs_copy->anchor)) {
2636 rule = TAILQ_NEXT(rule, entries);
2637 }
2638 if (rule == NULL) {
2639 panic("%s: parent rule of anchor not found!", __func__);
2640 }
2641 if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2642 rule = TAILQ_NEXT(rule, entries);
2643 }
2644
2645 *ruleset_ptr = ruleset;
2646 *rule_ptr = rule;
2647 }
2648
2649 static void
pf_addrwrap_setup(struct pf_addr_wrap * aw)2650 pf_addrwrap_setup(struct pf_addr_wrap *aw)
2651 {
2652 VERIFY(aw);
2653 bzero(&aw->p, sizeof aw->p);
2654 }
2655
/*
 * Finish initializing a rule copied in from user space: resolve the
 * bound interface, allocate tags, set up route labels, dynamic and
 * table addresses and the anchor linkage, attach the overload table,
 * and move the staged address pool (pf_pabuf) into the rule.  On any
 * failure every resource acquired so far is released (via pool_put()
 * before anything is acquired, pf_rm_rule() afterwards) and an errno
 * is returned; the caller must not touch `rule` again in that case.
 *
 * Returns 0 on success, EINVAL or EBUSY on failure.
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset)
{
	struct pf_pooladdr *apa;
	int error = 0;

	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			/* nothing else acquired yet; a bare pool_put suffices */
			pool_put(&pf_rule_pl, rule);
			return EINVAL;
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
	if (rule->tagname[0]) {
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) {
			error = EBUSY;
		}
	}
	if (rule->match_tagname[0]) {
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0) {
			error = EBUSY;
		}
	}
	/* route-to rules require an explicit direction */
	if (rule->rt && !rule->direction) {
		error = EINVAL;
	}
#if PFLOG
	if (!rule->log) {
		rule->logif = 0;
	}
	if (rule->logif >= PFLOGIFS_MAX) {
		error = EINVAL;
	}
#endif /* PFLOG */
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr)) {
		error = EBUSY;
	}
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) {
		error = EINVAL;
	}
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->src.addr)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) {
		error = EINVAL;
	}
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) {
		error = EINVAL;
	}
	/* validate every staged pool address against this ruleset's tables */
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
	if (pf_tbladdr_setup(ruleset, &apa->addr)) {
		error = EINVAL;
	}

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL) {
			error = EINVAL;
		} else {
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
		}
	}

	/* take ownership of the staged pool addresses */
	pf_mv_pool(&pf_pabuf, &rule->rpool.list);

	/* translation rules outside an anchor, and route rules, need a pool */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
	    rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL)) {
		error = EINVAL;
	}

	if (error) {
		/* releases kif ref, tags, tables and pool entries from above */
		pf_rm_rule(NULL, rule);
		return error;
	}
	/* For a NAT64 rule the rule's address family is AF_INET6 whereas
	 * the address pool's family will be AF_INET
	 */
	rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET: rule->af;
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return 0;
}
2753
/*
 * Handle the rule-manipulation ioctls: DIOCADDRULE, DIOCGETRULES,
 * DIOCGETRULE, DIOCCHANGERULE, DIOCINSERTRULE and DIOCDELETERULE.
 * `minordev` identifies the /dev/pf minor device the request arrived
 * on (PFDEV_PFM marks firewall-manager rules with PFRULE_PFM).
 * Returns 0 on success or an errno.
 */
static int
pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
{
	int error = 0;
	u_int32_t req_dev = 0;

	switch (cmd) {
	/* stage a new rule onto the inactive list of its ruleset category */
	case DIOCADDRULE: {
		struct pf_ruleset *ruleset;
		struct pf_rule *rule, *tail;
		int rs_num;

		/* defensively NUL-terminate user-supplied strings */
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		/* high byte of return_icmp is the ICMP type */
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		/* caller must hold the current inactive-ruleset ticket */
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		/* and the current address-pool staging ticket */
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_WAITOK);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		pf_rule_copyin(&pr->rule, rule, p, minordev);
#if !INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
		/* number the new rule after the current inactive tail */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail) {
			rule->nr = tail->nr + 1;
		} else {
			rule->nr = 0;
		}

		/* pf_rule_setup frees the rule itself on failure */
		if ((error = pf_rule_setup(pr, rule, ruleset))) {
			break;
		}

		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules++;
		}

		if (rule->action == PF_NAT64) {
			atomic_add_16(&pf_nat64_configured, 1);
		}

		/* only count non-anchor-call rules in the API stats */
		if (pr->anchor_call[0] == '\0') {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
			if (rule->rule_flag & PFRULE_PFM) {
				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
			}
		}

#if DUMMYNET
		/* notify dummynet listeners about new traffic-shaping rules */
		if (rule->action == PF_DUMMYNET) {
			struct dummynet_event dn_event;
			uint32_t direction = DN_INOUT;
			bzero(&dn_event, sizeof(dn_event));

			dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;

			if (rule->direction == PF_IN) {
				direction = DN_IN;
			} else if (rule->direction == PF_OUT) {
				direction = DN_OUT;
			}

			dn_event.dn_event_rule_config.dir = direction;
			dn_event.dn_event_rule_config.af = rule->af;
			dn_event.dn_event_rule_config.proto = rule->proto;
			dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
			dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
			strlcpy(dn_event.dn_event_rule_config.ifname, rule->ifname,
			    sizeof(dn_event.dn_event_rule_config.ifname));

			dummynet_event_enqueue_nwk_wq_entry(&dn_event);
		}
#endif
		break;
	}

	/* report the active rule count and ticket for a ruleset category */
	case DIOCGETRULES: {
		struct pf_ruleset *ruleset;
		struct pf_rule *tail;
		int rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		/* pr->nr = number of rules = last rule's nr + 1 */
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail) {
			pr->nr = tail->nr + 1;
		} else {
			pr->nr = 0;
		}
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}

	/* copy out the active rule numbered pr->nr */
	case DIOCGETRULE: {
		struct pf_ruleset *ruleset;
		struct pf_rule *rule;
		int rs_num, i;

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		/* ticket must match the generation reported by DIOCGETRULES */
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr)) {
			rule = TAILQ_NEXT(rule, entries);
		}
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		pf_rule_copyout(rule, &pr->rule);
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		/* translate kernel pointers into user-visible forms */
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		/* express skip-step pointers as rule numbers (-1 = none) */
		for (i = 0; i < PF_SKIP_COUNT; ++i) {
			if (rule->skip[i].ptr == NULL) {
				pr->rule.skip[i].nr = -1;
			} else {
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;
			}
		}

		/* optionally clear the counters after reading them */
		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	/* add/remove/replace a single rule directly in the active list */
	case DIOCCHANGERULE: {
		struct pfioc_rule *pcr = pr;
		struct pf_ruleset *ruleset;
		struct pf_rule *oldrule = NULL, *newrule = NULL;
		struct pf_pooladdr *pa;
		u_int32_t nr = 0;
		int rs_num;

		/* actions that consume a new rule need the pool ticket */
		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
		pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			/* hand out a fresh ticket for a follow-up change */
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_WAITOK);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			pf_rule_copyin(&pcr->rule, newrule, p, minordev);
#if !INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_get(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
			} else {
				newrule->kif = NULL;
			}

			if (newrule->tagname[0]) {
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0) {
					error = EBUSY;
				}
			}
			if (newrule->match_tagname[0]) {
				if ((newrule->match_tag = pf_tagname2tag(
					    newrule->match_tagname)) == 0) {
					error = EBUSY;
				}
			}
			/* route-to rules require an explicit direction */
			if (newrule->rt && !newrule->direction) {
				error = EINVAL;
			}
#if PFLOG
			if (!newrule->log) {
				newrule->logif = 0;
			}
			if (newrule->logif >= PFLOGIFS_MAX) {
				error = EINVAL;
			}
#endif /* PFLOG */
			pf_addrwrap_setup(&newrule->src.addr);
			pf_addrwrap_setup(&newrule->dst.addr);
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr)) {
				error = EBUSY;
			}
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) {
				error = EINVAL;
			}
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) {
				error = EINVAL;
			}
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) {
				error = EINVAL;
			}
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) {
				error = EINVAL;
			}
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) {
				error = EINVAL;
			}
			/* validate staged pool addresses against the ruleset */
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr)) {
				error = EINVAL;
			}

			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
					    ruleset, newrule->overload_tblname)) ==
				    NULL) {
					error = EINVAL;
				} else {
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
				}
			}

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			/* non-anchor translation/route rules need a pool */
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL)) {
				error = EINVAL;
			}

			if (error) {
				/* releases everything acquired above */
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		/* discard any leftover staged pool entries */
		pf_empty_pool(&pf_pabuf);

		/* locate the anchor rule for the requested position */
		if (pcr->action == PF_CHANGE_ADD_HEAD) {
			oldrule = TAILQ_FIRST(
				ruleset->rules[rs_num].active.ptr);
		} else if (pcr->action == PF_CHANGE_ADD_TAIL) {
			oldrule = TAILQ_LAST(
				ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		} else {
			oldrule = TAILQ_FIRST(
				ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) {
				oldrule = TAILQ_NEXT(oldrule, entries);
			}
			if (oldrule == NULL) {
				if (newrule != NULL) {
					pf_rm_rule(NULL, newrule);
				}
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL) {
				TAILQ_INSERT_TAIL(
					ruleset->rules[rs_num].active.ptr,
					newrule, entries);
			} else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE) {
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			} else {
				TAILQ_INSERT_AFTER(
					ruleset->rules[rs_num].active.ptr,
					oldrule, newrule, entries);
			}
			ruleset->rules[rs_num].active.rcount++;
		}

		/* renumber the whole active list */
		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
		oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);
#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
		net_filter_event_mark(NET_FILTER_EVENT_PF,
		    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
		break;
	}

	/* insert a rule into the active list, positioned by priority */
	case DIOCINSERTRULE: {
		struct pf_ruleset *ruleset;
		struct pf_rule *rule, *tail, *r;
		int rs_num;
		int is_anchor;

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
		is_anchor = (pr->anchor_call[0] != '\0');

		if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
		    pr->rule.owner, is_anchor, &error)) == NULL) {
			break;
		}

		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		/* make sure this anchor rule doesn't exist already */
		if (is_anchor) {
			r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
			while (r) {
				if (r->anchor &&
				    ((strcmp(r->anchor->name,
				    pr->anchor_call)) == 0)) {
					/* same owner (or unowned): duplicate;
					 * different owner: permission denied */
					if (((strcmp(pr->rule.owner,
					    r->owner)) == 0) ||
					    ((strcmp(r->owner, "")) == 0)) {
						error = EEXIST;
					} else {
						error = EPERM;
					}
					break;
				}
				r = TAILQ_NEXT(r, entries);
			}
			if (error != 0) {
				return error;
			}
		}

		rule = pool_get(&pf_rule_pl, PR_WAITOK);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		pf_rule_copyin(&pr->rule, rule, p, minordev);
#if !INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
		/* find the first rule of strictly higher priority */
		r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) {
			r = TAILQ_NEXT(r, entries);
		}
		if (r == NULL) {
			/* insert at tail: number after the current last rule */
			if ((tail =
			    TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue)) != NULL) {
				rule->nr = tail->nr + 1;
			} else {
				rule->nr = 0;
			}
		} else {
			/* take over r's number; r and successors shift up */
			rule->nr = r->nr;
		}

		if ((error = pf_rule_setup(pr, rule, ruleset))) {
			break;
		}

		if (rule->anchor != NULL) {
			strlcpy(rule->anchor->owner, rule->owner,
			    PF_OWNER_NAME_SIZE);
		}

		if (r) {
			TAILQ_INSERT_BEFORE(r, rule, entries);
			/* bump the nr of every rule after the insertion point */
			while (r && ++r->nr) {
				r = TAILQ_NEXT(r, entries);
			}
		} else {
			TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
			    rule, entries);
		}
		ruleset->rules[rs_num].active.rcount++;

		/* Calculate checksum for the main ruleset */
		if (ruleset == &pf_main_ruleset) {
			error = pf_setup_pfsync_matching(ruleset);
		}

		pf_ruleset_cleanup(ruleset, rs_num);
		/* the obfuscated rule address doubles as its ticket */
		rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);

		pr->rule.ticket = rule->ticket;
		pf_rule_copyout(rule, &pr->rule);
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules++;
		}
		if (rule->action == PF_NAT64) {
			atomic_add_16(&pf_nat64_configured, 1);
		}

		if (pr->anchor_call[0] == '\0') {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
			if (rule->rule_flag & PFRULE_PFM) {
				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
			}
		}
#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
		net_filter_event_mark(NET_FILTER_EVENT_PF,
		    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
		break;
	}

	/* delete by ticket (single rule) or by owner (bulk) */
	case DIOCDELETERULE: {
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';

		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		/* get device through which request is made */
		if ((uint8_t)minordev == PFDEV_PFM) {
			req_dev |= PFRULE_PFM;
		}

		if (pr->rule.ticket) {
			if ((error = pf_delete_rule_by_ticket(pr, req_dev))) {
				break;
			}
		} else {
			pf_delete_rule_by_owner(pr->rule.owner, req_dev);
		}
		/* report the remaining firewall-manager rule count */
		pr->nr = pffwrules;
		if (pr->rule.action == PF_NAT64) {
			atomic_add_16(&pf_nat64_configured, -1);
		}
#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
		net_filter_event_mark(NET_FILTER_EVENT_PF,
		    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3323
/*
 * Handle DIOCCLRSTATES / DIOCKILLSTATES: unlink pf states, optionally
 * filtered by interface name and/or rule owner supplied in *psk.
 * The number of states killed is reported back through psk->psk_af
 * (the field is overloaded as a counter on return).  Returns 0.
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	/* Force NUL-termination of the user-supplied names before strcmp(). */
	psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state *s, *nexts;
		int killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			/* Fetch the successor first; s may be unlinked below. */
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			/* A state with no rule cannot satisfy an owner filter. */
			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* Report kill count to userland via the overloaded af field. */
		psk->psk_af = (sa_family_t)killed;
#if NPFSYNC
		/* One bulk clear notification instead of per-state deletes. */
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state *s, *nexts;
		struct pf_state_key *sk;
		struct pf_state_host *src, *dst;
		int killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			/* Fetch the successor first; s may be unlinked below. */
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			/* A state with no rule cannot satisfy an owner filter. */
			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			/* Orient src/dst according to the state's direction. */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			/* Zero af/proto in the request act as wildcards. */
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* Report kill count to userland via the overloaded af field. */
		psk->psk_af = (sa_family_t)killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3450
/*
 * Handle DIOCADDSTATE / DIOCGETSTATE: import a single state supplied by
 * userland, or export an existing state looked up by (id, creatorid).
 * Returns 0 on success or an errno value.
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state *sp = &ps->state;
		struct pf_state *s;
		struct pf_state_key *sk;
		struct pfi_kif *kif;

		/* Reject timeout classes outside the known table. */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		/* Translate the wire/export format into s and sk. */
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			/*
			 * NOTE(review): only s is returned to its pool here,
			 * unlike the kif == NULL path above which also frees
			 * sk — presumably pf_insert_state disposes of the
			 * state key on failure; confirm against its
			 * implementation.
			 */
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		/* Account the new state against the default rule. */
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state *s;
		struct pf_state_cmp id_key;

		/* Build the lookup key from the caller-provided identifiers. */
		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		/* Copy the state back out in export format. */
		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3524
/*
 * Handle DIOCGETSTATES: dump all (non-unlinked) states into a user buffer.
 * Two-phase protocol: with ps_len == 0 the caller is only told the size
 * needed; otherwise states are copied out until the buffer is full and
 * ps_len is updated to the number of bytes actually written.  The 32-bit
 * and 64-bit ioctl structures are handled side by side (p64 selects).
 */
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: { /* struct pfioc_states */
		struct pf_state *state;
		struct pfsync_state *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* Size query only: report required buffer length. */
			size = sizeof(struct pfsync_state) * pf_status.states;
			if (p64) {
				ps64->ps_len = size;
			} else {
				ps32->ps_len = size;
			}
			break;
		}

		/* Single reusable staging record for the copyout loop. */
		pstore = kalloc_type(struct pfsync_state,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
#ifdef __LP64__
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);
#else
		buf = ps32->ps_buf;
#endif

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* Skip states already unlinked but not yet purged. */
			if (state->timeout != PFTM_UNLINKED) {
				/* Stop once the next record would overflow. */
				if ((nr + 1) * sizeof(*pstore) > (unsigned)len) {
					break;
				}

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof(*pstore));
				if (error) {
					kfree_type(struct pfsync_state, pstore);
					goto fail;
				}
				buf += sizeof(*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* Report the number of bytes actually copied out. */
		size = sizeof(struct pfsync_state) * nr;
		if (p64) {
			ps64->ps_len = size;
		} else {
			ps32->ps_len = size;
		}

		kfree_type(struct pfsync_state, pstore);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
3597
/*
 * Handle DIOCNATLOOK: given a connection's source/destination as seen by
 * userland, find the matching translated state and report the rewritten
 * addresses/ports back in pnl->rsaddr/rdaddr/rsxport/rdxport.
 * Errors: EINVAL for malformed queries, E2BIG if more than one state
 * matches, ENOENT if none does.
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key *sk;
		struct pf_state *state;
		struct pf_state_key_cmp key;
		int m = 0, direction = pnl->direction;

		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/*
		 * Require a protocol, non-zero addresses, and — for TCP/UDP —
		 * both ports, before attempting a lookup.
		 */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port))) {
			error = EINVAL;
		} else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				/* Inbound query: build key on the gwy side. */
				key.af_gwy = pnl->af;
				PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
				    pnl->af);
				memcpy(&key.ext_gwy.xport, &pnl->dxport,
				    sizeof(key.ext_gwy.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof(key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				/* Outbound query: build key on the lan side. */
				key.af_lan = pnl->af;
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof(key.lan.xport));
				PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
				    pnl->af);
				memcpy(&key.ext_lan.xport, &pnl->sxport,
				    sizeof(key.ext_lan.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1) {
				error = E2BIG; /* more than one state */
			} else if (state != NULL) {
				sk = state->state_key;
				if (direction == PF_IN) {
					/* Report the pre-NAT (lan) endpoint. */
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af_lan);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof(pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof(pnl->rdxport));
				} else {
					/* Report the post-NAT (gwy) endpoint. */
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af_gwy);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof(pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof(pnl->rsxport));
				}
			} else {
				error = ENOENT;
			}
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3686
3687 static int
pfioctl_ioc_tm(u_long cmd,struct pfioc_tm * pt,struct proc * p)3688 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3689 {
3690 #pragma unused(p)
3691 int error = 0;
3692
3693 switch (cmd) {
3694 case DIOCSETTIMEOUT: {
3695 int old;
3696
3697 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3698 pt->seconds < 0) {
3699 error = EINVAL;
3700 goto fail;
3701 }
3702 old = pf_default_rule.timeout[pt->timeout];
3703 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) {
3704 pt->seconds = 1;
3705 }
3706 pf_default_rule.timeout[pt->timeout] = pt->seconds;
3707 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) {
3708 wakeup(pf_purge_thread_fn);
3709 }
3710 pt->seconds = old;
3711 break;
3712 }
3713
3714 case DIOCGETTIMEOUT: {
3715 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3716 error = EINVAL;
3717 goto fail;
3718 }
3719 pt->seconds = pf_default_rule.timeout[pt->timeout];
3720 break;
3721 }
3722
3723 default:
3724 VERIFY(0);
3725 /* NOTREACHED */
3726 }
3727 fail:
3728 return error;
3729 }
3730
3731 static int
pfioctl_ioc_limit(u_long cmd,struct pfioc_limit * pl,struct proc * p)3732 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3733 {
3734 #pragma unused(p)
3735 int error = 0;
3736
3737 switch (cmd) {
3738 case DIOCGETLIMIT: {
3739 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3740 error = EINVAL;
3741 goto fail;
3742 }
3743 pl->limit = pf_pool_limits[pl->index].limit;
3744 break;
3745 }
3746
3747 case DIOCSETLIMIT: {
3748 int old_limit;
3749
3750 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3751 pf_pool_limits[pl->index].pp == NULL) {
3752 error = EINVAL;
3753 goto fail;
3754 }
3755 pool_sethardlimit(pf_pool_limits[pl->index].pp,
3756 pl->limit, NULL, 0);
3757 old_limit = pf_pool_limits[pl->index].limit;
3758 pf_pool_limits[pl->index].limit = pl->limit;
3759 pl->limit = old_limit;
3760 break;
3761 }
3762
3763 default:
3764 VERIFY(0);
3765 /* NOTREACHED */
3766 }
3767 fail:
3768 return error;
3769 }
3770
/*
 * Handle the pool-address ioctls: start a new address buffer
 * (DIOCBEGINADDRS), append to it (DIOCADDADDR), enumerate a rule's pool
 * (DIOCGETADDRS/DIOCGETADDR), and add/replace/remove an entry in a live
 * pool (DIOCCHANGEADDR).  Returns 0 on success or an errno value.
 */
static int
pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
{
#pragma unused(p)
	struct pf_pooladdr *pa = NULL;
	struct pf_pool *pool = NULL;
	int error = 0;

	switch (cmd) {
	case DIOCBEGINADDRS: {
		/* Discard any pending buffer and issue a fresh ticket. */
		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		/* The caller must hold the current DIOCBEGINADDRS ticket. */
		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#if !INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
		/* Only plain, dynamic-interface, and table addresses. */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		pf_pooladdr_copyin(&pp->addr, pa);
		if (pa->ifname[0]) {
			/* Bind the entry to its interface, taking a ref. */
			pa->kif = pfi_kif_get(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		pf_addrwrap_setup(&pa->addr);
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			/* Undo the partial setup before bailing out. */
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		/* Count the entries of the addressed rule's pool. */
		pp->nr = 0;
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
		pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		u_int32_t nr = 0;

		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		/* Walk to the pp->nr'th entry of the pool. */
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		pf_pooladdr_copyout(pa, &pp->addr);
		/* Resolve dynamic/table/rtlabel fields for userland. */
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr *pca = pp;
		struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
		struct pf_ruleset *ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			/* Build the replacement/new entry first. */
			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			pf_pooladdr_copyin(&pca->addr, newpa);
#if !INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
			if (newpa->ifname[0]) {
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else {
				newpa->kif = NULL;
			}
			pf_addrwrap_setup(&newpa->addr);
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				/* Roll back the partially built entry. */
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		/* Locate the anchor position for the requested action. */
		if (pca->action == PF_CHANGE_ADD_HEAD) {
			oldpa = TAILQ_FIRST(&pool->list);
		} else if (pca->action == PF_CHANGE_ADD_TAIL) {
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		} else {
			int i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < (int)pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			/* Unlink and release the entry and its references. */
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL) {
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			} else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE) {
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			} else {
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
			}
		}

		/*
		 * NOTE(review): if PF_CHANGE_REMOVE leaves the pool empty,
		 * pool->cur is NULL here and &pool->cur->addr.v.a.addr is a
		 * NULL-based dereference — confirm an empty pool cannot reach
		 * this point.
		 */
		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3985
/*
 * Handle DIOCGETRULESETS / DIOCGETRULESET: count the child anchors of a
 * ruleset path, or fetch the name of the pr->nr'th child.  Returns 0,
 * EINVAL if the path does not resolve, or EBUSY if the index is out of
 * range.
 */
static int
pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCGETRULESETS: {
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;

		/* NUL-terminate user-supplied strings before lookup. */
		pr->path[sizeof(pr->path) - 1] = '\0';
		pr->name[sizeof(pr->name) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			/* Main ruleset: count only parentless anchors. */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL) {
				pr->nr++;
			}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		struct pf_ruleset *ruleset;
		struct pf_anchor *anchor;
		u_int32_t nr = 0;

		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			/* Main ruleset: index across parentless anchors. */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL && nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			if (nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
		}
		/* Empty name means pr->nr was past the last child. */
		if (!pr->name[0]) {
			error = EBUSY;
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
4059
/*
 * Handle the transaction ioctls DIOCXBEGIN / DIOCXROLLBACK / DIOCXCOMMIT.
 * The user passes an array of `size` pfioc_trans_e elements (element size
 * must match exactly, else ENODEV).  Begin opens an inactive ruleset or
 * table per element and returns a ticket; rollback discards them; commit
 * first validates every ticket and then atomically swaps all elements in.
 * Handles both 32-bit and 64-bit ioctl layouts.
 */
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int error = 0, esize, size;
	user_addr_t buf;

#ifdef __LP64__
	int p64 = proc_is64bit(p);

	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);
#else
#pragma unused(io64, p)
	esize = io32->esize;
	size = io32->size;
	buf = io32->array;
#endif

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		/* Element size mismatch implies ABI skew with userland. */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				/* ALTQ is not supported; accept as no-op. */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			}
			/* Write the issued ticket back into the user array. */
			if (copyout(ioe, buf, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		int i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e *ioe;
		struct pfr_table *table;
		struct pf_ruleset *rs;
		user_addr_t _buf = buf;  /* saved for the second pass */
		int i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				/* Table transaction must be open and match. */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EINVAL;
					goto fail;
				}
				/* Inactive ruleset must be open and match. */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Rewind to the start of the user array for pass two. */
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
		/* The ruleset changed; re-evaluate filter compatibility. */
		net_filter_event_mark(NET_FILTER_EVENT_PF,
		    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4289
/*
 * Handle DIOCGETSRCNODES: dump the source-tracking nodes to a user buffer.
 * Two-phase protocol: psn_len == 0 requests only the required size;
 * otherwise nodes are copied out (with times made relative and kernel
 * pointers scrubbed) and psn_len is set to the bytes written.
 */
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node *n, *pstore;
		user_addr_t buf;
		u_int32_t nr = 0;
		int space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			/* Size query: count nodes and report bytes needed. */
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			nr++;

			size = sizeof(struct pf_src_node) * nr;
			if (p64) {
				psn64->psn_len = size;
			} else {
				psn32->psn_len = size;
			}
			break;
		}

		/* Single reusable staging record for the copyout loop. */
		pstore = kalloc_type(struct pf_src_node, Z_WAITOK | Z_NOFAIL);
#ifdef __LP64__
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);
#else
		buf = psn32->psn_buf;
#endif

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* Stop once the next record would overflow. */
			if ((nr + 1) * sizeof(*pstore) > (unsigned)space) {
				break;
			}

			bcopy(n, pstore, sizeof(*pstore));
			if (n->rule.ptr != NULL) {
				/* Export the rule number, not the pointer. */
				pstore->rule.nr = n->rule.ptr->nr;
			}
			/* Convert absolute times to relative for userland. */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs) {
				pstore->expire -= secs;
			} else {
				pstore->expire = 0;
			}

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds) {
				pstore->conn_rate.count = 0;
			} else {
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;
			}

			/* Scrub kernel pointers before copying out. */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof(*pstore));
			if (error) {
				kfree_type(struct pf_src_node, pstore);
				goto fail;
			}
			buf += sizeof(*pstore);
			nr++;
		}

		/* Report the number of bytes actually copied out. */
		size = sizeof(struct pf_src_node) * nr;
		if (p64) {
			psn64->psn_len = size;
		} else {
			psn32->psn_len = size;
		}

		kfree_type(struct pf_src_node, pstore);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4384
/*
 * Handle DIOCKILLSRCNODES: expire all source-tracking nodes whose
 * src/dst addresses match the caller's (possibly negated, masked)
 * criteria, detaching any states that reference them first.  The kill
 * count is reported back via psnk->psnk_af.  Returns 0.
 */
static int
pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
    struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCKILLSRCNODES: {
		struct pf_src_node *sn;
		struct pf_state *s;
		int killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					/* Detach every state pointing at sn. */
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn) {
							s->src_node = NULL;
						}
						if (s->nat_src_node == sn) {
							s->nat_src_node = NULL;
						}
					}
					sn->states = 0;
				}
				/* Mark as expired; purge pass frees it. */
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0) {
			pf_purge_expired_src_nodes();
		}

		/* Report kill count to userland via the overloaded af field. */
		psnk->psnk_af = (sa_family_t)killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
4440
/*
 * Handle the interface ioctls DIOCIGETIFACES / DIOCSETIFFLAG /
 * DIOCCLRIFFLAG: enumerate pf's known interfaces, or set/clear per-
 * interface flags.  Supports both 32-bit and 64-bit ioctl layouts;
 * p64 selects which of io32/io64 is live.
 */
static int
pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
    struct pfioc_iface_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCIGETIFACES: {
		user_addr_t buf;
		int esize;

#ifdef __LP64__
		buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
		esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
#else
		buf = io32->pfiio_buffer;
		esize = io32->pfiio_esize;
#endif

		/* esize must be that of the user space version of pfi_kif */
		if (esize != sizeof(struct pfi_uif)) {
			error = ENODEV;
			break;
		}
		/* NUL-terminate the user-supplied name filter. */
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}
		error = pfi_get_ifaces(
			p64 ? io64->pfiio_name : io32->pfiio_name, buf,
			p64 ? &io64->pfiio_size : &io32->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		/* NUL-terminate the user-supplied interface name. */
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}

		error = pfi_set_flags(
			p64 ? io64->pfiio_name : io32->pfiio_name,
			p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		/* NUL-terminate the user-supplied interface name. */
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}

		error = pfi_clear_flags(
			p64 ? io64->pfiio_name : io32->pfiio_name,
			p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
4510
/*
 * PF entry point for AF_INET / AF_INET6 traffic on both the input and
 * output paths.  Detaches the packet at *mp from any packet chain, runs
 * it through the per-AF hook, and repairs the chain afterwards:
 *   *mp   - the packet after filtering (may be substituted by PF), or
 *           NULL if PF consumed/dropped it
 *   *mppn - if non-NULL, the previous packet's m_nextpkt slot, fixed up
 *           to skip a dropped packet
 * Returns 0 when the packet passed (or af is unhandled), else an errno
 * from pf_inet_hook()/pf_inet6_hook().
 */
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *nextpkt;
	net_thread_marks_t marks;
	struct ifnet * pf_ifp = ifp;

	/* Always allow traffic on co-processor interfaces. */
	if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp)) {
		return 0;
	}

	/*
	 * Mark this thread as running inside PF.  net_thread_marks_push()
	 * returns net_thread_marks_none when the mark was already set —
	 * NOTE(review): in that case the locking below is skipped,
	 * presumably because an outer invocation on this same thread
	 * already holds the locks; confirm against net_thread_marks
	 * semantics.
	 */
	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(&pf_perim_lock);
		if (!pf_is_enabled) {
			goto done;
		}
		lck_mtx_lock(&pf_lock);
	}

	/* If a previous-packet link was given, it must point at *mp. */
	if (mppn != NULL && *mppn != NULL) {
		VERIFY(*mppn == *mp);
	}
	/* Detach the packet from its chain; relinked after filtering. */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL) {
		(*mp)->m_nextpkt = NULL;
	}

	/*
	 * For packets destined to locally hosted IP address
	 * ip_output_list sets Mbuf's pkt header's rcvif to
	 * the interface hosting the IP address.
	 * While on the output path ifp passed to pf_af_hook
	 * to such local communication is the loopback interface,
	 * the input path derives ifp from mbuf packet header's
	 * rcvif.
	 * This asymmetry causes issues with PF.
	 * To handle that case, we have a limited change here to
	 * pass interface as loopback if packets are looped in.
	 */
	if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		pf_ifp = lo_ifp;
	}

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(pf_ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
	case AF_INET6:
		error = pf_inet6_hook(pf_ifp, mp, input, fwa);
		break;
	default:
		/* Unhandled address family: pass the packet untouched. */
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		/* PF may have turned *mp into a chain; append at its tail. */
		while (m->m_nextpkt != NULL) {
			m = m->m_nextpkt;
		}
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL) {
			*mppn = *mp;
		} else {
			/* Packet dropped: splice the chain around it. */
			*mppn = nextpkt;
		}
	}

	if (marks != net_thread_marks_none) {
		lck_mtx_unlock(&pf_lock);
	}

done:
	if (marks != net_thread_marks_none) {
		lck_rw_done(&pf_perim_lock);
	}

	net_thread_marks_pop(marks);
	return error;
}
4601
4602
#if INET
/*
 * Run an IPv4 packet through pf_test_mbuf().
 * Returns 0 if the packet passed; EHOSTUNREACH if PF rejected it (the
 * packet is freed and *mp set to NULL); ENOBUFS if PF consumed it.
 */
static __attribute__((noinline)) int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

	/*
	 * Swap ip_len/ip_off to network byte order around the call —
	 * NOTE(review): pf_test_mbuf appears to expect these fields in
	 * network order; confirm against pf_test_mbuf's contract.
	 */
#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* PF rejected the packet: free it and report. */
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* PF consumed the packet (e.g. during reassembly). */
			error = ENOBUFS;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/* Re-fetch the header: PF may have replaced the mbuf. */
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return error;
}
#endif /* INET */
4657
4658 int __attribute__((noinline))
pf_inet6_hook(struct ifnet * ifp,struct mbuf ** mp,int input,struct ip_fw_args * fwa)4659 pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
4660 struct ip_fw_args *fwa)
4661 {
4662 int error = 0;
4663
4664 /*
4665 * If the packet is outbound, is originated locally, is flagged for
4666 * delayed UDP/TCP checksum calculation, and is about to be processed
4667 * for an interface that doesn't support the appropriate checksum
4668 * offloading, then calculated the checksum here so that PF can adjust
4669 * it properly.
4670 */
4671 if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
4672 static const int mask = CSUM_DELAY_IPV6_DATA;
4673 const int flags = (*mp)->m_pkthdr.csum_flags &
4674 ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
4675
4676 if (flags & mask) {
4677 /*
4678 * Checksum offload should not have been enabled
4679 * when extension headers exist, thus 0 for optlen.
4680 */
4681 in6_delayed_cksum(*mp);
4682 (*mp)->m_pkthdr.csum_flags &= ~mask;
4683 }
4684 }
4685
4686 if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
4687 if (*mp != NULL) {
4688 m_freem(*mp);
4689 *mp = NULL;
4690 error = EHOSTUNREACH;
4691 } else {
4692 error = ENOBUFS;
4693 }
4694 }
4695 return error;
4696 }
4697
4698 int
pf_ifaddr_hook(struct ifnet * ifp)4699 pf_ifaddr_hook(struct ifnet *ifp)
4700 {
4701 struct pfi_kif *kif = ifp->if_pf_kif;
4702
4703 if (kif != NULL) {
4704 lck_rw_lock_shared(&pf_perim_lock);
4705 lck_mtx_lock(&pf_lock);
4706
4707 pfi_kifaddr_update(kif);
4708
4709 lck_mtx_unlock(&pf_lock);
4710 lck_rw_done(&pf_perim_lock);
4711 }
4712 return 0;
4713 }
4714
4715 /*
4716 * Caller acquires dlil lock as writer (exclusive)
4717 */
4718 void
pf_ifnet_hook(struct ifnet * ifp,int attach)4719 pf_ifnet_hook(struct ifnet *ifp, int attach)
4720 {
4721 lck_rw_lock_shared(&pf_perim_lock);
4722 lck_mtx_lock(&pf_lock);
4723 if (attach) {
4724 pfi_attach_ifnet(ifp);
4725 } else {
4726 pfi_detach_ifnet(ifp);
4727 }
4728 lck_mtx_unlock(&pf_lock);
4729 lck_rw_done(&pf_perim_lock);
4730 }
4731
4732 static void
pf_attach_hooks(void)4733 pf_attach_hooks(void)
4734 {
4735 ifnet_head_lock_shared();
4736 /*
4737 * Check against ifnet_addrs[] before proceeding, in case this
4738 * is called very early on, e.g. during dlil_init() before any
4739 * network interface is attached.
4740 */
4741 if (ifnet_addrs != NULL) {
4742 int i;
4743
4744 for (i = 0; i <= if_index; i++) {
4745 struct ifnet *ifp = ifindex2ifnet[i];
4746 if (ifp != NULL) {
4747 pfi_attach_ifnet(ifp);
4748 }
4749 }
4750 }
4751 ifnet_head_done();
4752 }
4753
#if 0
/* currently unused along with pfdetach() */
/*
 * Walk the interface index table and detach PF state from every
 * interface that has a PF kif attached.
 *
 * Fix: the loop counter `i` was declared inside the loop body, after
 * it was already used in the for-header, so this function could not
 * compile if re-enabled.  The declaration is hoisted to function scope.
 */
static void
pf_detach_hooks(void)
{
	int i;

	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];
			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}
#endif
4773
4774 /*
4775 * 'D' group ioctls.
4776 *
4777 * The switch statement below does nothing at runtime, as it serves as a
4778 * compile time check to ensure that all of the socket 'D' ioctls (those
4779 * in the 'D' group going thru soo_ioctl) that are made available by the
 * networking stack are unique. This works as long as this routine gets
4781 * updated each time a new interface ioctl gets added.
4782 *
4783 * Any failures at compile time indicates duplicated ioctl values.
4784 */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to _CASSERT() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 * Duplicate case labels are a compile-time error, so listing
	 * every 'D'-group ioctl here proves the values are distinct.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES:
	case DIOCGIFSPEED:
		;
	}
}
4869