xref: /xnu-8019.80.24/bsd/net/pf_ioctl.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*	$apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /*	$OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31 
32 /*
33  * Copyright (c) 2001 Daniel Hartmeier
34  * Copyright (c) 2002,2003 Henning Brauer
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *    - Redistributions of source code must retain the above copyright
42  *      notice, this list of conditions and the following disclaimer.
43  *    - Redistributions in binary form must reproduce the above
44  *      copyright notice, this list of conditions and the following
45  *      disclaimer in the documentation and/or other materials provided
46  *      with the distribution.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59  * POSSIBILITY OF SUCH DAMAGE.
60  *
61  * Effort sponsored in part by the Defense Advanced Research Projects
62  * Agency (DARPA) and Air Force Research Laboratory, Air Force
63  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64  *
65  */
66 
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83 #include <os/log.h>
84 
85 #include <mach/vm_param.h>
86 
87 #include <net/dlil.h>
88 #include <net/if.h>
89 #include <net/if_types.h>
90 #include <net/net_api_stats.h>
91 #include <net/route.h>
92 #if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
93 #include <skywalk/lib/net_filter_event.h>
94 #endif
95 
96 #include <netinet/in.h>
97 #include <netinet/in_var.h>
98 #include <netinet/in_systm.h>
99 #include <netinet/ip.h>
100 #include <netinet/ip_var.h>
101 #include <netinet/ip_icmp.h>
102 #include <netinet/if_ether.h>
103 
104 #if DUMMYNET
105 #include <netinet/ip_dummynet.h>
106 #else
107 struct ip_fw_args;
108 #endif /* DUMMYNET */
109 
110 #include <libkern/crypto/md5.h>
111 
112 #include <machine/machine_routines.h>
113 
114 #include <miscfs/devfs/devfs.h>
115 
116 #include <net/pfvar.h>
117 
118 #if NPFSYNC
119 #include <net/if_pfsync.h>
120 #endif /* NPFSYNC */
121 
122 #if PFLOG
123 #include <net/if_pflog.h>
124 #endif /* PFLOG */
125 
126 #include <netinet/ip6.h>
127 #include <netinet/in_pcb.h>
128 
129 #include <dev/random/randomdev.h>
130 
131 #if 0
132 static void pfdetach(void);
133 #endif
134 static int pfopen(dev_t, int, int, struct proc *);
135 static int pfclose(dev_t, int, int, struct proc *);
136 static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
137 static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
138     struct pfioc_table_64 *, struct proc *);
139 static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
140     struct pfioc_tokens_64 *, struct proc *);
141 static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
142 static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
143     struct proc *);
144 static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
145 static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
146     struct pfioc_states_64 *, struct proc *);
147 static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
148 static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
149 static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
150 static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
151 static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
152 static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
153     struct pfioc_trans_64 *, struct proc *);
154 static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
155     struct pfioc_src_nodes_64 *, struct proc *);
156 static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
157     struct proc *);
158 static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
159     struct pfioc_iface_64 *, struct proc *);
160 static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
161     u_int8_t, u_int8_t, u_int8_t);
162 static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
163 static void pf_empty_pool(struct pf_palist *);
164 static int pf_begin_rules(u_int32_t *, int, const char *);
165 static int pf_rollback_rules(u_int32_t, int, char *);
166 static int pf_setup_pfsync_matching(struct pf_ruleset *);
167 static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
168 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
169 static int pf_commit_rules(u_int32_t, int, char *);
170 static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
171     int);
172 static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
173 static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
174     struct pf_state *);
175 static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
176     struct pf_state *);
177 static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
178 static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
179 static void pf_expire_states_and_src_nodes(struct pf_rule *);
180 static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
181     int, struct pf_rule *);
182 static void pf_addrwrap_setup(struct pf_addr_wrap *);
183 static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
184     struct pf_ruleset *);
185 static void pf_delete_rule_by_owner(char *, u_int32_t);
186 static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
187 static void pf_ruleset_cleanup(struct pf_ruleset *, int);
188 static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
189     int, struct pf_rule **);
190 
/* -1: ask cdevsw_add() to assign a free major number (checked in pfinit()) */
#define PF_CDEV_MAJOR   (-1)

/*
 * Character-device switch backing /dev/pf and /dev/pfm.  Only open,
 * close and ioctl are implemented; the remaining entry points are the
 * generic "not supported" stubs (eno_*).
 */
static const struct cdevsw pf_cdevsw = {
	.d_open       = pfopen,
	.d_close      = pfclose,
	.d_read       = eno_rdwrt,
	.d_write      = eno_rdwrt,
	.d_ioctl      = pfioctl,
	.d_stop       = eno_stop,
	.d_reset      = eno_reset,
	.d_ttys       = NULL,
	.d_select     = eno_select,
	.d_mmap       = eno_mmap,
	.d_strategy   = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
	.d_type       = 0
};
209 
210 static void pf_attach_hooks(void);
211 #if 0
212 /* currently unused along with pfdetach() */
213 static void pf_detach_hooks(void);
214 #endif
215 
216 /*
217  * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
218  * and used in pf_af_hook() for performance optimization, such that packets
219  * will enter pf_test() or pf_test6() only when PF is running.
220  */
int pf_is_enabled = 0;

/* seed for pf's hashing; set from RandomULong() in pfinit() */
u_int32_t pf_hash_seed;
/* NOTE(review): presumably flags configured NAT64 rules — confirm at use sites */
int16_t pf_nat64_configured = 0;

/*
 * These are the pf enabled reference counting variables
 */
#define NR_TOKENS_LIMIT (INT_MAX / sizeof(struct pfioc_token))

/* NOTE(review): outstanding enable references — usage not visible in this chunk */
static u_int64_t pf_enabled_ref_count;
/* number of entries on token_list_head; capped by NR_TOKENS_LIMIT */
static u_int32_t nr_tokens = 0;
/* NOTE(review): rule counter — usage not visible in this chunk */
static u_int32_t pffwrules;
/* open count for /dev/pfm; enforces exclusive open (see pfopen()/pfclose()) */
static u_int32_t pfdevcnt;

/* singly-linked list of kernel-side enable tokens, guarded by pf_lock */
SLIST_HEAD(list_head, pfioc_kernel_token);
static struct list_head token_list_head;
238 
/* catch-all rule; initialized to PF_PASS in pfinit() */
struct pf_rule           pf_default_rule;

/* name -> id mapping for tags whose ids are fixed rather than allocated */
typedef struct {
	char tag_name[PF_TAG_NAME_SIZE];
	uint16_t tag_id;
} pf_reserved_tag_table_t;

#define NUM_RESERVED_TAGS    2
static pf_reserved_tag_table_t pf_reserved_tag_table[NUM_RESERVED_TAGS] = {
	{ PF_TAG_NAME_SYSTEM_SERVICE, PF_TAG_ID_SYSTEM_SERVICE},
	{ PF_TAG_NAME_STACK_DROP, PF_TAG_ID_STACK_DROP},
};
/* lowest reserved id; a _CASSERT in tagname2tag() keeps all reserved
 * ids above DYNAMIC_TAG_ID_MAX */
#define RESERVED_TAG_ID_MIN    PF_TAG_ID_SYSTEM_SERVICE

/* dynamically allocated tag ids live in 1..DYNAMIC_TAG_ID_MAX (tagname2tag()) */
#define DYNAMIC_TAG_ID_MAX    50000
/* global tag table; reserved tags are kept at the head of the list */
static TAILQ_HEAD(pf_tags, pf_tagname)  pf_tags =
    TAILQ_HEAD_INITIALIZER(pf_tags);
256 
257 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
258 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
259 #endif
260 static u_int16_t         tagname2tag(struct pf_tags *, char *);
261 static void              tag2tagname(struct pf_tags *, u_int16_t, char *);
262 static void              tag_unref(struct pf_tags *, u_int16_t);
263 static int               pf_rtlabel_add(struct pf_addr_wrap *);
264 static void              pf_rtlabel_remove(struct pf_addr_wrap *);
265 static void              pf_rtlabel_copyout(struct pf_addr_wrap *);
266 
267 #if INET
268 static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
269     struct ip_fw_args *);
270 #endif /* INET */
271 static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
272     struct ip_fw_args *);
273 
/*
 * Debug logging: prints only when pf_status.debug >= n.
 * NOTE(review): expands to an unbraced `if` — callers must brace the
 * surrounding statement to avoid the dangling-else hazard.
 */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

/*
 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
 * The BEGIN/END macros copy the user structure into (and back out of) a
 * heap-allocated union; they rely on a local `p64` flag selecting the
 * 64-bit layout in the enclosing function.
 */
#define PFIOCX_STRUCT_DECL(s)                                           \
struct {                                                                \
	union {                                                         \
	        struct s##_32	_s##_32;                                \
	        struct s##_64	_s##_64;                                \
	} _u;                                                           \
} *s##_un = NULL                                                        \

#define PFIOCX_STRUCT_BEGIN(a, s, _action) {                            \
	VERIFY(s##_un == NULL);                                         \
	s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO);    \
	if (s##_un == NULL) {                                           \
	        _action                                                 \
	} else {                                                        \
	        if (p64)                                                \
	                bcopy(a, &s##_un->_u._s##_64,                   \
	                    sizeof (struct s##_64));                    \
	        else                                                    \
	                bcopy(a, &s##_un->_u._s##_32,                   \
	                    sizeof (struct s##_32));                    \
	}                                                               \
}

#define PFIOCX_STRUCT_END(s, a) {                                       \
	VERIFY(s##_un != NULL);                                         \
	if (p64)                                                        \
	        bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64));  \
	else                                                            \
	        bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32));  \
	_FREE(s##_un, M_TEMP);                                          \
	s##_un = NULL;                                                  \
}

#define PFIOCX_STRUCT_ADDR32(s)         (&s##_un->_u._s##_32)
#define PFIOCX_STRUCT_ADDR64(s)         (&s##_un->_u._s##_64)

/*
 * Helper macros for regular ioctl structures.
 */
#define PFIOC_STRUCT_BEGIN(a, v, _action) {                             \
	VERIFY((v) == NULL);                                            \
	(v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO);          \
	if ((v) == NULL) {                                              \
	        _action                                                 \
	} else {                                                        \
	        bcopy(a, v, sizeof (*(v)));                             \
	}                                                               \
}

#define PFIOC_STRUCT_END(v, a) {                                        \
	VERIFY((v) != NULL);                                            \
	bcopy(v, a, sizeof (*(v)));                                     \
	_FREE(v, M_TEMP);                                               \
	(v) = NULL;                                                     \
}

/*
 * NOTE(review): these expand to s##_un like the PFIOCX variants above
 * and appear to duplicate PFIOCX_STRUCT_ADDR32/64 — confirm intended.
 */
#define PFIOC_STRUCT_ADDR32(s)          (&s##_un->_u._s##_32)
#define PFIOC_STRUCT_ADDR64(s)          (&s##_un->_u._s##_64)
337 
338 struct thread *pf_purge_thread;
339 
340 extern void pfi_kifaddr_update(void *);
341 
342 /* pf enable ref-counting helper functions */
343 static u_int64_t                generate_token(struct proc *);
344 static int                      remove_token(struct pfioc_remove_token *);
345 static void                     invalidate_all_tokens(void);
346 
347 static u_int64_t
generate_token(struct proc * p)348 generate_token(struct proc *p)
349 {
350 	u_int64_t token_value;
351 	struct pfioc_kernel_token *new_token;
352 
353 	if (nr_tokens + 1 > NR_TOKENS_LIMIT) {
354 		os_log_error(OS_LOG_DEFAULT, "%s: NR_TOKENS_LIMIT reached", __func__);
355 		return 0;
356 	}
357 
358 	new_token = kalloc_type(struct pfioc_kernel_token,
359 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
360 
361 	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
362 
363 	token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);
364 
365 	new_token->token.token_value = token_value;
366 	new_token->token.pid = proc_pid(p);
367 	proc_name(new_token->token.pid, new_token->token.proc_name,
368 	    sizeof(new_token->token.proc_name));
369 	new_token->token.timestamp = pf_calendar_time_second();
370 
371 	SLIST_INSERT_HEAD(&token_list_head, new_token, next);
372 	nr_tokens++;
373 
374 	return token_value;
375 }
376 
377 static int
remove_token(struct pfioc_remove_token * tok)378 remove_token(struct pfioc_remove_token *tok)
379 {
380 	struct pfioc_kernel_token *entry, *tmp;
381 
382 	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
383 
384 	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
385 		if (tok->token_value == entry->token.token_value) {
386 			SLIST_REMOVE(&token_list_head, entry,
387 			    pfioc_kernel_token, next);
388 			kfree_type(struct pfioc_kernel_token, entry);
389 			nr_tokens--;
390 			return 0;    /* success */
391 		}
392 	}
393 
394 	printf("pf : remove failure\n");
395 	return ESRCH;    /* failure */
396 }
397 
398 static void
invalidate_all_tokens(void)399 invalidate_all_tokens(void)
400 {
401 	struct pfioc_kernel_token *entry, *tmp;
402 
403 	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
404 
405 	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
406 		SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
407 		kfree_type(struct pfioc_kernel_token, entry);
408 	}
409 
410 	nr_tokens = 0;
411 }
412 
/*
 * One-time PF subsystem initialization at boot: backing pools, lookup
 * trees, the default pass rule and its timeouts, the purge thread, and
 * the /dev/pf and /dev/pfm device nodes.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* backing pools for rules, states, keys, app state and pool addrs */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof(struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* systems with <= 256MB get a lower table-entry hard limit */
	if (max_mem <= 256 * 1024 * 1024) {
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;
	}

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	/* compile-time check: masked SC_* values must equal their SCIDX_* */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	/* /dev/pf and /dev/pfm, both root-only (0600) */
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf", 0);

	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm", 0);

	pf_attach_hooks();
#if DUMMYNET
	dummynet_init();
#endif
}
525 
#if 0
/*
 * Tear down the PF subsystem — the inverse of pfinit().  Compiled out
 * along with pf_detach_hooks(); kept as a reference teardown path.
 */
static void
pfdetach(void)
{
	struct pf_anchor        *anchor;
	struct pf_state         *state;
	struct pf_src_node      *node;
	struct pfioc_table      pt;
	u_int32_t               ticket;
	int                     i;
	char                    r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets: begin+commit an empty transaction per type */
	for (i = 0; i < PF_RULESET_MAX; i++) {
		if (pf_begin_rules(&ticket, i, &r) == 0) {
			pf_commit_rules(ticket, i, &r);
		}
	}

	/* clear states: mark every state for immediate purge */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++) {
			if (pf_begin_rules(&ticket, i, anchor->name) == 0) {
				pf_commit_rules(ticket, i, anchor->name);
			}
		}
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/*
	 * destroy the pools
	 * NOTE(review): pf_state_key_pl and pf_app_state_pl (created in
	 * pfinit()) are not destroyed here — verify before re-enabling.
	 */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
603 
604 static int
pfopen(dev_t dev,int flags,int fmt,struct proc * p)605 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
606 {
607 #pragma unused(flags, fmt, p)
608 	if (minor(dev) >= PFDEV_MAX) {
609 		return ENXIO;
610 	}
611 
612 	if (minor(dev) == PFDEV_PFM) {
613 		lck_mtx_lock(&pf_lock);
614 		if (pfdevcnt != 0) {
615 			lck_mtx_unlock(&pf_lock);
616 			return EBUSY;
617 		}
618 		pfdevcnt++;
619 		lck_mtx_unlock(&pf_lock);
620 	}
621 	return 0;
622 }
623 
624 static int
pfclose(dev_t dev,int flags,int fmt,struct proc * p)625 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
626 {
627 #pragma unused(flags, fmt, p)
628 	if (minor(dev) >= PFDEV_MAX) {
629 		return ENXIO;
630 	}
631 
632 	if (minor(dev) == PFDEV_PFM) {
633 		lck_mtx_lock(&pf_lock);
634 		VERIFY(pfdevcnt > 0);
635 		pfdevcnt--;
636 		lck_mtx_unlock(&pf_lock);
637 	}
638 	return 0;
639 }
640 
641 static struct pf_pool *
pf_get_pool(char * anchor,u_int32_t ticket,u_int8_t rule_action,u_int32_t rule_number,u_int8_t r_last,u_int8_t active,u_int8_t check_ticket)642 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
643     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
644     u_int8_t check_ticket)
645 {
646 	struct pf_ruleset       *ruleset;
647 	struct pf_rule          *rule;
648 	int                      rs_num;
649 
650 	ruleset = pf_find_ruleset(anchor);
651 	if (ruleset == NULL) {
652 		return NULL;
653 	}
654 	rs_num = pf_get_ruleset_number(rule_action);
655 	if (rs_num >= PF_RULESET_MAX) {
656 		return NULL;
657 	}
658 	if (active) {
659 		if (check_ticket && ticket !=
660 		    ruleset->rules[rs_num].active.ticket) {
661 			return NULL;
662 		}
663 		if (r_last) {
664 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
665 			    pf_rulequeue);
666 		} else {
667 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
668 		}
669 	} else {
670 		if (check_ticket && ticket !=
671 		    ruleset->rules[rs_num].inactive.ticket) {
672 			return NULL;
673 		}
674 		if (r_last) {
675 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
676 			    pf_rulequeue);
677 		} else {
678 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
679 		}
680 	}
681 	if (!r_last) {
682 		while ((rule != NULL) && (rule->nr != rule_number)) {
683 			rule = TAILQ_NEXT(rule, entries);
684 		}
685 	}
686 	if (rule == NULL) {
687 		return NULL;
688 	}
689 
690 	return &rule->rpool;
691 }
692 
693 static void
pf_mv_pool(struct pf_palist * poola,struct pf_palist * poolb)694 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
695 {
696 	struct pf_pooladdr      *mv_pool_pa;
697 
698 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
699 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
700 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
701 	}
702 }
703 
704 static void
pf_empty_pool(struct pf_palist * poola)705 pf_empty_pool(struct pf_palist *poola)
706 {
707 	struct pf_pooladdr      *empty_pool_pa;
708 
709 	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
710 		pfi_dynaddr_remove(&empty_pool_pa->addr);
711 		pf_tbladdr_remove(&empty_pool_pa->addr);
712 		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
713 		TAILQ_REMOVE(poola, empty_pool_pa, entries);
714 		pool_put(&pf_pooladdr_pl, empty_pool_pa);
715 	}
716 }
717 
/*
 * Unlink rule from rulequeue (when non-NULL) and free it once nothing
 * references it: the rule is only released when it has no states, no
 * source nodes, and is detached from any queue (tqe_prev == NULL).
 * Otherwise it lingers until a later call — with rulequeue == NULL —
 * finishes the teardown, which is why the table detach appears twice.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl) {
				pfr_detach_table(rule->overload_tbl);
			}
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as detached from any queue */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* still referenced (or still queued): defer the actual free */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL) {
		return;
	}
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* tables were not detached above; do it now */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl) {
			pfr_detach_table(rule->overload_tbl);
		}
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
761 
/*
 * Return the numeric id for tagname in head, allocating one if needed.
 * An existing entry just gets its refcount bumped.  Reserved names map
 * to fixed ids above DYNAMIC_TAG_ID_MAX and are kept at the head of the
 * list; dynamic ids are the lowest free value in 1..DYNAMIC_TAG_ID_MAX.
 * Returns 0 when the dynamic id space is exhausted.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname       *tag, *p = NULL;
	uint16_t                 new_tagid = 1;
	bool                     reserved_tag = false;

	/* existing entry: reuse its id and take another reference */
	TAILQ_FOREACH(tag, head, entries)
	if (strcmp(tagname, tag->name) == 0) {
		tag->ref++;
		return tag->tag;
	}

	/*
	 * check if it is a reserved tag.
	 */
	_CASSERT(RESERVED_TAG_ID_MIN > DYNAMIC_TAG_ID_MAX);
	for (int i = 0; i < NUM_RESERVED_TAGS; i++) {
		if (strncmp(tagname, pf_reserved_tag_table[i].tag_name,
		    PF_TAG_NAME_SIZE) == 0) {
			new_tagid = pf_reserved_tag_table[i].tag_id;
			reserved_tag = true;
			goto skip_dynamic_tag_alloc;
		}
	}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head)) {
		/* skip reserved tags */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag >= RESERVED_TAG_ID_MIN;
		    p = TAILQ_NEXT(p, entries)) {
			;
		}

		/* walk the sorted dynamic ids until a gap appears */
		for (; p != NULL && p->tag == new_tagid;
		    p = TAILQ_NEXT(p, entries)) {
			new_tagid = p->tag + 1;
		}
	}

	if (new_tagid > DYNAMIC_TAG_ID_MAX) {
		return 0;
	}

skip_dynamic_tag_alloc:
	/* allocate and fill new struct pf_tagname */
	tag = kalloc_type(struct pf_tagname, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;    /* zeroed allocation, so ref is now 1 */

	if (reserved_tag) { /* insert reserved tag at the head */
		TAILQ_INSERT_HEAD(head, tag, entries);
	} else if (p != NULL) { /* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	} else { /* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);
	}

	return tag->tag;
}
830 
831 static void
tag2tagname(struct pf_tags * head,u_int16_t tagid,char * p)832 tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
833 {
834 	struct pf_tagname       *tag;
835 
836 	TAILQ_FOREACH(tag, head, entries)
837 	if (tag->tag == tagid) {
838 		strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
839 		return;
840 	}
841 }
842 
843 static void
tag_unref(struct pf_tags * head,u_int16_t tag)844 tag_unref(struct pf_tags *head, u_int16_t tag)
845 {
846 	struct pf_tagname       *p, *next;
847 
848 	if (tag == 0) {
849 		return;
850 	}
851 
852 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
853 		next = TAILQ_NEXT(p, entries);
854 		if (tag == p->tag) {
855 			if (--p->ref == 0) {
856 				TAILQ_REMOVE(head, p, entries);
857 				kfree_type(struct pf_tagname, p);
858 			}
859 			break;
860 		}
861 	}
862 }
863 
/*
 * Public wrapper: look up or allocate an id for tagname in the global
 * pf_tags table (returns 0 when no id could be allocated).
 */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return tagname2tag(&pf_tags, tagname);
}
869 
/*
 * Public wrapper: copy the name for tagid from the global pf_tags
 * table into p (PF_TAG_NAME_SIZE bytes; untouched if id unknown).
 */
void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}
875 
876 void
pf_tag_ref(u_int16_t tag)877 pf_tag_ref(u_int16_t tag)
878 {
879 	struct pf_tagname *t;
880 
881 	TAILQ_FOREACH(t, &pf_tags, entries)
882 	if (t->tag == tag) {
883 		break;
884 	}
885 	if (t != NULL) {
886 		t->ref++;
887 	}
888 }
889 
/*
 * Public wrapper: drop one reference on tag in the global pf_tags
 * table, freeing the entry at refcount zero.
 */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
895 
/* Route labels are not supported on this platform; no-op stub. */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return 0;
}
902 
/* Route labels are not supported on this platform; no-op stub. */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
908 
/* Route labels are not supported on this platform; no-op stub. */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
914 
915 static int
pf_begin_rules(u_int32_t * ticket,int rs_num,const char * anchor)916 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
917 {
918 	struct pf_ruleset       *rs;
919 	struct pf_rule          *rule;
920 
921 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
922 		return EINVAL;
923 	}
924 	rs = pf_find_or_create_ruleset(anchor);
925 	if (rs == NULL) {
926 		return EINVAL;
927 	}
928 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
929 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
930 		rs->rules[rs_num].inactive.rcount--;
931 	}
932 	*ticket = ++rs->rules[rs_num].inactive.ticket;
933 	rs->rules[rs_num].inactive.open = 1;
934 	return 0;
935 }
936 
937 static int
pf_rollback_rules(u_int32_t ticket,int rs_num,char * anchor)938 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
939 {
940 	struct pf_ruleset       *rs;
941 	struct pf_rule          *rule;
942 
943 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
944 		return EINVAL;
945 	}
946 	rs = pf_find_ruleset(anchor);
947 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
948 	    rs->rules[rs_num].inactive.ticket != ticket) {
949 		return 0;
950 	}
951 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
952 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
953 		rs->rules[rs_num].inactive.rcount--;
954 	}
955 	rs->rules[rs_num].inactive.open = 0;
956 	return 0;
957 }
958 
/*
 * Helpers used by pf_hash_rule*() to fold rule fields into the
 * pfsync ruleset checksum; "ctx" must be an MD5_CTX * in scope at
 * the expansion site.
 */

/* Hash a fixed-size struct member as raw bytes (host byte order). */
#define PF_MD5_UPD(st, elm)                                             \
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

/* Hash a NUL-terminated string member (terminator excluded). */
#define PF_MD5_UPD_STR(st, elm)                                         \
	MD5Update(ctx, (u_int8_t *)(st)->elm, (unsigned int)strlen((st)->elm))

/* Hash a 32-bit member in network byte order; "stor" is scratch. */
#define PF_MD5_UPD_HTONL(st, elm, stor) do {                            \
	(stor) = htonl((st)->elm);                                      \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t));        \
} while (0)

/* Hash a 16-bit member in network byte order; "stor" is scratch. */
#define PF_MD5_UPD_HTONS(st, elm, stor) do {                            \
	(stor) = htons((st)->elm);                                      \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t));        \
} while (0)
974 
/*
 * Fold one rule address (src or dst) and its port information into
 * the in-progress ruleset MD5 checksum.  Only the fields meaningful
 * for the active address type are hashed, so rules that differ only
 * in unused union members produce the same digest.  The order of
 * MD5 updates is part of the checksum format — do not reorder.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/* Port ranges only apply to TCP/UDP rules. */
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
1011 
/*
 * Fold all match-relevant fields of "rule" into the ruleset MD5
 * checksum.  Multi-byte numeric fields are hashed in network byte
 * order (via the HTONS/HTONL helpers) so the digest is comparable
 * between hosts of different endianness.  The update order is part
 * of the checksum format — do not reorder.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;    /* scratch for 16-bit network-order values */
	u_int32_t y;    /* scratch for 32-bit network-order values */

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1050 
/*
 * Commit a pending rule transaction: atomically swap the staged
 * ("inactive") rule queue into the active slot for ruleset index
 * "rs_num" of "anchor", then free the previously active rules.
 * "ticket" must match the one issued by pf_begin_rules(), otherwise
 * EBUSY.  For the main ruleset the pfsync checksum is computed first
 * so peers can verify they run identical rulesets.
 * Must be called with pf_lock held.
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset       *rs;
	struct pf_rule          *rule, **old_array, *r;
	struct pf_rulequeue     *old_rules;
	int                      error;
	u_int32_t                old_rcount;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		return EINVAL;
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket) {
		return EBUSY;
	}

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0) {
			return error;
		}
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;

	/*
	 * Active rules flagged PFRULE_PFM count against the global
	 * pffwrules counter; credit them back before they are purged.
	 */
	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM) {
				pffwrules--;
			}
			r = TAILQ_NEXT(r, entries);
		}
	}


	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	/* Recompute skip steps for the freshly activated rule list. */
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
		pf_rm_rule(old_rules, rule);
	}
	if (rs->rules[rs_num].inactive.ptr_array) {
		_FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	}
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	/* Drop the ruleset if the commit left it completely empty. */
	pf_remove_if_empty_ruleset(rs);
	return 0;
}
1123 
/*
 * Sanitize a rule image that originated in user space ("src" is a
 * kernel-resident copy of the user buffer) into "dst":
 * - force NUL termination of every fixed-size name field, since user
 *   space cannot be trusted to have terminated them;
 * - record the calling credential (uid) and process id;
 * - clear all kernel-pointer fields so stale user-supplied values
 *   can never be dereferenced; list linkage and refcounts start empty.
 * Rules created through the PFM device minor are flagged PFRULE_PFM.
 */
static void
pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
    int minordev)
{
	bcopy(src, dst, sizeof(struct pf_rule));

	/* Defensive NUL termination of user-supplied strings. */
	dst->label[sizeof(dst->label) - 1] = '\0';
	dst->ifname[sizeof(dst->ifname) - 1] = '\0';
	dst->qname[sizeof(dst->qname) - 1] = '\0';
	dst->pqname[sizeof(dst->pqname) - 1] = '\0';
	dst->tagname[sizeof(dst->tagname) - 1] = '\0';
	dst->match_tagname[sizeof(dst->match_tagname) - 1] = '\0';
	dst->overload_tblname[sizeof(dst->overload_tblname) - 1] = '\0';
	dst->owner[sizeof(dst->owner) - 1] = '\0';

	/* Record who created the rule. */
	dst->cuid = kauth_cred_getuid(kauth_cred_get());
	dst->cpid = proc_getpid(p);

	/* Kernel pointers are resolved later; never trust user values. */
	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	/* initialize refcounting */
	dst->states = 0;
	dst->src_nodes = 0;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	if ((uint8_t)minordev == PFDEV_PFM) {
		dst->rule_flag |= PFRULE_PFM;
	}
}
1159 
1160 static void
pf_rule_copyout(struct pf_rule * src,struct pf_rule * dst)1161 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1162 {
1163 	bcopy(src, dst, sizeof(struct pf_rule));
1164 
1165 	dst->anchor = NULL;
1166 	dst->kif = NULL;
1167 	dst->overload_tbl = NULL;
1168 
1169 	dst->rpool.list.tqh_first = NULL;
1170 	dst->rpool.list.tqh_last = NULL;
1171 	dst->rpool.cur = NULL;
1172 
1173 	dst->entries.tqe_prev = NULL;
1174 	dst->entries.tqe_next = NULL;
1175 }
1176 
/*
 * Serialize kernel state "s" / state key "sk" into the export
 * representation "sp" (struct pfsync_state) handed to user space and
 * pfsync peers.  Kernel rule pointers are flattened to rule numbers
 * ((unsigned)-1 meaning "none") and absolute timestamps are converted
 * to relative seconds.
 */
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext_lan.addr = sk->ext_lan.addr;
	sp->ext_lan.xport = sk->ext_lan.xport;
	sp->ext_gwy.addr = sk->ext_gwy.addr;
	sp->ext_gwy.xport = sk->ext_gwy.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af_lan = sk->af_lan;
	sp->af_gwy = sk->af_gwy;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	/* Flatten rule pointers to rule numbers; -1 means "no rule". */
	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	/* Age (seconds since creation) rather than an absolute time. */
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node) {
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	}
	if (s->nat_src_node) {
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
	}

	/* Export expiry as seconds-from-now, clamped at zero. */
	if (sp->expire > secs) {
		sp->expire -= secs;
	} else {
		sp->expire = 0;
	}
}
1237 
/*
 * Inverse of pf_state_export(): populate state key "sk" and state
 * "s" from the wire representation "sp".  Rule pointers cannot be
 * reconstructed here, so the state is attached to the default rule;
 * packet/byte counters are reset and the expiry is rebased onto the
 * local clock.
 */
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext_lan.addr = sp->ext_lan.addr;
	sk->ext_lan.xport = sp->ext_lan.xport;
	sk->ext_gwy.addr = sp->ext_gwy.addr;
	sk->ext_gwy.xport = sp->ext_gwy.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af_lan = sp->af_lan;
	sk->af_gwy = sp->af_gwy;
	sk->direction = sp->direction;
	/* Flow hash is local; recompute rather than trust the wire. */
	sk->flowhash = pf_calc_state_key_flowhash(sk);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	/* Rule linkage cannot be restored; fall back to default rule. */
	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = pf_time_second();
	s->expire = pf_time_second();
	/*
	 * Rebase the remaining lifetime onto the local clock using the
	 * default-rule timeout for this state's timeout class.
	 */
	if (sp->expire > 0) {
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	}
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
1278 
1279 static void
pf_pooladdr_copyin(struct pf_pooladdr * src,struct pf_pooladdr * dst)1280 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1281 {
1282 	bcopy(src, dst, sizeof(struct pf_pooladdr));
1283 
1284 	dst->entries.tqe_prev = NULL;
1285 	dst->entries.tqe_next = NULL;
1286 	dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1287 	dst->kif = NULL;
1288 }
1289 
1290 static void
pf_pooladdr_copyout(struct pf_pooladdr * src,struct pf_pooladdr * dst)1291 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1292 {
1293 	bcopy(src, dst, sizeof(struct pf_pooladdr));
1294 
1295 	dst->entries.tqe_prev = NULL;
1296 	dst->entries.tqe_next = NULL;
1297 	dst->kif = NULL;
1298 }
1299 
/*
 * Compute the MD5 checksum over all inactive (about-to-commit) rules
 * of "rs" and store it in pf_status.pf_chksum so pfsync peers can
 * verify they run identical rulesets.  As a side effect, (re)builds
 * each ruleset's inactive ptr_array, indexed by rule number.
 * The scrub ruleset is excluded from the checksum.  In the visible
 * caller (pf_commit_rules) "rs" is always the main ruleset.
 * Returns 0 or ENOMEM.
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX                  ctx;
	struct pf_rule          *rule;
	int                      rs_cnt;
	u_int8_t                 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB) {
			continue;
		}

		/* Rebuild the ptr_array from scratch each time. */
		if (rs->rules[rs_cnt].inactive.ptr_array) {
			_FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		}
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    _MALLOC(sizeof(caddr_t) *
			    rs->rules[rs_cnt].inactive.rcount,
			    M_TEMP, M_WAITOK);

			if (!rs->rules[rs_cnt].inactive.ptr_array) {
				return ENOMEM;
			}
		}

		/* Hash every staged rule and index it by rule number. */
		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return 0;
}
1342 
/*
 * Bring pf online: mark it enabled/running, stamp the start time,
 * lazily seed the 64-bit state-id generator from the clock, and wake
 * the purge thread.  With Skywalk on macOS, rule compatibility state
 * is (re)published to the netfilter event mechanism.
 * Caller must hold pf_lock; pf must currently be disabled.
 */
static void
pf_start(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	/* Seed the state-id generator once, on first enable. */
	if (pf_status.stateid == 0) {
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
	net_filter_event_mark(NET_FILTER_EVENT_PF,
	    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
1364 
/*
 * Take pf offline: clear the running/enabled flags, stamp the stop
 * time and wake the purge thread.  With Skywalk on macOS, rule
 * compatibility state is (re)published to the netfilter event
 * mechanism.  Caller must hold pf_lock; pf must currently be enabled.
 */
static void
pf_stop(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
	net_filter_event_mark(NET_FILTER_EVENT_PF,
	    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1382 
/*
 * Character-device ioctl entry point for the pf control device.
 *
 * Access control, in order:
 *   1. the caller must be superuser (EPERM otherwise);
 *   2. at securelevel > 1 only the read-mostly commands listed below
 *      (plus DUMMY-flagged table operations) are permitted;
 *   3. when opened without FWRITE only non-mutating commands are
 *      permitted; DUMMY-flagged table operations are upgraded to
 *      FWRITE, and DIOCGETRULE with PF_GET_CLR_CNTR is refused since
 *      it clears counters.
 *
 * Every command then runs with the pf perimeter rwlock held
 * (exclusive iff FWRITE) and the pf mutex held.  Most commands
 * delegate to a pfioctl_ioc_*() helper through the PFIOC*_STRUCT_*
 * copy-in/copy-out macros, which handle 32/64-bit user processes.
 */
static int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
#pragma unused(dev)
	/* NOTE: p64 is referenced by the PFIOCX_STRUCT_* macros below. */
	int p64 = proc_is64bit(p);
	int error = 0;
	int minordev = minor(dev);

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}

	/* XXX keep in sync with switch() below */
	if (securelevel > 1) {
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			/* bcopy: "addr" may be unaligned. */
			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				break; /* dummy operation ok */
			}
			return EPERM;
		}
		default:
			return EPERM;
		}
	}

	/* Read-only open: reject mutating commands. */
	if (!(flags & FWRITE)) {
		switch (cmd) {
		case DIOCSTART:
		case DIOCSTARTREF:
		case DIOCSTOP:
		case DIOCSTOPREF:
		case DIOCGETSTARTERS:
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return EACCES;
		}
		case DIOCGETRULE: {
			u_int32_t action;

			bcopy(&((struct pfioc_rule *)(void *)addr)->action,
			    &action, sizeof(action));

			/* PF_GET_CLR_CNTR mutates counters; needs FWRITE. */
			if (action == PF_GET_CLR_CNTR) {
				return EACCES;
			}
			break;
		}
		default:
			return EACCES;
		}
	}

	/* Writers take the perimeter exclusively; readers share it. */
	if (flags & FWRITE) {
		lck_rw_lock_exclusive(&pf_perim_lock);
	} else {
		lck_rw_lock_shared(&pf_perim_lock);
	}

	lck_mtx_lock(&pf_lock);

	switch (cmd) {
	case DIOCSTART:
		if (pf_status.running) {
			/*
			 * Increment the reference for a simple -e enable, so
			 * that even if other processes drop their references,
			 * pf will still be available to processes that turned
			 * it on without taking a reference
			 */
			if (nr_tokens == pf_enabled_ref_count) {
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			}
			error = EEXIST;
		} else if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			pf_start();
			pf_enabled_ref_count++;
			VERIFY(pf_enabled_ref_count != 0);
		}
		break;

	case DIOCSTARTREF:              /* u_int64_t */
		if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			u_int64_t token;

			/* small enough to be on stack */
			if ((token = generate_token(p)) != 0) {
				if (pf_is_enabled == 0) {
					pf_start();
				}
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			} else {
				error = ENOMEM;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: unable to generate token\n"));
			}
			/* Hand the (possibly zero) token back to the caller. */
			bcopy(&token, addr, sizeof(token));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			/* Hard stop: drop every outstanding reference. */
			pf_stop();
			pf_enabled_ref_count = 0;
			invalidate_all_tokens();
		}
		break;

	case DIOCSTOPREF:               /* struct pfioc_remove_token */
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			struct pfioc_remove_token pfrt;

			/* small enough to be on stack */
			bcopy(addr, &pfrt, sizeof(pfrt));
			if ((error = remove_token(&pfrt)) == 0) {
				VERIFY(pf_enabled_ref_count != 0);
				pf_enabled_ref_count--;
				/* return currently held references */
				pfrt.refcount = pf_enabled_ref_count;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: enabled refcount decremented\n"));
			} else {
				error = EINVAL;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: token mismatch\n"));
			}
			bcopy(&pfrt, addr, sizeof(pfrt));

			/* Last reference gone: disable pf. */
			if (error == 0 && pf_enabled_ref_count == 0) {
				pf_stop();
			}
		}
		break;

	case DIOCGETSTARTERS: {         /* struct pfioc_tokens */
		PFIOCX_STRUCT_DECL(pfioc_tokens);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break; );
		error = pfioctl_ioc_tokens(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_tokens),
		    PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
		PFIOCX_STRUCT_END(pfioc_tokens, addr);
		break;
	}

	case DIOCADDRULE:               /* struct pfioc_rule */
	case DIOCGETRULES:              /* struct pfioc_rule */
	case DIOCGETRULE:               /* struct pfioc_rule */
	case DIOCCHANGERULE:            /* struct pfioc_rule */
	case DIOCINSERTRULE:            /* struct pfioc_rule */
	case DIOCDELETERULE: {          /* struct pfioc_rule */
		struct pfioc_rule *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; );
		error = pfioctl_ioc_rule(cmd, minordev, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCCLRSTATES:             /* struct pfioc_state_kill */
	case DIOCKILLSTATES: {          /* struct pfioc_state_kill */
		struct pfioc_state_kill *psk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break; );
		error = pfioctl_ioc_state_kill(cmd, psk, p);
		PFIOC_STRUCT_END(psk, addr);
		break;
	}

	case DIOCADDSTATE:              /* struct pfioc_state */
	case DIOCGETSTATE: {            /* struct pfioc_state */
		struct pfioc_state *ps = NULL;

		PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break; );
		error = pfioctl_ioc_state(cmd, ps, p);
		PFIOC_STRUCT_END(ps, addr);
		break;
	}

	case DIOCGETSTATES: {           /* struct pfioc_states */
		PFIOCX_STRUCT_DECL(pfioc_states);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break; );
		error = pfioctl_ioc_states(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_states),
		    PFIOCX_STRUCT_ADDR64(pfioc_states), p);
		PFIOCX_STRUCT_END(pfioc_states, addr);
		break;
	}

	case DIOCGETSTATUS: {           /* struct pf_status */
		struct pf_status *s = NULL;

		/*
		 * The source here is the global pf_status itself; the
		 * snapshot is refreshed in place before copyout.
		 */
		PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break; );
		pfi_update_status(s->ifname, s);
		PFIOC_STRUCT_END(s, addr);
		break;
	}

	case DIOCSETSTATUSIF: {         /* struct pfioc_if */
		struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;

		/* OK for unaligned accesses */
		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		/* Reset all counters; keep the configured status ifname. */
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = pf_calendar_time_second();
		if (*pf_status.ifname) {
			pfi_update_status(pf_status.ifname, NULL);
		}
		break;
	}

	case DIOCNATLOOK: {             /* struct pfioc_natlook */
		struct pfioc_natlook *pnl = NULL;

		PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break; );
		error = pfioctl_ioc_natlook(cmd, pnl, p);
		PFIOC_STRUCT_END(pnl, addr);
		break;
	}

	case DIOCSETTIMEOUT:            /* struct pfioc_tm */
	case DIOCGETTIMEOUT: {          /* struct pfioc_tm */
		struct pfioc_tm pt;

		/* small enough to be on stack */
		bcopy(addr, &pt, sizeof(pt));
		error = pfioctl_ioc_tm(cmd, &pt, p);
		bcopy(&pt, addr, sizeof(pt));
		break;
	}

	case DIOCGETLIMIT:              /* struct pfioc_limit */
	case DIOCSETLIMIT: {            /* struct pfioc_limit */
		struct pfioc_limit pl;

		/* small enough to be on stack */
		bcopy(addr, &pl, sizeof(pl));
		error = pfioctl_ioc_limit(cmd, &pl, p);
		bcopy(&pl, addr, sizeof(pl));
		break;
	}

	case DIOCSETDEBUG: {            /* u_int32_t */
		bcopy(addr, &pf_status.debug, sizeof(u_int32_t));
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset       *ruleset = &pf_main_ruleset;
		struct pf_rule          *rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCGIFSPEED: {
		struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
		struct pf_ifspeed ps;
		struct ifnet *ifp;
		u_int64_t baudrate;

		if (psp->ifname[0] != '\0') {
			/* Can we completely trust user-land? */
			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
			ps.ifname[IFNAMSIZ - 1] = '\0';
			ifp = ifunit(ps.ifname);
			if (ifp != NULL) {
				baudrate = ifp->if_output_bw.max_bw;
				bcopy(&baudrate, &psp->baudrate,
				    sizeof(baudrate));
			} else {
				error = EINVAL;
			}
		} else {
			error = EINVAL;
		}
		break;
	}

	case DIOCBEGINADDRS:            /* struct pfioc_pooladdr */
	case DIOCADDADDR:               /* struct pfioc_pooladdr */
	case DIOCGETADDRS:              /* struct pfioc_pooladdr */
	case DIOCGETADDR:               /* struct pfioc_pooladdr */
	case DIOCCHANGEADDR: {          /* struct pfioc_pooladdr */
		struct pfioc_pooladdr *pp = NULL;

		PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break; )
		error = pfioctl_ioc_pooladdr(cmd, pp, p);
		PFIOC_STRUCT_END(pp, addr);
		break;
	}

	case DIOCGETRULESETS:           /* struct pfioc_ruleset */
	case DIOCGETRULESET: {          /* struct pfioc_ruleset */
		struct pfioc_ruleset *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; );
		error = pfioctl_ioc_ruleset(cmd, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCRCLRTABLES:            /* struct pfioc_table */
	case DIOCRADDTABLES:            /* struct pfioc_table */
	case DIOCRDELTABLES:            /* struct pfioc_table */
	case DIOCRGETTABLES:            /* struct pfioc_table */
	case DIOCRGETTSTATS:            /* struct pfioc_table */
	case DIOCRCLRTSTATS:            /* struct pfioc_table */
	case DIOCRSETTFLAGS:            /* struct pfioc_table */
	case DIOCRCLRADDRS:             /* struct pfioc_table */
	case DIOCRADDADDRS:             /* struct pfioc_table */
	case DIOCRDELADDRS:             /* struct pfioc_table */
	case DIOCRSETADDRS:             /* struct pfioc_table */
	case DIOCRGETADDRS:             /* struct pfioc_table */
	case DIOCRGETASTATS:            /* struct pfioc_table */
	case DIOCRCLRASTATS:            /* struct pfioc_table */
	case DIOCRTSTADDRS:             /* struct pfioc_table */
	case DIOCRINADEFINE: {          /* struct pfioc_table */
		PFIOCX_STRUCT_DECL(pfioc_table);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break; );
		error = pfioctl_ioc_table(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_table),
		    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
		PFIOCX_STRUCT_END(pfioc_table, addr);
		break;
	}

	case DIOCOSFPADD:               /* struct pf_osfp_ioctl */
	case DIOCOSFPGET: {             /* struct pf_osfp_ioctl */
		struct pf_osfp_ioctl *io = NULL;

		PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break; );
		if (cmd == DIOCOSFPADD) {
			error = pf_osfp_add(io);
		} else {
			VERIFY(cmd == DIOCOSFPGET);
			error = pf_osfp_get(io);
		}
		PFIOC_STRUCT_END(io, addr);
		break;
	}

	case DIOCXBEGIN:                /* struct pfioc_trans */
	case DIOCXROLLBACK:             /* struct pfioc_trans */
	case DIOCXCOMMIT: {             /* struct pfioc_trans */
		PFIOCX_STRUCT_DECL(pfioc_trans);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break; );
		error = pfioctl_ioc_trans(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_trans),
		    PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
		PFIOCX_STRUCT_END(pfioc_trans, addr);
		break;
	}

	case DIOCGETSRCNODES: {         /* struct pfioc_src_nodes */
		PFIOCX_STRUCT_DECL(pfioc_src_nodes);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes,
		    error = ENOMEM; break; );
		error = pfioctl_ioc_src_nodes(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
		    PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
		PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
		break;
	}

	case DIOCCLRSRCNODES: {
		struct pf_src_node      *n;
		struct pf_state         *state;

		/* Detach every state from its source nodes ... */
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		/* ... then mark all source nodes expired and purge them. */
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {        /* struct pfioc_src_node_kill */
		struct pfioc_src_node_kill *psnk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break; );
		error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
		PFIOC_STRUCT_END(psnk, addr);
		break;
	}

	case DIOCSETHOSTID: {           /* u_int32_t */
		u_int32_t hid;

		/* small enough to be on stack */
		bcopy(addr, &hid, sizeof(hid));
		/* Host id 0 requests a random one. */
		if (hid == 0) {
			pf_status.hostid = random();
		} else {
			pf_status.hostid = hid;
		}
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES:            /* struct pfioc_iface */
	case DIOCSETIFFLAG:             /* struct pfioc_iface */
	case DIOCCLRIFFLAG: {           /* struct pfioc_iface */
		PFIOCX_STRUCT_DECL(pfioc_iface);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break; );
		error = pfioctl_ioc_iface(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_iface),
		    PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
		PFIOCX_STRUCT_END(pfioc_iface, addr);
		break;
	}

	default:
		error = ENODEV;
		break;
	}

	lck_mtx_unlock(&pf_lock);
	lck_rw_done(&pf_perim_lock);

	return error;
}
1934 
/*
 * Handler for the DIOCR* table ioctls (struct pfioc_table).
 *
 * The ioctl payload embeds a user pointer (pfrio_buffer), so it has two
 * layouts depending on whether the calling process is 32-bit or 64-bit;
 * exactly one of io32/io64 is consulted, selected by proc_is64bit(p).
 * The two switches below are otherwise identical.  Each case first
 * verifies pfrio_esize (the element size userland believes it is
 * passing) against the kernel's idea of the element type and returns
 * ENODEV on mismatch, guarding against user/kernel structure skew.
 * Every pfr_* call is flagged PFR_FLAG_USERIOCTL to mark it as
 * user-initiated.
 *
 * Returns 0 on success or an errno from the pfr_* backend.
 */
static int
pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
    struct pfioc_table_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	if (!p64) {
		goto struct32;
	}

#ifdef __LP64__
	/*
	 * 64-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
		    io64->pfrio_setflag, io64->pfrio_clrflag,
		    &io64->pfrio_nchange, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
		    &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
		    io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		/* caller (pfioctl) only dispatches known DIOCR* commands */
		VERIFY(0);
		/* NOTREACHED */
	}
	goto done;
#else
#pragma unused(io64)
#endif /* __LP64__ */

struct32:
	/*
	 * 32-bit structure processing (mirror of the 64-bit switch above)
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
		    io32->pfrio_setflag, io32->pfrio_clrflag,
		    &io32->pfrio_nchange, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
		    &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
		    io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		/* caller (pfioctl) only dispatches known DIOCR* commands */
		VERIFY(0);
		/* NOTREACHED */
	}
#ifdef __LP64__
done:
#endif
	return error;
}
2304 
/*
 * Handler for the token ioctls (currently only DIOCGETSTARTERS), which
 * report the tokens handed out to processes that enabled PF.
 *
 * Two-phase sizing protocol: a caller passing size == 0 is told how many
 * bytes a full snapshot requires; a caller passing a non-zero size gets
 * as many whole pfioc_token records as fit in that many bytes, and size
 * is rewritten to the number of bytes actually copied out.
 *
 * tok32/tok64 are the 32-bit and 64-bit user views of the request; only
 * the one matching the caller's ABI (proc_is64bit(p)) is used.
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		size = sizeof(struct pfioc_token) * nr_tokens;
		/* reject a multiplication overflow in the size computation */
		if (size / nr_tokens != sizeof(struct pfioc_token)) {
			os_log_error(OS_LOG_DEFAULT, "%s: size overflows", __func__);
			error = ERANGE;
			break;
		}
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			/* sizing probe: report the required buffer size */
			if (p64) {
				tok64->size = size;
			} else {
				tok32->size = size;
			}
			break;
		}

#ifdef __LP64__
		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
#else
		token_buf = tok32->pgt_buf;
#endif
		tokens = (struct pfioc_token *)kalloc_data(size, Z_WAITOK | Z_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		/* stage one pfioc_token record per kernel token */
		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof(*tokens)) {
				break;    /* no more buffer space left */
			}
			t = (struct pfioc_token *)(void *)ptr;
			t->token_value  = entry->token.token_value;
			t->timestamp    = entry->token.timestamp;
			t->pid          = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof(struct pfioc_token);

			cnt -= sizeof(struct pfioc_token);
		}

		/* ocnt - cnt is the number of bytes staged above */
		if (cnt < ocnt) {
			error = copyout(tokens, token_buf, ocnt - cnt);
		}

		if (p64) {
			tok64->size = ocnt - cnt;
		} else {
			tok32->size = ocnt - cnt;
		}

		kfree_data(tokens, size);
		break;
	}

	default:
		/* caller (pfioctl) only dispatches known token commands */
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
2390 
2391 static void
pf_expire_states_and_src_nodes(struct pf_rule * rule)2392 pf_expire_states_and_src_nodes(struct pf_rule *rule)
2393 {
2394 	struct pf_state         *state;
2395 	struct pf_src_node      *sn;
2396 	int                      killed = 0;
2397 
2398 	/* expire the states */
2399 	state = TAILQ_FIRST(&state_list);
2400 	while (state) {
2401 		if (state->rule.ptr == rule) {
2402 			state->timeout = PFTM_PURGE;
2403 		}
2404 		state = TAILQ_NEXT(state, entry_list);
2405 	}
2406 	pf_purge_expired_states(pf_status.states);
2407 
2408 	/* expire the src_nodes */
2409 	RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2410 		if (sn->rule.ptr != rule) {
2411 			continue;
2412 		}
2413 		if (sn->states != 0) {
2414 			RB_FOREACH(state, pf_state_tree_id,
2415 			    &tree_id) {
2416 				if (state->src_node == sn) {
2417 					state->src_node = NULL;
2418 				}
2419 				if (state->nat_src_node == sn) {
2420 					state->nat_src_node = NULL;
2421 				}
2422 			}
2423 			sn->states = 0;
2424 		}
2425 		sn->expire = 1;
2426 		killed++;
2427 	}
2428 	if (killed) {
2429 		pf_purge_expired_src_nodes();
2430 	}
2431 }
2432 
2433 static void
pf_delete_rule_from_ruleset(struct pf_ruleset * ruleset,int rs_num,struct pf_rule * rule)2434 pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2435     struct pf_rule *rule)
2436 {
2437 	struct pf_rule *r;
2438 	int nr = 0;
2439 
2440 	pf_expire_states_and_src_nodes(rule);
2441 
2442 	pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2443 	if (ruleset->rules[rs_num].active.rcount-- == 0) {
2444 		panic("%s: rcount value broken!", __func__);
2445 	}
2446 	r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2447 
2448 	while (r) {
2449 		r->nr = nr++;
2450 		r = TAILQ_NEXT(r, entries);
2451 	}
2452 }
2453 
2454 
2455 static void
pf_ruleset_cleanup(struct pf_ruleset * ruleset,int rs)2456 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2457 {
2458 	pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2459 	ruleset->rules[rs].active.ticket =
2460 	    ++ruleset->rules[rs].inactive.ticket;
2461 }
2462 
2463 /*
2464  * req_dev encodes the PF interface. Currently, possible values are
2465  * 0 or PFRULE_PFM
2466  */
/*
 * Delete the rule whose ticket matches pr->rule.ticket from the anchor
 * named in pr.  If the deleted rule leaves an ownerless anchor empty,
 * the anchor rule in the parent ruleset is deleted as well, repeating
 * up the anchor chain (the `goto delete_rule' loop below).
 *
 * req_dev (0 or PFRULE_PFM) encodes which PF device issued the request;
 * a rule may only be removed by the device that installed it.
 *
 * Returns 0 on success, ENOENT if no rule carries the ticket, EACCES on
 * an owner or device mismatch, or the error from ruleset lookup.
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
	struct pf_ruleset       *ruleset;
	struct pf_rule          *rule = NULL;
	int                      is_anchor;
	int                      error;
	int                      i;

	is_anchor = (pr->anchor_call[0] != '\0');
	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
	    pr->rule.owner, is_anchor, &error)) == NULL) {
		return error;
	}

	/* scan every ruleset type for the matching ticket */
	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		return ENOENT;
	} else {
		/* undo the loop's final increment: i is the matching slot */
		i--;
	}

	if (strcmp(rule->owner, pr->rule.owner)) {
		return EACCES;
	}

delete_rule:
	/*
	 * An ownerless, non-main anchor whose slot would become empty:
	 * delete the found rule, then repeat against its parent anchor
	 * rule.  Otherwise fall to the else branch and delete in place.
	 */
	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
		/* set rule & ruleset to parent and repeat */
		struct pf_rule *delete_rule = rule;
		struct pf_ruleset *delete_ruleset = ruleset;

#define parent_ruleset          ruleset->anchor->parent->ruleset
		if (ruleset->anchor->parent == NULL) {
			ruleset = &pf_main_ruleset;
		} else {
			ruleset = &parent_ruleset;
		}

		/* locate the parent's rule that points at this anchor */
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule &&
		    (rule->anchor != delete_ruleset->anchor)) {
			rule = TAILQ_NEXT(rule, entries);
		}
		if (rule == NULL) {
			panic("%s: rule not found!", __func__);
		}

		/*
		 * if request device != rule's device, bail :
		 * with error if ticket matches;
		 * without error if ticket doesn't match (i.e. its just cleanup)
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			if (rule->ticket != pr->rule.ticket) {
				return 0;
			} else {
				return EACCES;
			}
		}

		if (delete_rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}

		pf_delete_rule_from_ruleset(delete_ruleset,
		    i, delete_rule);
		delete_ruleset->rules[i].active.ticket =
		    ++delete_ruleset->rules[i].inactive.ticket;
		goto delete_rule;
	} else {
		/*
		 * process deleting rule only if device that added the
		 * rule matches device that issued the request
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			return EACCES;
		}
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}
		pf_delete_rule_from_ruleset(ruleset, i,
		    rule);
		pf_ruleset_cleanup(ruleset, i);
	}

	return 0;
}
2562 
2563 /*
2564  * req_dev encodes the PF interface. Currently, possible values are
2565  * 0 or PFRULE_PFM
2566  */
/*
 * Delete every rule belonging to `owner' across all ruleset types,
 * descending into anchors whose anchor rule is owned by `owner' or is
 * unowned, and stepping back out when an anchor's list is exhausted.
 *
 * req_dev (0 or PFRULE_PFM) encodes which PF device issued the request;
 * only rules installed by the same device are considered.
 *
 * `deleted' tracks whether the current ruleset needs a
 * pf_ruleset_cleanup() (skip steps + tickets) before moving on.
 */
static void
pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
{
	struct pf_ruleset       *ruleset;
	struct pf_rule          *rule, *next;
	int                      deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			next = TAILQ_NEXT(rule, entries);
			/*
			 * process deleting rule only if device that added the
			 * rule matches device that issued the request
			 */
			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
				rule = next;
				continue;
			}
			if (rule->anchor) {
				if (((strcmp(rule->owner, owner)) == 0) ||
				    ((strcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						/* finish pending cleanup before descending */
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						/* empty anchor: delete the anchor rule itself */
						if (rule->rule_flag &
						    PFRULE_PFM) {
							pffwrules--;
						}
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else {
					rule = next;
				}
			} else {
				if (((strcmp(rule->owner, owner)) == 0)) {
					/* delete rule */
					if (rule->rule_flag & PFRULE_PFM) {
						pffwrules--;
					}
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			/* end of the current list: clean up, then pop out of anchor */
			if (rule == NULL) {
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset) {
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
				}
			}
		}
	}
}
2637 
2638 static void
pf_deleterule_anchor_step_out(struct pf_ruleset ** ruleset_ptr,int rs,struct pf_rule ** rule_ptr)2639 pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2640     int rs, struct pf_rule **rule_ptr)
2641 {
2642 	struct pf_ruleset *ruleset = *ruleset_ptr;
2643 	struct pf_rule *rule = *rule_ptr;
2644 
2645 	/* step out of anchor */
2646 	struct pf_ruleset *rs_copy = ruleset;
2647 	ruleset = ruleset->anchor->parent?
2648 	    &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2649 
2650 	rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2651 	while (rule && (rule->anchor != rs_copy->anchor)) {
2652 		rule = TAILQ_NEXT(rule, entries);
2653 	}
2654 	if (rule == NULL) {
2655 		panic("%s: parent rule of anchor not found!", __func__);
2656 	}
2657 	if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2658 		rule = TAILQ_NEXT(rule, entries);
2659 	}
2660 
2661 	*ruleset_ptr = ruleset;
2662 	*rule_ptr = rule;
2663 }
2664 
2665 static void
pf_addrwrap_setup(struct pf_addr_wrap * aw)2666 pf_addrwrap_setup(struct pf_addr_wrap *aw)
2667 {
2668 	VERIFY(aw);
2669 	bzero(&aw->p, sizeof aw->p);
2670 }
2671 
/*
 * Validate and finish initializing `rule' (freshly copied in from
 * userland) against `ruleset': resolve the bound interface, tags,
 * route labels, dynamic/table addresses, anchor call, overload table,
 * and attach the address pool staged in pf_pabuf.
 *
 * Errors are accumulated into `error' rather than returned immediately
 * so that all references taken along the way are released in one place
 * via pf_rm_rule() (the early pool_put() is the one exception: nothing
 * has been attached to the rule yet at that point).
 *
 * Returns 0 on success with the rule's counters zeroed, or an errno,
 * in which case the rule has been freed.
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset)
{
	struct pf_pooladdr      *apa;
	int                      error = 0;

	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			/* nothing attached yet; a bare free is safe */
			pool_put(&pf_rule_pl, rule);
			return EINVAL;
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
	if (rule->tagname[0]) {
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) {
			error = EBUSY;
		}
	}
	if (rule->match_tagname[0]) {
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0) {
			error = EBUSY;
		}
	}
	/* route-to options require an explicit direction */
	if (rule->rt && !rule->direction) {
		error = EINVAL;
	}
#if PFLOG
	if (!rule->log) {
		rule->logif = 0;
	}
	if (rule->logif >= PFLOGIFS_MAX) {
		error = EINVAL;
	}
#endif /* PFLOG */
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr)) {
		error = EBUSY;
	}
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) {
		error = EINVAL;
	}
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->src.addr)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) {
		error = EINVAL;
	}
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) {
		error = EINVAL;
	}
	/* resolve table addresses in the staged pool entries too */
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
	if (pf_tbladdr_setup(ruleset, &apa->addr)) {
		error = EINVAL;
	}

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL) {
			error = EINVAL;
		} else {
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
		}
	}

	/* adopt the address pool built up by prior DIOCADDADDR calls */
	pf_mv_pool(&pf_pabuf, &rule->rpool.list);

	/* translation rules (outside anchors) and routing need a pool */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
	    rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL)) {
		error = EINVAL;
	}

	if (error) {
		pf_rm_rule(NULL, rule);
		return error;
	}
	/* For a NAT64 rule the rule's address family is AF_INET6 whereas
	 * the address pool's family will be AF_INET
	 */
	rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET: rule->af;
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return 0;
}
2769 
2770 static int
pfioctl_ioc_rule(u_long cmd,int minordev,struct pfioc_rule * pr,struct proc * p)2771 pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
2772 {
2773 	int error = 0;
2774 	u_int32_t req_dev = 0;
2775 
2776 	switch (cmd) {
2777 	case DIOCADDRULE: {
2778 		struct pf_ruleset       *ruleset;
2779 		struct pf_rule          *rule, *tail;
2780 		int                     rs_num;
2781 
2782 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2783 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2784 		ruleset = pf_find_ruleset(pr->anchor);
2785 		if (ruleset == NULL) {
2786 			error = EINVAL;
2787 			break;
2788 		}
2789 		rs_num = pf_get_ruleset_number(pr->rule.action);
2790 		if (rs_num >= PF_RULESET_MAX) {
2791 			error = EINVAL;
2792 			break;
2793 		}
2794 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2795 			error = EINVAL;
2796 			break;
2797 		}
2798 		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
2799 			error = EBUSY;
2800 			break;
2801 		}
2802 		if (pr->pool_ticket != ticket_pabuf) {
2803 			error = EBUSY;
2804 			break;
2805 		}
2806 		rule = pool_get(&pf_rule_pl, PR_WAITOK);
2807 		if (rule == NULL) {
2808 			error = ENOMEM;
2809 			break;
2810 		}
2811 		pf_rule_copyin(&pr->rule, rule, p, minordev);
2812 #if !INET
2813 		if (rule->af == AF_INET) {
2814 			pool_put(&pf_rule_pl, rule);
2815 			error = EAFNOSUPPORT;
2816 			break;
2817 		}
2818 #endif /* INET */
2819 		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2820 		    pf_rulequeue);
2821 		if (tail) {
2822 			rule->nr = tail->nr + 1;
2823 		} else {
2824 			rule->nr = 0;
2825 		}
2826 
2827 		if ((error = pf_rule_setup(pr, rule, ruleset))) {
2828 			break;
2829 		}
2830 
2831 		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2832 		    rule, entries);
2833 		ruleset->rules[rs_num].inactive.rcount++;
2834 		if (rule->rule_flag & PFRULE_PFM) {
2835 			pffwrules++;
2836 		}
2837 
2838 		if (rule->action == PF_NAT64) {
2839 			atomic_add_16(&pf_nat64_configured, 1);
2840 		}
2841 
2842 		if (pr->anchor_call[0] == '\0') {
2843 			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
2844 			if (rule->rule_flag & PFRULE_PFM) {
2845 				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
2846 			}
2847 		}
2848 
2849 #if DUMMYNET
2850 		if (rule->action == PF_DUMMYNET) {
2851 			struct dummynet_event dn_event;
2852 			uint32_t direction = DN_INOUT;
2853 			bzero(&dn_event, sizeof(dn_event));
2854 
2855 			dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;
2856 
2857 			if (rule->direction == PF_IN) {
2858 				direction = DN_IN;
2859 			} else if (rule->direction == PF_OUT) {
2860 				direction = DN_OUT;
2861 			}
2862 
2863 			dn_event.dn_event_rule_config.dir = direction;
2864 			dn_event.dn_event_rule_config.af = rule->af;
2865 			dn_event.dn_event_rule_config.proto = rule->proto;
2866 			dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
2867 			dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
2868 			strlcpy(dn_event.dn_event_rule_config.ifname, rule->ifname,
2869 			    sizeof(dn_event.dn_event_rule_config.ifname));
2870 
2871 			dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2872 		}
2873 #endif
2874 		break;
2875 	}
2876 
2877 	case DIOCGETRULES: {
2878 		struct pf_ruleset       *ruleset;
2879 		struct pf_rule          *tail;
2880 		int                      rs_num;
2881 
2882 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2883 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2884 		ruleset = pf_find_ruleset(pr->anchor);
2885 		if (ruleset == NULL) {
2886 			error = EINVAL;
2887 			break;
2888 		}
2889 		rs_num = pf_get_ruleset_number(pr->rule.action);
2890 		if (rs_num >= PF_RULESET_MAX) {
2891 			error = EINVAL;
2892 			break;
2893 		}
2894 		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2895 		    pf_rulequeue);
2896 		if (tail) {
2897 			pr->nr = tail->nr + 1;
2898 		} else {
2899 			pr->nr = 0;
2900 		}
2901 		pr->ticket = ruleset->rules[rs_num].active.ticket;
2902 		break;
2903 	}
2904 
2905 	case DIOCGETRULE: {
2906 		struct pf_ruleset       *ruleset;
2907 		struct pf_rule          *rule;
2908 		int                      rs_num, i;
2909 
2910 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2911 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2912 		ruleset = pf_find_ruleset(pr->anchor);
2913 		if (ruleset == NULL) {
2914 			error = EINVAL;
2915 			break;
2916 		}
2917 		rs_num = pf_get_ruleset_number(pr->rule.action);
2918 		if (rs_num >= PF_RULESET_MAX) {
2919 			error = EINVAL;
2920 			break;
2921 		}
2922 		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
2923 			error = EBUSY;
2924 			break;
2925 		}
2926 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2927 		while ((rule != NULL) && (rule->nr != pr->nr)) {
2928 			rule = TAILQ_NEXT(rule, entries);
2929 		}
2930 		if (rule == NULL) {
2931 			error = EBUSY;
2932 			break;
2933 		}
2934 		pf_rule_copyout(rule, &pr->rule);
2935 		if (pf_anchor_copyout(ruleset, rule, pr)) {
2936 			error = EBUSY;
2937 			break;
2938 		}
2939 		pfi_dynaddr_copyout(&pr->rule.src.addr);
2940 		pfi_dynaddr_copyout(&pr->rule.dst.addr);
2941 		pf_tbladdr_copyout(&pr->rule.src.addr);
2942 		pf_tbladdr_copyout(&pr->rule.dst.addr);
2943 		pf_rtlabel_copyout(&pr->rule.src.addr);
2944 		pf_rtlabel_copyout(&pr->rule.dst.addr);
2945 		for (i = 0; i < PF_SKIP_COUNT; ++i) {
2946 			if (rule->skip[i].ptr == NULL) {
2947 				pr->rule.skip[i].nr = -1;
2948 			} else {
2949 				pr->rule.skip[i].nr =
2950 				    rule->skip[i].ptr->nr;
2951 			}
2952 		}
2953 
2954 		if (pr->action == PF_GET_CLR_CNTR) {
2955 			rule->evaluations = 0;
2956 			rule->packets[0] = rule->packets[1] = 0;
2957 			rule->bytes[0] = rule->bytes[1] = 0;
2958 		}
2959 		break;
2960 	}
2961 
2962 	case DIOCCHANGERULE: {
2963 		struct pfioc_rule       *pcr = pr;
2964 		struct pf_ruleset       *ruleset;
2965 		struct pf_rule          *oldrule = NULL, *newrule = NULL;
2966 		struct pf_pooladdr      *pa;
2967 		u_int32_t                nr = 0;
2968 		int                      rs_num;
2969 
2970 		if (!(pcr->action == PF_CHANGE_REMOVE ||
2971 		    pcr->action == PF_CHANGE_GET_TICKET) &&
2972 		    pcr->pool_ticket != ticket_pabuf) {
2973 			error = EBUSY;
2974 			break;
2975 		}
2976 
2977 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
2978 		    pcr->action > PF_CHANGE_GET_TICKET) {
2979 			error = EINVAL;
2980 			break;
2981 		}
2982 		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
2983 		pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0';
2984 		ruleset = pf_find_ruleset(pcr->anchor);
2985 		if (ruleset == NULL) {
2986 			error = EINVAL;
2987 			break;
2988 		}
2989 		rs_num = pf_get_ruleset_number(pcr->rule.action);
2990 		if (rs_num >= PF_RULESET_MAX) {
2991 			error = EINVAL;
2992 			break;
2993 		}
2994 
2995 		if (pcr->action == PF_CHANGE_GET_TICKET) {
2996 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
2997 			break;
2998 		} else {
2999 			if (pcr->ticket !=
3000 			    ruleset->rules[rs_num].active.ticket) {
3001 				error = EINVAL;
3002 				break;
3003 			}
3004 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3005 				error = EINVAL;
3006 				break;
3007 			}
3008 		}
3009 
3010 		if (pcr->action != PF_CHANGE_REMOVE) {
3011 			newrule = pool_get(&pf_rule_pl, PR_WAITOK);
3012 			if (newrule == NULL) {
3013 				error = ENOMEM;
3014 				break;
3015 			}
3016 			pf_rule_copyin(&pcr->rule, newrule, p, minordev);
3017 #if !INET
3018 			if (newrule->af == AF_INET) {
3019 				pool_put(&pf_rule_pl, newrule);
3020 				error = EAFNOSUPPORT;
3021 				break;
3022 			}
3023 #endif /* INET */
3024 			if (newrule->ifname[0]) {
3025 				newrule->kif = pfi_kif_get(newrule->ifname);
3026 				if (newrule->kif == NULL) {
3027 					pool_put(&pf_rule_pl, newrule);
3028 					error = EINVAL;
3029 					break;
3030 				}
3031 				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
3032 			} else {
3033 				newrule->kif = NULL;
3034 			}
3035 
3036 			if (newrule->tagname[0]) {
3037 				if ((newrule->tag =
3038 				    pf_tagname2tag(newrule->tagname)) == 0) {
3039 					error = EBUSY;
3040 				}
3041 			}
3042 			if (newrule->match_tagname[0]) {
3043 				if ((newrule->match_tag = pf_tagname2tag(
3044 					    newrule->match_tagname)) == 0) {
3045 					error = EBUSY;
3046 				}
3047 			}
3048 			if (newrule->rt && !newrule->direction) {
3049 				error = EINVAL;
3050 			}
3051 #if PFLOG
3052 			if (!newrule->log) {
3053 				newrule->logif = 0;
3054 			}
3055 			if (newrule->logif >= PFLOGIFS_MAX) {
3056 				error = EINVAL;
3057 			}
3058 #endif /* PFLOG */
3059 			pf_addrwrap_setup(&newrule->src.addr);
3060 			pf_addrwrap_setup(&newrule->dst.addr);
3061 			if (pf_rtlabel_add(&newrule->src.addr) ||
3062 			    pf_rtlabel_add(&newrule->dst.addr)) {
3063 				error = EBUSY;
3064 			}
3065 			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) {
3066 				error = EINVAL;
3067 			}
3068 			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) {
3069 				error = EINVAL;
3070 			}
3071 			if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) {
3072 				error = EINVAL;
3073 			}
3074 			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) {
3075 				error = EINVAL;
3076 			}
3077 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) {
3078 				error = EINVAL;
3079 			}
3080 			TAILQ_FOREACH(pa, &pf_pabuf, entries)
3081 			if (pf_tbladdr_setup(ruleset, &pa->addr)) {
3082 				error = EINVAL;
3083 			}
3084 
3085 			if (newrule->overload_tblname[0]) {
3086 				if ((newrule->overload_tbl = pfr_attach_table(
3087 					    ruleset, newrule->overload_tblname)) ==
3088 				    NULL) {
3089 					error = EINVAL;
3090 				} else {
3091 					newrule->overload_tbl->pfrkt_flags |=
3092 					    PFR_TFLAG_ACTIVE;
3093 				}
3094 			}
3095 
3096 			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
3097 			if (((((newrule->action == PF_NAT) ||
3098 			    (newrule->action == PF_RDR) ||
3099 			    (newrule->action == PF_BINAT) ||
3100 			    (newrule->rt > PF_FASTROUTE)) &&
3101 			    !newrule->anchor)) &&
3102 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL)) {
3103 				error = EINVAL;
3104 			}
3105 
3106 			if (error) {
3107 				pf_rm_rule(NULL, newrule);
3108 				break;
3109 			}
3110 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3111 			newrule->evaluations = 0;
3112 			newrule->packets[0] = newrule->packets[1] = 0;
3113 			newrule->bytes[0] = newrule->bytes[1] = 0;
3114 		}
3115 		pf_empty_pool(&pf_pabuf);
3116 
3117 		if (pcr->action == PF_CHANGE_ADD_HEAD) {
3118 			oldrule = TAILQ_FIRST(
3119 				ruleset->rules[rs_num].active.ptr);
3120 		} else if (pcr->action == PF_CHANGE_ADD_TAIL) {
3121 			oldrule = TAILQ_LAST(
3122 				ruleset->rules[rs_num].active.ptr, pf_rulequeue);
3123 		} else {
3124 			oldrule = TAILQ_FIRST(
3125 				ruleset->rules[rs_num].active.ptr);
3126 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) {
3127 				oldrule = TAILQ_NEXT(oldrule, entries);
3128 			}
3129 			if (oldrule == NULL) {
3130 				if (newrule != NULL) {
3131 					pf_rm_rule(NULL, newrule);
3132 				}
3133 				error = EINVAL;
3134 				break;
3135 			}
3136 		}
3137 
3138 		if (pcr->action == PF_CHANGE_REMOVE) {
3139 			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
3140 			ruleset->rules[rs_num].active.rcount--;
3141 		} else {
3142 			if (oldrule == NULL) {
3143 				TAILQ_INSERT_TAIL(
3144 					ruleset->rules[rs_num].active.ptr,
3145 					newrule, entries);
3146 			} else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3147 			    pcr->action == PF_CHANGE_ADD_BEFORE) {
3148 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3149 			} else {
3150 				TAILQ_INSERT_AFTER(
3151 					ruleset->rules[rs_num].active.ptr,
3152 					oldrule, newrule, entries);
3153 			}
3154 			ruleset->rules[rs_num].active.rcount++;
3155 		}
3156 
3157 		nr = 0;
3158 		TAILQ_FOREACH(oldrule,
3159 		    ruleset->rules[rs_num].active.ptr, entries)
3160 		oldrule->nr = nr++;
3161 
3162 		ruleset->rules[rs_num].active.ticket++;
3163 
3164 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3165 		pf_remove_if_empty_ruleset(ruleset);
3166 #if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
3167 		net_filter_event_mark(NET_FILTER_EVENT_PF,
3168 		    pf_check_compatible_rules());
3169 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
3170 		break;
3171 	}
3172 
3173 	case DIOCINSERTRULE: {
3174 		struct pf_ruleset       *ruleset;
3175 		struct pf_rule          *rule, *tail, *r;
3176 		int                     rs_num;
3177 		int                     is_anchor;
3178 
3179 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3180 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3181 		is_anchor = (pr->anchor_call[0] != '\0');
3182 
3183 		if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
3184 		    pr->rule.owner, is_anchor, &error)) == NULL) {
3185 			break;
3186 		}
3187 
3188 		rs_num = pf_get_ruleset_number(pr->rule.action);
3189 		if (rs_num >= PF_RULESET_MAX) {
3190 			error = EINVAL;
3191 			break;
3192 		}
3193 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3194 			error = EINVAL;
3195 			break;
3196 		}
3197 
3198 		/* make sure this anchor rule doesn't exist already */
3199 		if (is_anchor) {
3200 			r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3201 			while (r) {
3202 				if (r->anchor &&
3203 				    ((strcmp(r->anchor->name,
3204 				    pr->anchor_call)) == 0)) {
3205 					if (((strcmp(pr->rule.owner,
3206 					    r->owner)) == 0) ||
3207 					    ((strcmp(r->owner, "")) == 0)) {
3208 						error = EEXIST;
3209 					} else {
3210 						error = EPERM;
3211 					}
3212 					break;
3213 				}
3214 				r = TAILQ_NEXT(r, entries);
3215 			}
3216 			if (error != 0) {
3217 				return error;
3218 			}
3219 		}
3220 
3221 		rule = pool_get(&pf_rule_pl, PR_WAITOK);
3222 		if (rule == NULL) {
3223 			error = ENOMEM;
3224 			break;
3225 		}
3226 		pf_rule_copyin(&pr->rule, rule, p, minordev);
3227 #if !INET
3228 		if (rule->af == AF_INET) {
3229 			pool_put(&pf_rule_pl, rule);
3230 			error = EAFNOSUPPORT;
3231 			break;
3232 		}
3233 #endif /* INET */
3234 		r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3235 		while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) {
3236 			r = TAILQ_NEXT(r, entries);
3237 		}
3238 		if (r == NULL) {
3239 			if ((tail =
3240 			    TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3241 			    pf_rulequeue)) != NULL) {
3242 				rule->nr = tail->nr + 1;
3243 			} else {
3244 				rule->nr = 0;
3245 			}
3246 		} else {
3247 			rule->nr = r->nr;
3248 		}
3249 
3250 		if ((error = pf_rule_setup(pr, rule, ruleset))) {
3251 			break;
3252 		}
3253 
3254 		if (rule->anchor != NULL) {
3255 			strlcpy(rule->anchor->owner, rule->owner,
3256 			    PF_OWNER_NAME_SIZE);
3257 		}
3258 
3259 		if (r) {
3260 			TAILQ_INSERT_BEFORE(r, rule, entries);
3261 			while (r && ++r->nr) {
3262 				r = TAILQ_NEXT(r, entries);
3263 			}
3264 		} else {
3265 			TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3266 			    rule, entries);
3267 		}
3268 		ruleset->rules[rs_num].active.rcount++;
3269 
3270 		/* Calculate checksum for the main ruleset */
3271 		if (ruleset == &pf_main_ruleset) {
3272 			error = pf_setup_pfsync_matching(ruleset);
3273 		}
3274 
3275 		pf_ruleset_cleanup(ruleset, rs_num);
3276 		rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);
3277 
3278 		pr->rule.ticket = rule->ticket;
3279 		pf_rule_copyout(rule, &pr->rule);
3280 		if (rule->rule_flag & PFRULE_PFM) {
3281 			pffwrules++;
3282 		}
3283 		if (rule->action == PF_NAT64) {
3284 			atomic_add_16(&pf_nat64_configured, 1);
3285 		}
3286 
3287 		if (pr->anchor_call[0] == '\0') {
3288 			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
3289 			if (rule->rule_flag & PFRULE_PFM) {
3290 				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
3291 			}
3292 		}
3293 #if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
3294 		net_filter_event_mark(NET_FILTER_EVENT_PF,
3295 		    pf_check_compatible_rules());
3296 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
3297 		break;
3298 	}
3299 
3300 	case DIOCDELETERULE: {
3301 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3302 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3303 
3304 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3305 			error = EINVAL;
3306 			break;
3307 		}
3308 
3309 		/* get device through which request is made */
3310 		if ((uint8_t)minordev == PFDEV_PFM) {
3311 			req_dev |= PFRULE_PFM;
3312 		}
3313 
3314 		if (pr->rule.ticket) {
3315 			if ((error = pf_delete_rule_by_ticket(pr, req_dev))) {
3316 				break;
3317 			}
3318 		} else {
3319 			pf_delete_rule_by_owner(pr->rule.owner, req_dev);
3320 		}
3321 		pr->nr = pffwrules;
3322 		if (pr->rule.action == PF_NAT64) {
3323 			atomic_add_16(&pf_nat64_configured, -1);
3324 		}
3325 #if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
3326 		net_filter_event_mark(NET_FILTER_EVENT_PF,
3327 		    pf_check_compatible_rules());
3328 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
3329 		break;
3330 	}
3331 
3332 	default:
3333 		VERIFY(0);
3334 		/* NOTREACHED */
3335 	}
3336 
3337 	return error;
3338 }
3339 
/*
 * Handle the state-killing ioctls (DIOCCLRSTATES / DIOCKILLSTATES).
 *
 * DIOCCLRSTATES purges every state whose interface name and rule owner
 * match the (optional) filters in *psk; DIOCKILLSTATES additionally
 * matches on address family, protocol, and source/destination
 * address/port.  In both cases the number of states removed is passed
 * back to userland in psk->psk_af (historical in-band reuse of that
 * field).  Unknown commands VERIFY (kernel panic on debug builds).
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	/* NUL-terminate user-supplied strings before any strcmp() below */
	psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state         *s, *nexts;
		int                      killed = 0;

		/*
		 * Walk the id tree; fetch the successor before possibly
		 * unlinking the current state so iteration stays valid.
		 */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			/* no attached rule means the owner filter cannot match */
			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* report the number of purged states via psk_af */
		psk->psk_af = (sa_family_t)killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state         *s, *nexts;
		struct pf_state_key     *sk;
		struct pf_state_host    *src, *dst;
		int                      killed = 0;

		/* same unlink-safe traversal as DIOCCLRSTATES above */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			/*
			 * Orient src/dst to the LAN-side view of the state
			 * so the userland-supplied endpoints compare against
			 * the right halves of the key.
			 */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			/* zero af/proto in the request act as wildcards */
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* report the number of killed states via psk_af */
		psk->psk_af = (sa_family_t)killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3466 
/*
 * Handle single-state ioctls.
 *
 * DIOCADDSTATE imports a pfsync-format state supplied by userland and
 * inserts it into the state table (used e.g. to restore synced states);
 * DIOCGETSTATE exports the state identified by (id, creatorid).
 * Unknown commands VERIFY.
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state     *sp = &ps->state;
		struct pf_state         *s;
		struct pf_state_key     *sk;
		struct pfi_kif          *kif;

		/* reject timeout indices outside the timeout table */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		/* fill s and sk from the wire-format representation */
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			/*
			 * NOTE(review): only the pf_state is returned to its
			 * pool here; presumably pf_insert_state() tears down
			 * the attached state key on failure -- verify, else
			 * sk leaks on this path.
			 */
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state         *s;
		struct pf_state_cmp      id_key;

		/* look the state up by its (id, creatorid) pair */
		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		/* serialize the state back into pfsync wire format */
		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3540 
/*
 * Handle DIOCGETSTATES: dump all states to userland in pfsync wire
 * format.  The caller uses a two-call protocol: with ps_len == 0 the
 * required buffer size is returned; otherwise up to ps_len bytes of
 * exported states are copied out and ps_len is updated to the amount
 * actually written.  Both 32- and 64-bit user structs are supported;
 * proc_is64bit() selects which one is live.
 */
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: {           /* struct pfioc_states */
		struct pf_state         *state;
		struct pfsync_state     *pstore;
		user_addr_t              buf;
		u_int32_t                nr = 0;
		int                      len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* size query: report space needed for all states */
			size = sizeof(struct pfsync_state) * pf_status.states;
			if (p64) {
				ps64->ps_len = size;
			} else {
				ps32->ps_len = size;
			}
			break;
		}

		/* single reusable staging record for copyout */
		pstore = kalloc_type(struct pfsync_state,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
#ifdef __LP64__
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);
#else
		buf = ps32->ps_buf;
#endif

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* skip states already unlinked but not yet purged */
			if (state->timeout != PFTM_UNLINKED) {
				/* stop (without error) when the user buffer is full */
				if ((nr + 1) * sizeof(*pstore) > (unsigned)len) {
					break;
				}

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof(*pstore));
				if (error) {
					kfree_type(struct pfsync_state, pstore);
					goto fail;
				}
				buf += sizeof(*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* report the number of bytes actually copied out */
		size = sizeof(struct pfsync_state) * nr;
		if (p64) {
			ps64->ps_len = size;
		} else {
			ps32->ps_len = size;
		}

		kfree_type(struct pfsync_state, pstore);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
3613 
/*
 * Handle DIOCNATLOOK: given the addresses/ports of a connection as seen
 * by userland, find the matching translated state and report the
 * rewritten (NATed) endpoints back in pnl->rs*/rd* fields.
 *
 * Errors: EINVAL for an under-specified query, E2BIG if more than one
 * state matches, ENOENT if none does.
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key     *sk;
		struct pf_state         *state;
		struct pf_state_key_cmp  key;
		int                      m = 0, direction = pnl->direction;

		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/*
		 * Require protocol and both addresses; TCP/UDP lookups
		 * additionally need both ports.
		 */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port))) {
			error = EINVAL;
		} else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				/* inbound: build the gateway-side key */
				key.af_gwy = pnl->af;
				PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
				    pnl->af);
				memcpy(&key.ext_gwy.xport, &pnl->dxport,
				    sizeof(key.ext_gwy.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof(key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				/* outbound: build the LAN-side key */
				key.af_lan = pnl->af;
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof(key.lan.xport));
				PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
				    pnl->af);
				memcpy(&key.ext_lan.xport, &pnl->sxport,
				    sizeof(key.ext_lan.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1) {
				error = E2BIG;  /* more than one state */
			} else if (state != NULL) {
				sk = state->state_key;
				/*
				 * Report the translated endpoint; the other
				 * endpoint is echoed back unchanged.
				 */
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af_lan);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof(pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof(pnl->rdxport));
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af_gwy);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof(pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof(pnl->rsxport));
				}
			} else {
				error = ENOENT;
			}
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3702 
3703 static int
pfioctl_ioc_tm(u_long cmd,struct pfioc_tm * pt,struct proc * p)3704 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3705 {
3706 #pragma unused(p)
3707 	int error = 0;
3708 
3709 	switch (cmd) {
3710 	case DIOCSETTIMEOUT: {
3711 		int old;
3712 
3713 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3714 		    pt->seconds < 0) {
3715 			error = EINVAL;
3716 			goto fail;
3717 		}
3718 		old = pf_default_rule.timeout[pt->timeout];
3719 		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) {
3720 			pt->seconds = 1;
3721 		}
3722 		pf_default_rule.timeout[pt->timeout] = pt->seconds;
3723 		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) {
3724 			wakeup(pf_purge_thread_fn);
3725 		}
3726 		pt->seconds = old;
3727 		break;
3728 	}
3729 
3730 	case DIOCGETTIMEOUT: {
3731 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3732 			error = EINVAL;
3733 			goto fail;
3734 		}
3735 		pt->seconds = pf_default_rule.timeout[pt->timeout];
3736 		break;
3737 	}
3738 
3739 	default:
3740 		VERIFY(0);
3741 		/* NOTREACHED */
3742 	}
3743 fail:
3744 	return error;
3745 }
3746 
3747 static int
pfioctl_ioc_limit(u_long cmd,struct pfioc_limit * pl,struct proc * p)3748 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3749 {
3750 #pragma unused(p)
3751 	int error = 0;
3752 
3753 	switch (cmd) {
3754 	case DIOCGETLIMIT: {
3755 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3756 			error = EINVAL;
3757 			goto fail;
3758 		}
3759 		pl->limit = pf_pool_limits[pl->index].limit;
3760 		break;
3761 	}
3762 
3763 	case DIOCSETLIMIT: {
3764 		int old_limit;
3765 
3766 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3767 		    pf_pool_limits[pl->index].pp == NULL) {
3768 			error = EINVAL;
3769 			goto fail;
3770 		}
3771 		pool_sethardlimit(pf_pool_limits[pl->index].pp,
3772 		    pl->limit, NULL, 0);
3773 		old_limit = pf_pool_limits[pl->index].limit;
3774 		pf_pool_limits[pl->index].limit = pl->limit;
3775 		pl->limit = old_limit;
3776 		break;
3777 	}
3778 
3779 	default:
3780 		VERIFY(0);
3781 		/* NOTREACHED */
3782 	}
3783 fail:
3784 	return error;
3785 }
3786 
3787 static int
pfioctl_ioc_pooladdr(u_long cmd,struct pfioc_pooladdr * pp,struct proc * p)3788 pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
3789 {
3790 #pragma unused(p)
3791 	struct pf_pooladdr *pa = NULL;
3792 	struct pf_pool *pool = NULL;
3793 	int error = 0;
3794 
3795 	switch (cmd) {
3796 	case DIOCBEGINADDRS: {
3797 		pf_empty_pool(&pf_pabuf);
3798 		pp->ticket = ++ticket_pabuf;
3799 		break;
3800 	}
3801 
3802 	case DIOCADDADDR: {
3803 		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3804 		if (pp->ticket != ticket_pabuf) {
3805 			error = EBUSY;
3806 			break;
3807 		}
3808 #if !INET
3809 		if (pp->af == AF_INET) {
3810 			error = EAFNOSUPPORT;
3811 			break;
3812 		}
3813 #endif /* INET */
3814 		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
3815 		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
3816 		    pp->addr.addr.type != PF_ADDR_TABLE) {
3817 			error = EINVAL;
3818 			break;
3819 		}
3820 		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3821 		if (pa == NULL) {
3822 			error = ENOMEM;
3823 			break;
3824 		}
3825 		pf_pooladdr_copyin(&pp->addr, pa);
3826 		if (pa->ifname[0]) {
3827 			pa->kif = pfi_kif_get(pa->ifname);
3828 			if (pa->kif == NULL) {
3829 				pool_put(&pf_pooladdr_pl, pa);
3830 				error = EINVAL;
3831 				break;
3832 			}
3833 			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
3834 		}
3835 		pf_addrwrap_setup(&pa->addr);
3836 		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
3837 			pfi_dynaddr_remove(&pa->addr);
3838 			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
3839 			pool_put(&pf_pooladdr_pl, pa);
3840 			error = EINVAL;
3841 			break;
3842 		}
3843 		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
3844 		break;
3845 	}
3846 
3847 	case DIOCGETADDRS: {
3848 		pp->nr = 0;
3849 		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3850 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
3851 		    pp->r_num, 0, 1, 0);
3852 		if (pool == NULL) {
3853 			error = EBUSY;
3854 			break;
3855 		}
3856 		TAILQ_FOREACH(pa, &pool->list, entries)
3857 		pp->nr++;
3858 		break;
3859 	}
3860 
3861 	case DIOCGETADDR: {
3862 		u_int32_t                nr = 0;
3863 
3864 		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3865 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
3866 		    pp->r_num, 0, 1, 1);
3867 		if (pool == NULL) {
3868 			error = EBUSY;
3869 			break;
3870 		}
3871 		pa = TAILQ_FIRST(&pool->list);
3872 		while ((pa != NULL) && (nr < pp->nr)) {
3873 			pa = TAILQ_NEXT(pa, entries);
3874 			nr++;
3875 		}
3876 		if (pa == NULL) {
3877 			error = EBUSY;
3878 			break;
3879 		}
3880 		pf_pooladdr_copyout(pa, &pp->addr);
3881 		pfi_dynaddr_copyout(&pp->addr.addr);
3882 		pf_tbladdr_copyout(&pp->addr.addr);
3883 		pf_rtlabel_copyout(&pp->addr.addr);
3884 		break;
3885 	}
3886 
3887 	case DIOCCHANGEADDR: {
3888 		struct pfioc_pooladdr   *pca = pp;
3889 		struct pf_pooladdr      *oldpa = NULL, *newpa = NULL;
3890 		struct pf_ruleset       *ruleset;
3891 
3892 		if (pca->action < PF_CHANGE_ADD_HEAD ||
3893 		    pca->action > PF_CHANGE_REMOVE) {
3894 			error = EINVAL;
3895 			break;
3896 		}
3897 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
3898 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
3899 		    pca->addr.addr.type != PF_ADDR_TABLE) {
3900 			error = EINVAL;
3901 			break;
3902 		}
3903 
3904 		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
3905 		ruleset = pf_find_ruleset(pca->anchor);
3906 		if (ruleset == NULL) {
3907 			error = EBUSY;
3908 			break;
3909 		}
3910 		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
3911 		    pca->r_num, pca->r_last, 1, 1);
3912 		if (pool == NULL) {
3913 			error = EBUSY;
3914 			break;
3915 		}
3916 		if (pca->action != PF_CHANGE_REMOVE) {
3917 			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3918 			if (newpa == NULL) {
3919 				error = ENOMEM;
3920 				break;
3921 			}
3922 			pf_pooladdr_copyin(&pca->addr, newpa);
3923 #if !INET
3924 			if (pca->af == AF_INET) {
3925 				pool_put(&pf_pooladdr_pl, newpa);
3926 				error = EAFNOSUPPORT;
3927 				break;
3928 			}
3929 #endif /* INET */
3930 			if (newpa->ifname[0]) {
3931 				newpa->kif = pfi_kif_get(newpa->ifname);
3932 				if (newpa->kif == NULL) {
3933 					pool_put(&pf_pooladdr_pl, newpa);
3934 					error = EINVAL;
3935 					break;
3936 				}
3937 				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
3938 			} else {
3939 				newpa->kif = NULL;
3940 			}
3941 			pf_addrwrap_setup(&newpa->addr);
3942 			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
3943 			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
3944 				pfi_dynaddr_remove(&newpa->addr);
3945 				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
3946 				pool_put(&pf_pooladdr_pl, newpa);
3947 				error = EINVAL;
3948 				break;
3949 			}
3950 		}
3951 
3952 		if (pca->action == PF_CHANGE_ADD_HEAD) {
3953 			oldpa = TAILQ_FIRST(&pool->list);
3954 		} else if (pca->action == PF_CHANGE_ADD_TAIL) {
3955 			oldpa = TAILQ_LAST(&pool->list, pf_palist);
3956 		} else {
3957 			int     i = 0;
3958 
3959 			oldpa = TAILQ_FIRST(&pool->list);
3960 			while ((oldpa != NULL) && (i < (int)pca->nr)) {
3961 				oldpa = TAILQ_NEXT(oldpa, entries);
3962 				i++;
3963 			}
3964 			if (oldpa == NULL) {
3965 				error = EINVAL;
3966 				break;
3967 			}
3968 		}
3969 
3970 		if (pca->action == PF_CHANGE_REMOVE) {
3971 			TAILQ_REMOVE(&pool->list, oldpa, entries);
3972 			pfi_dynaddr_remove(&oldpa->addr);
3973 			pf_tbladdr_remove(&oldpa->addr);
3974 			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
3975 			pool_put(&pf_pooladdr_pl, oldpa);
3976 		} else {
3977 			if (oldpa == NULL) {
3978 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
3979 			} else if (pca->action == PF_CHANGE_ADD_HEAD ||
3980 			    pca->action == PF_CHANGE_ADD_BEFORE) {
3981 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
3982 			} else {
3983 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
3984 				    newpa, entries);
3985 			}
3986 		}
3987 
3988 		pool->cur = TAILQ_FIRST(&pool->list);
3989 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
3990 		    pca->af);
3991 		break;
3992 	}
3993 
3994 	default:
3995 		VERIFY(0);
3996 		/* NOTREACHED */
3997 	}
3998 
3999 	return error;
4000 }
4001 
4002 static int
pfioctl_ioc_ruleset(u_long cmd,struct pfioc_ruleset * pr,struct proc * p)4003 pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
4004 {
4005 #pragma unused(p)
4006 	int error = 0;
4007 
4008 	switch (cmd) {
4009 	case DIOCGETRULESETS: {
4010 		struct pf_ruleset       *ruleset;
4011 		struct pf_anchor        *anchor;
4012 
4013 		pr->path[sizeof(pr->path) - 1] = '\0';
4014 		pr->name[sizeof(pr->name) - 1] = '\0';
4015 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4016 			error = EINVAL;
4017 			break;
4018 		}
4019 		pr->nr = 0;
4020 		if (ruleset->anchor == NULL) {
4021 			/* XXX kludge for pf_main_ruleset */
4022 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4023 			if (anchor->parent == NULL) {
4024 				pr->nr++;
4025 			}
4026 		} else {
4027 			RB_FOREACH(anchor, pf_anchor_node,
4028 			    &ruleset->anchor->children)
4029 			pr->nr++;
4030 		}
4031 		break;
4032 	}
4033 
4034 	case DIOCGETRULESET: {
4035 		struct pf_ruleset       *ruleset;
4036 		struct pf_anchor        *anchor;
4037 		u_int32_t                nr = 0;
4038 
4039 		pr->path[sizeof(pr->path) - 1] = '\0';
4040 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4041 			error = EINVAL;
4042 			break;
4043 		}
4044 		pr->name[0] = 0;
4045 		if (ruleset->anchor == NULL) {
4046 			/* XXX kludge for pf_main_ruleset */
4047 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4048 			if (anchor->parent == NULL && nr++ == pr->nr) {
4049 				strlcpy(pr->name, anchor->name,
4050 				    sizeof(pr->name));
4051 				break;
4052 			}
4053 		} else {
4054 			RB_FOREACH(anchor, pf_anchor_node,
4055 			    &ruleset->anchor->children)
4056 			if (nr++ == pr->nr) {
4057 				strlcpy(pr->name, anchor->name,
4058 				    sizeof(pr->name));
4059 				break;
4060 			}
4061 		}
4062 		if (!pr->name[0]) {
4063 			error = EBUSY;
4064 		}
4065 		break;
4066 	}
4067 
4068 	default:
4069 		VERIFY(0);
4070 		/* NOTREACHED */
4071 	}
4072 
4073 	return error;
4074 }
4075 
/*
 * Transactional ruleset/table loading (DIOCXBEGIN / DIOCXROLLBACK /
 * DIOCXCOMMIT).  Userland passes an array of pfioc_trans_e elements,
 * each naming one ruleset (rs_num + anchor) taking part in the
 * transaction.  io32/io64 are the 32-bit and 64-bit user ABI views of
 * the same request; the live one is selected by the caller's address
 * size (proc_is64bit).
 */
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int error = 0, esize, size;
	user_addr_t buf;

#ifdef __LP64__
	int p64 = proc_is64bit(p);

	/* Pick up array geometry from whichever ABI the caller uses. */
	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);
#else
#pragma unused(io64, p)
	esize = io32->esize;
	size = io32->size;
	buf = io32->array;
#endif

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e    *ioe;
		struct pfr_table        *table;
		int                      i;

		/* Element size must match our layout (guards ABI skew). */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			/* Defensively NUL-terminate the user-supplied anchor. */
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				/* nothing to do for ALTQ entries */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			}
			/* Hand the freshly issued ticket back to userland. */
			if (copyout(ioe, buf, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e    *ioe;
		struct pfr_table        *table;
		int                      i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				/* nothing to do for ALTQ entries */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e    *ioe;
		struct pfr_table        *table;
		struct pf_ruleset       *rs;
		user_addr_t              _buf = buf;   /* saved for 2nd pass */
		int                      i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				/* nothing to validate for ALTQ entries */
				break;
			case PF_RULESET_TABLE:
				/* The table transaction must be open with a matching ticket. */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				/* rs_num comes from userland; range-check it. */
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EINVAL;
					goto fail;
				}
				/* The inactive ruleset must be open with a matching ticket. */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* Rewind to the start of the user array for the commit pass. */
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
#if defined(SKYWALK) && defined(XNU_TARGET_OS_OSX)
		/* Re-evaluate rule compatibility after the ruleset changed. */
		net_filter_event_mark(NET_FILTER_EVENT_PF,
		    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4305 
/*
 * DIOCGETSRCNODES: export the source-tracking node table to userland.
 * The usual two-call protocol applies: a first call with psn_len == 0
 * returns the required buffer size; a second call with a buffer copies
 * out as many nodes as fit and reports the bytes actually written.
 */
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node      *n, *pstore;
		user_addr_t              buf;
		u_int32_t                nr = 0;
		int                      space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			/* Size probe: count nodes and report bytes needed. */
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			nr++;

			size = sizeof(struct pf_src_node) * nr;
			if (p64) {
				psn64->psn_len = size;
			} else {
				psn32->psn_len = size;
			}
			break;
		}

		/* Scratch copy so fields can be sanitized before copyout. */
		pstore = kalloc_type(struct pf_src_node, Z_WAITOK | Z_NOFAIL);
#ifdef __LP64__
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);
#else
		buf = psn32->psn_buf;
#endif

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* Stop once the next record would overflow the user buffer. */
			if ((nr + 1) * sizeof(*pstore) > (unsigned)space) {
				break;
			}

			bcopy(n, pstore, sizeof(*pstore));
			/* Export the rule by number instead of kernel pointer. */
			if (n->rule.ptr != NULL) {
				pstore->rule.nr = n->rule.ptr->nr;
			}
			/* Convert absolute timestamps to age / time-remaining. */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs) {
				pstore->expire -= secs;
			} else {
				pstore->expire = 0;
			}

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds) {
				pstore->conn_rate.count = 0;
			} else {
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;
			}

			/* Scrub kernel pointers before exporting the record. */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof(*pstore));
			if (error) {
				kfree_type(struct pf_src_node, pstore);
				goto fail;
			}
			buf += sizeof(*pstore);
			nr++;
		}

		/* Report the number of bytes actually copied out. */
		size = sizeof(struct pf_src_node) * nr;
		if (p64) {
			psn64->psn_len = size;
		} else {
			psn32->psn_len = size;
		}

		kfree_type(struct pf_src_node, pstore);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4400 
4401 static int
pfioctl_ioc_src_node_kill(u_long cmd,struct pfioc_src_node_kill * psnk,struct proc * p)4402 pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
4403     struct proc *p)
4404 {
4405 #pragma unused(p)
4406 	int error = 0;
4407 
4408 	switch (cmd) {
4409 	case DIOCKILLSRCNODES: {
4410 		struct pf_src_node      *sn;
4411 		struct pf_state         *s;
4412 		int                     killed = 0;
4413 
4414 		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
4415 			if (PF_MATCHA(psnk->psnk_src.neg,
4416 			    &psnk->psnk_src.addr.v.a.addr,
4417 			    &psnk->psnk_src.addr.v.a.mask,
4418 			    &sn->addr, sn->af) &&
4419 			    PF_MATCHA(psnk->psnk_dst.neg,
4420 			    &psnk->psnk_dst.addr.v.a.addr,
4421 			    &psnk->psnk_dst.addr.v.a.mask,
4422 			    &sn->raddr, sn->af)) {
4423 				/* Handle state to src_node linkage */
4424 				if (sn->states != 0) {
4425 					RB_FOREACH(s, pf_state_tree_id,
4426 					    &tree_id) {
4427 						if (s->src_node == sn) {
4428 							s->src_node = NULL;
4429 						}
4430 						if (s->nat_src_node == sn) {
4431 							s->nat_src_node = NULL;
4432 						}
4433 					}
4434 					sn->states = 0;
4435 				}
4436 				sn->expire = 1;
4437 				killed++;
4438 			}
4439 		}
4440 
4441 		if (killed > 0) {
4442 			pf_purge_expired_src_nodes();
4443 		}
4444 
4445 		psnk->psnk_af = (sa_family_t)killed;
4446 		break;
4447 	}
4448 
4449 	default:
4450 		VERIFY(0);
4451 		/* NOTREACHED */
4452 	}
4453 
4454 	return error;
4455 }
4456 
4457 static int
pfioctl_ioc_iface(u_long cmd,struct pfioc_iface_32 * io32,struct pfioc_iface_64 * io64,struct proc * p)4458 pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
4459     struct pfioc_iface_64 *io64, struct proc *p)
4460 {
4461 	int p64 = proc_is64bit(p);
4462 	int error = 0;
4463 
4464 	switch (cmd) {
4465 	case DIOCIGETIFACES: {
4466 		user_addr_t buf;
4467 		int esize;
4468 
4469 #ifdef __LP64__
4470 		buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
4471 		esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
4472 #else
4473 		buf = io32->pfiio_buffer;
4474 		esize = io32->pfiio_esize;
4475 #endif
4476 
4477 		/* esize must be that of the user space version of pfi_kif */
4478 		if (esize != sizeof(struct pfi_uif)) {
4479 			error = ENODEV;
4480 			break;
4481 		}
4482 		if (p64) {
4483 			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4484 		} else {
4485 			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4486 		}
4487 		error = pfi_get_ifaces(
4488 			p64 ? io64->pfiio_name : io32->pfiio_name, buf,
4489 			p64 ? &io64->pfiio_size : &io32->pfiio_size);
4490 		break;
4491 	}
4492 
4493 	case DIOCSETIFFLAG: {
4494 		if (p64) {
4495 			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4496 		} else {
4497 			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4498 		}
4499 
4500 		error = pfi_set_flags(
4501 			p64 ? io64->pfiio_name : io32->pfiio_name,
4502 			p64 ? io64->pfiio_flags : io32->pfiio_flags);
4503 		break;
4504 	}
4505 
4506 	case DIOCCLRIFFLAG: {
4507 		if (p64) {
4508 			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4509 		} else {
4510 			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4511 		}
4512 
4513 		error = pfi_clear_flags(
4514 			p64 ? io64->pfiio_name : io32->pfiio_name,
4515 			p64 ? io64->pfiio_flags : io32->pfiio_flags);
4516 		break;
4517 	}
4518 
4519 	default:
4520 		VERIFY(0);
4521 		/* NOTREACHED */
4522 	}
4523 
4524 	return error;
4525 }
4526 
/*
 * Main PF entry point from the IP input/output paths.  Filters the
 * first packet of the chain at *mp for address family af, then repairs
 * the packet-chain linkage (*mppn) around whatever PF passed, dropped
 * or substituted.
 */
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *nextpkt;
	net_thread_marks_t marks;
	struct ifnet * pf_ifp = ifp;

	/* Always allow traffic on co-processor interfaces. */
	if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp)) {
		return 0;
	}

	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	/*
	 * NOTE(review): the PF locks are taken only when the thread mark
	 * was actually pushed (marks != none) — presumably the other case
	 * is a re-entry on a thread already holding PF; confirm against
	 * net_thread_marks_push() semantics.
	 */
	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(&pf_perim_lock);
		if (!pf_is_enabled) {
			/* PF disabled: pass the packet through untouched. */
			goto done;
		}
		lck_mtx_lock(&pf_lock);
	}

	if (mppn != NULL && *mppn != NULL) {
		VERIFY(*mppn == *mp);
	}
	/* Detach the packet from its chain; PF filters one mbuf at a time. */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL) {
		(*mp)->m_nextpkt = NULL;
	}

	/*
	 * For packets destined to locally hosted IP address
	 * ip_output_list sets Mbuf's pkt header's rcvif to
	 * the interface hosting the IP address.
	 * While on the output path ifp passed to pf_af_hook
	 * to such local communication is the loopback interface,
	 * the input path derives ifp from mbuf packet header's
	 * rcvif.
	 * This asymmetry causes issues with PF.
	 * To handle that case, we have a limited change here to
	 * pass interface as loopback if packets are looped in.
	 */
	if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		pf_ifp = lo_ifp;
	}

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(pf_ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
	case AF_INET6:
		error = pf_inet6_hook(pf_ifp, mp, input, fwa);
		break;
	default:
		/* Unknown family: pass through without filtering. */
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		while (m->m_nextpkt != NULL) {
			m = m->m_nextpkt;
		}
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL) {
			*mppn = *mp;
		} else {
			/* Packet was dropped; splice around it. */
			*mppn = nextpkt;
		}
	}

	if (marks != net_thread_marks_none) {
		lck_mtx_unlock(&pf_lock);
	}

done:
	if (marks != net_thread_marks_none) {
		lck_rw_done(&pf_perim_lock);
	}

	net_thread_marks_pop(marks);
	return error;
}
4617 
4618 
#if INET
/*
 * IPv4 glue for pf_af_hook(): finalize any delayed transport checksum,
 * swap the IP header fields PF expects into network byte order, run
 * pf_test_mbuf(), and map the verdict to an errno (0 = passed).
 */
static __attribute__((noinline)) int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

	/* ip_len/ip_off are swapped to network order around the PF call. */
#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* Blocked: drop the packet ourselves. */
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* PF already consumed the mbuf. */
			error = ENOBUFS;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/*
		 * Restore host byte order; PF may have substituted a new
		 * mbuf, so re-fetch the header pointer.
		 */
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return error;
}
#endif /* INET */
4673 
4674 int __attribute__((noinline))
pf_inet6_hook(struct ifnet * ifp,struct mbuf ** mp,int input,struct ip_fw_args * fwa)4675 pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
4676     struct ip_fw_args *fwa)
4677 {
4678 	int error = 0;
4679 
4680 	/*
4681 	 * If the packet is outbound, is originated locally, is flagged for
4682 	 * delayed UDP/TCP checksum calculation, and is about to be processed
4683 	 * for an interface that doesn't support the appropriate checksum
4684 	 * offloading, then calculated the checksum here so that PF can adjust
4685 	 * it properly.
4686 	 */
4687 	if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
4688 		static const int mask = CSUM_DELAY_IPV6_DATA;
4689 		const int flags = (*mp)->m_pkthdr.csum_flags &
4690 		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
4691 
4692 		if (flags & mask) {
4693 			/*
4694 			 * Checksum offload should not have been enabled
4695 			 * when extension headers exist, thus 0 for optlen.
4696 			 */
4697 			in6_delayed_cksum(*mp);
4698 			(*mp)->m_pkthdr.csum_flags &= ~mask;
4699 		}
4700 	}
4701 
4702 	if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
4703 		if (*mp != NULL) {
4704 			m_freem(*mp);
4705 			*mp = NULL;
4706 			error = EHOSTUNREACH;
4707 		} else {
4708 			error = ENOBUFS;
4709 		}
4710 	}
4711 	return error;
4712 }
4713 
4714 int
pf_ifaddr_hook(struct ifnet * ifp)4715 pf_ifaddr_hook(struct ifnet *ifp)
4716 {
4717 	struct pfi_kif *kif = ifp->if_pf_kif;
4718 
4719 	if (kif != NULL) {
4720 		lck_rw_lock_shared(&pf_perim_lock);
4721 		lck_mtx_lock(&pf_lock);
4722 
4723 		pfi_kifaddr_update(kif);
4724 
4725 		lck_mtx_unlock(&pf_lock);
4726 		lck_rw_done(&pf_perim_lock);
4727 	}
4728 	return 0;
4729 }
4730 
4731 /*
4732  * Caller acquires dlil lock as writer (exclusive)
4733  */
4734 void
pf_ifnet_hook(struct ifnet * ifp,int attach)4735 pf_ifnet_hook(struct ifnet *ifp, int attach)
4736 {
4737 	lck_rw_lock_shared(&pf_perim_lock);
4738 	lck_mtx_lock(&pf_lock);
4739 	if (attach) {
4740 		pfi_attach_ifnet(ifp);
4741 	} else {
4742 		pfi_detach_ifnet(ifp);
4743 	}
4744 	lck_mtx_unlock(&pf_lock);
4745 	lck_rw_done(&pf_perim_lock);
4746 }
4747 
4748 static void
pf_attach_hooks(void)4749 pf_attach_hooks(void)
4750 {
4751 	ifnet_head_lock_shared();
4752 	/*
4753 	 * Check against ifnet_addrs[] before proceeding, in case this
4754 	 * is called very early on, e.g. during dlil_init() before any
4755 	 * network interface is attached.
4756 	 */
4757 	if (ifnet_addrs != NULL) {
4758 		int i;
4759 
4760 		for (i = 0; i <= if_index; i++) {
4761 			struct ifnet *ifp = ifindex2ifnet[i];
4762 			if (ifp != NULL) {
4763 				pfi_attach_ifnet(ifp);
4764 			}
4765 		}
4766 	}
4767 	ifnet_head_done();
4768 }
4769 
#if 0
/*
 * Detach every interface from PF; currently unused along with pfdetach().
 *
 * Fix: the loop counter `i` was declared inside the loop body, after it
 * had already been used in the for-header — this never compiled, but the
 * bug went unnoticed because the code is compiled out by #if 0.  The
 * declaration now precedes the loop, mirroring pf_attach_hooks().
 */
static void
pf_detach_hooks(void)
{
	int i;

	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];
			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}
#endif
4789 
/*
 * 'D' group ioctls.
 *
 * The switch statement below does nothing at runtime, as it serves as a
 * compile time check to ensure that all of the socket 'D' ioctls (those
 * in the 'D' group going thru soo_ioctl) that are made available by the
 * networking stack are unique.  This works as long as this routine gets
 * updated each time a new interface ioctl gets added.
 *
 * Any failures at compile time indicate duplicated ioctl values.
 */
/*
 * Compile-time uniqueness check for the 'D'-group ioctl values: the
 * switch below emits no code, but duplicate case labels are a compile
 * error, so adding a new DIOC* value that collides with an existing one
 * fails the build.  Add every new 'D' ioctl to this list.
 */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to _CASSERT() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES:
	case DIOCGIFSPEED:
		;
	}
}
4885