xref: /xnu-8792.81.2/bsd/net/pf_ioctl.c (revision 19c3b8c28c31cb8130e034cfb5df6bf9ba342d90)
1 /*
2  * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*	$apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /*	$OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31 
32 /*
33  * Copyright (c) 2001 Daniel Hartmeier
34  * Copyright (c) 2002,2003 Henning Brauer
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *    - Redistributions of source code must retain the above copyright
42  *      notice, this list of conditions and the following disclaimer.
43  *    - Redistributions in binary form must reproduce the above
44  *      copyright notice, this list of conditions and the following
45  *      disclaimer in the documentation and/or other materials provided
46  *      with the distribution.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59  * POSSIBILITY OF SUCH DAMAGE.
60  *
61  * Effort sponsored in part by the Defense Advanced Research Projects
62  * Agency (DARPA) and Air Force Research Laboratory, Air Force
63  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64  *
65  */
66 
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83 #include <os/log.h>
84 
85 #include <mach/vm_param.h>
86 
87 #include <net/dlil.h>
88 #include <net/if.h>
89 #include <net/if_types.h>
90 #include <net/net_api_stats.h>
91 #include <net/route.h>
92 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
93 #include <skywalk/lib/net_filter_event.h>
94 #endif
95 
96 #include <netinet/in.h>
97 #include <netinet/in_var.h>
98 #include <netinet/in_systm.h>
99 #include <netinet/ip.h>
100 #include <netinet/ip_var.h>
101 #include <netinet/ip_icmp.h>
102 #include <netinet/if_ether.h>
103 
104 #if DUMMYNET
105 #include <netinet/ip_dummynet.h>
106 #else
107 struct ip_fw_args;
108 #endif /* DUMMYNET */
109 
110 #include <libkern/crypto/md5.h>
111 
112 #include <machine/machine_routines.h>
113 
114 #include <miscfs/devfs/devfs.h>
115 
116 #include <net/pfvar.h>
117 
118 #if NPFSYNC
119 #include <net/if_pfsync.h>
120 #endif /* NPFSYNC */
121 
122 #if PFLOG
123 #include <net/if_pflog.h>
124 #endif /* PFLOG */
125 
126 #include <netinet/ip6.h>
127 #include <netinet/in_pcb.h>
128 
129 #include <dev/random/randomdev.h>
130 
131 #if 0
132 static void pfdetach(void);
133 #endif
134 static int pfopen(dev_t, int, int, struct proc *);
135 static int pfclose(dev_t, int, int, struct proc *);
136 static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
137 static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
138     struct pfioc_table_64 *, struct proc *);
139 static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
140     struct pfioc_tokens_64 *, struct proc *);
141 static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
142 static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
143     struct proc *);
144 static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
145 static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
146     struct pfioc_states_64 *, struct proc *);
147 static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
148 static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
149 static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
150 static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
151 static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
152 static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
153     struct pfioc_trans_64 *, struct proc *);
154 static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
155     struct pfioc_src_nodes_64 *, struct proc *);
156 static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
157     struct proc *);
158 static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
159     struct pfioc_iface_64 *, struct proc *);
160 static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
161     u_int8_t, u_int8_t, u_int8_t);
162 static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
163 static void pf_empty_pool(struct pf_palist *);
164 static int pf_begin_rules(u_int32_t *, int, const char *);
165 static int pf_rollback_rules(u_int32_t, int, char *);
166 static int pf_setup_pfsync_matching(struct pf_ruleset *);
167 static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
168 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
169 static int pf_commit_rules(u_int32_t, int, char *);
170 static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
171     int);
172 static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
173 static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
174     struct pf_state *);
175 static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
176     struct pf_state *);
177 static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
178 static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
179 static void pf_expire_states_and_src_nodes(struct pf_rule *);
180 static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
181     int, struct pf_rule *);
182 static void pf_addrwrap_setup(struct pf_addr_wrap *);
183 static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
184     struct pf_ruleset *);
185 static void pf_delete_rule_by_owner(char *, u_int32_t);
186 static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
187 static void pf_ruleset_cleanup(struct pf_ruleset *, int);
188 static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
189     int, struct pf_rule **);
190 
/* -1 asks cdevsw_add() to dynamically pick a free major number. */
#define PF_CDEV_MAJOR   (-1)

/*
 * Character-device switch for the PF control devices (/dev/pf and
 * /dev/pfm): only open, close and ioctl are implemented; every other
 * entry point is an error stub.
 */
static const struct cdevsw pf_cdevsw = {
	.d_open       = pfopen,
	.d_close      = pfclose,
	.d_read       = eno_rdwrt,
	.d_write      = eno_rdwrt,
	.d_ioctl      = pfioctl,
	.d_stop       = eno_stop,
	.d_reset      = eno_reset,
	.d_ttys       = NULL,
	.d_select     = eno_select,
	.d_mmap       = eno_mmap,
	.d_strategy   = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
	.d_type       = 0
};
209 
210 static void pf_attach_hooks(void);
211 #if 0
212 /* currently unused along with pfdetach() */
213 static void pf_detach_hooks(void);
214 #endif
215 
216 /*
217  * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
218  * and used in pf_af_hook() for performance optimization, such that packets
219  * will enter pf_test() or pf_test6() only when PF is running.
220  */
int pf_is_enabled = 0;

u_int32_t pf_hash_seed;          /* randomized once in pfinit() */
/* NOTE(review): appears to track whether NAT64 rules are loaded —
 * confirm against the rule-install path (not visible in this chunk). */
int16_t pf_nat64_configured = 0;

/*
 * These are the pf enabled reference counting variables
 */
/* Upper bound on outstanding enable-tokens; sized so the token array
 * copied out by DIOCGETSTARTERS cannot overflow an int byte count. */
#define NR_TOKENS_LIMIT (INT_MAX / sizeof(struct pfioc_token))

static u_int64_t pf_enabled_ref_count;
static u_int32_t nr_tokens = 0;  /* length of token_list_head; pf_lock */
static u_int32_t pffwrules;
static u_int32_t pfdevcnt;       /* exclusive-open count for /dev/pfm */

SLIST_HEAD(list_head, pfioc_kernel_token);
static struct list_head token_list_head;

struct pf_rule           pf_default_rule;

/* Mapping of a well-known tag name to its fixed, reserved tag id. */
typedef struct {
	char tag_name[PF_TAG_NAME_SIZE];
	uint16_t tag_id;
} pf_reserved_tag_table_t;

#define NUM_RESERVED_TAGS    2
static pf_reserved_tag_table_t pf_reserved_tag_table[NUM_RESERVED_TAGS] = {
	{ PF_TAG_NAME_SYSTEM_SERVICE, PF_TAG_ID_SYSTEM_SERVICE},
	{ PF_TAG_NAME_STACK_DROP, PF_TAG_ID_STACK_DROP},
};
#define RESERVED_TAG_ID_MIN    PF_TAG_ID_SYSTEM_SERVICE

/* Dynamically allocated tag ids live in [1, DYNAMIC_TAG_ID_MAX];
 * reserved ids sit above this range (see _CASSERT in tagname2tag()). */
#define DYNAMIC_TAG_ID_MAX    50000
static TAILQ_HEAD(pf_tags, pf_tagname)  pf_tags =
    TAILQ_HEAD_INITIALIZER(pf_tags);
256 
257 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
258 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
259 #endif
260 static u_int16_t         tagname2tag(struct pf_tags *, char *);
261 static void              tag_unref(struct pf_tags *, u_int16_t);
262 static int               pf_rtlabel_add(struct pf_addr_wrap *);
263 static void              pf_rtlabel_remove(struct pf_addr_wrap *);
264 static void              pf_rtlabel_copyout(struct pf_addr_wrap *);
265 
266 #if INET
267 static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
268     struct ip_fw_args *);
269 #endif /* INET */
270 static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
271     struct ip_fw_args *);
272 
/* Debug printf, gated on the global pf_status.debug level. */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

/*
 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
 */
/* Declare a pointer to a heap-backed union able to hold either the
 * 32-bit or the 64-bit layout of ioctl structure `s'. */
#define PFIOCX_STRUCT_DECL(s)                                           \
struct {                                                                \
	union {                                                         \
	        struct s##_32	_s##_32;                                \
	        struct s##_64	_s##_64;                                \
	} _u;                                                           \
} *s##_un = NULL                                                        \

/* Allocate the union and copy the caller's structure in, choosing the
 * layout by the surrounding function's `p64' flag. */
#define PFIOCX_STRUCT_BEGIN(a, s) {                                     \
	VERIFY(s##_un == NULL);                                         \
	s##_un = kalloc_type(typeof(*s##_un), Z_WAITOK_ZERO_NOFAIL);    \
	if (p64)                                                        \
	        bcopy(a, &s##_un->_u._s##_64,                           \
	            sizeof (struct s##_64));                            \
	else                                                            \
	        bcopy(a, &s##_un->_u._s##_32,                           \
	            sizeof (struct s##_32));                            \
}

/* Copy the (possibly updated) structure back to `a' and free the
 * union allocated by PFIOCX_STRUCT_BEGIN(). */
#define PFIOCX_STRUCT_END(s, a) {                                       \
	VERIFY(s##_un != NULL);                                         \
	if (p64)                                                        \
	        bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64));  \
	else                                                            \
	        bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32));  \
	kfree_type(typeof(*s##_un), s##_un);                            \
}

/* Accessors for the 32-/64-bit views of the union declared above. */
#define PFIOCX_STRUCT_ADDR32(s)         (&s##_un->_u._s##_32)
#define PFIOCX_STRUCT_ADDR64(s)         (&s##_un->_u._s##_64)

/*
 * Helper macros for regular ioctl structures.
 */
/* Heap-copy the fixed-layout ioctl structure `a' into `v'. */
#define PFIOC_STRUCT_BEGIN(a, v) {                                      \
	VERIFY((v) == NULL);                                            \
	(v) = kalloc_type(typeof(*(v)), Z_WAITOK_ZERO_NOFAIL);          \
	bcopy(a, v, sizeof (*(v)));                                     \
}

/* Copy `v' back out to `a' and free it. */
#define PFIOC_STRUCT_END(v, a) {                                        \
	VERIFY((v) != NULL);                                            \
	bcopy(v, a, sizeof (*(v)));                                     \
	kfree_type(typeof(*(v)), v);                                    \
}

#define PFIOC_STRUCT_ADDR32(s)          (&s##_un->_u._s##_32)
#define PFIOC_STRUCT_ADDR64(s)          (&s##_un->_u._s##_64)
326 
327 struct thread *pf_purge_thread;
328 
329 extern void pfi_kifaddr_update(void *);
330 
331 /* pf enable ref-counting helper functions */
332 static u_int64_t                generate_token(struct proc *);
333 static int                      remove_token(struct pfioc_remove_token *);
334 static void                     invalidate_all_tokens(void);
335 
336 static u_int64_t
generate_token(struct proc * p)337 generate_token(struct proc *p)
338 {
339 	u_int64_t token_value;
340 	struct pfioc_kernel_token *new_token;
341 
342 	if (nr_tokens + 1 > NR_TOKENS_LIMIT) {
343 		os_log_error(OS_LOG_DEFAULT, "%s: NR_TOKENS_LIMIT reached", __func__);
344 		return 0;
345 	}
346 
347 	new_token = kalloc_type(struct pfioc_kernel_token,
348 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
349 
350 	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
351 
352 	token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);
353 
354 	new_token->token.token_value = token_value;
355 	new_token->token.pid = proc_pid(p);
356 	proc_name(new_token->token.pid, new_token->token.proc_name,
357 	    sizeof(new_token->token.proc_name));
358 	new_token->token.timestamp = pf_calendar_time_second();
359 
360 	SLIST_INSERT_HEAD(&token_list_head, new_token, next);
361 	nr_tokens++;
362 
363 	return token_value;
364 }
365 
366 static int
remove_token(struct pfioc_remove_token * tok)367 remove_token(struct pfioc_remove_token *tok)
368 {
369 	struct pfioc_kernel_token *entry, *tmp;
370 
371 	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
372 
373 	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
374 		if (tok->token_value == entry->token.token_value) {
375 			SLIST_REMOVE(&token_list_head, entry,
376 			    pfioc_kernel_token, next);
377 			kfree_type(struct pfioc_kernel_token, entry);
378 			nr_tokens--;
379 			return 0;    /* success */
380 		}
381 	}
382 
383 	printf("pf : remove failure\n");
384 	return ESRCH;    /* failure */
385 }
386 
387 static void
invalidate_all_tokens(void)388 invalidate_all_tokens(void)
389 {
390 	struct pfioc_kernel_token *entry, *tmp;
391 
392 	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
393 
394 	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
395 		SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
396 		kfree_type(struct pfioc_kernel_token, entry);
397 	}
398 
399 	nr_tokens = 0;
400 }
401 
/*
 * One-time PF initialization, run at boot: creates the allocation
 * pools, initializes the table/interface/OS-fingerprint subsystems,
 * seeds the default rule and timeout table, starts the purge thread,
 * and registers the /dev/pf and /dev/pfm character devices.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* Fixed-size allocation pools for the core PF objects. */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof(struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	/* Cap the number of states at the configured hard limit. */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* Use a smaller table-entry high-water mark on <= 256 MB systems. */
	if (max_mem <= 256 * 1024 * 1024) {
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;
	}

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	/* Compile-time checks that each service class maps onto its
	 * index constant under SCIDX_MASK. */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf");

	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm");

	pf_attach_hooks();
#if DUMMYNET
	dummynet_init();
#endif
}
514 
#if 0
/*
 * Full teardown of the PF subsystem (currently compiled out, together
 * with its forward declaration and pf_detach_hooks()).  Teardown order
 * mirrors pfinit() in reverse: stop, flush rules/states/source
 * nodes/tables, destroy anchors and rulesets, then the pools and
 * subsystems.
 */
static void
pfdetach(void)
{
	struct pf_anchor        *anchor;
	struct pf_state         *state;
	struct pf_src_node      *node;
	struct pfioc_table      pt;
	u_int32_t               ticket;
	int                     i;
	char                    r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++) {
		if (pf_begin_rules(&ticket, i, &r) == 0) {
			pf_commit_rules(ticket, i, &r);
		}
	}

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++) {
			if (pf_begin_rules(&ticket, i, anchor->name) == 0) {
				pf_commit_rules(ticket, i, anchor->name);
			}
		}
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
592 
593 static int
pfopen(dev_t dev,int flags,int fmt,struct proc * p)594 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
595 {
596 #pragma unused(flags, fmt, p)
597 	if (minor(dev) >= PFDEV_MAX) {
598 		return ENXIO;
599 	}
600 
601 	if (minor(dev) == PFDEV_PFM) {
602 		lck_mtx_lock(&pf_lock);
603 		if (pfdevcnt != 0) {
604 			lck_mtx_unlock(&pf_lock);
605 			return EBUSY;
606 		}
607 		pfdevcnt++;
608 		lck_mtx_unlock(&pf_lock);
609 	}
610 	return 0;
611 }
612 
613 static int
pfclose(dev_t dev,int flags,int fmt,struct proc * p)614 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
615 {
616 #pragma unused(flags, fmt, p)
617 	if (minor(dev) >= PFDEV_MAX) {
618 		return ENXIO;
619 	}
620 
621 	if (minor(dev) == PFDEV_PFM) {
622 		lck_mtx_lock(&pf_lock);
623 		VERIFY(pfdevcnt > 0);
624 		pfdevcnt--;
625 		lck_mtx_unlock(&pf_lock);
626 	}
627 	return 0;
628 }
629 
630 static struct pf_pool *
pf_get_pool(char * anchor,u_int32_t ticket,u_int8_t rule_action,u_int32_t rule_number,u_int8_t r_last,u_int8_t active,u_int8_t check_ticket)631 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
632     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
633     u_int8_t check_ticket)
634 {
635 	struct pf_ruleset       *ruleset;
636 	struct pf_rule          *rule;
637 	int                      rs_num;
638 	struct pf_pool          *p = NULL;
639 
640 	ruleset = pf_find_ruleset(anchor);
641 	if (ruleset == NULL) {
642 		goto done;
643 	}
644 	rs_num = pf_get_ruleset_number(rule_action);
645 	if (rs_num >= PF_RULESET_MAX) {
646 		goto done;
647 	}
648 	if (active) {
649 		if (check_ticket && ticket !=
650 		    ruleset->rules[rs_num].active.ticket) {
651 			goto done;
652 		}
653 		if (r_last) {
654 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
655 			    pf_rulequeue);
656 		} else {
657 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
658 		}
659 	} else {
660 		if (check_ticket && ticket !=
661 		    ruleset->rules[rs_num].inactive.ticket) {
662 			goto done;
663 		}
664 		if (r_last) {
665 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
666 			    pf_rulequeue);
667 		} else {
668 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
669 		}
670 	}
671 	if (!r_last) {
672 		while ((rule != NULL) && (rule->nr != rule_number)) {
673 			rule = TAILQ_NEXT(rule, entries);
674 		}
675 	}
676 	if (rule == NULL) {
677 		goto done;
678 	}
679 
680 	p = &rule->rpool;
681 done:
682 
683 	if (ruleset) {
684 		pf_release_ruleset(ruleset);
685 		ruleset = NULL;
686 	}
687 
688 	return p;
689 }
690 
691 static void
pf_mv_pool(struct pf_palist * poola,struct pf_palist * poolb)692 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
693 {
694 	struct pf_pooladdr      *mv_pool_pa;
695 
696 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
697 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
698 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
699 	}
700 }
701 
702 static void
pf_empty_pool(struct pf_palist * poola)703 pf_empty_pool(struct pf_palist *poola)
704 {
705 	struct pf_pooladdr      *empty_pool_pa;
706 
707 	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
708 		pfi_dynaddr_remove(&empty_pool_pa->addr);
709 		pf_tbladdr_remove(&empty_pool_pa->addr);
710 		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
711 		TAILQ_REMOVE(poola, empty_pool_pa, entries);
712 		pool_put(&pf_pooladdr_pl, empty_pool_pa);
713 	}
714 }
715 
/*
 * Unlink `rule' from `rulequeue' (when non-NULL) and free it once it
 * is no longer referenced by states, source nodes, or a queue.  With a
 * NULL rulequeue the rule is treated as already detached and its table
 * attachments are released here instead.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl) {
				pfr_detach_table(rule->overload_tbl);
			}
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as off-queue (checked below). */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* Still referenced by states/source nodes, or still queued:
	 * defer the actual free until the last reference drops. */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL) {
		return;
	}
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* Detached caller path: tables were not removed above. */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl) {
			pfr_detach_table(rule->overload_tbl);
		}
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
759 
/*
 * Look up `tagname' in `head', allocating a new numeric tag if it is
 * not present, and take a reference.  Reserved names map to fixed ids
 * above DYNAMIC_TAG_ID_MAX (enforced by the _CASSERT below); dynamic
 * ids come from the first free slot in [1, DYNAMIC_TAG_ID_MAX].
 * Returns 0 when no dynamic id is available.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname       *tag, *p = NULL;
	uint16_t                 new_tagid = 1;
	bool                     reserved_tag = false;

	/* Existing tag: just take another reference. */
	TAILQ_FOREACH(tag, head, entries)
	if (strcmp(tagname, tag->name) == 0) {
		tag->ref++;
		return tag->tag;
	}

	/*
	 * check if it is a reserved tag.
	 */
	_CASSERT(RESERVED_TAG_ID_MIN > DYNAMIC_TAG_ID_MAX);
	for (int i = 0; i < NUM_RESERVED_TAGS; i++) {
		if (strncmp(tagname, pf_reserved_tag_table[i].tag_name,
		    PF_TAG_NAME_SIZE) == 0) {
			new_tagid = pf_reserved_tag_table[i].tag_id;
			reserved_tag = true;
			goto skip_dynamic_tag_alloc;
		}
	}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head)) {
		/* skip reserved tags */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag >= RESERVED_TAG_ID_MIN;
		    p = TAILQ_NEXT(p, entries)) {
			;
		}

		/* Dynamic entries are kept in ascending id order; advance
		 * past consecutive ids until the first gap (or list end).
		 * Afterwards `p' is the insertion point, if any. */
		for (; p != NULL && p->tag == new_tagid;
		    p = TAILQ_NEXT(p, entries)) {
			new_tagid = p->tag + 1;
		}
	}

	if (new_tagid > DYNAMIC_TAG_ID_MAX) {
		return 0;
	}

skip_dynamic_tag_alloc:
	/* allocate and fill new struct pf_tagname */
	tag = kalloc_type(struct pf_tagname, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (reserved_tag) { /* insert reserved tag at the head */
		TAILQ_INSERT_HEAD(head, tag, entries);
	} else if (p != NULL) { /* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	} else { /* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);
	}

	return tag->tag;
}
828 
829 static void
tag_unref(struct pf_tags * head,u_int16_t tag)830 tag_unref(struct pf_tags *head, u_int16_t tag)
831 {
832 	struct pf_tagname       *p, *next;
833 
834 	if (tag == 0) {
835 		return;
836 	}
837 
838 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
839 		next = TAILQ_NEXT(p, entries);
840 		if (tag == p->tag) {
841 			if (--p->ref == 0) {
842 				TAILQ_REMOVE(head, p, entries);
843 				kfree_type(struct pf_tagname, p);
844 			}
845 			break;
846 		}
847 	}
848 }
849 
/*
 * Look up or allocate the numeric tag for `tagname' in the global
 * pf_tags list, taking a reference; returns 0 on allocation failure.
 * NOTE(review): callers appear to hold pf_lock (see
 * pf_tagname2tag_ext()) — confirm for any new call site.
 */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return tagname2tag(&pf_tags, tagname);
}
855 
/*
 * Self-locking wrapper around pf_tagname2tag(): takes pf_perim_lock
 * (exclusive) and pf_lock around the tag allocation, for callers
 * outside the ioctl path.
 */
u_int16_t
pf_tagname2tag_ext(char *tagname)
{
	u_int16_t       tag;

	lck_rw_lock_exclusive(&pf_perim_lock);
	lck_mtx_lock(&pf_lock);
	tag = pf_tagname2tag(tagname);
	lck_mtx_unlock(&pf_lock);
	lck_rw_done(&pf_perim_lock);
	return tag;
}
868 
869 void
pf_tag_ref(u_int16_t tag)870 pf_tag_ref(u_int16_t tag)
871 {
872 	struct pf_tagname *t;
873 
874 	TAILQ_FOREACH(t, &pf_tags, entries)
875 	if (t->tag == tag) {
876 		break;
877 	}
878 	if (t != NULL) {
879 		t->ref++;
880 	}
881 }
882 
/*
 * Release one reference on `tag' in the global pf_tags list; the
 * entry is freed when the last reference drops (see tag_unref()).
 */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
888 
/*
 * Route labels are not supported on this platform; stub kept for
 * source compatibility with the OpenBSD original.  Always succeeds.
 */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return 0;
}
895 
/* Route-label removal stub (labels unsupported on this platform). */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
901 
/* Route-label copyout stub (labels unsupported on this platform). */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
907 
/*
 * Open a transaction on the inactive ruleset of `anchor' for ruleset
 * type `rs_num': flush any rules left over from a previous aborted
 * transaction, bump the inactive ticket (returned through *ticket)
 * and mark the inactive set open.
 * Returns EINVAL for a bad rs_num or if the ruleset cannot be
 * found/created; 0 otherwise.
 */
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset       *rs;
	struct pf_rule          *rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		return EINVAL;
	}
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL) {
		return EINVAL;
	}
	/* Discard stale inactive rules from an earlier, unfinished run. */
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	/* The new ticket identifies this transaction until commit/rollback. */
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	pf_release_ruleset(rs);
	rs = NULL;
	return 0;
}
931 
/*
 * Abort an open transaction on the inactive ruleset of `anchor': when
 * `ticket' matches the open transaction, flush the inactive rules and
 * close the set.  An unknown anchor, a closed set, or a stale ticket
 * is silently ignored (returns 0); only a bad rs_num yields EINVAL.
 */
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset       *rs = NULL;
	struct pf_rule          *rule;
	int                     err = 0;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		err = EINVAL;
		goto done;
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket) {
		goto done;
	}
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;

done:
	if (rs) {
		pf_release_ruleset(rs);
		rs = NULL;
	}
	return err;
}
961 
/*
 * Helpers for folding rule fields into an MD5 context when computing
 * the ruleset checksum (see pf_setup_pfsync_matching()).  The HTONL/
 * HTONS variants hash multi-byte values in network byte order so the
 * resulting digest is independent of host endianness.  All expand
 * against a local `ctx' (MD5_CTX *) in the calling function.
 */
#define PF_MD5_UPD(st, elm)                                             \
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

/* Hash a NUL-terminated string field (terminator excluded). */
#define PF_MD5_UPD_STR(st, elm)                                         \
	MD5Update(ctx, (u_int8_t *)(st)->elm, (unsigned int)strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {                            \
	(stor) = htonl((st)->elm);                                      \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t));        \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {                            \
	(stor) = htons((st)->elm);                                      \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t));        \
} while (0)
977 
/*
 * Fold the address half of a rule (src or dst) into the MD5 context:
 * the address representation (by type), the TCP/UDP port range and
 * operator, and the negation flag.  The update order is part of the
 * checksum format — do not reorder.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/* Ports only contribute for protocols that carry them. */
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
1014 
/*
 * Fold the identity-defining fields of a rule into the MD5 context.
 * The field order and byte order (HTONS/HTONL) are part of the
 * checksum format shared with pfsync peers — do not reorder or add
 * fields without considering compatibility.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;    /* scratch for byte-swapped 16-bit fields */
	u_int32_t y;    /* scratch for byte-swapped 32-bit fields */

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1053 
/*
 * Commit an open transaction: atomically swap the inactive ruleset
 * (built by the caller) into the active slot, then purge the previous
 * active rules.  For the main ruleset the pfsync checksum is
 * recomputed first.  Must be called with the pf mutex held.
 * Returns EINVAL for a bad rs_num, EBUSY for an unknown anchor, a
 * closed inactive set, or a ticket mismatch.
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset       *rs = NULL;
	struct pf_rule          *rule, **old_array, *r;
	struct pf_rulequeue     *old_rules;
	int                      error = 0;
	u_int32_t                old_rcount;
	u_int32_t                old_rsize;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		error = EINVAL;
		goto done;
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket) {
		error = EBUSY;
		goto done;
	}

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0) {
			goto done;
		}
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_rsize  = rs->rules[rs_num].active.rsize;
	old_array = rs->rules[rs_num].active.ptr_array;

	/*
	 * Keep pffwrules in step with the PFM-flagged rules that are
	 * about to be purged along with the old active set.
	 */
	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM) {
				pffwrules--;
			}
			r = TAILQ_NEXT(r, entries);
		}
	}


	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rsize =
	    rs->rules[rs_num].inactive.rsize;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;
	rs->rules[rs_num].inactive.rsize = old_rsize;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	/* Skip-step optimization must be recomputed for the new rules. */
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
		pf_rm_rule(old_rules, rule);
	}
	kfree_type(struct pf_rule *, rs->rules[rs_num].inactive.rsize,
	    rs->rules[rs_num].inactive.ptr_array);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.rsize = 0;
	rs->rules[rs_num].inactive.open = 0;

done:
	if (rs) {
		pf_release_ruleset(rs);
	}
	return error;
}
1137 
/*
 * Sanitize a rule copied in from userland before it is linked into a
 * ruleset: force NUL-termination on every user-supplied string field,
 * stamp ownership (creating uid/pid), and clear all kernel-private
 * pointers so stale userland values can never be dereferenced.
 * Rules created through the PFM minor device are flagged PFRULE_PFM.
 */
static void
pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
    int minordev)
{
	bcopy(src, dst, sizeof(struct pf_rule));

	/* Userland strings are untrusted; terminate each one. */
	dst->label[sizeof(dst->label) - 1] = '\0';
	dst->ifname[sizeof(dst->ifname) - 1] = '\0';
	dst->qname[sizeof(dst->qname) - 1] = '\0';
	dst->pqname[sizeof(dst->pqname) - 1] = '\0';
	dst->tagname[sizeof(dst->tagname) - 1] = '\0';
	dst->match_tagname[sizeof(dst->match_tagname) - 1] = '\0';
	dst->overload_tblname[sizeof(dst->overload_tblname) - 1] = '\0';
	dst->owner[sizeof(dst->owner) - 1] = '\0';

	/* Record which credential/process created the rule. */
	dst->cuid = kauth_cred_getuid(kauth_cred_get());
	dst->cpid = proc_getpid(p);

	/* Kernel-only pointers are resolved later, never taken from userland. */
	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	/* initialize refcounting */
	dst->states = 0;
	dst->src_nodes = 0;

	/* Not yet linked into any rule queue. */
	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	if ((uint8_t)minordev == PFDEV_PFM) {
		dst->rule_flag |= PFRULE_PFM;
	}
}
1173 
1174 static void
pf_rule_copyout(struct pf_rule * src,struct pf_rule * dst)1175 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1176 {
1177 	bcopy(src, dst, sizeof(struct pf_rule));
1178 
1179 	dst->anchor = NULL;
1180 	dst->kif = NULL;
1181 	dst->overload_tbl = NULL;
1182 
1183 	dst->rpool.list.tqh_first = NULL;
1184 	dst->rpool.list.tqh_last = NULL;
1185 	dst->rpool.cur = NULL;
1186 
1187 	dst->entries.tqe_prev = NULL;
1188 	dst->entries.tqe_next = NULL;
1189 }
1190 
/*
 * Flatten a kernel pf state (`s' plus its state key `sk') into the
 * userland/wire representation `sp' (struct pfsync_state): copies
 * addresses/ports from the key, counters in pfsync format, and
 * converts absolute times to relative seconds.
 */
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext_lan.addr = sk->ext_lan.addr;
	sp->ext_lan.xport = sk->ext_lan.xport;
	sp->ext_gwy.addr = sk->ext_gwy.addr;
	sp->ext_gwy.xport = sk->ext_gwy.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af_lan = sk->af_lan;
	sp->af_gwy = sk->af_gwy;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	/* Rule references exported as rule numbers; -1 means "none". */
	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	/* Export age of the state rather than its absolute creation time. */
	sp->creation = secs - s->creation;
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node) {
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	}
	if (s->nat_src_node) {
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
	}

	/* Report expiry as seconds remaining; 0 if already expired. */
	if (sp->expire > secs) {
		sp->expire -= secs;
	} else {
		sp->expire = 0;
	}
}
1251 
/*
 * Inverse of pf_state_export(): populate kernel state `s' and state
 * key `sk' from the userland representation `sp'.  Rule pointers are
 * reset to the default rule and traffic counters restart from zero.
 */
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext_lan.addr = sp->ext_lan.addr;
	sk->ext_lan.xport = sp->ext_lan.xport;
	sk->ext_gwy.addr = sp->ext_gwy.addr;
	sk->ext_gwy.xport = sp->ext_gwy.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af_lan = sp->af_lan;
	sk->af_gwy = sp->af_gwy;
	sk->direction = sp->direction;
	/* The flowhash is assigned at state-key creation, not imported. */
	ASSERT(sk->flowsrc == FLOWSRC_PF);
	ASSERT(sk->flowhash != 0);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = pf_time_second();
	s->expire = pf_time_second();
	/*
	 * Reconstruct the absolute expiry from the remaining lifetime:
	 * now - (full timeout - seconds remaining).
	 * NOTE(review): assumes sp->timeout is a valid index into
	 * pf_default_rule.timeout[] — confirm the ioctl path validates it.
	 */
	if (sp->expire > 0) {
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	}
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
1293 
1294 static void
pf_pooladdr_copyin(struct pf_pooladdr * src,struct pf_pooladdr * dst)1295 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1296 {
1297 	bcopy(src, dst, sizeof(struct pf_pooladdr));
1298 
1299 	dst->entries.tqe_prev = NULL;
1300 	dst->entries.tqe_next = NULL;
1301 	dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1302 	dst->kif = NULL;
1303 }
1304 
1305 static void
pf_pooladdr_copyout(struct pf_pooladdr * src,struct pf_pooladdr * dst)1306 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1307 {
1308 	bcopy(src, dst, sizeof(struct pf_pooladdr));
1309 
1310 	dst->entries.tqe_prev = NULL;
1311 	dst->entries.tqe_next = NULL;
1312 	dst->kif = NULL;
1313 }
1314 
/*
 * Rebuild each inactive ruleset's ptr_array (rule number -> rule
 * pointer index) and compute an MD5 digest over all inactive rulesets
 * except SCRUB.  The digest is stored in pf_status.pf_chksum and lets
 * pfsync peers match rules by checksum.
 * Returns ENOMEM if an index array cannot be (re)allocated; otherwise 0.
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX                  ctx;
	struct pf_rule          *rule;
	int                      rs_cnt;
	u_int8_t                 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB) {
			continue;
		}

		/* Z_REALLOCF frees the old array if the reallocation fails. */
		rs->rules[rs_cnt].inactive.ptr_array = krealloc_type(struct pf_rule *,
		    rs->rules[rs_cnt].inactive.rsize, rs->rules[rs_cnt].inactive.rcount,
		    rs->rules[rs_cnt].inactive.ptr_array, Z_WAITOK | Z_REALLOCF);

		if (rs->rules[rs_cnt].inactive.rcount &&
		    !rs->rules[rs_cnt].inactive.ptr_array) {
			rs->rules[rs_cnt].inactive.rsize = 0;
			return ENOMEM;
		}
		/* rsize tracks the allocated element count for later kfree_type. */
		rs->rules[rs_cnt].inactive.rsize =
		    rs->rules[rs_cnt].inactive.rcount;

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return 0;
}
1353 
/*
 * Enable pf: flip the running/enabled flags, record the start time,
 * seed the 64-bit state-id counter once (upper half from the clock),
 * and wake the purge thread.  Caller must hold the pf mutex and pf
 * must currently be disabled.
 */
static void
pf_start(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	if (pf_status.stateid == 0) {
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
#if SKYWALK && defined(XNU_TARGET_OS_OSX)
	/* Tell the network filter event subsystem about rule compatibility. */
	net_filter_event_mark(NET_FILTER_EVENT_PF,
	    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
1375 
/*
 * Disable pf: clear the running/enabled flags, record the stop time,
 * and wake the purge thread so it can notice the state change.
 * Caller must hold the pf mutex and pf must currently be enabled.
 */
static void
pf_stop(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
#if SKYWALK && defined(XNU_TARGET_OS_OSX)
	/* Tell the network filter event subsystem about rule compatibility. */
	net_filter_event_mark(NET_FILTER_EVENT_PF,
	    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1393 
/*
 * Character-device ioctl entry point for /dev/pf.
 *
 * Order of operations:
 *  1. Require superuser credentials (EPERM otherwise).
 *  2. At securelevel > 1, restrict commands to the read-mostly list
 *     below (table commands pass only with PFR_FLAG_DUMMY).
 *  3. For descriptors opened without FWRITE, restrict to read-only
 *     commands (EACCES otherwise); dummy table ops are promoted to
 *     FWRITE so they take the exclusive lock.
 *  4. Take the pf perimeter rwlock (exclusive for writers, shared for
 *     readers) and the pf mutex, then dispatch `cmd' to the per-ioctl
 *     handler; the PFIOC*/PFIOCX* macros bridge 32/64-bit userland
 *     structure layouts.
 */
static int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
#pragma unused(dev)
	/* NOTE(review): dev is in fact used via minor(dev) below; the
	 * unused pragma looks stale — confirm before relying on it. */
	int p64 = proc_is64bit(p);
	int error = 0;
	int minordev = minor(dev);

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}

	/* XXX keep in sync with switch() below */
	if (securelevel > 1) {
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				break; /* dummy operation ok */
			}
			return EPERM;
		}
		default:
			return EPERM;
		}
	}

	/* Read-only descriptors may only issue the commands below. */
	if (!(flags & FWRITE)) {
		switch (cmd) {
		case DIOCSTART:
		case DIOCSTARTREF:
		case DIOCSTOP:
		case DIOCSTOPREF:
		case DIOCGETSTARTERS:
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return EACCES;
		}
		case DIOCGETRULE: {
			u_int32_t action;

			bcopy(&((struct pfioc_rule *)(void *)addr)->action,
			    &action, sizeof(action));

			/* PF_GET_CLR_CNTR mutates counters — needs FWRITE. */
			if (action == PF_GET_CLR_CNTR) {
				return EACCES;
			}
			break;
		}
		default:
			return EACCES;
		}
	}

	/* Writers serialize against all pf activity; readers share. */
	if (flags & FWRITE) {
		lck_rw_lock_exclusive(&pf_perim_lock);
	} else {
		lck_rw_lock_shared(&pf_perim_lock);
	}

	lck_mtx_lock(&pf_lock);

	switch (cmd) {
	case DIOCSTART:
		if (pf_status.running) {
			/*
			 * Increment the reference for a simple -e enable, so
			 * that even if other processes drop their references,
			 * pf will still be available to processes that turned
			 * it on without taking a reference
			 */
			if (nr_tokens == pf_enabled_ref_count) {
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			}
			error = EEXIST;
		} else if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			pf_start();
			pf_enabled_ref_count++;
			VERIFY(pf_enabled_ref_count != 0);
		}
		break;

	case DIOCSTARTREF:              /* u_int64_t */
		if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			u_int64_t token;

			/* small enough to be on stack */
			if ((token = generate_token(p)) != 0) {
				if (pf_is_enabled == 0) {
					pf_start();
				}
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			} else {
				error = ENOMEM;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: unable to generate token\n"));
			}
			bcopy(&token, addr, sizeof(token));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			pf_stop();
			pf_enabled_ref_count = 0;
			invalidate_all_tokens();
		}
		break;

	case DIOCSTOPREF:               /* struct pfioc_remove_token */
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			struct pfioc_remove_token pfrt;

			/* small enough to be on stack */
			bcopy(addr, &pfrt, sizeof(pfrt));
			if ((error = remove_token(&pfrt)) == 0) {
				VERIFY(pf_enabled_ref_count != 0);
				pf_enabled_ref_count--;
				/* return currently held references */
				pfrt.refcount = pf_enabled_ref_count;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: enabled refcount decremented\n"));
			} else {
				error = EINVAL;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: token mismatch\n"));
			}
			bcopy(&pfrt, addr, sizeof(pfrt));

			/* Last reference gone: shut pf down. */
			if (error == 0 && pf_enabled_ref_count == 0) {
				pf_stop();
			}
		}
		break;

	case DIOCGETSTARTERS: {         /* struct pfioc_tokens */
		PFIOCX_STRUCT_DECL(pfioc_tokens);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens);
		error = pfioctl_ioc_tokens(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_tokens),
		    PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
		PFIOCX_STRUCT_END(pfioc_tokens, addr);
		break;
	}

	case DIOCADDRULE:               /* struct pfioc_rule */
	case DIOCGETRULES:              /* struct pfioc_rule */
	case DIOCGETRULE:               /* struct pfioc_rule */
	case DIOCCHANGERULE:            /* struct pfioc_rule */
	case DIOCINSERTRULE:            /* struct pfioc_rule */
	case DIOCDELETERULE: {          /* struct pfioc_rule */
		struct pfioc_rule *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr);
		error = pfioctl_ioc_rule(cmd, minordev, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCCLRSTATES:             /* struct pfioc_state_kill */
	case DIOCKILLSTATES: {          /* struct pfioc_state_kill */
		struct pfioc_state_kill *psk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psk);
		error = pfioctl_ioc_state_kill(cmd, psk, p);
		PFIOC_STRUCT_END(psk, addr);
		break;
	}

	case DIOCADDSTATE:              /* struct pfioc_state */
	case DIOCGETSTATE: {            /* struct pfioc_state */
		struct pfioc_state *ps = NULL;

		PFIOC_STRUCT_BEGIN(addr, ps);
		error = pfioctl_ioc_state(cmd, ps, p);
		PFIOC_STRUCT_END(ps, addr);
		break;
	}

	case DIOCGETSTATES: {           /* struct pfioc_states */
		PFIOCX_STRUCT_DECL(pfioc_states);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_states);
		error = pfioctl_ioc_states(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_states),
		    PFIOCX_STRUCT_ADDR64(pfioc_states), p);
		PFIOCX_STRUCT_END(pfioc_states, addr);
		break;
	}

	case DIOCGETSTATUS: {           /* struct pf_status */
		struct pf_status *s = NULL;

		/* Source is the global pf_status, destination is `addr'. */
		PFIOC_STRUCT_BEGIN(&pf_status, s);
		pfi_update_status(s->ifname, s);
		PFIOC_STRUCT_END(s, addr);
		break;
	}

	case DIOCSETSTATUSIF: {         /* struct pfioc_if */
		struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;

		/* OK for unaligned accesses */
		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = pf_calendar_time_second();
		if (*pf_status.ifname) {
			pfi_update_status(pf_status.ifname, NULL);
		}
		break;
	}

	case DIOCNATLOOK: {             /* struct pfioc_natlook */
		struct pfioc_natlook *pnl = NULL;

		PFIOC_STRUCT_BEGIN(addr, pnl);
		error = pfioctl_ioc_natlook(cmd, pnl, p);
		PFIOC_STRUCT_END(pnl, addr);
		break;
	}

	case DIOCSETTIMEOUT:            /* struct pfioc_tm */
	case DIOCGETTIMEOUT: {          /* struct pfioc_tm */
		struct pfioc_tm pt;

		/* small enough to be on stack */
		bcopy(addr, &pt, sizeof(pt));
		error = pfioctl_ioc_tm(cmd, &pt, p);
		bcopy(&pt, addr, sizeof(pt));
		break;
	}

	case DIOCGETLIMIT:              /* struct pfioc_limit */
	case DIOCSETLIMIT: {            /* struct pfioc_limit */
		struct pfioc_limit pl;

		/* small enough to be on stack */
		bcopy(addr, &pl, sizeof(pl));
		error = pfioctl_ioc_limit(cmd, &pl, p);
		bcopy(&pl, addr, sizeof(pl));
		break;
	}

	case DIOCSETDEBUG: {            /* u_int32_t */
		bcopy(addr, &pf_status.debug, sizeof(u_int32_t));
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset       *ruleset = &pf_main_ruleset;
		struct pf_rule          *rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCGIFSPEED: {
		struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
		struct pf_ifspeed ps;
		struct ifnet *ifp;
		u_int64_t baudrate;

		if (psp->ifname[0] != '\0') {
			/* Can we completely trust user-land? */
			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
			ps.ifname[IFNAMSIZ - 1] = '\0';
			ifp = ifunit(ps.ifname);
			if (ifp != NULL) {
				baudrate = ifp->if_output_bw.max_bw;
				bcopy(&baudrate, &psp->baudrate,
				    sizeof(baudrate));
			} else {
				error = EINVAL;
			}
		} else {
			error = EINVAL;
		}
		break;
	}

	case DIOCBEGINADDRS:            /* struct pfioc_pooladdr */
	case DIOCADDADDR:               /* struct pfioc_pooladdr */
	case DIOCGETADDRS:              /* struct pfioc_pooladdr */
	case DIOCGETADDR:               /* struct pfioc_pooladdr */
	case DIOCCHANGEADDR: {          /* struct pfioc_pooladdr */
		struct pfioc_pooladdr *pp = NULL;

		PFIOC_STRUCT_BEGIN(addr, pp);
		error = pfioctl_ioc_pooladdr(cmd, pp, p);
		PFIOC_STRUCT_END(pp, addr);
		break;
	}

	case DIOCGETRULESETS:           /* struct pfioc_ruleset */
	case DIOCGETRULESET: {          /* struct pfioc_ruleset */
		struct pfioc_ruleset *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr);
		error = pfioctl_ioc_ruleset(cmd, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCRCLRTABLES:            /* struct pfioc_table */
	case DIOCRADDTABLES:            /* struct pfioc_table */
	case DIOCRDELTABLES:            /* struct pfioc_table */
	case DIOCRGETTABLES:            /* struct pfioc_table */
	case DIOCRGETTSTATS:            /* struct pfioc_table */
	case DIOCRCLRTSTATS:            /* struct pfioc_table */
	case DIOCRSETTFLAGS:            /* struct pfioc_table */
	case DIOCRCLRADDRS:             /* struct pfioc_table */
	case DIOCRADDADDRS:             /* struct pfioc_table */
	case DIOCRDELADDRS:             /* struct pfioc_table */
	case DIOCRSETADDRS:             /* struct pfioc_table */
	case DIOCRGETADDRS:             /* struct pfioc_table */
	case DIOCRGETASTATS:            /* struct pfioc_table */
	case DIOCRCLRASTATS:            /* struct pfioc_table */
	case DIOCRTSTADDRS:             /* struct pfioc_table */
	case DIOCRINADEFINE: {          /* struct pfioc_table */
		PFIOCX_STRUCT_DECL(pfioc_table);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_table);
		error = pfioctl_ioc_table(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_table),
		    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
		PFIOCX_STRUCT_END(pfioc_table, addr);
		break;
	}

	case DIOCOSFPADD:               /* struct pf_osfp_ioctl */
	case DIOCOSFPGET: {             /* struct pf_osfp_ioctl */
		struct pf_osfp_ioctl *io = NULL;

		PFIOC_STRUCT_BEGIN(addr, io);
		if (cmd == DIOCOSFPADD) {
			error = pf_osfp_add(io);
		} else {
			VERIFY(cmd == DIOCOSFPGET);
			error = pf_osfp_get(io);
		}
		PFIOC_STRUCT_END(io, addr);
		break;
	}

	case DIOCXBEGIN:                /* struct pfioc_trans */
	case DIOCXROLLBACK:             /* struct pfioc_trans */
	case DIOCXCOMMIT: {             /* struct pfioc_trans */
		PFIOCX_STRUCT_DECL(pfioc_trans);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_trans);
		error = pfioctl_ioc_trans(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_trans),
		    PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
		PFIOCX_STRUCT_END(pfioc_trans, addr);
		break;
	}

	case DIOCGETSRCNODES: {         /* struct pfioc_src_nodes */
		PFIOCX_STRUCT_DECL(pfioc_src_nodes);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes);
		error = pfioctl_ioc_src_nodes(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
		    PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
		PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
		break;
	}

	case DIOCCLRSRCNODES: {
		struct pf_src_node      *n;
		struct pf_state         *state;

		/* Unhook every state from its source nodes... */
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		/* ...then mark all source nodes expired and purge them. */
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {        /* struct pfioc_src_node_kill */
		struct pfioc_src_node_kill *psnk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psnk);
		error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
		PFIOC_STRUCT_END(psnk, addr);
		break;
	}

	case DIOCSETHOSTID: {           /* u_int32_t */
		u_int32_t hid;

		/* small enough to be on stack */
		bcopy(addr, &hid, sizeof(hid));
		if (hid == 0) {
			pf_status.hostid = random();
		} else {
			pf_status.hostid = hid;
		}
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES:            /* struct pfioc_iface */
	case DIOCSETIFFLAG:             /* struct pfioc_iface */
	case DIOCCLRIFFLAG: {           /* struct pfioc_iface */
		PFIOCX_STRUCT_DECL(pfioc_iface);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_iface);
		error = pfioctl_ioc_iface(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_iface),
		    PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
		PFIOCX_STRUCT_END(pfioc_iface, addr);
		break;
	}

	default:
		error = ENODEV;
		break;
	}

	lck_mtx_unlock(&pf_lock);
	lck_rw_done(&pf_perim_lock);

	return error;
}
1944 
/*
 * Handle the DIOCR* table ioctls: clear/add/delete/get tables, table
 * stats and flags, and table-address manipulation.
 *
 * The ioctl payload layout differs between 32-bit and 64-bit callers,
 * so the caller passes both views of the same buffer (io32/io64) and
 * this routine dispatches on proc_is64bit(p).  The two switch bodies
 * below are exact mirrors of each other and must be kept in sync.
 *
 * Each command first validates pfrio_esize (the caller's idea of the
 * element size) against the kernel's structure size and returns ENODEV
 * on mismatch.  PFR_FLAG_USERIOCTL is OR'ed into the flags on every
 * backend call to mark the request as originating from userland.
 *
 * Returns 0 on success or an errno from the pfr_* backend.
 */
static int
pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
    struct pfioc_table_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	if (!p64) {
		goto struct32;
	}

#ifdef __LP64__
	/*
	 * 64-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
		    io64->pfrio_setflag, io64->pfrio_clrflag,
		    &io64->pfrio_nchange, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
		    &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
		    io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		/* caller guarantees only the commands above reach us */
		VERIFY(0);
		/* NOTREACHED */
	}
	goto done;
#else
#pragma unused(io64)
#endif /* __LP64__ */

struct32:
	/*
	 * 32-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
		    io32->pfrio_setflag, io32->pfrio_clrflag,
		    &io32->pfrio_nchange, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
		    &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
		    io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		/* caller guarantees only the commands above reach us */
		VERIFY(0);
		/* NOTREACHED */
	}
#ifdef __LP64__
done:
#endif
	return error;
}
2314 
/*
 * Handle the token ioctls (currently only DIOCGETSTARTERS): copy the
 * list of PF "starter" tokens out to the caller's buffer.
 *
 * tok32/tok64 are the 32-bit and 64-bit views of the same user payload;
 * fields are accessed per-ABI based on p64 = proc_is64bit(p).  A caller
 * passing size == 0 is asking only for the required buffer size, which
 * is written back without any copyout.  Otherwise as many whole
 * pfioc_token entries as fit in min(caller size, total size) are copied
 * out and the number of bytes actually written is stored back in size.
 *
 * Returns 0 on success, ENOENT if no tokens exist, ERANGE on size
 * overflow, ENOMEM on allocation failure, or a copyout error.
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		/* total bytes needed for all tokens; guard the multiply */
		size = sizeof(struct pfioc_token) * nr_tokens;
		if (size / nr_tokens != sizeof(struct pfioc_token)) {
			os_log_error(OS_LOG_DEFAULT, "%s: size overflows", __func__);
			error = ERANGE;
			break;
		}
		/* caller's buffer size; 0 means "report required size only" */
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			if (p64) {
				tok64->size = size;
			} else {
				tok32->size = size;
			}
			break;
		}

#ifdef __LP64__
		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
#else
		token_buf = tok32->pgt_buf;
#endif
		/* staging buffer sized for every token; cnt may stop us early */
		tokens = (struct pfioc_token *)kalloc_data(size, Z_WAITOK | Z_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof(*tokens)) {
				break;    /* no more buffer space left */
			}
			t = (struct pfioc_token *)(void *)ptr;
			t->token_value  = entry->token.token_value;
			t->timestamp    = entry->token.timestamp;
			t->pid          = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof(struct pfioc_token);

			cnt -= sizeof(struct pfioc_token);
		}

		/* ocnt - cnt == number of bytes staged above */
		if (cnt < ocnt) {
			error = copyout(tokens, token_buf, ocnt - cnt);
		}

		/* report back how many bytes were actually produced */
		if (p64) {
			tok64->size = ocnt - cnt;
		} else {
			tok32->size = ocnt - cnt;
		}

		kfree_data(tokens, size);
		break;
	}

	default:
		/* caller guarantees only the commands above reach us */
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
2400 
2401 static void
pf_expire_states_and_src_nodes(struct pf_rule * rule)2402 pf_expire_states_and_src_nodes(struct pf_rule *rule)
2403 {
2404 	struct pf_state         *state;
2405 	struct pf_src_node      *sn;
2406 	int                      killed = 0;
2407 
2408 	/* expire the states */
2409 	state = TAILQ_FIRST(&state_list);
2410 	while (state) {
2411 		if (state->rule.ptr == rule) {
2412 			state->timeout = PFTM_PURGE;
2413 		}
2414 		state = TAILQ_NEXT(state, entry_list);
2415 	}
2416 	pf_purge_expired_states(pf_status.states);
2417 
2418 	/* expire the src_nodes */
2419 	RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2420 		if (sn->rule.ptr != rule) {
2421 			continue;
2422 		}
2423 		if (sn->states != 0) {
2424 			RB_FOREACH(state, pf_state_tree_id,
2425 			    &tree_id) {
2426 				if (state->src_node == sn) {
2427 					state->src_node = NULL;
2428 				}
2429 				if (state->nat_src_node == sn) {
2430 					state->nat_src_node = NULL;
2431 				}
2432 			}
2433 			sn->states = 0;
2434 		}
2435 		sn->expire = 1;
2436 		killed++;
2437 	}
2438 	if (killed) {
2439 		pf_purge_expired_src_nodes();
2440 	}
2441 }
2442 
2443 static void
pf_delete_rule_from_ruleset(struct pf_ruleset * ruleset,int rs_num,struct pf_rule * rule)2444 pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2445     struct pf_rule *rule)
2446 {
2447 	struct pf_rule *r;
2448 	int nr = 0;
2449 
2450 	pf_expire_states_and_src_nodes(rule);
2451 
2452 	pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2453 	if (ruleset->rules[rs_num].active.rcount-- == 0) {
2454 		panic("%s: rcount value broken!", __func__);
2455 	}
2456 	r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2457 
2458 	while (r) {
2459 		r->nr = nr++;
2460 		r = TAILQ_NEXT(r, entries);
2461 	}
2462 }
2463 
2464 
2465 static void
pf_ruleset_cleanup(struct pf_ruleset * ruleset,int rs)2466 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2467 {
2468 	pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2469 	ruleset->rules[rs].active.ticket =
2470 	    ++ruleset->rules[rs].inactive.ticket;
2471 }
2472 
2473 /*
2474  * req_dev encodes the PF interface. Currently, possible values are
2475  * 0 or PFRULE_PFM
2476  */
2477 static int
pf_delete_rule_by_ticket(struct pfioc_rule * pr,u_int32_t req_dev)2478 pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
2479 {
2480 	struct pf_ruleset       *ruleset;
2481 	struct pf_rule          *rule = NULL;
2482 	int                      is_anchor;
2483 	int                      error = 0;
2484 	int                      i;
2485 
2486 	is_anchor = (pr->anchor_call[0] != '\0');
2487 	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
2488 	    pr->rule.owner, is_anchor, &error)) == NULL) {
2489 		goto done;
2490 	}
2491 
2492 	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
2493 		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
2494 		while (rule && (rule->ticket != pr->rule.ticket)) {
2495 			rule = TAILQ_NEXT(rule, entries);
2496 		}
2497 	}
2498 	if (rule == NULL) {
2499 		error = ENOENT;
2500 		goto done;
2501 	} else {
2502 		i--;
2503 	}
2504 
2505 	if (strcmp(rule->owner, pr->rule.owner)) {
2506 		error = EACCES;
2507 		goto done;
2508 	}
2509 
2510 delete_rule:
2511 	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
2512 	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
2513 	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
2514 		/* set rule & ruleset to parent and repeat */
2515 		struct pf_rule *delete_rule = rule;
2516 		struct pf_ruleset *delete_ruleset = ruleset;
2517 
2518 #define parent_ruleset          ruleset->anchor->parent->ruleset
2519 		if (ruleset->anchor->parent == NULL) {
2520 			ruleset = &pf_main_ruleset;
2521 		} else {
2522 			ruleset = &parent_ruleset;
2523 		}
2524 
2525 		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
2526 		while (rule &&
2527 		    (rule->anchor != delete_ruleset->anchor)) {
2528 			rule = TAILQ_NEXT(rule, entries);
2529 		}
2530 		if (rule == NULL) {
2531 			panic("%s: rule not found!", __func__);
2532 		}
2533 
2534 		/*
2535 		 * if reqest device != rule's device, bail :
2536 		 * with error if ticket matches;
2537 		 * without error if ticket doesn't match (i.e. its just cleanup)
2538 		 */
2539 		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
2540 			if (rule->ticket != pr->rule.ticket) {
2541 				goto done;
2542 			} else {
2543 				error = EACCES;
2544 				goto done;
2545 			}
2546 		}
2547 
2548 		if (delete_rule->rule_flag & PFRULE_PFM) {
2549 			pffwrules--;
2550 		}
2551 
2552 		pf_delete_rule_from_ruleset(delete_ruleset,
2553 		    i, delete_rule);
2554 		delete_ruleset->rules[i].active.ticket =
2555 		    ++delete_ruleset->rules[i].inactive.ticket;
2556 		goto delete_rule;
2557 	} else {
2558 		/*
2559 		 * process deleting rule only if device that added the
2560 		 * rule matches device that issued the request
2561 		 */
2562 		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
2563 			error = EACCES;
2564 			goto done;
2565 		}
2566 		if (rule->rule_flag & PFRULE_PFM) {
2567 			pffwrules--;
2568 		}
2569 		pf_delete_rule_from_ruleset(ruleset, i,
2570 		    rule);
2571 		pf_ruleset_cleanup(ruleset, i);
2572 	}
2573 
2574 done:
2575 	if (ruleset) {
2576 		pf_release_ruleset(ruleset);
2577 		ruleset = NULL;
2578 	}
2579 	return error;
2580 }
2581 
2582 /*
2583  * req_dev encodes the PF interface. Currently, possible values are
2584  * 0 or PFRULE_PFM
2585  */
2586 static void
pf_delete_rule_by_owner(char * owner,u_int32_t req_dev)2587 pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
2588 {
2589 	struct pf_ruleset       *ruleset;
2590 	struct pf_rule          *rule, *next;
2591 	int                      deleted = 0;
2592 
2593 	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
2594 		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
2595 		ruleset = &pf_main_ruleset;
2596 		while (rule) {
2597 			next = TAILQ_NEXT(rule, entries);
2598 			/*
2599 			 * process deleting rule only if device that added the
2600 			 * rule matches device that issued the request
2601 			 */
2602 			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
2603 				rule = next;
2604 				continue;
2605 			}
2606 			if (rule->anchor) {
2607 				if (((strcmp(rule->owner, owner)) == 0) ||
2608 				    ((strcmp(rule->owner, "")) == 0)) {
2609 					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2610 						if (deleted) {
2611 							pf_ruleset_cleanup(ruleset, rs);
2612 							deleted = 0;
2613 						}
2614 						/* step into anchor */
2615 						ruleset =
2616 						    &rule->anchor->ruleset;
2617 						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2618 						continue;
2619 					} else {
2620 						if (rule->rule_flag &
2621 						    PFRULE_PFM) {
2622 							pffwrules--;
2623 						}
2624 						pf_delete_rule_from_ruleset(ruleset, rs, rule);
2625 						deleted = 1;
2626 						rule = next;
2627 					}
2628 				} else {
2629 					rule = next;
2630 				}
2631 			} else {
2632 				if (((strcmp(rule->owner, owner)) == 0)) {
2633 					/* delete rule */
2634 					if (rule->rule_flag & PFRULE_PFM) {
2635 						pffwrules--;
2636 					}
2637 					pf_delete_rule_from_ruleset(ruleset,
2638 					    rs, rule);
2639 					deleted = 1;
2640 				}
2641 				rule = next;
2642 			}
2643 			if (rule == NULL) {
2644 				if (deleted) {
2645 					pf_ruleset_cleanup(ruleset, rs);
2646 					deleted = 0;
2647 				}
2648 				if (ruleset != &pf_main_ruleset) {
2649 					pf_deleterule_anchor_step_out(&ruleset,
2650 					    rs, &rule);
2651 				}
2652 			}
2653 		}
2654 	}
2655 }
2656 
2657 static void
pf_deleterule_anchor_step_out(struct pf_ruleset ** ruleset_ptr,int rs,struct pf_rule ** rule_ptr)2658 pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2659     int rs, struct pf_rule **rule_ptr)
2660 {
2661 	struct pf_ruleset *ruleset = *ruleset_ptr;
2662 	struct pf_rule *rule = *rule_ptr;
2663 
2664 	/* step out of anchor */
2665 	struct pf_ruleset *rs_copy = ruleset;
2666 	ruleset = ruleset->anchor->parent?
2667 	    &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2668 
2669 	rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2670 	while (rule && (rule->anchor != rs_copy->anchor)) {
2671 		rule = TAILQ_NEXT(rule, entries);
2672 	}
2673 	if (rule == NULL) {
2674 		panic("%s: parent rule of anchor not found!", __func__);
2675 	}
2676 	if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2677 		rule = TAILQ_NEXT(rule, entries);
2678 	}
2679 
2680 	*ruleset_ptr = ruleset;
2681 	*rule_ptr = rule;
2682 }
2683 
2684 static void
pf_addrwrap_setup(struct pf_addr_wrap * aw)2685 pf_addrwrap_setup(struct pf_addr_wrap *aw)
2686 {
2687 	VERIFY(aw);
2688 	bzero(&aw->p, sizeof aw->p);
2689 }
2690 
/*
 * Finish construction of a rule copied in from userland: resolve the
 * interface it names, allocate tags, set up dynamic/table addresses,
 * attach anchors and the overload table, and move the staged address
 * pool (pf_pabuf) onto the rule.
 *
 * Most failures only set 'error' and continue, so that all partially
 * attached resources exist on the rule by the time pf_rm_rule() tears
 * it down at the end; the one early return (unknown ifname) frees the
 * rule directly since nothing has been attached yet.
 *
 * Returns 0 on success; on failure returns an errno and the rule has
 * already been freed (callers must not touch it again).
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset)
{
	struct pf_pooladdr      *apa;
	int                      error = 0;

	/* resolve and reference the interface the rule is bound to */
	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			pool_put(&pf_rule_pl, rule);
			return EINVAL;
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
	if (rule->tagname[0]) {
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) {
			error = EBUSY;
		}
	}
	if (rule->match_tagname[0]) {
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0) {
			error = EBUSY;
		}
	}
	/* route-to options require an explicit direction */
	if (rule->rt && !rule->direction) {
		error = EINVAL;
	}
#if PFLOG
	if (!rule->log) {
		rule->logif = 0;
	}
	if (rule->logif >= PFLOGIFS_MAX) {
		error = EINVAL;
	}
#endif /* PFLOG */
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr)) {
		error = EBUSY;
	}
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) {
		error = EINVAL;
	}
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->src.addr)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) {
		error = EINVAL;
	}
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) {
		error = EINVAL;
	}
	/* resolve table addresses in the staged pool entries as well */
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
	if (pf_tbladdr_setup(ruleset, &apa->addr)) {
		error = EINVAL;
	}

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL) {
			error = EINVAL;
		} else {
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
		}
	}

	/* transfer ownership of the staged pool onto the rule */
	pf_mv_pool(&pf_pabuf, &rule->rpool.list);

	/* translation rules (outside anchors) and route-to need a pool */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
	    rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL)) {
		error = EINVAL;
	}

	if (error) {
		/* releases everything attached above, then frees the rule */
		pf_rm_rule(NULL, rule);
		return error;
	}
	/* For a NAT64 rule the rule's address family is AF_INET6 whereas
	 * the address pool's family will be AF_INET
	 */
	rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET: rule->af;
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return 0;
}
2788 
2789 static int
pfioctl_ioc_rule(u_long cmd,int minordev,struct pfioc_rule * pr,struct proc * p)2790 pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
2791 {
2792 	int error = 0;
2793 	u_int32_t req_dev = 0;
2794 	struct pf_ruleset *ruleset = NULL;
2795 
2796 	switch (cmd) {
2797 	case DIOCADDRULE: {
2798 		struct pf_rule          *rule, *tail;
2799 		int                     rs_num;
2800 
2801 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2802 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2803 		ruleset = pf_find_ruleset(pr->anchor);
2804 		if (ruleset == NULL) {
2805 			error = EINVAL;
2806 			break;
2807 		}
2808 		rs_num = pf_get_ruleset_number(pr->rule.action);
2809 		if (rs_num >= PF_RULESET_MAX) {
2810 			error = EINVAL;
2811 			break;
2812 		}
2813 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2814 			error = EINVAL;
2815 			break;
2816 		}
2817 		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
2818 			error = EBUSY;
2819 			break;
2820 		}
2821 		if (pr->pool_ticket != ticket_pabuf) {
2822 			error = EBUSY;
2823 			break;
2824 		}
2825 		rule = pool_get(&pf_rule_pl, PR_WAITOK);
2826 		if (rule == NULL) {
2827 			error = ENOMEM;
2828 			break;
2829 		}
2830 		pf_rule_copyin(&pr->rule, rule, p, minordev);
2831 #if !INET
2832 		if (rule->af == AF_INET) {
2833 			pool_put(&pf_rule_pl, rule);
2834 			error = EAFNOSUPPORT;
2835 			break;
2836 		}
2837 #endif /* INET */
2838 		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2839 		    pf_rulequeue);
2840 		if (tail) {
2841 			rule->nr = tail->nr + 1;
2842 		} else {
2843 			rule->nr = 0;
2844 		}
2845 
2846 		if ((error = pf_rule_setup(pr, rule, ruleset))) {
2847 			break;
2848 		}
2849 
2850 		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2851 		    rule, entries);
2852 		ruleset->rules[rs_num].inactive.rcount++;
2853 		if (rule->rule_flag & PFRULE_PFM) {
2854 			pffwrules++;
2855 		}
2856 
2857 		if (rule->action == PF_NAT64) {
2858 			atomic_add_16(&pf_nat64_configured, 1);
2859 		}
2860 
2861 		if (pr->anchor_call[0] == '\0') {
2862 			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
2863 			if (rule->rule_flag & PFRULE_PFM) {
2864 				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
2865 			}
2866 		}
2867 
2868 #if DUMMYNET
2869 		if (rule->action == PF_DUMMYNET) {
2870 			struct dummynet_event dn_event;
2871 			uint32_t direction = DN_INOUT;
2872 			bzero(&dn_event, sizeof(dn_event));
2873 
2874 			dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;
2875 
2876 			if (rule->direction == PF_IN) {
2877 				direction = DN_IN;
2878 			} else if (rule->direction == PF_OUT) {
2879 				direction = DN_OUT;
2880 			}
2881 
2882 			dn_event.dn_event_rule_config.dir = direction;
2883 			dn_event.dn_event_rule_config.af = rule->af;
2884 			dn_event.dn_event_rule_config.proto = rule->proto;
2885 			dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
2886 			dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
2887 			strlcpy(dn_event.dn_event_rule_config.ifname, rule->ifname,
2888 			    sizeof(dn_event.dn_event_rule_config.ifname));
2889 
2890 			dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2891 		}
2892 #endif
2893 		break;
2894 	}
2895 
2896 	case DIOCGETRULES: {
2897 		struct pf_rule          *tail;
2898 		int                      rs_num;
2899 
2900 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2901 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2902 		ruleset = pf_find_ruleset(pr->anchor);
2903 		if (ruleset == NULL) {
2904 			error = EINVAL;
2905 			break;
2906 		}
2907 		rs_num = pf_get_ruleset_number(pr->rule.action);
2908 		if (rs_num >= PF_RULESET_MAX) {
2909 			error = EINVAL;
2910 			break;
2911 		}
2912 		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2913 		    pf_rulequeue);
2914 		if (tail) {
2915 			pr->nr = tail->nr + 1;
2916 		} else {
2917 			pr->nr = 0;
2918 		}
2919 		pr->ticket = ruleset->rules[rs_num].active.ticket;
2920 		break;
2921 	}
2922 
2923 	case DIOCGETRULE: {
2924 		struct pf_rule          *rule;
2925 		int                      rs_num, i;
2926 
2927 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2928 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2929 		ruleset = pf_find_ruleset(pr->anchor);
2930 		if (ruleset == NULL) {
2931 			error = EINVAL;
2932 			break;
2933 		}
2934 		rs_num = pf_get_ruleset_number(pr->rule.action);
2935 		if (rs_num >= PF_RULESET_MAX) {
2936 			error = EINVAL;
2937 			break;
2938 		}
2939 		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
2940 			error = EBUSY;
2941 			break;
2942 		}
2943 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2944 		while ((rule != NULL) && (rule->nr != pr->nr)) {
2945 			rule = TAILQ_NEXT(rule, entries);
2946 		}
2947 		if (rule == NULL) {
2948 			error = EBUSY;
2949 			break;
2950 		}
2951 		pf_rule_copyout(rule, &pr->rule);
2952 		if (pf_anchor_copyout(ruleset, rule, pr)) {
2953 			error = EBUSY;
2954 			break;
2955 		}
2956 		pfi_dynaddr_copyout(&pr->rule.src.addr);
2957 		pfi_dynaddr_copyout(&pr->rule.dst.addr);
2958 		pf_tbladdr_copyout(&pr->rule.src.addr);
2959 		pf_tbladdr_copyout(&pr->rule.dst.addr);
2960 		pf_rtlabel_copyout(&pr->rule.src.addr);
2961 		pf_rtlabel_copyout(&pr->rule.dst.addr);
2962 		for (i = 0; i < PF_SKIP_COUNT; ++i) {
2963 			if (rule->skip[i].ptr == NULL) {
2964 				pr->rule.skip[i].nr = -1;
2965 			} else {
2966 				pr->rule.skip[i].nr =
2967 				    rule->skip[i].ptr->nr;
2968 			}
2969 		}
2970 
2971 		if (pr->action == PF_GET_CLR_CNTR) {
2972 			rule->evaluations = 0;
2973 			rule->packets[0] = rule->packets[1] = 0;
2974 			rule->bytes[0] = rule->bytes[1] = 0;
2975 		}
2976 		break;
2977 	}
2978 
2979 	case DIOCCHANGERULE: {
2980 		struct pfioc_rule       *pcr = pr;
2981 		struct pf_rule          *oldrule = NULL, *newrule = NULL;
2982 		struct pf_pooladdr      *pa;
2983 		u_int32_t                nr = 0;
2984 		int                      rs_num;
2985 
2986 		if (!(pcr->action == PF_CHANGE_REMOVE ||
2987 		    pcr->action == PF_CHANGE_GET_TICKET) &&
2988 		    pcr->pool_ticket != ticket_pabuf) {
2989 			error = EBUSY;
2990 			break;
2991 		}
2992 
2993 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
2994 		    pcr->action > PF_CHANGE_GET_TICKET) {
2995 			error = EINVAL;
2996 			break;
2997 		}
2998 		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
2999 		pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0';
3000 		ruleset = pf_find_ruleset(pcr->anchor);
3001 		if (ruleset == NULL) {
3002 			error = EINVAL;
3003 			break;
3004 		}
3005 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3006 		if (rs_num >= PF_RULESET_MAX) {
3007 			error = EINVAL;
3008 			break;
3009 		}
3010 
3011 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3012 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3013 			break;
3014 		} else {
3015 			if (pcr->ticket !=
3016 			    ruleset->rules[rs_num].active.ticket) {
3017 				error = EINVAL;
3018 				break;
3019 			}
3020 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3021 				error = EINVAL;
3022 				break;
3023 			}
3024 		}
3025 
3026 		if (pcr->action != PF_CHANGE_REMOVE) {
3027 			newrule = pool_get(&pf_rule_pl, PR_WAITOK);
3028 			if (newrule == NULL) {
3029 				error = ENOMEM;
3030 				break;
3031 			}
3032 			pf_rule_copyin(&pcr->rule, newrule, p, minordev);
3033 #if !INET
3034 			if (newrule->af == AF_INET) {
3035 				pool_put(&pf_rule_pl, newrule);
3036 				error = EAFNOSUPPORT;
3037 				break;
3038 			}
3039 #endif /* INET */
3040 			if (newrule->ifname[0]) {
3041 				newrule->kif = pfi_kif_get(newrule->ifname);
3042 				if (newrule->kif == NULL) {
3043 					pool_put(&pf_rule_pl, newrule);
3044 					error = EINVAL;
3045 					break;
3046 				}
3047 				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
3048 			} else {
3049 				newrule->kif = NULL;
3050 			}
3051 
3052 			if (newrule->tagname[0]) {
3053 				if ((newrule->tag =
3054 				    pf_tagname2tag(newrule->tagname)) == 0) {
3055 					error = EBUSY;
3056 				}
3057 			}
3058 			if (newrule->match_tagname[0]) {
3059 				if ((newrule->match_tag = pf_tagname2tag(
3060 					    newrule->match_tagname)) == 0) {
3061 					error = EBUSY;
3062 				}
3063 			}
3064 			if (newrule->rt && !newrule->direction) {
3065 				error = EINVAL;
3066 			}
3067 #if PFLOG
3068 			if (!newrule->log) {
3069 				newrule->logif = 0;
3070 			}
3071 			if (newrule->logif >= PFLOGIFS_MAX) {
3072 				error = EINVAL;
3073 			}
3074 #endif /* PFLOG */
3075 			pf_addrwrap_setup(&newrule->src.addr);
3076 			pf_addrwrap_setup(&newrule->dst.addr);
3077 			if (pf_rtlabel_add(&newrule->src.addr) ||
3078 			    pf_rtlabel_add(&newrule->dst.addr)) {
3079 				error = EBUSY;
3080 			}
3081 			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) {
3082 				error = EINVAL;
3083 			}
3084 			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) {
3085 				error = EINVAL;
3086 			}
3087 			if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) {
3088 				error = EINVAL;
3089 			}
3090 			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) {
3091 				error = EINVAL;
3092 			}
3093 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) {
3094 				error = EINVAL;
3095 			}
3096 			TAILQ_FOREACH(pa, &pf_pabuf, entries)
3097 			if (pf_tbladdr_setup(ruleset, &pa->addr)) {
3098 				error = EINVAL;
3099 			}
3100 
3101 			if (newrule->overload_tblname[0]) {
3102 				if ((newrule->overload_tbl = pfr_attach_table(
3103 					    ruleset, newrule->overload_tblname)) ==
3104 				    NULL) {
3105 					error = EINVAL;
3106 				} else {
3107 					newrule->overload_tbl->pfrkt_flags |=
3108 					    PFR_TFLAG_ACTIVE;
3109 				}
3110 			}
3111 
3112 			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
3113 			if (((((newrule->action == PF_NAT) ||
3114 			    (newrule->action == PF_RDR) ||
3115 			    (newrule->action == PF_BINAT) ||
3116 			    (newrule->rt > PF_FASTROUTE)) &&
3117 			    !newrule->anchor)) &&
3118 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL)) {
3119 				error = EINVAL;
3120 			}
3121 
3122 			if (error) {
3123 				pf_rm_rule(NULL, newrule);
3124 				break;
3125 			}
3126 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3127 			newrule->evaluations = 0;
3128 			newrule->packets[0] = newrule->packets[1] = 0;
3129 			newrule->bytes[0] = newrule->bytes[1] = 0;
3130 		}
3131 		pf_empty_pool(&pf_pabuf);
3132 
3133 		if (pcr->action == PF_CHANGE_ADD_HEAD) {
3134 			oldrule = TAILQ_FIRST(
3135 				ruleset->rules[rs_num].active.ptr);
3136 		} else if (pcr->action == PF_CHANGE_ADD_TAIL) {
3137 			oldrule = TAILQ_LAST(
3138 				ruleset->rules[rs_num].active.ptr, pf_rulequeue);
3139 		} else {
3140 			oldrule = TAILQ_FIRST(
3141 				ruleset->rules[rs_num].active.ptr);
3142 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) {
3143 				oldrule = TAILQ_NEXT(oldrule, entries);
3144 			}
3145 			if (oldrule == NULL) {
3146 				if (newrule != NULL) {
3147 					pf_rm_rule(NULL, newrule);
3148 				}
3149 				error = EINVAL;
3150 				break;
3151 			}
3152 		}
3153 
3154 		if (pcr->action == PF_CHANGE_REMOVE) {
3155 			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
3156 			ruleset->rules[rs_num].active.rcount--;
3157 		} else {
3158 			if (oldrule == NULL) {
3159 				TAILQ_INSERT_TAIL(
3160 					ruleset->rules[rs_num].active.ptr,
3161 					newrule, entries);
3162 			} else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3163 			    pcr->action == PF_CHANGE_ADD_BEFORE) {
3164 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3165 			} else {
3166 				TAILQ_INSERT_AFTER(
3167 					ruleset->rules[rs_num].active.ptr,
3168 					oldrule, newrule, entries);
3169 			}
3170 			ruleset->rules[rs_num].active.rcount++;
3171 		}
3172 
3173 		nr = 0;
3174 		TAILQ_FOREACH(oldrule,
3175 		    ruleset->rules[rs_num].active.ptr, entries)
3176 		oldrule->nr = nr++;
3177 
3178 		ruleset->rules[rs_num].active.ticket++;
3179 
3180 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3181 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
3182 		net_filter_event_mark(NET_FILTER_EVENT_PF,
3183 		    pf_check_compatible_rules());
3184 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
3185 		break;
3186 	}
3187 
3188 	case DIOCINSERTRULE: {
3189 		struct pf_rule          *rule, *tail, *r;
3190 		int                     rs_num;
3191 		int                     is_anchor;
3192 
3193 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3194 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3195 		is_anchor = (pr->anchor_call[0] != '\0');
3196 
3197 		if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
3198 		    pr->rule.owner, is_anchor, &error)) == NULL) {
3199 			break;
3200 		}
3201 
3202 		rs_num = pf_get_ruleset_number(pr->rule.action);
3203 		if (rs_num >= PF_RULESET_MAX) {
3204 			error = EINVAL;
3205 			break;
3206 		}
3207 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3208 			error = EINVAL;
3209 			break;
3210 		}
3211 
3212 		/* make sure this anchor rule doesn't exist already */
3213 		if (is_anchor) {
3214 			r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3215 			while (r) {
3216 				if (r->anchor &&
3217 				    ((strcmp(r->anchor->name,
3218 				    pr->anchor_call)) == 0)) {
3219 					if (((strcmp(pr->rule.owner,
3220 					    r->owner)) == 0) ||
3221 					    ((strcmp(r->owner, "")) == 0)) {
3222 						error = EEXIST;
3223 					} else {
3224 						error = EPERM;
3225 					}
3226 					break;
3227 				}
3228 				r = TAILQ_NEXT(r, entries);
3229 			}
3230 			if (error != 0) {
3231 				break;
3232 			}
3233 		}
3234 
3235 		rule = pool_get(&pf_rule_pl, PR_WAITOK);
3236 		if (rule == NULL) {
3237 			error = ENOMEM;
3238 			break;
3239 		}
3240 		pf_rule_copyin(&pr->rule, rule, p, minordev);
3241 #if !INET
3242 		if (rule->af == AF_INET) {
3243 			pool_put(&pf_rule_pl, rule);
3244 			error = EAFNOSUPPORT;
3245 			break;
3246 		}
3247 #endif /* INET */
3248 		r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3249 		while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) {
3250 			r = TAILQ_NEXT(r, entries);
3251 		}
3252 		if (r == NULL) {
3253 			if ((tail =
3254 			    TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3255 			    pf_rulequeue)) != NULL) {
3256 				rule->nr = tail->nr + 1;
3257 			} else {
3258 				rule->nr = 0;
3259 			}
3260 		} else {
3261 			rule->nr = r->nr;
3262 		}
3263 
3264 		if ((error = pf_rule_setup(pr, rule, ruleset))) {
3265 			break;
3266 		}
3267 
3268 		if (rule->anchor != NULL) {
3269 			strlcpy(rule->anchor->owner, rule->owner,
3270 			    PF_OWNER_NAME_SIZE);
3271 		}
3272 
3273 		if (r) {
3274 			TAILQ_INSERT_BEFORE(r, rule, entries);
3275 			while (r && ++r->nr) {
3276 				r = TAILQ_NEXT(r, entries);
3277 			}
3278 		} else {
3279 			TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3280 			    rule, entries);
3281 		}
3282 		ruleset->rules[rs_num].active.rcount++;
3283 
3284 		/* Calculate checksum for the main ruleset */
3285 		if (ruleset == &pf_main_ruleset) {
3286 			error = pf_setup_pfsync_matching(ruleset);
3287 		}
3288 
3289 		pf_ruleset_cleanup(ruleset, rs_num);
3290 		rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);
3291 
3292 		pr->rule.ticket = rule->ticket;
3293 		pf_rule_copyout(rule, &pr->rule);
3294 		if (rule->rule_flag & PFRULE_PFM) {
3295 			pffwrules++;
3296 		}
3297 		if (rule->action == PF_NAT64) {
3298 			atomic_add_16(&pf_nat64_configured, 1);
3299 		}
3300 
3301 		if (pr->anchor_call[0] == '\0') {
3302 			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
3303 			if (rule->rule_flag & PFRULE_PFM) {
3304 				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
3305 			}
3306 		}
3307 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
3308 		net_filter_event_mark(NET_FILTER_EVENT_PF,
3309 		    pf_check_compatible_rules());
3310 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
3311 		break;
3312 	}
3313 
3314 	case DIOCDELETERULE: {
3315 		ASSERT(ruleset == NULL);
3316 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3317 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3318 
3319 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3320 			error = EINVAL;
3321 			break;
3322 		}
3323 
3324 		/* get device through which request is made */
3325 		if ((uint8_t)minordev == PFDEV_PFM) {
3326 			req_dev |= PFRULE_PFM;
3327 		}
3328 
3329 		if (pr->rule.ticket) {
3330 			if ((error = pf_delete_rule_by_ticket(pr, req_dev))) {
3331 				break;
3332 			}
3333 		} else {
3334 			pf_delete_rule_by_owner(pr->rule.owner, req_dev);
3335 		}
3336 		pr->nr = pffwrules;
3337 		if (pr->rule.action == PF_NAT64) {
3338 			atomic_add_16(&pf_nat64_configured, -1);
3339 		}
3340 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
3341 		net_filter_event_mark(NET_FILTER_EVENT_PF,
3342 		    pf_check_compatible_rules());
3343 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
3344 		break;
3345 	}
3346 
3347 	default:
3348 		VERIFY(0);
3349 		/* NOTREACHED */
3350 	}
3351 	if (ruleset != NULL) {
3352 		pf_release_ruleset(ruleset);
3353 		ruleset = NULL;
3354 	}
3355 
3356 	return error;
3357 }
3358 
/*
 * Handle the state-kill ioctls:
 *   DIOCCLRSTATES  - unlink every state whose interface name / owner match
 *                    the (optional) filters in *psk.
 *   DIOCKILLSTATES - unlink only states that additionally match the
 *                    address-family, protocol, src/dst address masks and
 *                    transport ports supplied in *psk.
 * In both cases the number of states killed is returned to userland by
 * overloading the psk_af field.  Returns 0 (error is never set to a
 * non-zero value in this function).
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	/* defensively NUL-terminate the user-supplied name strings */
	psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state         *s, *nexts;
		int                      killed = 0;

		/*
		 * Fetch the successor before any unlink, since
		 * pf_unlink_state() removes s from tree_id.
		 */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			/* owner filter requires a rule pointer to compare against */
			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* kill count is reported back through psk_af */
		psk->psk_af = (sa_family_t)killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state         *s, *nexts;
		struct pf_state_key     *sk;
		struct pf_state_host    *src, *dst;
		int                      killed = 0;

		/* same save-successor-then-unlink pattern as DIOCCLRSTATES */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			/*
			 * Pick src/dst as seen from the LAN side so the
			 * user-supplied addresses compare consistently
			 * regardless of the state's direction.
			 */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			/* zero af/proto act as wildcards */
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* kill count is reported back through psk_af */
		psk->psk_af = (sa_family_t)killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3485 
/*
 * Handle the single-state ioctls:
 *   DIOCADDSTATE - import a pfsync_state from userland and insert it into
 *                  the state table.
 *   DIOCGETSTATE - look up a state by (id, creatorid) and export it back
 *                  to userland.
 * Returns 0 or an errno (EINVAL/ENOMEM/ENOENT/EEXIST).
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state     *sp = &ps->state;
		struct pf_state         *s;
		struct pf_state_key     *sk;
		struct pfi_kif          *kif;

		/* reject out-of-range timeout indices from userland */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		/* allocate and attach a state key before importing */
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			/* detach the key allocated above before freeing s */
			pf_detach_state(s, 0);
			pool_put(&pf_state_pl, s);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			/* insertion failed (duplicate): drop kif ref and s */
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		/* counter must not wrap to zero */
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state         *s;
		struct pf_state_cmp      id_key;

		/* build the lookup key from the user-supplied id/creator */
		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		/* copy the state back into the caller's pfioc_state */
		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3559 
/*
 * Handle DIOCGETSTATES: export all linked states to a userland buffer.
 * The caller passes either the 32-bit or 64-bit layout of pfioc_states;
 * p64 selects which one is live.  A zero ps_len is a size query: the
 * required buffer size is written back and no states are copied.
 * Otherwise states are exported one at a time through a single kernel
 * staging buffer (pstore) until the user buffer is full, and ps_len is
 * updated to the number of bytes actually produced.
 * Returns 0 or the errno from copyout().
 */
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: {           /* struct pfioc_states */
		struct pf_state         *state;
		struct pfsync_state     *pstore;
		user_addr_t              buf;
		u_int32_t                nr = 0;
		int                      len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* size query: report space needed for all states */
			size = sizeof(struct pfsync_state) * pf_status.states;
			if (p64) {
				ps64->ps_len = size;
			} else {
				ps32->ps_len = size;
			}
			break;
		}

		/* single zeroed staging record, reused for every state */
		pstore = kalloc_type(struct pfsync_state,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
#ifdef __LP64__
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);
#else
		buf = ps32->ps_buf;
#endif

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* skip states already unlinked but not yet purged */
			if (state->timeout != PFTM_UNLINKED) {
				/* stop once the next record would overflow the user buffer */
				if ((nr + 1) * sizeof(*pstore) > (unsigned)len) {
					break;
				}

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof(*pstore));
				if (error) {
					kfree_type(struct pfsync_state, pstore);
					goto fail;
				}
				buf += sizeof(*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* report how many bytes were actually written */
		size = sizeof(struct pfsync_state) * nr;
		if (p64) {
			ps64->ps_len = size;
		} else {
			ps32->ps_len = size;
		}

		kfree_type(struct pfsync_state, pstore);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
3632 
/*
 * Handle DIOCNATLOOK: given the addresses/ports of a connection as seen
 * by userland, find the matching state and report the translated
 * (NAT'ed) addresses and ports back in pnl->rsaddr/rdaddr/rsxport/rdxport.
 * Returns 0, EINVAL on malformed input, E2BIG if more than one state
 * matches, or ENOENT if none does.
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key     *sk;
		struct pf_state         *state;
		struct pf_state_key_cmp  key;
		int                      m = 0, direction = pnl->direction;

		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/*
		 * Reject: missing protocol, zero addresses, or (for TCP/UDP)
		 * missing ports.
		 */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port))) {
			error = EINVAL;
		} else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				/* inbound: query the gateway-side key */
				key.af_gwy = pnl->af;
				PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
				    pnl->af);
				memcpy(&key.ext_gwy.xport, &pnl->dxport,
				    sizeof(key.ext_gwy.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof(key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				/* outbound: query the LAN-side key */
				key.af_lan = pnl->af;
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof(key.lan.xport));
				PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
				    pnl->af);
				memcpy(&key.ext_lan.xport, &pnl->sxport,
				    sizeof(key.ext_lan.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1) {
				error = E2BIG;  /* more than one state */
			} else if (state != NULL) {
				sk = state->state_key;
				if (direction == PF_IN) {
					/* translated source is the state's LAN side */
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af_lan);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof(pnl->rsxport));
					/* destination is unchanged */
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof(pnl->rdxport));
				} else {
					/* translated destination is the state's gateway side */
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af_gwy);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof(pnl->rdxport));
					/* source is unchanged */
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof(pnl->rsxport));
				}
			} else {
				error = ENOENT;
			}
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3721 
3722 static int
pfioctl_ioc_tm(u_long cmd,struct pfioc_tm * pt,struct proc * p)3723 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3724 {
3725 #pragma unused(p)
3726 	int error = 0;
3727 
3728 	switch (cmd) {
3729 	case DIOCSETTIMEOUT: {
3730 		int old;
3731 
3732 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3733 		    pt->seconds < 0) {
3734 			error = EINVAL;
3735 			goto fail;
3736 		}
3737 		old = pf_default_rule.timeout[pt->timeout];
3738 		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) {
3739 			pt->seconds = 1;
3740 		}
3741 		pf_default_rule.timeout[pt->timeout] = pt->seconds;
3742 		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) {
3743 			wakeup(pf_purge_thread_fn);
3744 		}
3745 		pt->seconds = old;
3746 		break;
3747 	}
3748 
3749 	case DIOCGETTIMEOUT: {
3750 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3751 			error = EINVAL;
3752 			goto fail;
3753 		}
3754 		pt->seconds = pf_default_rule.timeout[pt->timeout];
3755 		break;
3756 	}
3757 
3758 	default:
3759 		VERIFY(0);
3760 		/* NOTREACHED */
3761 	}
3762 fail:
3763 	return error;
3764 }
3765 
3766 static int
pfioctl_ioc_limit(u_long cmd,struct pfioc_limit * pl,struct proc * p)3767 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3768 {
3769 #pragma unused(p)
3770 	int error = 0;
3771 
3772 	switch (cmd) {
3773 	case DIOCGETLIMIT: {
3774 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3775 			error = EINVAL;
3776 			goto fail;
3777 		}
3778 		pl->limit = pf_pool_limits[pl->index].limit;
3779 		break;
3780 	}
3781 
3782 	case DIOCSETLIMIT: {
3783 		int old_limit;
3784 
3785 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3786 		    pf_pool_limits[pl->index].pp == NULL) {
3787 			error = EINVAL;
3788 			goto fail;
3789 		}
3790 		pool_sethardlimit(pf_pool_limits[pl->index].pp,
3791 		    pl->limit, NULL, 0);
3792 		old_limit = pf_pool_limits[pl->index].limit;
3793 		pf_pool_limits[pl->index].limit = pl->limit;
3794 		pl->limit = old_limit;
3795 		break;
3796 	}
3797 
3798 	default:
3799 		VERIFY(0);
3800 		/* NOTREACHED */
3801 	}
3802 fail:
3803 	return error;
3804 }
3805 
3806 static int
pfioctl_ioc_pooladdr(u_long cmd,struct pfioc_pooladdr * pp,struct proc * p)3807 pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
3808 {
3809 #pragma unused(p)
3810 	struct pf_pooladdr *pa = NULL;
3811 	struct pf_pool *pool = NULL;
3812 	int error = 0;
3813 	struct pf_ruleset *ruleset = NULL;
3814 
3815 	switch (cmd) {
3816 	case DIOCBEGINADDRS: {
3817 		pf_empty_pool(&pf_pabuf);
3818 		pp->ticket = ++ticket_pabuf;
3819 		break;
3820 	}
3821 
3822 	case DIOCADDADDR: {
3823 		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3824 		if (pp->ticket != ticket_pabuf) {
3825 			error = EBUSY;
3826 			break;
3827 		}
3828 #if !INET
3829 		if (pp->af == AF_INET) {
3830 			error = EAFNOSUPPORT;
3831 			break;
3832 		}
3833 #endif /* INET */
3834 		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
3835 		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
3836 		    pp->addr.addr.type != PF_ADDR_TABLE) {
3837 			error = EINVAL;
3838 			break;
3839 		}
3840 		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3841 		if (pa == NULL) {
3842 			error = ENOMEM;
3843 			break;
3844 		}
3845 		pf_pooladdr_copyin(&pp->addr, pa);
3846 		if (pa->ifname[0]) {
3847 			pa->kif = pfi_kif_get(pa->ifname);
3848 			if (pa->kif == NULL) {
3849 				pool_put(&pf_pooladdr_pl, pa);
3850 				error = EINVAL;
3851 				break;
3852 			}
3853 			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
3854 		}
3855 		pf_addrwrap_setup(&pa->addr);
3856 		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
3857 			pfi_dynaddr_remove(&pa->addr);
3858 			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
3859 			pool_put(&pf_pooladdr_pl, pa);
3860 			error = EINVAL;
3861 			break;
3862 		}
3863 		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
3864 		break;
3865 	}
3866 
3867 	case DIOCGETADDRS: {
3868 		pp->nr = 0;
3869 		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3870 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
3871 		    pp->r_num, 0, 1, 0);
3872 		if (pool == NULL) {
3873 			error = EBUSY;
3874 			break;
3875 		}
3876 		TAILQ_FOREACH(pa, &pool->list, entries)
3877 		pp->nr++;
3878 		break;
3879 	}
3880 
3881 	case DIOCGETADDR: {
3882 		u_int32_t                nr = 0;
3883 
3884 		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3885 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
3886 		    pp->r_num, 0, 1, 1);
3887 		if (pool == NULL) {
3888 			error = EBUSY;
3889 			break;
3890 		}
3891 		pa = TAILQ_FIRST(&pool->list);
3892 		while ((pa != NULL) && (nr < pp->nr)) {
3893 			pa = TAILQ_NEXT(pa, entries);
3894 			nr++;
3895 		}
3896 		if (pa == NULL) {
3897 			error = EBUSY;
3898 			break;
3899 		}
3900 		pf_pooladdr_copyout(pa, &pp->addr);
3901 		pfi_dynaddr_copyout(&pp->addr.addr);
3902 		pf_tbladdr_copyout(&pp->addr.addr);
3903 		pf_rtlabel_copyout(&pp->addr.addr);
3904 		break;
3905 	}
3906 
3907 	case DIOCCHANGEADDR: {
3908 		struct pfioc_pooladdr   *pca = pp;
3909 		struct pf_pooladdr      *oldpa = NULL, *newpa = NULL;
3910 
3911 		if (pca->action < PF_CHANGE_ADD_HEAD ||
3912 		    pca->action > PF_CHANGE_REMOVE) {
3913 			error = EINVAL;
3914 			break;
3915 		}
3916 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
3917 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
3918 		    pca->addr.addr.type != PF_ADDR_TABLE) {
3919 			error = EINVAL;
3920 			break;
3921 		}
3922 
3923 		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
3924 		ruleset = pf_find_ruleset(pca->anchor);
3925 		if (ruleset == NULL) {
3926 			error = EBUSY;
3927 			break;
3928 		}
3929 		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
3930 		    pca->r_num, pca->r_last, 1, 1);
3931 		if (pool == NULL) {
3932 			error = EBUSY;
3933 			break;
3934 		}
3935 		if (pca->action != PF_CHANGE_REMOVE) {
3936 			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3937 			if (newpa == NULL) {
3938 				error = ENOMEM;
3939 				break;
3940 			}
3941 			pf_pooladdr_copyin(&pca->addr, newpa);
3942 #if !INET
3943 			if (pca->af == AF_INET) {
3944 				pool_put(&pf_pooladdr_pl, newpa);
3945 				error = EAFNOSUPPORT;
3946 				break;
3947 			}
3948 #endif /* INET */
3949 			if (newpa->ifname[0]) {
3950 				newpa->kif = pfi_kif_get(newpa->ifname);
3951 				if (newpa->kif == NULL) {
3952 					pool_put(&pf_pooladdr_pl, newpa);
3953 					error = EINVAL;
3954 					break;
3955 				}
3956 				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
3957 			} else {
3958 				newpa->kif = NULL;
3959 			}
3960 			pf_addrwrap_setup(&newpa->addr);
3961 			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
3962 			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
3963 				pfi_dynaddr_remove(&newpa->addr);
3964 				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
3965 				pool_put(&pf_pooladdr_pl, newpa);
3966 				error = EINVAL;
3967 				break;
3968 			}
3969 		}
3970 
3971 		if (pca->action == PF_CHANGE_ADD_HEAD) {
3972 			oldpa = TAILQ_FIRST(&pool->list);
3973 		} else if (pca->action == PF_CHANGE_ADD_TAIL) {
3974 			oldpa = TAILQ_LAST(&pool->list, pf_palist);
3975 		} else {
3976 			int     i = 0;
3977 
3978 			oldpa = TAILQ_FIRST(&pool->list);
3979 			while ((oldpa != NULL) && (i < (int)pca->nr)) {
3980 				oldpa = TAILQ_NEXT(oldpa, entries);
3981 				i++;
3982 			}
3983 			if (oldpa == NULL) {
3984 				error = EINVAL;
3985 				break;
3986 			}
3987 		}
3988 
3989 		if (pca->action == PF_CHANGE_REMOVE) {
3990 			TAILQ_REMOVE(&pool->list, oldpa, entries);
3991 			pfi_dynaddr_remove(&oldpa->addr);
3992 			pf_tbladdr_remove(&oldpa->addr);
3993 			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
3994 			pool_put(&pf_pooladdr_pl, oldpa);
3995 		} else {
3996 			if (oldpa == NULL) {
3997 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
3998 			} else if (pca->action == PF_CHANGE_ADD_HEAD ||
3999 			    pca->action == PF_CHANGE_ADD_BEFORE) {
4000 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4001 			} else {
4002 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4003 				    newpa, entries);
4004 			}
4005 		}
4006 
4007 		pool->cur = TAILQ_FIRST(&pool->list);
4008 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
4009 		    pca->af);
4010 		break;
4011 	}
4012 
4013 	default:
4014 		VERIFY(0);
4015 		/* NOTREACHED */
4016 	}
4017 
4018 	if (ruleset) {
4019 		pf_release_ruleset(ruleset);
4020 		ruleset = NULL;
4021 	}
4022 
4023 	return error;
4024 }
4025 
/*
 * Handle the ruleset-enumeration ioctls:
 *   DIOCGETRULESETS - count the child anchors under pr->path into pr->nr.
 *   DIOCGETRULESET  - copy the name of the pr->nr'th child anchor under
 *                     pr->path into pr->name.
 * The main ruleset has no anchor object, so its children are found by
 * scanning the global anchor tree for parentless anchors (the "kludge"
 * noted below).  Returns 0, EINVAL for an unknown path, or EBUSY when
 * the requested index does not exist.
 */
static int
pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
{
#pragma unused(p)
	int error = 0;
	struct pf_ruleset *ruleset = NULL;

	switch (cmd) {
	case DIOCGETRULESETS: {
		struct pf_anchor        *anchor;

		/* defensively NUL-terminate user-supplied strings */
		pr->path[sizeof(pr->path) - 1] = '\0';
		pr->name[sizeof(pr->name) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL) {
				pr->nr++;
			}
		} else {
			/* count direct children of this anchor */
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		struct pf_anchor        *anchor;
		u_int32_t                nr = 0;

		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL && nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			if (nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
		}
		/* empty name means the index was past the last child */
		if (!pr->name[0]) {
			error = EBUSY;
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	if (ruleset) {
		pf_release_ruleset(ruleset);
		ruleset = NULL;
	}
	return error;
}
4102 
/*
 * Handler for the DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT transaction
 * ioctls.  User space passes an array of `size' pfioc_trans_e elements
 * (each naming a rule class rs_num and an anchor); the 32-bit and
 * 64-bit ABIs use different top-level structs, hence the io32/io64
 * pair.  Elements are processed strictly in array order.
 *
 * Returns 0 on success; ENODEV on an element-size mismatch, EFAULT on
 * copyin/copyout failure, EBUSY/EINVAL on ticket or rs_num validation
 * failure, or whatever the per-class begin/rollback/commit helper
 * returns.
 */
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int error = 0, esize, size;
	user_addr_t buf;
	struct pf_ruleset *rs = NULL;

#ifdef __LP64__
	int p64 = proc_is64bit(p);

	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);
#else
#pragma unused(io64, p)
	esize = io32->esize;
	size = io32->size;
	buf = io32->array;
#endif

	switch (cmd) {
	/*
	 * Open an inactive (staging) set per element and copy the issued
	 * ticket back out to the caller's array.
	 */
	case DIOCXBEGIN: {
		struct pfioc_trans_e    *ioe;
		struct pfr_table        *table;
		int                      i;

		/* caller's element layout must match ours exactly */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			/* defensively NUL-terminate the anchor path */
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				/* ALTQ is a no-op on this platform */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			}
			/* hand the freshly issued ticket back to user space */
			if (copyout(ioe, buf, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	/*
	 * Throw away the inactive set named by each element's ticket.
	 */
	case DIOCXROLLBACK: {
		struct pfioc_trans_e    *ioe;
		struct pfr_table        *table;
		int                      i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	/*
	 * Two-pass commit: first validate every element's ticket so the
	 * commit pass is expected not to fail, then swap all inactive
	 * sets live.
	 */
	case DIOCXCOMMIT: {
		struct pfioc_trans_e    *ioe;
		struct pfr_table        *table;
		user_addr_t              _buf = buf;	/* saved for 2nd pass */
		int                      i;

		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				/*
				 * NOTE(review): rs is overwritten on every
				 * iteration without an intervening
				 * pf_release_ruleset(); only the final
				 * lookup is released at "fail".  Verify that
				 * pf_find_ruleset() does not hand out a
				 * per-call reference here.
				 */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				/* reject out-of-range rule classes */
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* rewind to the start of the user array */
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
#if SKYWALK && defined(XNU_TARGET_OS_OSX)
		/* re-evaluate filter compatibility after the rule swap */
		net_filter_event_mark(NET_FILTER_EVENT_PF,
		    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	if (rs) {
		pf_release_ruleset(rs);
		rs = NULL;
	}
	return error;
}
4336 
/*
 * Handler for DIOCGETSRCNODES: copy the source-tracking nodes out to
 * user space.  When the caller passes psn_len == 0 this is a sizing
 * probe and only the required buffer length is reported; otherwise
 * nodes are copied out until the tree or the buffer is exhausted and
 * psn_len is updated to the number of bytes actually written.
 */
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node      *n, *pstore;
		user_addr_t              buf;
		u_int32_t                nr = 0;
		int                      space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			/* sizing probe: count nodes, report bytes needed */
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			nr++;

			size = sizeof(struct pf_src_node) * nr;
			if (p64) {
				psn64->psn_len = size;
			} else {
				psn32->psn_len = size;
			}
			break;
		}

		/* staging copy, so kernel pointers can be scrubbed below */
		pstore = kalloc_type(struct pf_src_node, Z_WAITOK | Z_NOFAIL);
#ifdef __LP64__
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);
#else
		buf = psn32->psn_buf;
#endif

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* stop once the next record would overflow the buffer */
			if ((nr + 1) * sizeof(*pstore) > (unsigned)space) {
				break;
			}

			bcopy(n, pstore, sizeof(*pstore));
			if (n->rule.ptr != NULL) {
				/* export the rule number, not the pointer */
				pstore->rule.nr = n->rule.ptr->nr;
			}
			/* report age and remaining lifetime, not timestamps */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs) {
				pstore->expire -= secs;
			} else {
				pstore->expire = 0;
			}

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds) {
				pstore->conn_rate.count = 0;
			} else {
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;
			}

			/* scrub kernel pointers before copying to user space */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof(*pstore));
			if (error) {
				kfree_type(struct pf_src_node, pstore);
				goto fail;
			}
			buf += sizeof(*pstore);
			nr++;
		}

		/* report how many bytes were actually written */
		size = sizeof(struct pf_src_node) * nr;
		if (p64) {
			psn64->psn_len = size;
		} else {
			psn32->psn_len = size;
		}

		kfree_type(struct pf_src_node, pstore);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4431 
4432 static int
pfioctl_ioc_src_node_kill(u_long cmd,struct pfioc_src_node_kill * psnk,struct proc * p)4433 pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
4434     struct proc *p)
4435 {
4436 #pragma unused(p)
4437 	int error = 0;
4438 
4439 	switch (cmd) {
4440 	case DIOCKILLSRCNODES: {
4441 		struct pf_src_node      *sn;
4442 		struct pf_state         *s;
4443 		int                     killed = 0;
4444 
4445 		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
4446 			if (PF_MATCHA(psnk->psnk_src.neg,
4447 			    &psnk->psnk_src.addr.v.a.addr,
4448 			    &psnk->psnk_src.addr.v.a.mask,
4449 			    &sn->addr, sn->af) &&
4450 			    PF_MATCHA(psnk->psnk_dst.neg,
4451 			    &psnk->psnk_dst.addr.v.a.addr,
4452 			    &psnk->psnk_dst.addr.v.a.mask,
4453 			    &sn->raddr, sn->af)) {
4454 				/* Handle state to src_node linkage */
4455 				if (sn->states != 0) {
4456 					RB_FOREACH(s, pf_state_tree_id,
4457 					    &tree_id) {
4458 						if (s->src_node == sn) {
4459 							s->src_node = NULL;
4460 						}
4461 						if (s->nat_src_node == sn) {
4462 							s->nat_src_node = NULL;
4463 						}
4464 					}
4465 					sn->states = 0;
4466 				}
4467 				sn->expire = 1;
4468 				killed++;
4469 			}
4470 		}
4471 
4472 		if (killed > 0) {
4473 			pf_purge_expired_src_nodes();
4474 		}
4475 
4476 		psnk->psnk_af = (sa_family_t)killed;
4477 		break;
4478 	}
4479 
4480 	default:
4481 		VERIFY(0);
4482 		/* NOTREACHED */
4483 	}
4484 
4485 	return error;
4486 }
4487 
/*
 * Handler for the interface ioctls DIOCIGETIFACES (list pfi
 * interfaces), DIOCSETIFFLAG and DIOCCLRIFFLAG (set/clear per-
 * interface flags).  32-bit and 64-bit callers use different struct
 * layouts, selected by p64.
 */
static int
pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
    struct pfioc_iface_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCIGETIFACES: {
		user_addr_t buf;
		int esize;

#ifdef __LP64__
		buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
		esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
#else
		buf = io32->pfiio_buffer;
		esize = io32->pfiio_esize;
#endif

		/* esize must be that of the user space version of pfi_kif */
		if (esize != sizeof(struct pfi_uif)) {
			error = ENODEV;
			break;
		}
		/* defensively NUL-terminate the user-supplied name */
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}
		error = pfi_get_ifaces(
			p64 ? io64->pfiio_name : io32->pfiio_name, buf,
			p64 ? &io64->pfiio_size : &io32->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		/* defensively NUL-terminate the user-supplied name */
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}

		error = pfi_set_flags(
			p64 ? io64->pfiio_name : io32->pfiio_name,
			p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		/* defensively NUL-terminate the user-supplied name */
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}

		error = pfi_clear_flags(
			p64 ? io64->pfiio_name : io32->pfiio_name,
			p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
4557 
/*
 * Main PF entry point from the IP input/output paths.  *mp is the
 * packet under test (possibly the head of a chain via m_nextpkt, which
 * is detached before filtering and re-linked after); mppn, when
 * non-NULL, is the previous packet's m_nextpkt slot and is fixed up if
 * PF consumes or replaces *mp.  Returns 0 when the packet may proceed,
 * or the error from the per-AF hook when it was dropped.
 */
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *nextpkt;
	net_thread_marks_t marks;
	struct ifnet * pf_ifp = ifp;

	/* Always allow traffic on co-processor interfaces. */
	if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp)) {
		return 0;
	}

	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	/*
	 * marks == none: NOTE(review) — presumably this thread already
	 * holds PF (re-entry), so the locks are not retaken; confirm
	 * against net_thread_marks_push() semantics.
	 */
	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(&pf_perim_lock);
		if (!pf_is_enabled) {
			/* PF disabled: only the perim lock is held here */
			goto done;
		}
		lck_mtx_lock(&pf_lock);
	}

	if (mppn != NULL && *mppn != NULL) {
		VERIFY(*mppn == *mp);
	}
	/* detach the rest of the chain; filter one packet at a time */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL) {
		(*mp)->m_nextpkt = NULL;
	}

	/*
	 * For packets destined to locally hosted IP address
	 * ip_output_list sets Mbuf's pkt header's rcvif to
	 * the interface hosting the IP address.
	 * While on the output path ifp passed to pf_af_hook
	 * to such local communication is the loopback interface,
	 * the input path derives ifp from mbuf packet header's
	 * rcvif.
	 * This asymmetry causes issues with PF.
	 * To handle that case, we have a limited change here to
	 * pass interface as loopback if packets are looped in.
	 */
	if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		pf_ifp = lo_ifp;
	}

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(pf_ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
	case AF_INET6:
		error = pf_inet6_hook(pf_ifp, mp, input, fwa);
		break;
	default:
		/* unknown AF: pass the packet through untouched */
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		while (m->m_nextpkt != NULL) {
			m = m->m_nextpkt;
		}
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL) {
			*mppn = *mp;
		} else {
			/* *mp was consumed; splice the chain past it */
			*mppn = nextpkt;
		}
	}

	if (marks != net_thread_marks_none) {
		lck_mtx_unlock(&pf_lock);
	}

done:
	if (marks != net_thread_marks_none) {
		lck_rw_done(&pf_perim_lock);
	}

	net_thread_marks_pop(marks);
	return error;
}
4648 
4649 
#if INET
/*
 * IPv4 leg of pf_af_hook(): finalize any delayed transport checksum,
 * byte-swap ip_len/ip_off into the order PF expects, run the packet
 * through pf_test_mbuf(), and restore host byte order on a pass.
 * Returns 0 on pass, EHOSTUNREACH when PF dropped the packet (freed
 * here), or ENOBUFS when PF consumed it.
 */
static __attribute__((noinline)) int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculated the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

#if BYTE_ORDER != BIG_ENDIAN
	/* swap ip_len/ip_off to network byte order for PF */
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			/* PF dropped the packet; free it here */
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* PF consumed the mbuf; nothing left to free */
			error = ENOBUFS;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/* passed: restore host byte order for the caller */
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return error;
}
#endif /* INET */
4704 
4705 int __attribute__((noinline))
pf_inet6_hook(struct ifnet * ifp,struct mbuf ** mp,int input,struct ip_fw_args * fwa)4706 pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
4707     struct ip_fw_args *fwa)
4708 {
4709 	int error = 0;
4710 
4711 	/*
4712 	 * If the packet is outbound, is originated locally, is flagged for
4713 	 * delayed UDP/TCP checksum calculation, and is about to be processed
4714 	 * for an interface that doesn't support the appropriate checksum
4715 	 * offloading, then calculated the checksum here so that PF can adjust
4716 	 * it properly.
4717 	 */
4718 	if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
4719 		static const int mask = CSUM_DELAY_IPV6_DATA;
4720 		const int flags = (*mp)->m_pkthdr.csum_flags &
4721 		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
4722 
4723 		if (flags & mask) {
4724 			/*
4725 			 * Checksum offload should not have been enabled
4726 			 * when extension headers exist, thus 0 for optlen.
4727 			 */
4728 			in6_delayed_cksum(*mp);
4729 			(*mp)->m_pkthdr.csum_flags &= ~mask;
4730 		}
4731 	}
4732 
4733 	if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
4734 		if (*mp != NULL) {
4735 			m_freem(*mp);
4736 			*mp = NULL;
4737 			error = EHOSTUNREACH;
4738 		} else {
4739 			error = ENOBUFS;
4740 		}
4741 	}
4742 	return error;
4743 }
4744 
4745 int
pf_ifaddr_hook(struct ifnet * ifp)4746 pf_ifaddr_hook(struct ifnet *ifp)
4747 {
4748 	struct pfi_kif *kif = ifp->if_pf_kif;
4749 
4750 	if (kif != NULL) {
4751 		lck_rw_lock_shared(&pf_perim_lock);
4752 		lck_mtx_lock(&pf_lock);
4753 
4754 		pfi_kifaddr_update(kif);
4755 
4756 		lck_mtx_unlock(&pf_lock);
4757 		lck_rw_done(&pf_perim_lock);
4758 	}
4759 	return 0;
4760 }
4761 
4762 /*
4763  * Caller acquires dlil lock as writer (exclusive)
4764  */
4765 void
pf_ifnet_hook(struct ifnet * ifp,int attach)4766 pf_ifnet_hook(struct ifnet *ifp, int attach)
4767 {
4768 	lck_rw_lock_shared(&pf_perim_lock);
4769 	lck_mtx_lock(&pf_lock);
4770 	if (attach) {
4771 		pfi_attach_ifnet(ifp);
4772 	} else {
4773 		pfi_detach_ifnet(ifp);
4774 	}
4775 	lck_mtx_unlock(&pf_lock);
4776 	lck_rw_done(&pf_perim_lock);
4777 }
4778 
4779 static void
pf_attach_hooks(void)4780 pf_attach_hooks(void)
4781 {
4782 	ifnet_head_lock_shared();
4783 	/*
4784 	 * Check against ifnet_addrs[] before proceeding, in case this
4785 	 * is called very early on, e.g. during dlil_init() before any
4786 	 * network interface is attached.
4787 	 */
4788 	if (ifnet_addrs != NULL) {
4789 		int i;
4790 
4791 		for (i = 0; i <= if_index; i++) {
4792 			struct ifnet *ifp = ifindex2ifnet[i];
4793 			if (ifp != NULL) {
4794 				pfi_attach_ifnet(ifp);
4795 			}
4796 		}
4797 	}
4798 	ifnet_head_done();
4799 }
4800 
#if 0
/* currently unused along with pfdetach() */
/*
 * Unregister every PF-known interface from pfi.  Mirror image of
 * pf_attach_hooks().
 *
 * Fix: the loop index `i' was declared *inside* the loop body while
 * being used in the for-header, so this function could never compile
 * if the #if 0 were removed; the declaration now precedes the loop.
 */
static void
pf_detach_hooks(void)
{
	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		int i;

		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];
			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}
#endif
4820 
4821 /*
4822  * 'D' group ioctls.
4823  *
4824  * The switch statement below does nothing at runtime, as it serves as a
4825  * compile time check to ensure that all of the socket 'D' ioctls (those
 * in the 'D' group going thru soo_ioctl) that are made available by the
 * networking stack are unique.  This works as long as this routine gets
4828  * updated each time a new interface ioctl gets added.
4829  *
4830  * Any failures at compile time indicates duplicated ioctl values.
4831  */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to _CASSERT() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 * Two ioctls sharing a value would produce duplicate case
	 * labels, which the compiler rejects.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES:
	case DIOCGIFSPEED:
		/* empty statement: the labels need something to attach to */
		;
	}
}
4916