xref: /xnu-8792.41.9/bsd/net/pf_ioctl.c (revision 5c2921b07a2480ab43ec66f5b9e41cb872bc554f)
1 /*
2  * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*	$apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /*	$OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31 
32 /*
33  * Copyright (c) 2001 Daniel Hartmeier
34  * Copyright (c) 2002,2003 Henning Brauer
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *    - Redistributions of source code must retain the above copyright
42  *      notice, this list of conditions and the following disclaimer.
43  *    - Redistributions in binary form must reproduce the above
44  *      copyright notice, this list of conditions and the following
45  *      disclaimer in the documentation and/or other materials provided
46  *      with the distribution.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59  * POSSIBILITY OF SUCH DAMAGE.
60  *
61  * Effort sponsored in part by the Defense Advanced Research Projects
62  * Agency (DARPA) and Air Force Research Laboratory, Air Force
63  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64  *
65  */
66 
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83 #include <os/log.h>
84 
85 #include <mach/vm_param.h>
86 
87 #include <net/dlil.h>
88 #include <net/if.h>
89 #include <net/if_types.h>
90 #include <net/net_api_stats.h>
91 #include <net/route.h>
92 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
93 #include <skywalk/lib/net_filter_event.h>
94 #endif
95 
96 #include <netinet/in.h>
97 #include <netinet/in_var.h>
98 #include <netinet/in_systm.h>
99 #include <netinet/ip.h>
100 #include <netinet/ip_var.h>
101 #include <netinet/ip_icmp.h>
102 #include <netinet/if_ether.h>
103 
104 #if DUMMYNET
105 #include <netinet/ip_dummynet.h>
106 #else
107 struct ip_fw_args;
108 #endif /* DUMMYNET */
109 
110 #include <libkern/crypto/md5.h>
111 
112 #include <machine/machine_routines.h>
113 
114 #include <miscfs/devfs/devfs.h>
115 
116 #include <net/pfvar.h>
117 
118 #if NPFSYNC
119 #include <net/if_pfsync.h>
120 #endif /* NPFSYNC */
121 
122 #if PFLOG
123 #include <net/if_pflog.h>
124 #endif /* PFLOG */
125 
126 #include <netinet/ip6.h>
127 #include <netinet/in_pcb.h>
128 
129 #include <dev/random/randomdev.h>
130 
131 #if 0
132 static void pfdetach(void);
133 #endif
134 static int pfopen(dev_t, int, int, struct proc *);
135 static int pfclose(dev_t, int, int, struct proc *);
136 static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
137 static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
138     struct pfioc_table_64 *, struct proc *);
139 static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
140     struct pfioc_tokens_64 *, struct proc *);
141 static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
142 static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
143     struct proc *);
144 static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
145 static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
146     struct pfioc_states_64 *, struct proc *);
147 static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
148 static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
149 static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
150 static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
151 static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
152 static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
153     struct pfioc_trans_64 *, struct proc *);
154 static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
155     struct pfioc_src_nodes_64 *, struct proc *);
156 static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
157     struct proc *);
158 static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
159     struct pfioc_iface_64 *, struct proc *);
160 static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
161     u_int8_t, u_int8_t, u_int8_t);
162 static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
163 static void pf_empty_pool(struct pf_palist *);
164 static int pf_begin_rules(u_int32_t *, int, const char *);
165 static int pf_rollback_rules(u_int32_t, int, char *);
166 static int pf_setup_pfsync_matching(struct pf_ruleset *);
167 static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
168 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
169 static int pf_commit_rules(u_int32_t, int, char *);
170 static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
171     int);
172 static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
173 static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
174     struct pf_state *);
175 static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
176     struct pf_state *);
177 static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
178 static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
179 static void pf_expire_states_and_src_nodes(struct pf_rule *);
180 static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
181     int, struct pf_rule *);
182 static void pf_addrwrap_setup(struct pf_addr_wrap *);
183 static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
184     struct pf_ruleset *);
185 static void pf_delete_rule_by_owner(char *, u_int32_t);
186 static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
187 static void pf_ruleset_cleanup(struct pf_ruleset *, int);
188 static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
189     int, struct pf_rule **);
190 
191 #define PF_CDEV_MAJOR   (-1)
192 
193 static const struct cdevsw pf_cdevsw = {
194 	.d_open       = pfopen,
195 	.d_close      = pfclose,
196 	.d_read       = eno_rdwrt,
197 	.d_write      = eno_rdwrt,
198 	.d_ioctl      = pfioctl,
199 	.d_stop       = eno_stop,
200 	.d_reset      = eno_reset,
201 	.d_ttys       = NULL,
202 	.d_select     = eno_select,
203 	.d_mmap       = eno_mmap,
204 	.d_strategy   = eno_strat,
205 	.d_reserved_1 = eno_getc,
206 	.d_reserved_2 = eno_putc,
207 	.d_type       = 0
208 };
209 
210 static void pf_attach_hooks(void);
211 #if 0
212 /* currently unused along with pfdetach() */
213 static void pf_detach_hooks(void);
214 #endif
215 
216 /*
217  * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
218  * and used in pf_af_hook() for performance optimization, such that packets
219  * will enter pf_test() or pf_test6() only when PF is running.
220  */
221 int pf_is_enabled = 0;
222 
223 u_int32_t pf_hash_seed;
224 int16_t pf_nat64_configured = 0;
225 
226 /*
227  * These are the pf enabled reference counting variables
228  */
229 #define NR_TOKENS_LIMIT (INT_MAX / sizeof(struct pfioc_token))
230 
231 static u_int64_t pf_enabled_ref_count;
232 static u_int32_t nr_tokens = 0;
233 static u_int32_t pffwrules;
234 static u_int32_t pfdevcnt;
235 
236 SLIST_HEAD(list_head, pfioc_kernel_token);
237 static struct list_head token_list_head;
238 
239 struct pf_rule           pf_default_rule;
240 
241 typedef struct {
242 	char tag_name[PF_TAG_NAME_SIZE];
243 	uint16_t tag_id;
244 } pf_reserved_tag_table_t;
245 
246 #define NUM_RESERVED_TAGS    2
247 static pf_reserved_tag_table_t pf_reserved_tag_table[NUM_RESERVED_TAGS] = {
248 	{ PF_TAG_NAME_SYSTEM_SERVICE, PF_TAG_ID_SYSTEM_SERVICE},
249 	{ PF_TAG_NAME_STACK_DROP, PF_TAG_ID_STACK_DROP},
250 };
251 #define RESERVED_TAG_ID_MIN    PF_TAG_ID_SYSTEM_SERVICE
252 
253 #define DYNAMIC_TAG_ID_MAX    50000
254 static TAILQ_HEAD(pf_tags, pf_tagname)  pf_tags =
255     TAILQ_HEAD_INITIALIZER(pf_tags);
256 
257 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
258 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
259 #endif
260 static u_int16_t         tagname2tag(struct pf_tags *, char *);
261 static void              tag_unref(struct pf_tags *, u_int16_t);
262 static int               pf_rtlabel_add(struct pf_addr_wrap *);
263 static void              pf_rtlabel_remove(struct pf_addr_wrap *);
264 static void              pf_rtlabel_copyout(struct pf_addr_wrap *);
265 
266 #if INET
267 static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
268     struct ip_fw_args *);
269 #endif /* INET */
270 static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
271     struct ip_fw_args *);
272 
273 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
274 
275 /*
276  * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
277  */
278 #define PFIOCX_STRUCT_DECL(s)                                           \
279 struct {                                                                \
280 	union {                                                         \
281 	        struct s##_32	_s##_32;                                \
282 	        struct s##_64	_s##_64;                                \
283 	} _u;                                                           \
284 } *s##_un = NULL                                                        \
285 
286 #define PFIOCX_STRUCT_BEGIN(a, s) {                                     \
287 	VERIFY(s##_un == NULL);                                         \
288 	s##_un = kalloc_type(typeof(*s##_un), Z_WAITOK_ZERO_NOFAIL);    \
289 	if (p64)                                                        \
290 	        bcopy(a, &s##_un->_u._s##_64,                           \
291 	            sizeof (struct s##_64));                            \
292 	else                                                            \
293 	        bcopy(a, &s##_un->_u._s##_32,                           \
294 	            sizeof (struct s##_32));                            \
295 }
296 
297 #define PFIOCX_STRUCT_END(s, a) {                                       \
298 	VERIFY(s##_un != NULL);                                         \
299 	if (p64)                                                        \
300 	        bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64));  \
301 	else                                                            \
302 	        bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32));  \
303 	kfree_type(typeof(*s##_un), s##_un);                            \
304 }
305 
306 #define PFIOCX_STRUCT_ADDR32(s)         (&s##_un->_u._s##_32)
307 #define PFIOCX_STRUCT_ADDR64(s)         (&s##_un->_u._s##_64)
308 
309 /*
310  * Helper macros for regular ioctl structures.
311  */
312 #define PFIOC_STRUCT_BEGIN(a, v) {                                      \
313 	VERIFY((v) == NULL);                                            \
314 	(v) = kalloc_type(typeof(*(v)), Z_WAITOK_ZERO_NOFAIL);          \
315 	bcopy(a, v, sizeof (*(v)));                                     \
316 }
317 
318 #define PFIOC_STRUCT_END(v, a) {                                        \
319 	VERIFY((v) != NULL);                                            \
320 	bcopy(v, a, sizeof (*(v)));                                     \
321 	kfree_type(typeof(*(v)), v);                                    \
322 }
323 
324 #define PFIOC_STRUCT_ADDR32(s)          (&s##_un->_u._s##_32)
325 #define PFIOC_STRUCT_ADDR64(s)          (&s##_un->_u._s##_64)
326 
327 struct thread *pf_purge_thread;
328 
329 extern void pfi_kifaddr_update(void *);
330 
331 /* pf enable ref-counting helper functions */
332 static u_int64_t                generate_token(struct proc *);
333 static int                      remove_token(struct pfioc_remove_token *);
334 static void                     invalidate_all_tokens(void);
335 
336 static u_int64_t
generate_token(struct proc * p)337 generate_token(struct proc *p)
338 {
339 	u_int64_t token_value;
340 	struct pfioc_kernel_token *new_token;
341 
342 	if (nr_tokens + 1 > NR_TOKENS_LIMIT) {
343 		os_log_error(OS_LOG_DEFAULT, "%s: NR_TOKENS_LIMIT reached", __func__);
344 		return 0;
345 	}
346 
347 	new_token = kalloc_type(struct pfioc_kernel_token,
348 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
349 
350 	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
351 
352 	token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);
353 
354 	new_token->token.token_value = token_value;
355 	new_token->token.pid = proc_pid(p);
356 	proc_name(new_token->token.pid, new_token->token.proc_name,
357 	    sizeof(new_token->token.proc_name));
358 	new_token->token.timestamp = pf_calendar_time_second();
359 
360 	SLIST_INSERT_HEAD(&token_list_head, new_token, next);
361 	nr_tokens++;
362 
363 	return token_value;
364 }
365 
366 static int
remove_token(struct pfioc_remove_token * tok)367 remove_token(struct pfioc_remove_token *tok)
368 {
369 	struct pfioc_kernel_token *entry, *tmp;
370 
371 	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
372 
373 	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
374 		if (tok->token_value == entry->token.token_value) {
375 			SLIST_REMOVE(&token_list_head, entry,
376 			    pfioc_kernel_token, next);
377 			kfree_type(struct pfioc_kernel_token, entry);
378 			nr_tokens--;
379 			return 0;    /* success */
380 		}
381 	}
382 
383 	printf("pf : remove failure\n");
384 	return ESRCH;    /* failure */
385 }
386 
387 static void
invalidate_all_tokens(void)388 invalidate_all_tokens(void)
389 {
390 	struct pfioc_kernel_token *entry, *tmp;
391 
392 	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
393 
394 	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
395 		SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
396 		kfree_type(struct pfioc_kernel_token, entry);
397 	}
398 
399 	nr_tokens = 0;
400 }
401 
/*
 * One-time PF subsystem initialization at boot.
 *
 * Sets up the object pools backing rules/states/source nodes, the
 * table/interface/OS-fingerprint subsystems, the lookup trees, the
 * default pass-all rule and its timeout table, the state purge thread,
 * and the /dev/pf and /dev/pfm character device nodes.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* Fixed-size object pools for the core PF data structures. */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof(struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	/* Tables, interface bindings and OS fingerprinting. */
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	/* Enforce the configured cap on the number of states. */
	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* On small-memory systems (<= 256 MB) lower the table-entry cap. */
	if (max_mem <= 256 * 1024 * 1024) {
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;
	}

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	/* Compile-time checks: service class codes map onto their indices. */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	/* The purge thread expires states/src nodes in the background. */
	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	/* Control nodes: /dev/pf (ioctl interface) and /dev/pfm (manager). */
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf");

	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm");

	pf_attach_hooks();
#if DUMMYNET
	dummynet_init();
#endif
}
514 
#if 0
/*
 * Full PF teardown — currently compiled out (no caller detaches PF).
 * Reverses pfinit(): unhooks from the stack, flushes rules, states,
 * source nodes, tables and anchors, then destroys the pools and
 * subsystems.  Order matters: hooks first so no new packets enter,
 * then states before source nodes (states reference src nodes).
 */
static void
pfdetach(void)
{
	struct pf_anchor        *anchor;
	struct pf_state         *state;
	struct pf_src_node      *node;
	struct pfioc_table      pt;
	u_int32_t               ticket;
	int                     i;
	char                    r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++) {
		/* begin+commit with an empty inactive set flushes the rules */
		if (pf_begin_rules(&ticket, i, &r) == 0) {
			pf_commit_rules(ticket, i, &r);
		}
	}

	/* clear states */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		/* mark every state for immediate expiry */
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++) {
			if (pf_begin_rules(&ticket, i, anchor->name) == 0) {
				pf_commit_rules(ticket, i, anchor->name);
			}
		}
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
592 
593 static int
pfopen(dev_t dev,int flags,int fmt,struct proc * p)594 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
595 {
596 #pragma unused(flags, fmt, p)
597 	if (minor(dev) >= PFDEV_MAX) {
598 		return ENXIO;
599 	}
600 
601 	if (minor(dev) == PFDEV_PFM) {
602 		lck_mtx_lock(&pf_lock);
603 		if (pfdevcnt != 0) {
604 			lck_mtx_unlock(&pf_lock);
605 			return EBUSY;
606 		}
607 		pfdevcnt++;
608 		lck_mtx_unlock(&pf_lock);
609 	}
610 	return 0;
611 }
612 
613 static int
pfclose(dev_t dev,int flags,int fmt,struct proc * p)614 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
615 {
616 #pragma unused(flags, fmt, p)
617 	if (minor(dev) >= PFDEV_MAX) {
618 		return ENXIO;
619 	}
620 
621 	if (minor(dev) == PFDEV_PFM) {
622 		lck_mtx_lock(&pf_lock);
623 		VERIFY(pfdevcnt > 0);
624 		pfdevcnt--;
625 		lck_mtx_unlock(&pf_lock);
626 	}
627 	return 0;
628 }
629 
630 static struct pf_pool *
pf_get_pool(char * anchor,u_int32_t ticket,u_int8_t rule_action,u_int32_t rule_number,u_int8_t r_last,u_int8_t active,u_int8_t check_ticket)631 pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
632     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
633     u_int8_t check_ticket)
634 {
635 	struct pf_ruleset       *ruleset;
636 	struct pf_rule          *rule;
637 	int                      rs_num;
638 
639 	ruleset = pf_find_ruleset(anchor);
640 	if (ruleset == NULL) {
641 		return NULL;
642 	}
643 	rs_num = pf_get_ruleset_number(rule_action);
644 	if (rs_num >= PF_RULESET_MAX) {
645 		return NULL;
646 	}
647 	if (active) {
648 		if (check_ticket && ticket !=
649 		    ruleset->rules[rs_num].active.ticket) {
650 			return NULL;
651 		}
652 		if (r_last) {
653 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
654 			    pf_rulequeue);
655 		} else {
656 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
657 		}
658 	} else {
659 		if (check_ticket && ticket !=
660 		    ruleset->rules[rs_num].inactive.ticket) {
661 			return NULL;
662 		}
663 		if (r_last) {
664 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
665 			    pf_rulequeue);
666 		} else {
667 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
668 		}
669 	}
670 	if (!r_last) {
671 		while ((rule != NULL) && (rule->nr != rule_number)) {
672 			rule = TAILQ_NEXT(rule, entries);
673 		}
674 	}
675 	if (rule == NULL) {
676 		return NULL;
677 	}
678 
679 	return &rule->rpool;
680 }
681 
682 static void
pf_mv_pool(struct pf_palist * poola,struct pf_palist * poolb)683 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
684 {
685 	struct pf_pooladdr      *mv_pool_pa;
686 
687 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
688 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
689 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
690 	}
691 }
692 
693 static void
pf_empty_pool(struct pf_palist * poola)694 pf_empty_pool(struct pf_palist *poola)
695 {
696 	struct pf_pooladdr      *empty_pool_pa;
697 
698 	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
699 		pfi_dynaddr_remove(&empty_pool_pa->addr);
700 		pf_tbladdr_remove(&empty_pool_pa->addr);
701 		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
702 		TAILQ_REMOVE(poola, empty_pool_pa, entries);
703 		pool_put(&pf_pooladdr_pl, empty_pool_pa);
704 	}
705 }
706 
/*
 * Unlink "rule" from "rulequeue" (if non-NULL) and free it once no
 * states or source nodes reference it any longer.
 *
 * If the rule still has states/src nodes, only the unlink happens; the
 * final teardown is deferred until the last reference goes away (the
 * caller then invokes this again with rulequeue == NULL).
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl) {
				pfr_detach_table(rule->overload_tbl);
			}
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* NULL tqe_prev marks the rule as detached from any queue */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* Defer destruction while states/src nodes still reference us. */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL) {
		return;
	}
	/* Last reference gone: release tags, labels and address bindings. */
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* deferred-path: table detach did not happen above */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl) {
			pfr_detach_table(rule->overload_tbl);
		}
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
750 
/*
 * Return the numeric id for "tagname", taking a reference.  An existing
 * entry just gains a reference; otherwise a new id is allocated —
 * either a fixed reserved id, or the lowest free dynamic id in
 * [1, DYNAMIC_TAG_ID_MAX].  Returns 0 when the dynamic id space is
 * exhausted.  The list is kept sorted: reserved tags first (they have
 * the highest ids), then dynamic tags in ascending id order.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname       *tag, *p = NULL;
	uint16_t                 new_tagid = 1;
	bool                     reserved_tag = false;

	/* Name already registered?  Just bump its refcount. */
	TAILQ_FOREACH(tag, head, entries)
	if (strcmp(tagname, tag->name) == 0) {
		tag->ref++;
		return tag->tag;
	}

	/*
	 * check if it is a reserved tag.
	 */
	/* Reserved ids must sit strictly above the dynamic range. */
	_CASSERT(RESERVED_TAG_ID_MIN > DYNAMIC_TAG_ID_MAX);
	for (int i = 0; i < NUM_RESERVED_TAGS; i++) {
		if (strncmp(tagname, pf_reserved_tag_table[i].tag_name,
		    PF_TAG_NAME_SIZE) == 0) {
			new_tagid = pf_reserved_tag_table[i].tag_id;
			reserved_tag = true;
			goto skip_dynamic_tag_alloc;
		}
	}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head)) {
		/* skip reserved tags */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag >= RESERVED_TAG_ID_MIN;
		    p = TAILQ_NEXT(p, entries)) {
			;
		}

		/* advance past consecutive ids until a gap (free id) shows */
		for (; p != NULL && p->tag == new_tagid;
		    p = TAILQ_NEXT(p, entries)) {
			new_tagid = p->tag + 1;
		}
	}

	if (new_tagid > DYNAMIC_TAG_ID_MAX) {
		return 0;
	}

skip_dynamic_tag_alloc:
	/* allocate and fill new struct pf_tagname */
	tag = kalloc_type(struct pf_tagname, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (reserved_tag) { /* insert reserved tag at the head */
		TAILQ_INSERT_HEAD(head, tag, entries);
	} else if (p != NULL) { /* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	} else { /* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);
	}

	return tag->tag;
}
819 
820 static void
tag_unref(struct pf_tags * head,u_int16_t tag)821 tag_unref(struct pf_tags *head, u_int16_t tag)
822 {
823 	struct pf_tagname       *p, *next;
824 
825 	if (tag == 0) {
826 		return;
827 	}
828 
829 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
830 		next = TAILQ_NEXT(p, entries);
831 		if (tag == p->tag) {
832 			if (--p->ref == 0) {
833 				TAILQ_REMOVE(head, p, entries);
834 				kfree_type(struct pf_tagname, p);
835 			}
836 			break;
837 		}
838 	}
839 }
840 
841 u_int16_t
pf_tagname2tag(char * tagname)842 pf_tagname2tag(char *tagname)
843 {
844 	return tagname2tag(&pf_tags, tagname);
845 }
846 
847 u_int16_t
pf_tagname2tag_ext(char * tagname)848 pf_tagname2tag_ext(char *tagname)
849 {
850 	u_int16_t       tag;
851 
852 	lck_rw_lock_exclusive(&pf_perim_lock);
853 	lck_mtx_lock(&pf_lock);
854 	tag = pf_tagname2tag(tagname);
855 	lck_mtx_unlock(&pf_lock);
856 	lck_rw_done(&pf_perim_lock);
857 	return tag;
858 }
859 
860 void
pf_tag_ref(u_int16_t tag)861 pf_tag_ref(u_int16_t tag)
862 {
863 	struct pf_tagname *t;
864 
865 	TAILQ_FOREACH(t, &pf_tags, entries)
866 	if (t->tag == tag) {
867 		break;
868 	}
869 	if (t != NULL) {
870 		t->ref++;
871 	}
872 }
873 
874 void
pf_tag_unref(u_int16_t tag)875 pf_tag_unref(u_int16_t tag)
876 {
877 	tag_unref(&pf_tags, tag);
878 }
879 
/*
 * Route labels are not supported on this platform; accept and ignore.
 * Always returns 0 (success) so rule setup proceeds.
 */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
	(void)a;
	return 0;
}
886 
/* Route labels are not supported on this platform; nothing to remove. */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
	(void)a;
}
892 
/* Route labels are not supported on this platform; nothing to copy out. */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
	(void)a;
}
898 
899 static int
pf_begin_rules(u_int32_t * ticket,int rs_num,const char * anchor)900 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
901 {
902 	struct pf_ruleset       *rs;
903 	struct pf_rule          *rule;
904 
905 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
906 		return EINVAL;
907 	}
908 	rs = pf_find_or_create_ruleset(anchor);
909 	if (rs == NULL) {
910 		return EINVAL;
911 	}
912 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
913 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
914 		rs->rules[rs_num].inactive.rcount--;
915 	}
916 	*ticket = ++rs->rules[rs_num].inactive.ticket;
917 	rs->rules[rs_num].inactive.open = 1;
918 	return 0;
919 }
920 
921 static int
pf_rollback_rules(u_int32_t ticket,int rs_num,char * anchor)922 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
923 {
924 	struct pf_ruleset       *rs;
925 	struct pf_rule          *rule;
926 
927 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
928 		return EINVAL;
929 	}
930 	rs = pf_find_ruleset(anchor);
931 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
932 	    rs->rules[rs_num].inactive.ticket != ticket) {
933 		return 0;
934 	}
935 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
936 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
937 		rs->rules[rs_num].inactive.rcount--;
938 	}
939 	rs->rules[rs_num].inactive.open = 0;
940 	return 0;
941 }
942 
/*
 * Helpers for folding rule fields into the MD5 ruleset checksum
 * (see pf_hash_rule()).  The _HTONL/_HTONS variants convert to network
 * byte order first — via the caller-supplied scratch variable `stor' —
 * so the resulting checksum does not depend on host endianness.
 */
#define PF_MD5_UPD(st, elm)                                             \
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

/* Hash a NUL-terminated string field; the terminator is not included. */
#define PF_MD5_UPD_STR(st, elm)                                         \
	MD5Update(ctx, (u_int8_t *)(st)->elm, (unsigned int)strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {                            \
	(stor) = htonl((st)->elm);                                      \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t));        \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {                            \
	(stor) = htons((st)->elm);                                      \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t));        \
} while (0)
958 
/*
 * Fold one rule address (and, for TCP/UDP, its port range) into the
 * running MD5 context used for the ruleset checksum.  Only the fields
 * relevant to the address type are hashed; the update order is part of
 * the checksum definition and must not change.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		/* dynamic interface address: identified by name + flags */
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/* ports are only meaningful for protocols that carry them */
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
995 
/*
 * Fold every checksum-relevant field of `rule' into the MD5 context.
 * The set of fields and their order define the ruleset checksum that
 * pf_setup_pfsync_matching() publishes in pf_status.pf_chksum, so any
 * change here changes the checksum for identical rulesets.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;    /* scratch for the HTONS macro */
	u_int32_t y;    /* scratch for the HTONL macro */

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1034 
/*
 * Commit a rule transaction: atomically (under pf_lock) swap the
 * inactive rule list for `anchor'/`rs_num' into the active slot, then
 * purge the previously-active rules.  The ticket must match the one
 * handed out by pf_begin_rules(), otherwise EBUSY is returned.  For
 * the main ruleset the pfsync matching checksum is recomputed first.
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset       *rs;
	struct pf_rule          *rule, **old_array, *r;
	struct pf_rulequeue     *old_rules;
	int                      error;
	u_int32_t                old_rcount;
	u_int32_t                old_rsize;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		return EINVAL;
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket) {
		return EBUSY;
	}

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0) {
			return error;
		}
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_rsize  = rs->rules[rs_num].active.rsize;
	old_array = rs->rules[rs_num].active.ptr_array;

	/*
	 * The outgoing rules are about to be destroyed; drop the global
	 * firewall-rule count for any that were installed via /dev/pfm.
	 */
	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM) {
				pffwrules--;
			}
			r = TAILQ_NEXT(r, entries);
		}
	}


	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rsize =
	    rs->rules[rs_num].inactive.rsize;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;
	rs->rules[rs_num].inactive.rsize = old_rsize;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
		pf_rm_rule(old_rules, rule);
	}
	/* Free the swapped-out pointer array and reset the inactive slot. */
	kfree_type(struct pf_rule *, rs->rules[rs_num].inactive.rsize,
	    rs->rules[rs_num].inactive.ptr_array);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.rsize = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	return 0;
}
1112 
/*
 * Copy a rule handed in from userland into a kernel rule structure.
 * All embedded strings are forcibly NUL-terminated, kernel-only
 * pointers are cleared (they will be rebuilt during rule attach), and
 * ownership/refcount bookkeeping is initialized from the calling
 * process.  Rules arriving via the PFDEV_PFM minor device are tagged
 * with PFRULE_PFM.
 */
static void
pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
    int minordev)
{
	bcopy(src, dst, sizeof(struct pf_rule));

	/* Userland strings are untrusted: guarantee NUL termination. */
	dst->label[sizeof(dst->label) - 1] = '\0';
	dst->ifname[sizeof(dst->ifname) - 1] = '\0';
	dst->qname[sizeof(dst->qname) - 1] = '\0';
	dst->pqname[sizeof(dst->pqname) - 1] = '\0';
	dst->tagname[sizeof(dst->tagname) - 1] = '\0';
	dst->match_tagname[sizeof(dst->match_tagname) - 1] = '\0';
	dst->overload_tblname[sizeof(dst->overload_tblname) - 1] = '\0';
	dst->owner[sizeof(dst->owner) - 1] = '\0';

	/* Record the creating credential/process. */
	dst->cuid = kauth_cred_getuid(kauth_cred_get());
	dst->cpid = proc_getpid(p);

	/* Kernel pointers must never come from userland. */
	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	/* initialize refcounting */
	dst->states = 0;
	dst->src_nodes = 0;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	if ((uint8_t)minordev == PFDEV_PFM) {
		dst->rule_flag |= PFRULE_PFM;
	}
}
1148 
1149 static void
pf_rule_copyout(struct pf_rule * src,struct pf_rule * dst)1150 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1151 {
1152 	bcopy(src, dst, sizeof(struct pf_rule));
1153 
1154 	dst->anchor = NULL;
1155 	dst->kif = NULL;
1156 	dst->overload_tbl = NULL;
1157 
1158 	dst->rpool.list.tqh_first = NULL;
1159 	dst->rpool.list.tqh_last = NULL;
1160 	dst->rpool.cur = NULL;
1161 
1162 	dst->entries.tqe_prev = NULL;
1163 	dst->entries.tqe_next = NULL;
1164 }
1165 
/*
 * Serialize a pf state (and its state key) into the wire/ioctl format
 * `struct pfsync_state'.  Absolute kernel times are converted to
 * relative values: `creation' becomes age-in-seconds and `expire'
 * becomes seconds-until-expiry (clamped at 0).  Rule pointers are
 * exported as rule numbers, with (unsigned)-1 meaning "none".
 */
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext_lan.addr = sk->ext_lan.addr;
	sp->ext_lan.xport = sk->ext_lan.xport;
	sp->ext_gwy.addr = sk->ext_gwy.addr;
	sp->ext_gwy.xport = sk->ext_gwy.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af_lan = sk->af_lan;
	sp->af_gwy = sk->af_gwy;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	/* export rule references as rule numbers; -1 == no rule */
	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;      /* age in seconds */
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node) {
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	}
	if (s->nat_src_node) {
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
	}

	/* convert absolute expiry to time-remaining, clamped at zero */
	if (sp->expire > secs) {
		sp->expire -= secs;
	} else {
		sp->expire = 0;
	}
}
1226 
1227 static void
pf_state_import(struct pfsync_state * sp,struct pf_state_key * sk,struct pf_state * s)1228 pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
1229     struct pf_state *s)
1230 {
1231 	/* copy to state key */
1232 	sk->lan.addr = sp->lan.addr;
1233 	sk->lan.xport = sp->lan.xport;
1234 	sk->gwy.addr = sp->gwy.addr;
1235 	sk->gwy.xport = sp->gwy.xport;
1236 	sk->ext_lan.addr = sp->ext_lan.addr;
1237 	sk->ext_lan.xport = sp->ext_lan.xport;
1238 	sk->ext_gwy.addr = sp->ext_gwy.addr;
1239 	sk->ext_gwy.xport = sp->ext_gwy.xport;
1240 	sk->proto_variant = sp->proto_variant;
1241 	s->tag = sp->tag;
1242 	sk->proto = sp->proto;
1243 	sk->af_lan = sp->af_lan;
1244 	sk->af_gwy = sp->af_gwy;
1245 	sk->direction = sp->direction;
1246 	ASSERT(sk->flowsrc == FLOWSRC_PF);
1247 	ASSERT(sk->flowhash != 0);
1248 
1249 	/* copy to state */
1250 	memcpy(&s->id, &sp->id, sizeof(sp->id));
1251 	s->creatorid = sp->creatorid;
1252 	pf_state_peer_from_pfsync(&sp->src, &s->src);
1253 	pf_state_peer_from_pfsync(&sp->dst, &s->dst);
1254 
1255 	s->rule.ptr = &pf_default_rule;
1256 	s->nat_rule.ptr = NULL;
1257 	s->anchor.ptr = NULL;
1258 	s->rt_kif = NULL;
1259 	s->creation = pf_time_second();
1260 	s->expire = pf_time_second();
1261 	if (sp->expire > 0) {
1262 		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
1263 	}
1264 	s->pfsync_time = 0;
1265 	s->packets[0] = s->packets[1] = 0;
1266 	s->bytes[0] = s->bytes[1] = 0;
1267 }
1268 
1269 static void
pf_pooladdr_copyin(struct pf_pooladdr * src,struct pf_pooladdr * dst)1270 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1271 {
1272 	bcopy(src, dst, sizeof(struct pf_pooladdr));
1273 
1274 	dst->entries.tqe_prev = NULL;
1275 	dst->entries.tqe_next = NULL;
1276 	dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1277 	dst->kif = NULL;
1278 }
1279 
1280 static void
pf_pooladdr_copyout(struct pf_pooladdr * src,struct pf_pooladdr * dst)1281 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1282 {
1283 	bcopy(src, dst, sizeof(struct pf_pooladdr));
1284 
1285 	dst->entries.tqe_prev = NULL;
1286 	dst->entries.tqe_next = NULL;
1287 	dst->kif = NULL;
1288 }
1289 
/*
 * Compute the MD5 checksum over all inactive (about-to-be-committed)
 * rulesets of `rs' and publish it in pf_status.pf_chksum, while also
 * (re)building each ruleset's rule-number -> rule pointer array.
 * Returns ENOMEM if an array cannot be allocated; the checksum is used
 * by pfsync peers to verify matching rulesets.
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX                  ctx;
	struct pf_rule          *rule;
	int                      rs_cnt;
	u_int8_t                 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB) {
			continue;
		}

		/*
		 * Resize the pointer array to hold exactly rcount rules.
		 * Z_REALLOCF frees the old array even on failure, so the
		 * stale pointer is never left behind.
		 */
		rs->rules[rs_cnt].inactive.ptr_array = krealloc_type(struct pf_rule *,
		    rs->rules[rs_cnt].inactive.rsize, rs->rules[rs_cnt].inactive.rcount,
		    rs->rules[rs_cnt].inactive.ptr_array, Z_WAITOK | Z_REALLOCF);

		if (rs->rules[rs_cnt].inactive.rcount &&
		    !rs->rules[rs_cnt].inactive.ptr_array) {
			rs->rules[rs_cnt].inactive.rsize = 0;
			return ENOMEM;
		}
		rs->rules[rs_cnt].inactive.rsize =
		    rs->rules[rs_cnt].inactive.rcount;

		/* Hash every rule and index it by rule number. */
		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return 0;
}
1328 
/*
 * Enable the packet filter.  Caller must hold pf_lock and pf must
 * currently be disabled.  Marks pf running, seeds the state-id counter
 * on first start, and wakes the purge thread so expiry processing
 * resumes.
 */
static void
pf_start(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	if (pf_status.stateid == 0) {
		/* seed state ids: seconds in the high 32 bits */
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
#if SKYWALK && defined(XNU_TARGET_OS_OSX)
	net_filter_event_mark(NET_FILTER_EVENT_PF,
	    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
1350 
/*
 * Disable the packet filter.  Caller must hold pf_lock and pf must
 * currently be enabled.  Wakes the purge thread so it can notice the
 * state change.
 */
static void
pf_stop(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
#if SKYWALK && defined(XNU_TARGET_OS_OSX)
	net_filter_event_mark(NET_FILTER_EVENT_PF,
	    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1368 
/*
 * ioctl entry point for the /dev/pf character device.
 *
 * Performs privilege checks (superuser always; a restricted read-only
 * command subset at securelevel > 1 or when the descriptor lacks
 * FWRITE), then takes pf_perim_lock (exclusive for writers, shared for
 * readers) plus pf_lock and dispatches `cmd' to the per-command
 * handlers.  `addr' points at the kernel copy of the ioctl argument
 * structure; the PFIOC*/bcopy dances exist because that buffer may be
 * unaligned.
 */
static int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
#pragma unused(dev)
	int p64 = proc_is64bit(p);
	int error = 0;
	/* NOTE(review): dev is consumed via minor() despite the pragma above */
	int minordev = minor(dev);

	/* pf is superuser-only */
	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}

	/* XXX keep in sync with switch() below */
	if (securelevel > 1) {
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			/* bcopy: the argument buffer may be unaligned */
			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				break; /* dummy operation ok */
			}
			return EPERM;
		}
		default:
			return EPERM;
		}
	}

	/* read-only descriptors may only issue non-mutating commands */
	if (!(flags & FWRITE)) {
		switch (cmd) {
		case DIOCSTART:
		case DIOCSTARTREF:
		case DIOCSTOP:
		case DIOCSTOPREF:
		case DIOCGETSTARTERS:
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return EACCES;
		}
		case DIOCGETRULE: {
			u_int32_t action;

			bcopy(&((struct pfioc_rule *)(void *)addr)->action,
			    &action, sizeof(action));

			/* clearing counters is a write operation */
			if (action == PF_GET_CLR_CNTR) {
				return EACCES;
			}
			break;
		}
		default:
			return EACCES;
		}
	}

	/* writers get the perimeter exclusively, readers share it */
	if (flags & FWRITE) {
		lck_rw_lock_exclusive(&pf_perim_lock);
	} else {
		lck_rw_lock_shared(&pf_perim_lock);
	}

	lck_mtx_lock(&pf_lock);

	switch (cmd) {
	case DIOCSTART:
		if (pf_status.running) {
			/*
			 * Increment the reference for a simple -e enable, so
			 * that even if other processes drop their references,
			 * pf will still be available to processes that turned
			 * it on without taking a reference
			 */
			if (nr_tokens == pf_enabled_ref_count) {
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			}
			error = EEXIST;
		} else if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			pf_start();
			pf_enabled_ref_count++;
			VERIFY(pf_enabled_ref_count != 0);
		}
		break;

	case DIOCSTARTREF:              /* u_int64_t */
		if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			u_int64_t token;

			/* small enough to be on stack */
			if ((token = generate_token(p)) != 0) {
				if (pf_is_enabled == 0) {
					pf_start();
				}
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			} else {
				error = ENOMEM;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: unable to generate token\n"));
			}
			bcopy(&token, addr, sizeof(token));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			/* hard stop: drop every outstanding reference */
			pf_stop();
			pf_enabled_ref_count = 0;
			invalidate_all_tokens();
		}
		break;

	case DIOCSTOPREF:               /* struct pfioc_remove_token */
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			struct pfioc_remove_token pfrt;

			/* small enough to be on stack */
			bcopy(addr, &pfrt, sizeof(pfrt));
			if ((error = remove_token(&pfrt)) == 0) {
				VERIFY(pf_enabled_ref_count != 0);
				pf_enabled_ref_count--;
				/* return currently held references */
				pfrt.refcount = pf_enabled_ref_count;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: enabled refcount decremented\n"));
			} else {
				error = EINVAL;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: token mismatch\n"));
			}
			bcopy(&pfrt, addr, sizeof(pfrt));

			/* last reference gone: actually stop pf */
			if (error == 0 && pf_enabled_ref_count == 0) {
				pf_stop();
			}
		}
		break;

	case DIOCGETSTARTERS: {         /* struct pfioc_tokens */
		PFIOCX_STRUCT_DECL(pfioc_tokens);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens);
		error = pfioctl_ioc_tokens(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_tokens),
		    PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
		PFIOCX_STRUCT_END(pfioc_tokens, addr);
		break;
	}

	case DIOCADDRULE:               /* struct pfioc_rule */
	case DIOCGETRULES:              /* struct pfioc_rule */
	case DIOCGETRULE:               /* struct pfioc_rule */
	case DIOCCHANGERULE:            /* struct pfioc_rule */
	case DIOCINSERTRULE:            /* struct pfioc_rule */
	case DIOCDELETERULE: {          /* struct pfioc_rule */
		struct pfioc_rule *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr);
		error = pfioctl_ioc_rule(cmd, minordev, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCCLRSTATES:             /* struct pfioc_state_kill */
	case DIOCKILLSTATES: {          /* struct pfioc_state_kill */
		struct pfioc_state_kill *psk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psk);
		error = pfioctl_ioc_state_kill(cmd, psk, p);
		PFIOC_STRUCT_END(psk, addr);
		break;
	}

	case DIOCADDSTATE:              /* struct pfioc_state */
	case DIOCGETSTATE: {            /* struct pfioc_state */
		struct pfioc_state *ps = NULL;

		PFIOC_STRUCT_BEGIN(addr, ps);
		error = pfioctl_ioc_state(cmd, ps, p);
		PFIOC_STRUCT_END(ps, addr);
		break;
	}

	case DIOCGETSTATES: {           /* struct pfioc_states */
		PFIOCX_STRUCT_DECL(pfioc_states);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_states);
		error = pfioctl_ioc_states(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_states),
		    PFIOCX_STRUCT_ADDR64(pfioc_states), p);
		PFIOCX_STRUCT_END(pfioc_states, addr);
		break;
	}

	case DIOCGETSTATUS: {           /* struct pf_status */
		struct pf_status *s = NULL;

		PFIOC_STRUCT_BEGIN(&pf_status, s);
		pfi_update_status(s->ifname, s);
		PFIOC_STRUCT_END(s, addr);
		break;
	}

	case DIOCSETSTATUSIF: {         /* struct pfioc_if */
		struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;

		/* OK for unaligned accesses */
		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		/* reset counters but keep the status interface binding */
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = pf_calendar_time_second();
		if (*pf_status.ifname) {
			pfi_update_status(pf_status.ifname, NULL);
		}
		break;
	}

	case DIOCNATLOOK: {             /* struct pfioc_natlook */
		struct pfioc_natlook *pnl = NULL;

		PFIOC_STRUCT_BEGIN(addr, pnl);
		error = pfioctl_ioc_natlook(cmd, pnl, p);
		PFIOC_STRUCT_END(pnl, addr);
		break;
	}

	case DIOCSETTIMEOUT:            /* struct pfioc_tm */
	case DIOCGETTIMEOUT: {          /* struct pfioc_tm */
		struct pfioc_tm pt;

		/* small enough to be on stack */
		bcopy(addr, &pt, sizeof(pt));
		error = pfioctl_ioc_tm(cmd, &pt, p);
		bcopy(&pt, addr, sizeof(pt));
		break;
	}

	case DIOCGETLIMIT:              /* struct pfioc_limit */
	case DIOCSETLIMIT: {            /* struct pfioc_limit */
		struct pfioc_limit pl;

		/* small enough to be on stack */
		bcopy(addr, &pl, sizeof(pl));
		error = pfioctl_ioc_limit(cmd, &pl, p);
		bcopy(&pl, addr, sizeof(pl));
		break;
	}

	case DIOCSETDEBUG: {            /* u_int32_t */
		bcopy(addr, &pf_status.debug, sizeof(u_int32_t));
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset       *ruleset = &pf_main_ruleset;
		struct pf_rule          *rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCGIFSPEED: {
		struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
		struct pf_ifspeed ps;
		struct ifnet *ifp;
		u_int64_t baudrate;

		if (psp->ifname[0] != '\0') {
			/* Can we completely trust user-land? */
			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
			ps.ifname[IFNAMSIZ - 1] = '\0';
			ifp = ifunit(ps.ifname);
			if (ifp != NULL) {
				baudrate = ifp->if_output_bw.max_bw;
				bcopy(&baudrate, &psp->baudrate,
				    sizeof(baudrate));
			} else {
				error = EINVAL;
			}
		} else {
			error = EINVAL;
		}
		break;
	}

	case DIOCBEGINADDRS:            /* struct pfioc_pooladdr */
	case DIOCADDADDR:               /* struct pfioc_pooladdr */
	case DIOCGETADDRS:              /* struct pfioc_pooladdr */
	case DIOCGETADDR:               /* struct pfioc_pooladdr */
	case DIOCCHANGEADDR: {          /* struct pfioc_pooladdr */
		struct pfioc_pooladdr *pp = NULL;

		PFIOC_STRUCT_BEGIN(addr, pp);
		error = pfioctl_ioc_pooladdr(cmd, pp, p);
		PFIOC_STRUCT_END(pp, addr);
		break;
	}

	case DIOCGETRULESETS:           /* struct pfioc_ruleset */
	case DIOCGETRULESET: {          /* struct pfioc_ruleset */
		struct pfioc_ruleset *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr);
		error = pfioctl_ioc_ruleset(cmd, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCRCLRTABLES:            /* struct pfioc_table */
	case DIOCRADDTABLES:            /* struct pfioc_table */
	case DIOCRDELTABLES:            /* struct pfioc_table */
	case DIOCRGETTABLES:            /* struct pfioc_table */
	case DIOCRGETTSTATS:            /* struct pfioc_table */
	case DIOCRCLRTSTATS:            /* struct pfioc_table */
	case DIOCRSETTFLAGS:            /* struct pfioc_table */
	case DIOCRCLRADDRS:             /* struct pfioc_table */
	case DIOCRADDADDRS:             /* struct pfioc_table */
	case DIOCRDELADDRS:             /* struct pfioc_table */
	case DIOCRSETADDRS:             /* struct pfioc_table */
	case DIOCRGETADDRS:             /* struct pfioc_table */
	case DIOCRGETASTATS:            /* struct pfioc_table */
	case DIOCRCLRASTATS:            /* struct pfioc_table */
	case DIOCRTSTADDRS:             /* struct pfioc_table */
	case DIOCRINADEFINE: {          /* struct pfioc_table */
		PFIOCX_STRUCT_DECL(pfioc_table);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_table);
		error = pfioctl_ioc_table(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_table),
		    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
		PFIOCX_STRUCT_END(pfioc_table, addr);
		break;
	}

	case DIOCOSFPADD:               /* struct pf_osfp_ioctl */
	case DIOCOSFPGET: {             /* struct pf_osfp_ioctl */
		struct pf_osfp_ioctl *io = NULL;

		PFIOC_STRUCT_BEGIN(addr, io);
		if (cmd == DIOCOSFPADD) {
			error = pf_osfp_add(io);
		} else {
			VERIFY(cmd == DIOCOSFPGET);
			error = pf_osfp_get(io);
		}
		PFIOC_STRUCT_END(io, addr);
		break;
	}

	case DIOCXBEGIN:                /* struct pfioc_trans */
	case DIOCXROLLBACK:             /* struct pfioc_trans */
	case DIOCXCOMMIT: {             /* struct pfioc_trans */
		PFIOCX_STRUCT_DECL(pfioc_trans);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_trans);
		error = pfioctl_ioc_trans(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_trans),
		    PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
		PFIOCX_STRUCT_END(pfioc_trans, addr);
		break;
	}

	case DIOCGETSRCNODES: {         /* struct pfioc_src_nodes */
		PFIOCX_STRUCT_DECL(pfioc_src_nodes);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes);
		error = pfioctl_ioc_src_nodes(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
		    PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
		PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
		break;
	}

	case DIOCCLRSRCNODES: {
		struct pf_src_node      *n;
		struct pf_state         *state;

		/* detach every state from its source nodes ... */
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		/* ... then mark all source nodes expired and purge them */
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {        /* struct pfioc_src_node_kill */
		struct pfioc_src_node_kill *psnk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psnk);
		error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
		PFIOC_STRUCT_END(psnk, addr);
		break;
	}

	case DIOCSETHOSTID: {           /* u_int32_t */
		u_int32_t hid;

		/* small enough to be on stack */
		bcopy(addr, &hid, sizeof(hid));
		if (hid == 0) {
			pf_status.hostid = random();
		} else {
			pf_status.hostid = hid;
		}
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES:            /* struct pfioc_iface */
	case DIOCSETIFFLAG:             /* struct pfioc_iface */
	case DIOCCLRIFFLAG: {           /* struct pfioc_iface */
		PFIOCX_STRUCT_DECL(pfioc_iface);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_iface);
		error = pfioctl_ioc_iface(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_iface),
		    PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
		PFIOCX_STRUCT_END(pfioc_iface, addr);
		break;
	}

	default:
		error = ENODEV;
		break;
	}

	lck_mtx_unlock(&pf_lock);
	lck_rw_done(&pf_perim_lock);

	return error;
}
1919 
/*
 * Handler for the DIOCR* radix-table ioctls: create/delete tables,
 * add/delete/test addresses, fetch and clear statistics, set table
 * flags, and define the inactive address set used during a commit.
 *
 * struct pfioc_table embeds a user pointer (pfrio_buffer), so its
 * layout differs between 32-bit and 64-bit callers; the ioctl layer
 * hands us both views and we dispatch on proc_is64bit(p).  Only the
 * branch matching the caller's ABI executes, and on a non-LP64 kernel
 * the 64-bit branch is compiled out entirely.
 *
 * Every command first validates pfrio_esize -- the caller's notion of
 * the element size -- against the kernel structure size and fails with
 * ENODEV on mismatch, guarding against user/kernel struct skew.
 * PFR_FLAG_USERIOCTL is OR'ed into the downcall flags so the pfr_*
 * backend knows the request originated in userland.  Commands that
 * take an inline table argument sanitize it first with
 * pfr_table_copyin_cleanup().
 *
 * Returns 0 on success or an errno from the pfr_* backend.
 */
static int
pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
    struct pfioc_table_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	if (!p64) {
		goto struct32;
	}

#ifdef __LP64__
	/*
	 * 64-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		/* no element array expected: esize must be 0 */
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
		    io64->pfrio_setflag, io64->pfrio_clrflag,
		    &io64->pfrio_nchange, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
		    &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		/* NB: upstream checks pfr_addr (not pfr_astats) here */
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
		    io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		/* dispatcher guarantees only DIOCR* commands reach us */
		VERIFY(0);
		/* NOTREACHED */
	}
	goto done;
#else
#pragma unused(io64)
#endif /* __LP64__ */

struct32:
	/*
	 * 32-bit structure processing (mirror of the 64-bit switch above)
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
		    io32->pfrio_setflag, io32->pfrio_clrflag,
		    &io32->pfrio_nchange, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
		    &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		/* NB: upstream checks pfr_addr (not pfr_astats) here */
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
		    io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
#ifdef __LP64__
done:
#endif
	return error;
}
2289 
/*
 * Handler for DIOCGETSTARTERS: copy the kernel's list of PF tokens
 * (one per client recorded in token_list_head) out to userland.
 *
 * Two-phase protocol: if the caller passes size == 0, only the number
 * of bytes needed to hold every token is written back; otherwise up to
 * `size' bytes of struct pfioc_token records are staged and copied
 * out, and `size' is rewritten to the byte count actually produced.
 *
 * tok32/tok64 are the 32-bit and 64-bit views of the same user
 * argument (the embedded buffer pointer differs in width); p selects
 * the live view via proc_is64bit().
 *
 * Returns 0 on success, ENOENT if no tokens exist, ERANGE if the total
 * size computation overflows, ENOMEM if the staging buffer cannot be
 * allocated, or an error from copyout().
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		/* total bytes required; the division detects int overflow */
		size = sizeof(struct pfioc_token) * nr_tokens;
		if (size / nr_tokens != sizeof(struct pfioc_token)) {
			os_log_error(OS_LOG_DEFAULT, "%s: size overflows", __func__);
			error = ERANGE;
			break;
		}
		/* size == 0 from the caller means "report required size" */
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			if (p64) {
				tok64->size = size;
			} else {
				tok32->size = size;
			}
			break;
		}

#ifdef __LP64__
		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
#else
		token_buf = tok32->pgt_buf;
#endif
		tokens = (struct pfioc_token *)kalloc_data(size, Z_WAITOK | Z_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		/* stage as many whole records as the caller's buffer allows */
		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof(*tokens)) {
				break;    /* no more buffer space left */
			}
			t = (struct pfioc_token *)(void *)ptr;
			t->token_value  = entry->token.token_value;
			t->timestamp    = entry->token.timestamp;
			t->pid          = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof(struct pfioc_token);

			cnt -= sizeof(struct pfioc_token);
		}

		/* ocnt - cnt == bytes staged; copy out only if non-zero */
		if (cnt < ocnt) {
			error = copyout(tokens, token_buf, ocnt - cnt);
		}

		if (p64) {
			tok64->size = ocnt - cnt;
		} else {
			tok32->size = ocnt - cnt;
		}

		kfree_data(tokens, size);
		break;
	}

	default:
		/* dispatcher routes only DIOCGETSTARTERS here */
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
2375 
2376 static void
pf_expire_states_and_src_nodes(struct pf_rule * rule)2377 pf_expire_states_and_src_nodes(struct pf_rule *rule)
2378 {
2379 	struct pf_state         *state;
2380 	struct pf_src_node      *sn;
2381 	int                      killed = 0;
2382 
2383 	/* expire the states */
2384 	state = TAILQ_FIRST(&state_list);
2385 	while (state) {
2386 		if (state->rule.ptr == rule) {
2387 			state->timeout = PFTM_PURGE;
2388 		}
2389 		state = TAILQ_NEXT(state, entry_list);
2390 	}
2391 	pf_purge_expired_states(pf_status.states);
2392 
2393 	/* expire the src_nodes */
2394 	RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2395 		if (sn->rule.ptr != rule) {
2396 			continue;
2397 		}
2398 		if (sn->states != 0) {
2399 			RB_FOREACH(state, pf_state_tree_id,
2400 			    &tree_id) {
2401 				if (state->src_node == sn) {
2402 					state->src_node = NULL;
2403 				}
2404 				if (state->nat_src_node == sn) {
2405 					state->nat_src_node = NULL;
2406 				}
2407 			}
2408 			sn->states = 0;
2409 		}
2410 		sn->expire = 1;
2411 		killed++;
2412 	}
2413 	if (killed) {
2414 		pf_purge_expired_src_nodes();
2415 	}
2416 }
2417 
2418 static void
pf_delete_rule_from_ruleset(struct pf_ruleset * ruleset,int rs_num,struct pf_rule * rule)2419 pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2420     struct pf_rule *rule)
2421 {
2422 	struct pf_rule *r;
2423 	int nr = 0;
2424 
2425 	pf_expire_states_and_src_nodes(rule);
2426 
2427 	pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2428 	if (ruleset->rules[rs_num].active.rcount-- == 0) {
2429 		panic("%s: rcount value broken!", __func__);
2430 	}
2431 	r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2432 
2433 	while (r) {
2434 		r->nr = nr++;
2435 		r = TAILQ_NEXT(r, entries);
2436 	}
2437 }
2438 
2439 
2440 static void
pf_ruleset_cleanup(struct pf_ruleset * ruleset,int rs)2441 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2442 {
2443 	pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2444 	ruleset->rules[rs].active.ticket =
2445 	    ++ruleset->rules[rs].inactive.ticket;
2446 }
2447 
/*
 * Delete the rule identified by pr->rule.ticket from the ruleset named
 * by pr->anchor, verifying ownership against pr->rule.owner.
 *
 * req_dev encodes the PF interface. Currently, possible values are
 * 0 or PFRULE_PFM; a rule may only be deleted by the same device
 * (matching PFRULE_PFM state) that installed it.
 *
 * When the deleted rule was the last one in an unowned anchor, the
 * parent rule referencing that anchor is deleted as well, repeating
 * upward so empty anchors are pruned bottom-up.
 *
 * Returns 0 on success, ENOENT if no rule carries the ticket, EACCES
 * on owner/device mismatch, or the lookup error reported by
 * pf_find_ruleset_with_owner().
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
	struct pf_ruleset       *ruleset;
	struct pf_rule          *rule = NULL;
	int                      is_anchor;
	int                      error;
	int                      i;

	is_anchor = (pr->anchor_call[0] != '\0');
	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
	    pr->rule.owner, is_anchor, &error)) == NULL) {
		return error;
	}

	/* scan every ruleset type for the ticket */
	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		return ENOENT;
	} else {
		/* undo the loop's final i++ so i indexes the match */
		i--;
	}

	if (strcmp(rule->owner, pr->rule.owner)) {
		return EACCES;
	}

delete_rule:
	/*
	 * Last rule inside an unowned, non-main anchor: delete it, then
	 * continue with the parent's rule that references the anchor.
	 */
	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
		/* set rule & ruleset to parent and repeat */
		struct pf_rule *delete_rule = rule;
		struct pf_ruleset *delete_ruleset = ruleset;

#define parent_ruleset          ruleset->anchor->parent->ruleset
		if (ruleset->anchor->parent == NULL) {
			ruleset = &pf_main_ruleset;
		} else {
			ruleset = &parent_ruleset;
		}

		/* find the parent rule that points at the child anchor */
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule &&
		    (rule->anchor != delete_ruleset->anchor)) {
			rule = TAILQ_NEXT(rule, entries);
		}
		if (rule == NULL) {
			panic("%s: rule not found!", __func__);
		}

		/*
		 * if request device != rule's device, bail :
		 * with error if ticket matches;
		 * without error if ticket doesn't match (i.e. its just cleanup)
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			if (rule->ticket != pr->rule.ticket) {
				return 0;
			} else {
				return EACCES;
			}
		}

		if (delete_rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}

		pf_delete_rule_from_ruleset(delete_ruleset,
		    i, delete_rule);
		delete_ruleset->rules[i].active.ticket =
		    ++delete_ruleset->rules[i].inactive.ticket;
		goto delete_rule;
	} else {
		/*
		 * process deleting rule only if device that added the
		 * rule matches device that issued the request
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			return EACCES;
		}
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}
		pf_delete_rule_from_ruleset(ruleset, i,
		    rule);
		pf_ruleset_cleanup(ruleset, i);
	}

	return 0;
}
2547 
/*
 * Delete every rule whose owner string matches `owner', walking all
 * ruleset types of the main ruleset and descending into anchors whose
 * owner matches or is empty.
 *
 * req_dev encodes the PF interface. Currently, possible values are
 * 0 or PFRULE_PFM; only rules installed by the matching device are
 * deleted.  pf_ruleset_cleanup() is invoked once per ruleset that
 * actually lost rules (on anchor entry/exit and at list end).
 */
static void
pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
{
	struct pf_ruleset       *ruleset;
	struct pf_rule          *rule, *next;
	int                      deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			next = TAILQ_NEXT(rule, entries);
			/*
			 * process deleting rule only if device that added the
			 * rule matches device that issued the request
			 */
			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
				rule = next;
				continue;
			}
			if (rule->anchor) {
				if (((strcmp(rule->owner, owner)) == 0) ||
				    ((strcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						/* finish bookkeeping before descending */
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						/* empty anchor: delete the anchor rule itself */
						if (rule->rule_flag &
						    PFRULE_PFM) {
							pffwrules--;
						}
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else {
					rule = next;
				}
			} else {
				if (((strcmp(rule->owner, owner)) == 0)) {
					/* delete rule */
					if (rule->rule_flag & PFRULE_PFM) {
						pffwrules--;
					}
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			/*
			 * End of the current list: clean up, then step back
			 * out to the parent ruleset (if inside an anchor)
			 * and resume after the anchor rule.
			 */
			if (rule == NULL) {
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset) {
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
				}
			}
		}
	}
}
2622 
2623 static void
pf_deleterule_anchor_step_out(struct pf_ruleset ** ruleset_ptr,int rs,struct pf_rule ** rule_ptr)2624 pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2625     int rs, struct pf_rule **rule_ptr)
2626 {
2627 	struct pf_ruleset *ruleset = *ruleset_ptr;
2628 	struct pf_rule *rule = *rule_ptr;
2629 
2630 	/* step out of anchor */
2631 	struct pf_ruleset *rs_copy = ruleset;
2632 	ruleset = ruleset->anchor->parent?
2633 	    &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2634 
2635 	rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2636 	while (rule && (rule->anchor != rs_copy->anchor)) {
2637 		rule = TAILQ_NEXT(rule, entries);
2638 	}
2639 	if (rule == NULL) {
2640 		panic("%s: parent rule of anchor not found!", __func__);
2641 	}
2642 	if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2643 		rule = TAILQ_NEXT(rule, entries);
2644 	}
2645 
2646 	*ruleset_ptr = ruleset;
2647 	*rule_ptr = rule;
2648 }
2649 
2650 static void
pf_addrwrap_setup(struct pf_addr_wrap * aw)2651 pf_addrwrap_setup(struct pf_addr_wrap *aw)
2652 {
2653 	VERIFY(aw);
2654 	bzero(&aw->p, sizeof aw->p);
2655 }
2656 
/*
 * Finish setting up a rule freshly copied in from userland: resolve
 * the bound interface, tags, route labels, dynamic/table addresses and
 * anchor, attach the overload table, and move the staged pool-address
 * list (pf_pabuf) into the rule's redirection pool.
 *
 * Validation failures are accumulated in `error' so every setup step
 * still runs; if any step failed, the rule and everything attached so
 * far are released with pf_rm_rule(NULL, rule).  The one early exit is
 * an unresolvable interface name, which frees the bare rule and
 * returns EINVAL immediately.
 *
 * Returns 0 on success or EINVAL/EBUSY on failure; in the error case
 * ownership of `rule' has been consumed (it is already freed).
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset)
{
	struct pf_pooladdr      *apa;
	int                      error = 0;

	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			/* early exit: nothing else attached yet */
			pool_put(&pf_rule_pl, rule);
			return EINVAL;
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
	if (rule->tagname[0]) {
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) {
			error = EBUSY;
		}
	}
	if (rule->match_tagname[0]) {
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0) {
			error = EBUSY;
		}
	}
	/* route-to et al. require an explicit direction */
	if (rule->rt && !rule->direction) {
		error = EINVAL;
	}
#if PFLOG
	if (!rule->log) {
		rule->logif = 0;
	}
	if (rule->logif >= PFLOGIFS_MAX) {
		error = EINVAL;
	}
#endif /* PFLOG */
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr)) {
		error = EBUSY;
	}
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) {
		error = EINVAL;
	}
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->src.addr)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) {
		error = EINVAL;
	}
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) {
		error = EINVAL;
	}
	/* resolve table addresses in the staged pool entries too */
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
	if (pf_tbladdr_setup(ruleset, &apa->addr)) {
		error = EINVAL;
	}

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL) {
			error = EINVAL;
		} else {
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
		}
	}

	/* take ownership of the staged pool addresses */
	pf_mv_pool(&pf_pabuf, &rule->rpool.list);

	/* translation rules (outside anchors) and route-to need a pool */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
	    rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL)) {
		error = EINVAL;
	}

	if (error) {
		pf_rm_rule(NULL, rule);
		return error;
	}
	/* For a NAT64 rule the rule's address family is AF_INET6 whereas
	 * the address pool's family will be AF_INET
	 */
	rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET: rule->af;
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	/* zero the counters; the rule starts fresh */
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return 0;
}
2754 
2755 static int
pfioctl_ioc_rule(u_long cmd,int minordev,struct pfioc_rule * pr,struct proc * p)2756 pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
2757 {
2758 	int error = 0;
2759 	u_int32_t req_dev = 0;
2760 
2761 	switch (cmd) {
2762 	case DIOCADDRULE: {
2763 		struct pf_ruleset       *ruleset;
2764 		struct pf_rule          *rule, *tail;
2765 		int                     rs_num;
2766 
2767 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2768 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2769 		ruleset = pf_find_ruleset(pr->anchor);
2770 		if (ruleset == NULL) {
2771 			error = EINVAL;
2772 			break;
2773 		}
2774 		rs_num = pf_get_ruleset_number(pr->rule.action);
2775 		if (rs_num >= PF_RULESET_MAX) {
2776 			error = EINVAL;
2777 			break;
2778 		}
2779 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2780 			error = EINVAL;
2781 			break;
2782 		}
2783 		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
2784 			error = EBUSY;
2785 			break;
2786 		}
2787 		if (pr->pool_ticket != ticket_pabuf) {
2788 			error = EBUSY;
2789 			break;
2790 		}
2791 		rule = pool_get(&pf_rule_pl, PR_WAITOK);
2792 		if (rule == NULL) {
2793 			error = ENOMEM;
2794 			break;
2795 		}
2796 		pf_rule_copyin(&pr->rule, rule, p, minordev);
2797 #if !INET
2798 		if (rule->af == AF_INET) {
2799 			pool_put(&pf_rule_pl, rule);
2800 			error = EAFNOSUPPORT;
2801 			break;
2802 		}
2803 #endif /* INET */
2804 		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2805 		    pf_rulequeue);
2806 		if (tail) {
2807 			rule->nr = tail->nr + 1;
2808 		} else {
2809 			rule->nr = 0;
2810 		}
2811 
2812 		if ((error = pf_rule_setup(pr, rule, ruleset))) {
2813 			break;
2814 		}
2815 
2816 		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2817 		    rule, entries);
2818 		ruleset->rules[rs_num].inactive.rcount++;
2819 		if (rule->rule_flag & PFRULE_PFM) {
2820 			pffwrules++;
2821 		}
2822 
2823 		if (rule->action == PF_NAT64) {
2824 			atomic_add_16(&pf_nat64_configured, 1);
2825 		}
2826 
2827 		if (pr->anchor_call[0] == '\0') {
2828 			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
2829 			if (rule->rule_flag & PFRULE_PFM) {
2830 				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
2831 			}
2832 		}
2833 
2834 #if DUMMYNET
2835 		if (rule->action == PF_DUMMYNET) {
2836 			struct dummynet_event dn_event;
2837 			uint32_t direction = DN_INOUT;
2838 			bzero(&dn_event, sizeof(dn_event));
2839 
2840 			dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;
2841 
2842 			if (rule->direction == PF_IN) {
2843 				direction = DN_IN;
2844 			} else if (rule->direction == PF_OUT) {
2845 				direction = DN_OUT;
2846 			}
2847 
2848 			dn_event.dn_event_rule_config.dir = direction;
2849 			dn_event.dn_event_rule_config.af = rule->af;
2850 			dn_event.dn_event_rule_config.proto = rule->proto;
2851 			dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
2852 			dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
2853 			strlcpy(dn_event.dn_event_rule_config.ifname, rule->ifname,
2854 			    sizeof(dn_event.dn_event_rule_config.ifname));
2855 
2856 			dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2857 		}
2858 #endif
2859 		break;
2860 	}
2861 
2862 	case DIOCGETRULES: {
2863 		struct pf_ruleset       *ruleset;
2864 		struct pf_rule          *tail;
2865 		int                      rs_num;
2866 
2867 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2868 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2869 		ruleset = pf_find_ruleset(pr->anchor);
2870 		if (ruleset == NULL) {
2871 			error = EINVAL;
2872 			break;
2873 		}
2874 		rs_num = pf_get_ruleset_number(pr->rule.action);
2875 		if (rs_num >= PF_RULESET_MAX) {
2876 			error = EINVAL;
2877 			break;
2878 		}
2879 		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2880 		    pf_rulequeue);
2881 		if (tail) {
2882 			pr->nr = tail->nr + 1;
2883 		} else {
2884 			pr->nr = 0;
2885 		}
2886 		pr->ticket = ruleset->rules[rs_num].active.ticket;
2887 		break;
2888 	}
2889 
2890 	case DIOCGETRULE: {
2891 		struct pf_ruleset       *ruleset;
2892 		struct pf_rule          *rule;
2893 		int                      rs_num, i;
2894 
2895 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2896 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2897 		ruleset = pf_find_ruleset(pr->anchor);
2898 		if (ruleset == NULL) {
2899 			error = EINVAL;
2900 			break;
2901 		}
2902 		rs_num = pf_get_ruleset_number(pr->rule.action);
2903 		if (rs_num >= PF_RULESET_MAX) {
2904 			error = EINVAL;
2905 			break;
2906 		}
2907 		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
2908 			error = EBUSY;
2909 			break;
2910 		}
2911 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2912 		while ((rule != NULL) && (rule->nr != pr->nr)) {
2913 			rule = TAILQ_NEXT(rule, entries);
2914 		}
2915 		if (rule == NULL) {
2916 			error = EBUSY;
2917 			break;
2918 		}
2919 		pf_rule_copyout(rule, &pr->rule);
2920 		if (pf_anchor_copyout(ruleset, rule, pr)) {
2921 			error = EBUSY;
2922 			break;
2923 		}
2924 		pfi_dynaddr_copyout(&pr->rule.src.addr);
2925 		pfi_dynaddr_copyout(&pr->rule.dst.addr);
2926 		pf_tbladdr_copyout(&pr->rule.src.addr);
2927 		pf_tbladdr_copyout(&pr->rule.dst.addr);
2928 		pf_rtlabel_copyout(&pr->rule.src.addr);
2929 		pf_rtlabel_copyout(&pr->rule.dst.addr);
2930 		for (i = 0; i < PF_SKIP_COUNT; ++i) {
2931 			if (rule->skip[i].ptr == NULL) {
2932 				pr->rule.skip[i].nr = -1;
2933 			} else {
2934 				pr->rule.skip[i].nr =
2935 				    rule->skip[i].ptr->nr;
2936 			}
2937 		}
2938 
2939 		if (pr->action == PF_GET_CLR_CNTR) {
2940 			rule->evaluations = 0;
2941 			rule->packets[0] = rule->packets[1] = 0;
2942 			rule->bytes[0] = rule->bytes[1] = 0;
2943 		}
2944 		break;
2945 	}
2946 
2947 	case DIOCCHANGERULE: {
2948 		struct pfioc_rule       *pcr = pr;
2949 		struct pf_ruleset       *ruleset;
2950 		struct pf_rule          *oldrule = NULL, *newrule = NULL;
2951 		struct pf_pooladdr      *pa;
2952 		u_int32_t                nr = 0;
2953 		int                      rs_num;
2954 
2955 		if (!(pcr->action == PF_CHANGE_REMOVE ||
2956 		    pcr->action == PF_CHANGE_GET_TICKET) &&
2957 		    pcr->pool_ticket != ticket_pabuf) {
2958 			error = EBUSY;
2959 			break;
2960 		}
2961 
2962 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
2963 		    pcr->action > PF_CHANGE_GET_TICKET) {
2964 			error = EINVAL;
2965 			break;
2966 		}
2967 		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
2968 		pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0';
2969 		ruleset = pf_find_ruleset(pcr->anchor);
2970 		if (ruleset == NULL) {
2971 			error = EINVAL;
2972 			break;
2973 		}
2974 		rs_num = pf_get_ruleset_number(pcr->rule.action);
2975 		if (rs_num >= PF_RULESET_MAX) {
2976 			error = EINVAL;
2977 			break;
2978 		}
2979 
2980 		if (pcr->action == PF_CHANGE_GET_TICKET) {
2981 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
2982 			break;
2983 		} else {
2984 			if (pcr->ticket !=
2985 			    ruleset->rules[rs_num].active.ticket) {
2986 				error = EINVAL;
2987 				break;
2988 			}
2989 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2990 				error = EINVAL;
2991 				break;
2992 			}
2993 		}
2994 
2995 		if (pcr->action != PF_CHANGE_REMOVE) {
2996 			newrule = pool_get(&pf_rule_pl, PR_WAITOK);
2997 			if (newrule == NULL) {
2998 				error = ENOMEM;
2999 				break;
3000 			}
3001 			pf_rule_copyin(&pcr->rule, newrule, p, minordev);
3002 #if !INET
3003 			if (newrule->af == AF_INET) {
3004 				pool_put(&pf_rule_pl, newrule);
3005 				error = EAFNOSUPPORT;
3006 				break;
3007 			}
3008 #endif /* INET */
3009 			if (newrule->ifname[0]) {
3010 				newrule->kif = pfi_kif_get(newrule->ifname);
3011 				if (newrule->kif == NULL) {
3012 					pool_put(&pf_rule_pl, newrule);
3013 					error = EINVAL;
3014 					break;
3015 				}
3016 				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
3017 			} else {
3018 				newrule->kif = NULL;
3019 			}
3020 
3021 			if (newrule->tagname[0]) {
3022 				if ((newrule->tag =
3023 				    pf_tagname2tag(newrule->tagname)) == 0) {
3024 					error = EBUSY;
3025 				}
3026 			}
3027 			if (newrule->match_tagname[0]) {
3028 				if ((newrule->match_tag = pf_tagname2tag(
3029 					    newrule->match_tagname)) == 0) {
3030 					error = EBUSY;
3031 				}
3032 			}
3033 			if (newrule->rt && !newrule->direction) {
3034 				error = EINVAL;
3035 			}
3036 #if PFLOG
3037 			if (!newrule->log) {
3038 				newrule->logif = 0;
3039 			}
3040 			if (newrule->logif >= PFLOGIFS_MAX) {
3041 				error = EINVAL;
3042 			}
3043 #endif /* PFLOG */
3044 			pf_addrwrap_setup(&newrule->src.addr);
3045 			pf_addrwrap_setup(&newrule->dst.addr);
3046 			if (pf_rtlabel_add(&newrule->src.addr) ||
3047 			    pf_rtlabel_add(&newrule->dst.addr)) {
3048 				error = EBUSY;
3049 			}
3050 			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) {
3051 				error = EINVAL;
3052 			}
3053 			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) {
3054 				error = EINVAL;
3055 			}
3056 			if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) {
3057 				error = EINVAL;
3058 			}
3059 			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) {
3060 				error = EINVAL;
3061 			}
3062 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) {
3063 				error = EINVAL;
3064 			}
3065 			TAILQ_FOREACH(pa, &pf_pabuf, entries)
3066 			if (pf_tbladdr_setup(ruleset, &pa->addr)) {
3067 				error = EINVAL;
3068 			}
3069 
3070 			if (newrule->overload_tblname[0]) {
3071 				if ((newrule->overload_tbl = pfr_attach_table(
3072 					    ruleset, newrule->overload_tblname)) ==
3073 				    NULL) {
3074 					error = EINVAL;
3075 				} else {
3076 					newrule->overload_tbl->pfrkt_flags |=
3077 					    PFR_TFLAG_ACTIVE;
3078 				}
3079 			}
3080 
3081 			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
3082 			if (((((newrule->action == PF_NAT) ||
3083 			    (newrule->action == PF_RDR) ||
3084 			    (newrule->action == PF_BINAT) ||
3085 			    (newrule->rt > PF_FASTROUTE)) &&
3086 			    !newrule->anchor)) &&
3087 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL)) {
3088 				error = EINVAL;
3089 			}
3090 
3091 			if (error) {
3092 				pf_rm_rule(NULL, newrule);
3093 				break;
3094 			}
3095 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3096 			newrule->evaluations = 0;
3097 			newrule->packets[0] = newrule->packets[1] = 0;
3098 			newrule->bytes[0] = newrule->bytes[1] = 0;
3099 		}
3100 		pf_empty_pool(&pf_pabuf);
3101 
3102 		if (pcr->action == PF_CHANGE_ADD_HEAD) {
3103 			oldrule = TAILQ_FIRST(
3104 				ruleset->rules[rs_num].active.ptr);
3105 		} else if (pcr->action == PF_CHANGE_ADD_TAIL) {
3106 			oldrule = TAILQ_LAST(
3107 				ruleset->rules[rs_num].active.ptr, pf_rulequeue);
3108 		} else {
3109 			oldrule = TAILQ_FIRST(
3110 				ruleset->rules[rs_num].active.ptr);
3111 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) {
3112 				oldrule = TAILQ_NEXT(oldrule, entries);
3113 			}
3114 			if (oldrule == NULL) {
3115 				if (newrule != NULL) {
3116 					pf_rm_rule(NULL, newrule);
3117 				}
3118 				error = EINVAL;
3119 				break;
3120 			}
3121 		}
3122 
3123 		if (pcr->action == PF_CHANGE_REMOVE) {
3124 			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
3125 			ruleset->rules[rs_num].active.rcount--;
3126 		} else {
3127 			if (oldrule == NULL) {
3128 				TAILQ_INSERT_TAIL(
3129 					ruleset->rules[rs_num].active.ptr,
3130 					newrule, entries);
3131 			} else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3132 			    pcr->action == PF_CHANGE_ADD_BEFORE) {
3133 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3134 			} else {
3135 				TAILQ_INSERT_AFTER(
3136 					ruleset->rules[rs_num].active.ptr,
3137 					oldrule, newrule, entries);
3138 			}
3139 			ruleset->rules[rs_num].active.rcount++;
3140 		}
3141 
3142 		nr = 0;
3143 		TAILQ_FOREACH(oldrule,
3144 		    ruleset->rules[rs_num].active.ptr, entries)
3145 		oldrule->nr = nr++;
3146 
3147 		ruleset->rules[rs_num].active.ticket++;
3148 
3149 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3150 		pf_remove_if_empty_ruleset(ruleset);
3151 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
3152 		net_filter_event_mark(NET_FILTER_EVENT_PF,
3153 		    pf_check_compatible_rules());
3154 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
3155 		break;
3156 	}
3157 
3158 	case DIOCINSERTRULE: {
3159 		struct pf_ruleset       *ruleset;
3160 		struct pf_rule          *rule, *tail, *r;
3161 		int                     rs_num;
3162 		int                     is_anchor;
3163 
3164 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3165 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3166 		is_anchor = (pr->anchor_call[0] != '\0');
3167 
3168 		if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
3169 		    pr->rule.owner, is_anchor, &error)) == NULL) {
3170 			break;
3171 		}
3172 
3173 		rs_num = pf_get_ruleset_number(pr->rule.action);
3174 		if (rs_num >= PF_RULESET_MAX) {
3175 			error = EINVAL;
3176 			break;
3177 		}
3178 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3179 			error = EINVAL;
3180 			break;
3181 		}
3182 
3183 		/* make sure this anchor rule doesn't exist already */
3184 		if (is_anchor) {
3185 			r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3186 			while (r) {
3187 				if (r->anchor &&
3188 				    ((strcmp(r->anchor->name,
3189 				    pr->anchor_call)) == 0)) {
3190 					if (((strcmp(pr->rule.owner,
3191 					    r->owner)) == 0) ||
3192 					    ((strcmp(r->owner, "")) == 0)) {
3193 						error = EEXIST;
3194 					} else {
3195 						error = EPERM;
3196 					}
3197 					break;
3198 				}
3199 				r = TAILQ_NEXT(r, entries);
3200 			}
3201 			if (error != 0) {
3202 				return error;
3203 			}
3204 		}
3205 
3206 		rule = pool_get(&pf_rule_pl, PR_WAITOK);
3207 		if (rule == NULL) {
3208 			error = ENOMEM;
3209 			break;
3210 		}
3211 		pf_rule_copyin(&pr->rule, rule, p, minordev);
3212 #if !INET
3213 		if (rule->af == AF_INET) {
3214 			pool_put(&pf_rule_pl, rule);
3215 			error = EAFNOSUPPORT;
3216 			break;
3217 		}
3218 #endif /* INET */
3219 		r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3220 		while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) {
3221 			r = TAILQ_NEXT(r, entries);
3222 		}
3223 		if (r == NULL) {
3224 			if ((tail =
3225 			    TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3226 			    pf_rulequeue)) != NULL) {
3227 				rule->nr = tail->nr + 1;
3228 			} else {
3229 				rule->nr = 0;
3230 			}
3231 		} else {
3232 			rule->nr = r->nr;
3233 		}
3234 
3235 		if ((error = pf_rule_setup(pr, rule, ruleset))) {
3236 			break;
3237 		}
3238 
3239 		if (rule->anchor != NULL) {
3240 			strlcpy(rule->anchor->owner, rule->owner,
3241 			    PF_OWNER_NAME_SIZE);
3242 		}
3243 
3244 		if (r) {
3245 			TAILQ_INSERT_BEFORE(r, rule, entries);
3246 			while (r && ++r->nr) {
3247 				r = TAILQ_NEXT(r, entries);
3248 			}
3249 		} else {
3250 			TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3251 			    rule, entries);
3252 		}
3253 		ruleset->rules[rs_num].active.rcount++;
3254 
3255 		/* Calculate checksum for the main ruleset */
3256 		if (ruleset == &pf_main_ruleset) {
3257 			error = pf_setup_pfsync_matching(ruleset);
3258 		}
3259 
3260 		pf_ruleset_cleanup(ruleset, rs_num);
3261 		rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);
3262 
3263 		pr->rule.ticket = rule->ticket;
3264 		pf_rule_copyout(rule, &pr->rule);
3265 		if (rule->rule_flag & PFRULE_PFM) {
3266 			pffwrules++;
3267 		}
3268 		if (rule->action == PF_NAT64) {
3269 			atomic_add_16(&pf_nat64_configured, 1);
3270 		}
3271 
3272 		if (pr->anchor_call[0] == '\0') {
3273 			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
3274 			if (rule->rule_flag & PFRULE_PFM) {
3275 				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
3276 			}
3277 		}
3278 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
3279 		net_filter_event_mark(NET_FILTER_EVENT_PF,
3280 		    pf_check_compatible_rules());
3281 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
3282 		break;
3283 	}
3284 
3285 	case DIOCDELETERULE: {
3286 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3287 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3288 
3289 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3290 			error = EINVAL;
3291 			break;
3292 		}
3293 
3294 		/* get device through which request is made */
3295 		if ((uint8_t)minordev == PFDEV_PFM) {
3296 			req_dev |= PFRULE_PFM;
3297 		}
3298 
3299 		if (pr->rule.ticket) {
3300 			if ((error = pf_delete_rule_by_ticket(pr, req_dev))) {
3301 				break;
3302 			}
3303 		} else {
3304 			pf_delete_rule_by_owner(pr->rule.owner, req_dev);
3305 		}
3306 		pr->nr = pffwrules;
3307 		if (pr->rule.action == PF_NAT64) {
3308 			atomic_add_16(&pf_nat64_configured, -1);
3309 		}
3310 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
3311 		net_filter_event_mark(NET_FILTER_EVENT_PF,
3312 		    pf_check_compatible_rules());
3313 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
3314 		break;
3315 	}
3316 
3317 	default:
3318 		VERIFY(0);
3319 		/* NOTREACHED */
3320 	}
3321 
3322 	return error;
3323 }
3324 
/*
 * DIOCCLRSTATES / DIOCKILLSTATES: remove entries from the state tree.
 * Both commands honour optional interface-name and rule-owner filters;
 * DIOCKILLSTATES additionally matches address family, protocol and the
 * source/destination address/port descriptions in *psk.
 * The number of states removed is passed back in psk->psk_af.
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	/* NUL-terminate the user-supplied filter strings */
	psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state         *s, *nexts;
		int                      killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			/* fetch the successor first: s may be unlinked below */
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			/* a state without a rule cannot match an owner filter */
			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the kill count on the way out */
		psk->psk_af = (sa_family_t)killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state         *s, *nexts;
		struct pf_state_key     *sk;
		struct pf_state_host    *src, *dst;
		int                      killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			/* fetch the successor first: s may be unlinked below */
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			/* a state without a rule cannot match an owner filter */
			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			/* orient src/dst the way the caller described them */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the kill count on the way out */
		psk->psk_af = (sa_family_t)killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3451 
/*
 * Handle single-state ioctls:
 *  DIOCADDSTATE - allocate a new state, fill it from the pfsync wire
 *                 representation supplied by userland and insert it into
 *                 the state table
 *  DIOCGETSTATE - look up a state by (id, creatorid) and export it in
 *                 pfsync wire format
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state     *sp = &ps->state;
		struct pf_state         *s;
		struct pf_state_key     *sk;
		struct pfi_kif          *kif;

		/* reject out-of-range timeout indices from userland */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		/* translate the wire representation into s and sk */
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			/* unknown interface: undo the key attachment first */
			pf_detach_state(s, 0);
			pool_put(&pf_state_pl, s);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			/* insertion failed -- treated as an existing state */
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state         *s;
		struct pf_state_cmp      id_key;

		/* the lookup is keyed on the (id, creatorid) pair */
		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3525 
/*
 * DIOCGETSTATES: dump the state table to userland in pfsync wire format.
 * A zero ps_len is a size probe: only the number of bytes a full dump
 * would need is returned.  Otherwise states are copied out one at a time
 * until the user buffer is exhausted, and ps_len is rewritten to the
 * number of bytes actually copied.  Handles both 32- and 64-bit callers.
 */
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: {           /* struct pfioc_states */
		struct pf_state         *state;
		struct pfsync_state     *pstore;
		user_addr_t              buf;
		u_int32_t                nr = 0;
		int                      len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			/* size probe: report the space a full dump needs */
			size = sizeof(struct pfsync_state) * pf_status.states;
			if (p64) {
				ps64->ps_len = size;
			} else {
				ps32->ps_len = size;
			}
			break;
		}

		/* one scratch entry, reused for every copyout below */
		pstore = kalloc_type(struct pfsync_state,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
#ifdef __LP64__
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);
#else
		buf = ps32->ps_buf;
#endif

		state = TAILQ_FIRST(&state_list);
		while (state) {
			/* skip states already queued for removal */
			if (state->timeout != PFTM_UNLINKED) {
				/* stop when the user buffer is exhausted */
				if ((nr + 1) * sizeof(*pstore) > (unsigned)len) {
					break;
				}

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof(*pstore));
				if (error) {
					kfree_type(struct pfsync_state, pstore);
					goto fail;
				}
				buf += sizeof(*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		/* report how many bytes were actually written */
		size = sizeof(struct pfsync_state) * nr;
		if (p64) {
			ps64->ps_len = size;
		} else {
			ps32->ps_len = size;
		}

		kfree_type(struct pfsync_state, pstore);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
3598 
/*
 * DIOCNATLOOK: given the source/destination of a connection, find the
 * matching state and return the translated addresses/ports to userland
 * (E2BIG if more than one state matches, ENOENT if none does).
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key     *sk;
		struct pf_state         *state;
		struct pf_state_key_cmp  key;
		int                      m = 0, direction = pnl->direction;

		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/*
		 * A protocol and both addresses are mandatory; TCP and UDP
		 * lookups additionally require both port numbers.
		 */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port))) {
			error = EINVAL;
		} else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				key.af_gwy = pnl->af;
				PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
				    pnl->af);
				memcpy(&key.ext_gwy.xport, &pnl->dxport,
				    sizeof(key.ext_gwy.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof(key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				key.af_lan = pnl->af;
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof(key.lan.xport));
				PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
				    pnl->af);
				memcpy(&key.ext_lan.xport, &pnl->sxport,
				    sizeof(key.ext_lan.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1) {
				error = E2BIG;  /* more than one state */
			} else if (state != NULL) {
				/* copy the translated side back to userland */
				sk = state->state_key;
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af_lan);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof(pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof(pnl->rdxport));
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af_gwy);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof(pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof(pnl->rsxport));
				}
			} else {
				error = ENOENT;
			}
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3687 
3688 static int
pfioctl_ioc_tm(u_long cmd,struct pfioc_tm * pt,struct proc * p)3689 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3690 {
3691 #pragma unused(p)
3692 	int error = 0;
3693 
3694 	switch (cmd) {
3695 	case DIOCSETTIMEOUT: {
3696 		int old;
3697 
3698 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3699 		    pt->seconds < 0) {
3700 			error = EINVAL;
3701 			goto fail;
3702 		}
3703 		old = pf_default_rule.timeout[pt->timeout];
3704 		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) {
3705 			pt->seconds = 1;
3706 		}
3707 		pf_default_rule.timeout[pt->timeout] = pt->seconds;
3708 		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) {
3709 			wakeup(pf_purge_thread_fn);
3710 		}
3711 		pt->seconds = old;
3712 		break;
3713 	}
3714 
3715 	case DIOCGETTIMEOUT: {
3716 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3717 			error = EINVAL;
3718 			goto fail;
3719 		}
3720 		pt->seconds = pf_default_rule.timeout[pt->timeout];
3721 		break;
3722 	}
3723 
3724 	default:
3725 		VERIFY(0);
3726 		/* NOTREACHED */
3727 	}
3728 fail:
3729 	return error;
3730 }
3731 
3732 static int
pfioctl_ioc_limit(u_long cmd,struct pfioc_limit * pl,struct proc * p)3733 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3734 {
3735 #pragma unused(p)
3736 	int error = 0;
3737 
3738 	switch (cmd) {
3739 	case DIOCGETLIMIT: {
3740 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3741 			error = EINVAL;
3742 			goto fail;
3743 		}
3744 		pl->limit = pf_pool_limits[pl->index].limit;
3745 		break;
3746 	}
3747 
3748 	case DIOCSETLIMIT: {
3749 		int old_limit;
3750 
3751 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3752 		    pf_pool_limits[pl->index].pp == NULL) {
3753 			error = EINVAL;
3754 			goto fail;
3755 		}
3756 		pool_sethardlimit(pf_pool_limits[pl->index].pp,
3757 		    pl->limit, NULL, 0);
3758 		old_limit = pf_pool_limits[pl->index].limit;
3759 		pf_pool_limits[pl->index].limit = pl->limit;
3760 		pl->limit = old_limit;
3761 		break;
3762 	}
3763 
3764 	default:
3765 		VERIFY(0);
3766 		/* NOTREACHED */
3767 	}
3768 fail:
3769 	return error;
3770 }
3771 
/*
 * Handle the address-pool ioctls:
 *  DIOCBEGINADDRS - reset the staging list (pf_pabuf) and issue a ticket
 *  DIOCADDADDR    - append one address to the staging list
 *  DIOCGETADDRS   - return the number of addresses in an installed pool
 *  DIOCGETADDR    - copy out the pp->nr'th address of an installed pool
 *  DIOCCHANGEADDR - insert into / remove from an installed pool
 */
static int
pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
{
#pragma unused(p)
	struct pf_pooladdr *pa = NULL;
	struct pf_pool *pool = NULL;
	int error = 0;

	switch (cmd) {
	case DIOCBEGINADDRS: {
		/* discard any previously staged addresses */
		pf_empty_pool(&pf_pabuf);
		pp->ticket = ++ticket_pabuf;
		break;
	}

	case DIOCADDADDR: {
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		/* only valid while the caller holds the current ticket */
		if (pp->ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
#if !INET
		if (pp->af == AF_INET) {
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pp->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}
		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
		if (pa == NULL) {
			error = ENOMEM;
			break;
		}
		pf_pooladdr_copyin(&pp->addr, pa);
		if (pa->ifname[0]) {
			/* resolve and take a reference on the interface */
			pa->kif = pfi_kif_get(pa->ifname);
			if (pa->kif == NULL) {
				pool_put(&pf_pooladdr_pl, pa);
				error = EINVAL;
				break;
			}
			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
		}
		pf_addrwrap_setup(&pa->addr);
		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
			/* undo the partial setup before freeing */
			pfi_dynaddr_remove(&pa->addr);
			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, pa);
			error = EINVAL;
			break;
		}
		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
		break;
	}

	case DIOCGETADDRS: {
		pp->nr = 0;
		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 0);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		TAILQ_FOREACH(pa, &pool->list, entries)
		pp->nr++;
		break;
	}

	case DIOCGETADDR: {
		u_int32_t                nr = 0;

		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
		    pp->r_num, 0, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		/* walk to the pp->nr'th entry */
		pa = TAILQ_FIRST(&pool->list);
		while ((pa != NULL) && (nr < pp->nr)) {
			pa = TAILQ_NEXT(pa, entries);
			nr++;
		}
		if (pa == NULL) {
			error = EBUSY;
			break;
		}
		pf_pooladdr_copyout(pa, &pp->addr);
		pfi_dynaddr_copyout(&pp->addr.addr);
		pf_tbladdr_copyout(&pp->addr.addr);
		pf_rtlabel_copyout(&pp->addr.addr);
		break;
	}

	case DIOCCHANGEADDR: {
		struct pfioc_pooladdr   *pca = pp;
		struct pf_pooladdr      *oldpa = NULL, *newpa = NULL;
		struct pf_ruleset       *ruleset;

		if (pca->action < PF_CHANGE_ADD_HEAD ||
		    pca->action > PF_CHANGE_REMOVE) {
			error = EINVAL;
			break;
		}
		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
		    pca->addr.addr.type != PF_ADDR_TABLE) {
			error = EINVAL;
			break;
		}

		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
		ruleset = pf_find_ruleset(pca->anchor);
		if (ruleset == NULL) {
			error = EBUSY;
			break;
		}
		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
		    pca->r_num, pca->r_last, 1, 1);
		if (pool == NULL) {
			error = EBUSY;
			break;
		}
		if (pca->action != PF_CHANGE_REMOVE) {
			/* build and validate the new entry first */
			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
			if (newpa == NULL) {
				error = ENOMEM;
				break;
			}
			pf_pooladdr_copyin(&pca->addr, newpa);
#if !INET
			if (pca->af == AF_INET) {
				pool_put(&pf_pooladdr_pl, newpa);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
			if (newpa->ifname[0]) {
				/* resolve and reference the interface */
				newpa->kif = pfi_kif_get(newpa->ifname);
				if (newpa->kif == NULL) {
					pool_put(&pf_pooladdr_pl, newpa);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
			} else {
				newpa->kif = NULL;
			}
			pf_addrwrap_setup(&newpa->addr);
			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
				/* undo the partial setup before freeing */
				pfi_dynaddr_remove(&newpa->addr);
				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
				pool_put(&pf_pooladdr_pl, newpa);
				error = EINVAL;
				break;
			}
		}

		/* locate the reference entry for the requested action */
		if (pca->action == PF_CHANGE_ADD_HEAD) {
			oldpa = TAILQ_FIRST(&pool->list);
		} else if (pca->action == PF_CHANGE_ADD_TAIL) {
			oldpa = TAILQ_LAST(&pool->list, pf_palist);
		} else {
			int     i = 0;

			oldpa = TAILQ_FIRST(&pool->list);
			while ((oldpa != NULL) && (i < (int)pca->nr)) {
				oldpa = TAILQ_NEXT(oldpa, entries);
				i++;
			}
			if (oldpa == NULL) {
				error = EINVAL;
				break;
			}
		}

		if (pca->action == PF_CHANGE_REMOVE) {
			/* tear down the entry and drop its references */
			TAILQ_REMOVE(&pool->list, oldpa, entries);
			pfi_dynaddr_remove(&oldpa->addr);
			pf_tbladdr_remove(&oldpa->addr);
			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
			pool_put(&pf_pooladdr_pl, oldpa);
		} else {
			if (oldpa == NULL) {
				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
			} else if (pca->action == PF_CHANGE_ADD_HEAD ||
			    pca->action == PF_CHANGE_ADD_BEFORE) {
				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
			} else {
				TAILQ_INSERT_AFTER(&pool->list, oldpa,
				    newpa, entries);
			}
		}

		/* reset the pool's current entry and counter to the head */
		pool->cur = TAILQ_FIRST(&pool->list);
		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
		    pca->af);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3986 
3987 static int
pfioctl_ioc_ruleset(u_long cmd,struct pfioc_ruleset * pr,struct proc * p)3988 pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
3989 {
3990 #pragma unused(p)
3991 	int error = 0;
3992 
3993 	switch (cmd) {
3994 	case DIOCGETRULESETS: {
3995 		struct pf_ruleset       *ruleset;
3996 		struct pf_anchor        *anchor;
3997 
3998 		pr->path[sizeof(pr->path) - 1] = '\0';
3999 		pr->name[sizeof(pr->name) - 1] = '\0';
4000 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4001 			error = EINVAL;
4002 			break;
4003 		}
4004 		pr->nr = 0;
4005 		if (ruleset->anchor == NULL) {
4006 			/* XXX kludge for pf_main_ruleset */
4007 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4008 			if (anchor->parent == NULL) {
4009 				pr->nr++;
4010 			}
4011 		} else {
4012 			RB_FOREACH(anchor, pf_anchor_node,
4013 			    &ruleset->anchor->children)
4014 			pr->nr++;
4015 		}
4016 		break;
4017 	}
4018 
4019 	case DIOCGETRULESET: {
4020 		struct pf_ruleset       *ruleset;
4021 		struct pf_anchor        *anchor;
4022 		u_int32_t                nr = 0;
4023 
4024 		pr->path[sizeof(pr->path) - 1] = '\0';
4025 		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4026 			error = EINVAL;
4027 			break;
4028 		}
4029 		pr->name[0] = 0;
4030 		if (ruleset->anchor == NULL) {
4031 			/* XXX kludge for pf_main_ruleset */
4032 			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4033 			if (anchor->parent == NULL && nr++ == pr->nr) {
4034 				strlcpy(pr->name, anchor->name,
4035 				    sizeof(pr->name));
4036 				break;
4037 			}
4038 		} else {
4039 			RB_FOREACH(anchor, pf_anchor_node,
4040 			    &ruleset->anchor->children)
4041 			if (nr++ == pr->nr) {
4042 				strlcpy(pr->name, anchor->name,
4043 				    sizeof(pr->name));
4044 				break;
4045 			}
4046 		}
4047 		if (!pr->name[0]) {
4048 			error = EBUSY;
4049 		}
4050 		break;
4051 	}
4052 
4053 	default:
4054 		VERIFY(0);
4055 		/* NOTREACHED */
4056 	}
4057 
4058 	return error;
4059 }
4060 
/*
 * Handler for the PF transaction ioctls DIOCXBEGIN, DIOCXROLLBACK and
 * DIOCXCOMMIT.  User space passes an array of struct pfioc_trans_e
 * elements; each element names a rule class (rs_num) and anchor whose
 * inactive ruleset/table is begun, rolled back, or committed as part
 * of one transaction.
 *
 * io32/io64 are the 32-bit and 64-bit user views of the same ioctl
 * argument; only the one matching the caller's ABI (proc_is64bit(p))
 * is valid.  Returns 0 on success or an errno value.
 */
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int error = 0, esize, size;
	user_addr_t buf;

#ifdef __LP64__
	int p64 = proc_is64bit(p);

	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);
#else
#pragma unused(io64, p)
	esize = io32->esize;
	size = io32->size;
	buf = io32->array;
#endif

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e    *ioe;
		struct pfr_table        *table;
		int                      i;

		/* user-space element size must match the kernel's layout */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			/* force NUL termination of the user-supplied anchor */
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			}
			/* return the allocated ticket to user space */
			if (copyout(ioe, buf, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e    *ioe;
		struct pfr_table        *table;
		int                      i;

		/* user-space element size must match the kernel's layout */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			/* force NUL termination of the user-supplied anchor */
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e    *ioe;
		struct pfr_table        *table;
		struct pf_ruleset       *rs;
		user_addr_t              _buf = buf;	/* saved for second pass */
		int                      i;

		/* user-space element size must match the kernel's layout */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			/* force NUL termination of the user-supplied anchor */
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				/* the table transaction must be open and ticket current */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EINVAL;
					goto fail;
				}
				/* the inactive ruleset must be open and ticket current */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			/* force NUL termination of the user-supplied anchor */
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
#if SKYWALK && defined(XNU_TARGET_OS_OSX)
		net_filter_event_mark(NET_FILTER_EVENT_PF,
		    pf_check_compatible_rules());
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4290 
/*
 * DIOCGETSRCNODES handler: export the source-tracking node table to
 * user space.  A psn_len of 0 is a size probe — only the number of
 * bytes required is reported back.  Otherwise up to psn_len bytes of
 * sanitized struct pf_src_node records are copied out and psn_len is
 * updated to the number of bytes actually written.
 *
 * psn32/psn64 are the 32-bit and 64-bit user views of the argument;
 * which one is live depends on proc_is64bit(p).
 */
static int
pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
    struct pfioc_src_nodes_64 *psn64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSRCNODES: {
		struct pf_src_node      *n, *pstore;
		user_addr_t              buf;
		u_int32_t                nr = 0;
		int                      space, size;

		space = (p64 ? psn64->psn_len : psn32->psn_len);
		if (space == 0) {
			/* size probe: count nodes, report bytes needed */
			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
			nr++;

			size = sizeof(struct pf_src_node) * nr;
			if (p64) {
				psn64->psn_len = size;
			} else {
				psn32->psn_len = size;
			}
			break;
		}

		/* staging copy so the live node is never exposed directly */
		pstore = kalloc_type(struct pf_src_node, Z_WAITOK | Z_NOFAIL);
#ifdef __LP64__
		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);
#else
		buf = psn32->psn_buf;
#endif

		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			uint64_t secs = pf_time_second(), diff;

			/* stop once the user buffer would overflow */
			if ((nr + 1) * sizeof(*pstore) > (unsigned)space) {
				break;
			}

			bcopy(n, pstore, sizeof(*pstore));
			if (n->rule.ptr != NULL) {
				pstore->rule.nr = n->rule.ptr->nr;
			}
			/* export creation/expiry as time relative to now */
			pstore->creation = secs - pstore->creation;
			if (pstore->expire > secs) {
				pstore->expire -= secs;
			} else {
				pstore->expire = 0;
			}

			/* adjust the connection rate estimate */
			diff = secs - n->conn_rate.last;
			if (diff >= n->conn_rate.seconds) {
				pstore->conn_rate.count = 0;
			} else {
				pstore->conn_rate.count -=
				    n->conn_rate.count * diff /
				    n->conn_rate.seconds;
			}

			/* scrub kernel pointers before copying out */
			_RB_PARENT(pstore, entry) = NULL;
			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
			pstore->kif = NULL;

			error = copyout(pstore, buf, sizeof(*pstore));
			if (error) {
				kfree_type(struct pf_src_node, pstore);
				goto fail;
			}
			buf += sizeof(*pstore);
			nr++;
		}

		/* report how many bytes were actually written */
		size = sizeof(struct pf_src_node) * nr;
		if (p64) {
			psn64->psn_len = size;
		} else {
			psn32->psn_len = size;
		}

		kfree_type(struct pf_src_node, pstore);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	return error;
}
4385 
4386 static int
pfioctl_ioc_src_node_kill(u_long cmd,struct pfioc_src_node_kill * psnk,struct proc * p)4387 pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
4388     struct proc *p)
4389 {
4390 #pragma unused(p)
4391 	int error = 0;
4392 
4393 	switch (cmd) {
4394 	case DIOCKILLSRCNODES: {
4395 		struct pf_src_node      *sn;
4396 		struct pf_state         *s;
4397 		int                     killed = 0;
4398 
4399 		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
4400 			if (PF_MATCHA(psnk->psnk_src.neg,
4401 			    &psnk->psnk_src.addr.v.a.addr,
4402 			    &psnk->psnk_src.addr.v.a.mask,
4403 			    &sn->addr, sn->af) &&
4404 			    PF_MATCHA(psnk->psnk_dst.neg,
4405 			    &psnk->psnk_dst.addr.v.a.addr,
4406 			    &psnk->psnk_dst.addr.v.a.mask,
4407 			    &sn->raddr, sn->af)) {
4408 				/* Handle state to src_node linkage */
4409 				if (sn->states != 0) {
4410 					RB_FOREACH(s, pf_state_tree_id,
4411 					    &tree_id) {
4412 						if (s->src_node == sn) {
4413 							s->src_node = NULL;
4414 						}
4415 						if (s->nat_src_node == sn) {
4416 							s->nat_src_node = NULL;
4417 						}
4418 					}
4419 					sn->states = 0;
4420 				}
4421 				sn->expire = 1;
4422 				killed++;
4423 			}
4424 		}
4425 
4426 		if (killed > 0) {
4427 			pf_purge_expired_src_nodes();
4428 		}
4429 
4430 		psnk->psnk_af = (sa_family_t)killed;
4431 		break;
4432 	}
4433 
4434 	default:
4435 		VERIFY(0);
4436 		/* NOTREACHED */
4437 	}
4438 
4439 	return error;
4440 }
4441 
/*
 * Interface ioctl handlers: DIOCIGETIFACES (list PF's view of the
 * attached interfaces), DIOCSETIFFLAG and DIOCCLRIFFLAG (set/clear
 * per-interface PF flags, e.g. skip).
 *
 * io32/io64 are the 32-bit and 64-bit user views of the argument;
 * which one is live depends on proc_is64bit(p).  The user-supplied
 * interface name is NUL-terminated before use in every case.
 */
static int
pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
    struct pfioc_iface_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCIGETIFACES: {
		user_addr_t buf;
		int esize;

#ifdef __LP64__
		buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
		esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
#else
		buf = io32->pfiio_buffer;
		esize = io32->pfiio_esize;
#endif

		/* esize must be that of the user space version of pfi_kif */
		if (esize != sizeof(struct pfi_uif)) {
			error = ENODEV;
			break;
		}
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}
		error = pfi_get_ifaces(
			p64 ? io64->pfiio_name : io32->pfiio_name, buf,
			p64 ? &io64->pfiio_size : &io32->pfiio_size);
		break;
	}

	case DIOCSETIFFLAG: {
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}

		error = pfi_set_flags(
			p64 ? io64->pfiio_name : io32->pfiio_name,
			p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	case DIOCCLRIFFLAG: {
		if (p64) {
			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
		} else {
			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
		}

		error = pfi_clear_flags(
			p64 ? io64->pfiio_name : io32->pfiio_name,
			p64 ? io64->pfiio_flags : io32->pfiio_flags);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
4511 
/*
 * Main PF entry point from the IP input/output paths.  Runs the packet
 * at *mp through pf_test{,6}_mbuf under the PF locks, then repairs the
 * mbuf packet-chain linkage around it (*mp may be consumed/replaced).
 *
 * mppn, if non-NULL, points at the previous packet's m_nextpkt link and
 * is updated so the chain stays intact when this packet is dropped.
 * af is AF_INET or AF_INET6; input is nonzero on the inbound path.
 * Returns 0 when the packet may proceed, or an errno value.
 */
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *nextpkt;
	net_thread_marks_t marks;
	struct ifnet * pf_ifp = ifp;

	/* Always allow traffic on co-processor interfaces. */
	if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp)) {
		return 0;
	}

	/* marks == none means this thread already holds PF (re-entry) */
	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(&pf_perim_lock);
		if (!pf_is_enabled) {
			goto done;
		}
		lck_mtx_lock(&pf_lock);
	}

	if (mppn != NULL && *mppn != NULL) {
		VERIFY(*mppn == *mp);
	}
	/* detach this packet from the chain; relinked below */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL) {
		(*mp)->m_nextpkt = NULL;
	}

	/*
	 * For packets destined to locally hosted IP address
	 * ip_output_list sets Mbuf's pkt header's rcvif to
	 * the interface hosting the IP address.
	 * While on the output path ifp passed to pf_af_hook
	 * to such local communication is the loopback interface,
	 * the input path derives ifp from mbuf packet header's
	 * rcvif.
	 * This asymmetry caues issues with PF.
	 * To handle that case, we have a limited change here to
	 * pass interface as loopback if packets are looped in.
	 */
	if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		pf_ifp = lo_ifp;
	}

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(pf_ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
	case AF_INET6:
		error = pf_inet6_hook(pf_ifp, mp, input, fwa);
		break;
	default:
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		while (m->m_nextpkt != NULL) {
			m = m->m_nextpkt;
		}
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL) {
			*mppn = *mp;
		} else {
			*mppn = nextpkt;
		}
	}

	if (marks != net_thread_marks_none) {
		lck_mtx_unlock(&pf_lock);
	}

done:
	if (marks != net_thread_marks_none) {
		lck_rw_done(&pf_perim_lock);
	}

	net_thread_marks_pop(marks);
	return error;
}
4602 
4603 
#if INET
/*
 * IPv4 leg of pf_af_hook().  Finalizes any pending delayed transport
 * checksum for locally originated outbound packets, byte-swaps the IP
 * header fields to network order around pf_test_mbuf() (PF expects
 * them that way), and maps a PF drop to an errno.
 * Returns 0, EHOSTUNREACH (packet dropped and freed here), or
 * ENOBUFS (mbuf consumed by PF itself).
 */
static __attribute__((noinline)) int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculated the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

#if BYTE_ORDER != BIG_ENDIAN
	/* PF expects ip_len/ip_off in network byte order */
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* mbuf was consumed by PF */
			error = ENOBUFS;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/* restore host byte order for the rest of the stack */
		if (*mp != NULL) {
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return error;
}
#endif /* INET */
4658 
4659 int __attribute__((noinline))
pf_inet6_hook(struct ifnet * ifp,struct mbuf ** mp,int input,struct ip_fw_args * fwa)4660 pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
4661     struct ip_fw_args *fwa)
4662 {
4663 	int error = 0;
4664 
4665 	/*
4666 	 * If the packet is outbound, is originated locally, is flagged for
4667 	 * delayed UDP/TCP checksum calculation, and is about to be processed
4668 	 * for an interface that doesn't support the appropriate checksum
4669 	 * offloading, then calculated the checksum here so that PF can adjust
4670 	 * it properly.
4671 	 */
4672 	if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
4673 		static const int mask = CSUM_DELAY_IPV6_DATA;
4674 		const int flags = (*mp)->m_pkthdr.csum_flags &
4675 		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
4676 
4677 		if (flags & mask) {
4678 			/*
4679 			 * Checksum offload should not have been enabled
4680 			 * when extension headers exist, thus 0 for optlen.
4681 			 */
4682 			in6_delayed_cksum(*mp);
4683 			(*mp)->m_pkthdr.csum_flags &= ~mask;
4684 		}
4685 	}
4686 
4687 	if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
4688 		if (*mp != NULL) {
4689 			m_freem(*mp);
4690 			*mp = NULL;
4691 			error = EHOSTUNREACH;
4692 		} else {
4693 			error = ENOBUFS;
4694 		}
4695 	}
4696 	return error;
4697 }
4698 
4699 int
pf_ifaddr_hook(struct ifnet * ifp)4700 pf_ifaddr_hook(struct ifnet *ifp)
4701 {
4702 	struct pfi_kif *kif = ifp->if_pf_kif;
4703 
4704 	if (kif != NULL) {
4705 		lck_rw_lock_shared(&pf_perim_lock);
4706 		lck_mtx_lock(&pf_lock);
4707 
4708 		pfi_kifaddr_update(kif);
4709 
4710 		lck_mtx_unlock(&pf_lock);
4711 		lck_rw_done(&pf_perim_lock);
4712 	}
4713 	return 0;
4714 }
4715 
4716 /*
4717  * Caller acquires dlil lock as writer (exclusive)
4718  */
4719 void
pf_ifnet_hook(struct ifnet * ifp,int attach)4720 pf_ifnet_hook(struct ifnet *ifp, int attach)
4721 {
4722 	lck_rw_lock_shared(&pf_perim_lock);
4723 	lck_mtx_lock(&pf_lock);
4724 	if (attach) {
4725 		pfi_attach_ifnet(ifp);
4726 	} else {
4727 		pfi_detach_ifnet(ifp);
4728 	}
4729 	lck_mtx_unlock(&pf_lock);
4730 	lck_rw_done(&pf_perim_lock);
4731 }
4732 
4733 static void
pf_attach_hooks(void)4734 pf_attach_hooks(void)
4735 {
4736 	ifnet_head_lock_shared();
4737 	/*
4738 	 * Check against ifnet_addrs[] before proceeding, in case this
4739 	 * is called very early on, e.g. during dlil_init() before any
4740 	 * network interface is attached.
4741 	 */
4742 	if (ifnet_addrs != NULL) {
4743 		int i;
4744 
4745 		for (i = 0; i <= if_index; i++) {
4746 			struct ifnet *ifp = ifindex2ifnet[i];
4747 			if (ifp != NULL) {
4748 				pfi_attach_ifnet(ifp);
4749 			}
4750 		}
4751 	}
4752 	ifnet_head_done();
4753 }
4754 
#if 0
/* currently unused along with pfdetach() */
/*
 * Inverse of pf_attach_hooks(): detach PF state from every interface
 * that has a kif attached.  Compiled out because pfdetach() is unused.
 *
 * NOTE: fixed a latent compile error in the dead code — `int i;` was
 * declared inside the loop body but used in the for-header, so this
 * would not build if ever re-enabled.
 */
static void
pf_detach_hooks(void)
{
	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		int i;

		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];
			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}
#endif
4774 
4775 /*
4776  * 'D' group ioctls.
4777  *
4778  * The switch statement below does nothing at runtime, as it serves as a
4779  * compile time check to ensure that all of the socket 'D' ioctls (those
4780  * in the 'D' group going thru soo_ioctl) that are made available by the
4781  * networking stack is unique.  This works as long as this routine gets
4782  * updated each time a new interface ioctl gets added.
4783  *
4784  * Any failures at compile time indicates duplicated ioctl values.
4785  */
static __attribute__((unused)) void
pfioctl_cassert(void)
{
	/*
	 * This is equivalent to _CASSERT() and the compiler wouldn't
	 * generate any instructions, thus for compile time only.
	 * A duplicated ioctl value among the labels below produces a
	 * duplicate-case compile error, flagging the collision.
	 */
	switch ((u_long)0) {
	case 0:

	/* bsd/net/pfvar.h */
	case DIOCSTART:
	case DIOCSTOP:
	case DIOCADDRULE:
	case DIOCGETSTARTERS:
	case DIOCGETRULES:
	case DIOCGETRULE:
	case DIOCSTARTREF:
	case DIOCSTOPREF:
	case DIOCCLRSTATES:
	case DIOCGETSTATE:
	case DIOCSETSTATUSIF:
	case DIOCGETSTATUS:
	case DIOCCLRSTATUS:
	case DIOCNATLOOK:
	case DIOCSETDEBUG:
	case DIOCGETSTATES:
	case DIOCCHANGERULE:
	case DIOCINSERTRULE:
	case DIOCDELETERULE:
	case DIOCSETTIMEOUT:
	case DIOCGETTIMEOUT:
	case DIOCADDSTATE:
	case DIOCCLRRULECTRS:
	case DIOCGETLIMIT:
	case DIOCSETLIMIT:
	case DIOCKILLSTATES:
	case DIOCSTARTALTQ:
	case DIOCSTOPALTQ:
	case DIOCADDALTQ:
	case DIOCGETALTQS:
	case DIOCGETALTQ:
	case DIOCCHANGEALTQ:
	case DIOCGETQSTATS:
	case DIOCBEGINADDRS:
	case DIOCADDADDR:
	case DIOCGETADDRS:
	case DIOCGETADDR:
	case DIOCCHANGEADDR:
	case DIOCGETRULESETS:
	case DIOCGETRULESET:
	case DIOCRCLRTABLES:
	case DIOCRADDTABLES:
	case DIOCRDELTABLES:
	case DIOCRGETTABLES:
	case DIOCRGETTSTATS:
	case DIOCRCLRTSTATS:
	case DIOCRCLRADDRS:
	case DIOCRADDADDRS:
	case DIOCRDELADDRS:
	case DIOCRSETADDRS:
	case DIOCRGETADDRS:
	case DIOCRGETASTATS:
	case DIOCRCLRASTATS:
	case DIOCRTSTADDRS:
	case DIOCRSETTFLAGS:
	case DIOCRINADEFINE:
	case DIOCOSFPFLUSH:
	case DIOCOSFPADD:
	case DIOCOSFPGET:
	case DIOCXBEGIN:
	case DIOCXCOMMIT:
	case DIOCXROLLBACK:
	case DIOCGETSRCNODES:
	case DIOCCLRSRCNODES:
	case DIOCSETHOSTID:
	case DIOCIGETIFACES:
	case DIOCSETIFFLAG:
	case DIOCCLRIFFLAG:
	case DIOCKILLSRCNODES:
	case DIOCGIFSPEED:
		;
	}
}
4870