xref: /xnu-8796.121.2/bsd/net/pf_ioctl.c (revision c54f35ca767986246321eb901baf8f5ff7923f6a)
1 /*
2  * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*	$apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
30 /*	$OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */
31 
32 /*
33  * Copyright (c) 2001 Daniel Hartmeier
34  * Copyright (c) 2002,2003 Henning Brauer
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *    - Redistributions of source code must retain the above copyright
42  *      notice, this list of conditions and the following disclaimer.
43  *    - Redistributions in binary form must reproduce the above
44  *      copyright notice, this list of conditions and the following
45  *      disclaimer in the documentation and/or other materials provided
46  *      with the distribution.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59  * POSSIBILITY OF SUCH DAMAGE.
60  *
61  * Effort sponsored in part by the Defense Advanced Research Projects
62  * Agency (DARPA) and Air Force Research Laboratory, Air Force
63  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64  *
65  */
66 
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/fcntl.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/kernel.h>
76 #include <sys/time.h>
77 #include <sys/proc_internal.h>
78 #include <sys/malloc.h>
79 #include <sys/kauth.h>
80 #include <sys/conf.h>
81 #include <sys/mcache.h>
82 #include <sys/queue.h>
83 #include <os/log.h>
84 
85 #include <mach/vm_param.h>
86 
87 #include <net/dlil.h>
88 #include <net/if.h>
89 #include <net/if_types.h>
90 #include <net/net_api_stats.h>
91 #include <net/route.h>
92 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
93 #include <skywalk/lib/net_filter_event.h>
94 #endif
95 
96 #include <netinet/in.h>
97 #include <netinet/in_var.h>
98 #include <netinet/in_systm.h>
99 #include <netinet/ip.h>
100 #include <netinet/ip_var.h>
101 #include <netinet/ip_icmp.h>
102 #include <netinet/if_ether.h>
103 
104 #if DUMMYNET
105 #include <netinet/ip_dummynet.h>
106 #else
107 struct ip_fw_args;
108 #endif /* DUMMYNET */
109 
110 #include <libkern/crypto/md5.h>
111 
112 #include <machine/machine_routines.h>
113 
114 #include <miscfs/devfs/devfs.h>
115 
116 #include <net/pfvar.h>
117 
118 #if NPFSYNC
119 #include <net/if_pfsync.h>
120 #endif /* NPFSYNC */
121 
122 #if PFLOG
123 #include <net/if_pflog.h>
124 #endif /* PFLOG */
125 
126 #include <netinet/ip6.h>
127 #include <netinet/in_pcb.h>
128 
129 #include <dev/random/randomdev.h>
130 
131 #if 0
132 static void pfdetach(void);
133 #endif
134 static int pfopen(dev_t, int, int, struct proc *);
135 static int pfclose(dev_t, int, int, struct proc *);
136 static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
137 static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
138     struct pfioc_table_64 *, struct proc *);
139 static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
140     struct pfioc_tokens_64 *, struct proc *);
141 static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
142 static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
143     struct proc *);
144 static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
145 static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
146     struct pfioc_states_64 *, struct proc *);
147 static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
148 static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
149 static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
150 static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
151 static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
152 static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
153     struct pfioc_trans_64 *, struct proc *);
154 static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
155     struct pfioc_src_nodes_64 *, struct proc *);
156 static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
157     struct proc *);
158 static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
159     struct pfioc_iface_64 *, struct proc *);
160 static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
161     u_int8_t, u_int8_t, u_int8_t);
162 static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
163 static void pf_empty_pool(struct pf_palist *);
164 static int pf_begin_rules(u_int32_t *, int, const char *);
165 static int pf_rollback_rules(u_int32_t, int, char *);
166 static int pf_setup_pfsync_matching(struct pf_ruleset *);
167 static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
168 static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
169 static int pf_commit_rules(u_int32_t, int, char *);
170 static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
171     int);
172 static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
173 static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
174     struct pf_state *);
175 static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
176     struct pf_state *);
177 static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
178 static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
179 static void pf_expire_states_and_src_nodes(struct pf_rule *);
180 static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
181     int, struct pf_rule *);
182 static void pf_addrwrap_setup(struct pf_addr_wrap *);
183 static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
184     struct pf_ruleset *);
185 static void pf_delete_rule_by_owner(char *, u_int32_t);
186 static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
187 static void pf_ruleset_cleanup(struct pf_ruleset *, int);
188 static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
189     int, struct pf_rule **);
190 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
191 static void pf_process_compatibilities(void);
192 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
193 
194 #define PF_CDEV_MAJOR   (-1)
195 
/*
 * Character device switch backing /dev/pf and /dev/pfm.  Only open,
 * close and ioctl are implemented; every other entry point is stubbed
 * with the corresponding eno_* error routine.
 */
static const struct cdevsw pf_cdevsw = {
	.d_open       = pfopen,
	.d_close      = pfclose,
	.d_read       = eno_rdwrt,
	.d_write      = eno_rdwrt,
	.d_ioctl      = pfioctl,
	.d_stop       = eno_stop,
	.d_reset      = eno_reset,
	.d_ttys       = NULL,
	.d_select     = eno_select,
	.d_mmap       = eno_mmap,
	.d_strategy   = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
	.d_type       = 0
};
212 
213 static void pf_attach_hooks(void);
214 #if 0
215 /* currently unused along with pfdetach() */
216 static void pf_detach_hooks(void);
217 #endif
218 
219 /*
220  * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
221  * and used in pf_af_hook() for performance optimization, such that packets
222  * will enter pf_test() or pf_test6() only when PF is running.
223  */
224 int pf_is_enabled = 0;
225 
226 u_int32_t pf_hash_seed;
227 int16_t pf_nat64_configured = 0;
228 
229 /*
230  * These are the pf enabled reference counting variables
231  */
232 #define NR_TOKENS_LIMIT (INT_MAX / sizeof(struct pfioc_token))
233 
234 static u_int64_t pf_enabled_ref_count;
235 static u_int32_t nr_tokens = 0;
236 static u_int32_t pffwrules;
237 static u_int32_t pfdevcnt;
238 
239 SLIST_HEAD(list_head, pfioc_kernel_token);
240 static struct list_head token_list_head;
241 
242 struct pf_rule           pf_default_rule;
243 
244 typedef struct {
245 	char tag_name[PF_TAG_NAME_SIZE];
246 	uint16_t tag_id;
247 } pf_reserved_tag_table_t;
248 
249 #define NUM_RESERVED_TAGS    2
250 static pf_reserved_tag_table_t pf_reserved_tag_table[NUM_RESERVED_TAGS] = {
251 	{ PF_TAG_NAME_SYSTEM_SERVICE, PF_TAG_ID_SYSTEM_SERVICE},
252 	{ PF_TAG_NAME_STACK_DROP, PF_TAG_ID_STACK_DROP},
253 };
254 #define RESERVED_TAG_ID_MIN    PF_TAG_ID_SYSTEM_SERVICE
255 
256 #define DYNAMIC_TAG_ID_MAX    50000
257 static TAILQ_HEAD(pf_tags, pf_tagname)  pf_tags =
258     TAILQ_HEAD_INITIALIZER(pf_tags);
259 
260 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
261 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
262 #endif
263 static u_int16_t         tagname2tag(struct pf_tags *, char *);
264 static void              tag_unref(struct pf_tags *, u_int16_t);
265 static int               pf_rtlabel_add(struct pf_addr_wrap *);
266 static void              pf_rtlabel_remove(struct pf_addr_wrap *);
267 static void              pf_rtlabel_copyout(struct pf_addr_wrap *);
268 
269 #if INET
270 static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
271     struct ip_fw_args *);
272 #endif /* INET */
273 static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
274     struct ip_fw_args *);
275 
276 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
277 
278 /*
279  * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
280  */
281 #define PFIOCX_STRUCT_DECL(s)                                           \
282 struct {                                                                \
283 	union {                                                         \
284 	        struct s##_32	_s##_32;                                \
285 	        struct s##_64	_s##_64;                                \
286 	} _u;                                                           \
287 } *s##_un = NULL                                                        \
288 
289 #define PFIOCX_STRUCT_BEGIN(a, s) {                                     \
290 	VERIFY(s##_un == NULL);                                         \
291 	s##_un = kalloc_type(typeof(*s##_un), Z_WAITOK_ZERO_NOFAIL);    \
292 	if (p64)                                                        \
293 	        bcopy(a, &s##_un->_u._s##_64,                           \
294 	            sizeof (struct s##_64));                            \
295 	else                                                            \
296 	        bcopy(a, &s##_un->_u._s##_32,                           \
297 	            sizeof (struct s##_32));                            \
298 }
299 
300 #define PFIOCX_STRUCT_END(s, a) {                                       \
301 	VERIFY(s##_un != NULL);                                         \
302 	if (p64)                                                        \
303 	        bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64));  \
304 	else                                                            \
305 	        bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32));  \
306 	kfree_type(typeof(*s##_un), s##_un);                            \
307 }
308 
309 #define PFIOCX_STRUCT_ADDR32(s)         (&s##_un->_u._s##_32)
310 #define PFIOCX_STRUCT_ADDR64(s)         (&s##_un->_u._s##_64)
311 
312 /*
313  * Helper macros for regular ioctl structures.
314  */
315 #define PFIOC_STRUCT_BEGIN(a, v) {                                      \
316 	VERIFY((v) == NULL);                                            \
317 	(v) = kalloc_type(typeof(*(v)), Z_WAITOK_ZERO_NOFAIL);          \
318 	bcopy(a, v, sizeof (*(v)));                                     \
319 }
320 
321 #define PFIOC_STRUCT_END(v, a) {                                        \
322 	VERIFY((v) != NULL);                                            \
323 	bcopy(v, a, sizeof (*(v)));                                     \
324 	kfree_type(typeof(*(v)), v);                                    \
325 }
326 
327 #define PFIOC_STRUCT_ADDR32(s)          (&s##_un->_u._s##_32)
328 #define PFIOC_STRUCT_ADDR64(s)          (&s##_un->_u._s##_64)
329 
330 struct thread *pf_purge_thread;
331 
332 extern void pfi_kifaddr_update(void *);
333 
334 /* pf enable ref-counting helper functions */
335 static u_int64_t                generate_token(struct proc *);
336 static int                      remove_token(struct pfioc_remove_token *);
337 static void                     invalidate_all_tokens(void);
338 
/*
 * Allocate a new PF enable-reference token on behalf of process p and
 * insert it into the global token list.  Must be called with pf_lock
 * held (asserted below).  Returns the token value handed back to user
 * space, or 0 when NR_TOKENS_LIMIT outstanding tokens already exist.
 */
static u_int64_t
generate_token(struct proc *p)
{
	u_int64_t token_value;
	struct pfioc_kernel_token *new_token;

	/* refuse to grow the token list without bound */
	if (nr_tokens + 1 > NR_TOKENS_LIMIT) {
		os_log_error(OS_LOG_DEFAULT, "%s: NR_TOKENS_LIMIT reached", __func__);
		return 0;
	}

	new_token = kalloc_type(struct pfioc_kernel_token,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	/* permute the kernel address so the raw pointer is not disclosed */
	token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);

	new_token->token.token_value = token_value;
	new_token->token.pid = proc_pid(p);
	proc_name(new_token->token.pid, new_token->token.proc_name,
	    sizeof(new_token->token.proc_name));
	new_token->token.timestamp = pf_calendar_time_second();

	SLIST_INSERT_HEAD(&token_list_head, new_token, next);
	nr_tokens++;

	return token_value;
}
368 
369 static int
remove_token(struct pfioc_remove_token * tok)370 remove_token(struct pfioc_remove_token *tok)
371 {
372 	struct pfioc_kernel_token *entry, *tmp;
373 
374 	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
375 
376 	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
377 		if (tok->token_value == entry->token.token_value) {
378 			SLIST_REMOVE(&token_list_head, entry,
379 			    pfioc_kernel_token, next);
380 			kfree_type(struct pfioc_kernel_token, entry);
381 			nr_tokens--;
382 			return 0;    /* success */
383 		}
384 	}
385 
386 	printf("pf : remove failure\n");
387 	return ESRCH;    /* failure */
388 }
389 
390 static void
invalidate_all_tokens(void)391 invalidate_all_tokens(void)
392 {
393 	struct pfioc_kernel_token *entry, *tmp;
394 
395 	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
396 
397 	SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
398 		SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
399 		kfree_type(struct pfioc_kernel_token, entry);
400 	}
401 
402 	nr_tokens = 0;
403 }
404 
/*
 * One-time PF initialization, called at boot: set up the backing
 * memory pools, the table/interface/OS-fingerprint subsystems, the
 * default rule and timeout table, start the purge thread, and create
 * the /dev/pf and /dev/pfm device nodes before attaching the packet
 * filter hooks.
 */
void
pfinit(void)
{
	u_int32_t *t = pf_default_rule.timeout;
	int maj;

	/* fixed-size object pools backing the main PF data structures */
	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctrpl", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstatekeypl", NULL);
	pool_init(&pf_app_state_pl, sizeof(struct pf_app_state), 0, 0, 0,
	    "pfappstatepl", NULL);
	pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
	    "pfpooladdrpl", NULL);
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* lower the table-entry high-water mark on small (<= 256MB) systems */
	if (max_mem <= 256 * 1024 * 1024) {
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;
	}

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_pabuf);
	TAILQ_INIT(&state_list);

	/* service-class constants must agree with their index encodings */
	_CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
	_CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
	_CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
	_CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
	_CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
	_CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
	_CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
	_CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
	_CASSERT((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
	_CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
	_CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = -1;
	pf_default_rule.rtableid = IFSCOPE_NONE;

	/* initialize default timeouts */
	t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
	t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
	t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
	t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
	t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
	t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
	t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	t[PFTM_FRAG] = PFTM_FRAG_VAL;
	t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_hash_seed = RandomULong();

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = random();

	if (kernel_thread_start(pf_purge_thread_fn, NULL,
	    &pf_purge_thread) != 0) {
		printf("%s: unable to start purge thread!", __func__);
		return;
	}

	maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
	if (maj == -1) {
		printf("%s: failed to allocate major number!\n", __func__);
		return;
	}
	/* /dev/pf: control device; /dev/pfm: exclusive-open variant */
	(void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pf");

	(void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0600, "pfm");

	pf_attach_hooks();
#if DUMMYNET
	dummynet_init();
#endif
}
517 
#if 0
/*
 * Full PF teardown: detach hooks, flush all rulesets, states, source
 * nodes, tables and anchors, then destroy the pools and subsystems.
 * Compiled out (unused along with pf_detach_hooks) and kept for
 * reference.
 */
static void
pfdetach(void)
{
	struct pf_anchor        *anchor;
	struct pf_state         *state;
	struct pf_src_node      *node;
	struct pfioc_table      pt;
	u_int32_t               ticket;
	int                     i;
	char                    r = '\0';

	pf_detach_hooks();

	pf_status.running = 0;
	wakeup(pf_purge_thread_fn);

	/* clear the rulesets */
	for (i = 0; i < PF_RULESET_MAX; i++) {
		if (pf_begin_rules(&ticket, i, &r) == 0) {
			pf_commit_rules(ticket, i, &r);
		}
	}

	/* clear states: mark everything for purge, then sweep */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->timeout = PFTM_PURGE;
#if NPFSYNC
		state->sync_flags = PFSTATE_NOSYNC;
#endif
	}
	pf_purge_expired_states(pf_status.states);

#if NPFSYNC
	pfsync_clear_states(pf_status.hostid, NULL);
#endif

	/* clear source nodes */
	RB_FOREACH(state, pf_state_tree_id, &tree_id) {
		state->src_node = NULL;
		state->nat_src_node = NULL;
	}
	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
		node->expire = 1;
		node->states = 0;
	}
	pf_purge_expired_src_nodes();

	/* clear tables */
	memset(&pt, '\0', sizeof(pt));
	pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

	/* destroy anchors */
	while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
		for (i = 0; i < PF_RULESET_MAX; i++) {
			if (pf_begin_rules(&ticket, i, anchor->name) == 0) {
				pf_commit_rules(ticket, i, anchor->name);
			}
		}
	}

	/* destroy main ruleset */
	pf_remove_if_empty_ruleset(&pf_main_ruleset);

	/* destroy the pools */
	pool_destroy(&pf_pooladdr_pl);
	pool_destroy(&pf_state_pl);
	pool_destroy(&pf_rule_pl);
	pool_destroy(&pf_src_tree_pl);

	/* destroy subsystems */
	pf_normalize_destroy();
	pf_osfp_destroy();
	pfr_destroy();
	pfi_destroy();
}
#endif
595 
596 static int
pfopen(dev_t dev,int flags,int fmt,struct proc * p)597 pfopen(dev_t dev, int flags, int fmt, struct proc *p)
598 {
599 #pragma unused(flags, fmt, p)
600 	if (minor(dev) >= PFDEV_MAX) {
601 		return ENXIO;
602 	}
603 
604 	if (minor(dev) == PFDEV_PFM) {
605 		lck_mtx_lock(&pf_lock);
606 		if (pfdevcnt != 0) {
607 			lck_mtx_unlock(&pf_lock);
608 			return EBUSY;
609 		}
610 		pfdevcnt++;
611 		lck_mtx_unlock(&pf_lock);
612 	}
613 	return 0;
614 }
615 
616 static int
pfclose(dev_t dev,int flags,int fmt,struct proc * p)617 pfclose(dev_t dev, int flags, int fmt, struct proc *p)
618 {
619 #pragma unused(flags, fmt, p)
620 	if (minor(dev) >= PFDEV_MAX) {
621 		return ENXIO;
622 	}
623 
624 	if (minor(dev) == PFDEV_PFM) {
625 		lck_mtx_lock(&pf_lock);
626 		VERIFY(pfdevcnt > 0);
627 		pfdevcnt--;
628 		lck_mtx_unlock(&pf_lock);
629 	}
630 	return 0;
631 }
632 
/*
 * Locate the address pool of a rule within the given anchor's ruleset.
 * The ruleset queue (active vs. inactive) is selected by `active';
 * with `r_last' set the last rule in the queue is taken, otherwise the
 * queue is scanned for the rule numbered `rule_number'.  When
 * `check_ticket' is set, `ticket' must match the selected queue's
 * current ticket.  Returns a pointer to the rule's pool, or NULL when
 * the anchor, ruleset, ticket or rule cannot be matched.
 */
static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
	struct pf_ruleset       *ruleset;
	struct pf_rule          *rule;
	int                      rs_num;
	struct pf_pool          *p = NULL;

	ruleset = pf_find_ruleset(anchor);
	if (ruleset == NULL) {
		goto done;
	}
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX) {
		goto done;
	}
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket) {
			goto done;
		}
		if (r_last) {
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		} else {
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		}
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket) {
			goto done;
		}
		if (r_last) {
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		} else {
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
		}
	}
	/* unless the last rule was requested, walk to the numbered rule */
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		goto done;
	}

	p = &rule->rpool;
done:

	if (ruleset) {
		pf_release_ruleset(ruleset);
		ruleset = NULL;
	}

	return p;
}
693 
694 static void
pf_mv_pool(struct pf_palist * poola,struct pf_palist * poolb)695 pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
696 {
697 	struct pf_pooladdr      *mv_pool_pa;
698 
699 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
700 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
701 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
702 	}
703 }
704 
705 static void
pf_empty_pool(struct pf_palist * poola)706 pf_empty_pool(struct pf_palist *poola)
707 {
708 	struct pf_pooladdr      *empty_pool_pa;
709 
710 	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
711 		pfi_dynaddr_remove(&empty_pool_pa->addr);
712 		pf_tbladdr_remove(&empty_pool_pa->addr);
713 		pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
714 		TAILQ_REMOVE(poola, empty_pool_pa, entries);
715 		pool_put(&pf_pooladdr_pl, empty_pool_pa);
716 	}
717 }
718 
/*
 * Unlink `rule' from `rulequeue' (when non-NULL) and free it once it
 * is no longer referenced.  If the rule still has attached states or
 * source nodes, or is still linked into a queue, only the unlink is
 * performed and the function returns without freeing; the caller is
 * expected to invoke this again (with rulequeue == NULL) for the final
 * teardown once the references drain.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			if (rule->overload_tbl) {
				pfr_detach_table(rule->overload_tbl);
			}
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* tqe_prev == NULL marks the rule as unlinked */
		rule->entries.tqe_prev = NULL;
		rule->nr = -1;
	}

	/* still referenced or still linked: defer the final release */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL) {
		return;
	}
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* table detach was not done above; do it now */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		if (rule->overload_tbl) {
			pfr_detach_table(rule->overload_tbl);
		}
	}
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}
762 
/*
 * Return the numeric id for `tagname' in `head', allocating a new
 * entry when the name is not yet known.  Reserved names map to fixed
 * high ids (>= RESERVED_TAG_ID_MIN) and live at the head of the list;
 * dynamic names get the lowest free id, limited to DYNAMIC_TAG_ID_MAX.
 * An existing entry just gains a reference.  Returns 0 when the
 * dynamic id space is exhausted.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname       *tag, *p = NULL;
	uint16_t                 new_tagid = 1;
	bool                     reserved_tag = false;

	/* existing entry: bump the refcount and reuse its id */
	TAILQ_FOREACH(tag, head, entries)
	if (strcmp(tagname, tag->name) == 0) {
		tag->ref++;
		return tag->tag;
	}

	/*
	 * check if it is a reserved tag.
	 */
	_CASSERT(RESERVED_TAG_ID_MIN > DYNAMIC_TAG_ID_MAX);
	for (int i = 0; i < NUM_RESERVED_TAGS; i++) {
		if (strncmp(tagname, pf_reserved_tag_table[i].tag_name,
		    PF_TAG_NAME_SIZE) == 0) {
			new_tagid = pf_reserved_tag_table[i].tag_id;
			reserved_tag = true;
			goto skip_dynamic_tag_alloc;
		}
	}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head)) {
		/* skip reserved tags */
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag >= RESERVED_TAG_ID_MIN;
		    p = TAILQ_NEXT(p, entries)) {
			;
		}

		/* advance past consecutive ids until a gap is found */
		for (; p != NULL && p->tag == new_tagid;
		    p = TAILQ_NEXT(p, entries)) {
			new_tagid = p->tag + 1;
		}
	}

	if (new_tagid > DYNAMIC_TAG_ID_MAX) {
		return 0;
	}

skip_dynamic_tag_alloc:
	/* allocate and fill new struct pf_tagname */
	tag = kalloc_type(struct pf_tagname, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (reserved_tag) { /* insert reserved tag at the head */
		TAILQ_INSERT_HEAD(head, tag, entries);
	} else if (p != NULL) { /* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	} else { /* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);
	}

	return tag->tag;
}
831 
832 static void
tag_unref(struct pf_tags * head,u_int16_t tag)833 tag_unref(struct pf_tags *head, u_int16_t tag)
834 {
835 	struct pf_tagname       *p, *next;
836 
837 	if (tag == 0) {
838 		return;
839 	}
840 
841 	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
842 		next = TAILQ_NEXT(p, entries);
843 		if (tag == p->tag) {
844 			if (--p->ref == 0) {
845 				TAILQ_REMOVE(head, p, entries);
846 				kfree_type(struct pf_tagname, p);
847 			}
848 			break;
849 		}
850 	}
851 }
852 
/*
 * Look up (allocating if needed) the id for `tagname' in the global pf
 * tag table; see pf_tagname2tag_ext for a variant that takes the pf
 * locks itself.
 */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return tagname2tag(&pf_tags, tagname);
}
858 
859 u_int16_t
pf_tagname2tag_ext(char * tagname)860 pf_tagname2tag_ext(char *tagname)
861 {
862 	u_int16_t       tag;
863 
864 	lck_rw_lock_exclusive(&pf_perim_lock);
865 	lck_mtx_lock(&pf_lock);
866 	tag = pf_tagname2tag(tagname);
867 	lck_mtx_unlock(&pf_lock);
868 	lck_rw_done(&pf_perim_lock);
869 	return tag;
870 }
871 
872 void
pf_tag_ref(u_int16_t tag)873 pf_tag_ref(u_int16_t tag)
874 {
875 	struct pf_tagname *t;
876 
877 	TAILQ_FOREACH(t, &pf_tags, entries)
878 	if (t->tag == tag) {
879 		break;
880 	}
881 	if (t != NULL) {
882 		t->ref++;
883 	}
884 }
885 
/*
 * Release one reference on tag id `tag' in the global pf tag table.
 */
void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}
891 
/*
 * Route labels are not supported on this platform; no-op stub that
 * always reports success.
 */
static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
	return 0;
}
898 
/* Route labels are not supported on this platform; no-op stub. */
static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
904 
/* Route labels are not supported on this platform; no-op stub. */
static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
910 
/*
 * Open a rules transaction for ruleset `rs_num' of `anchor': flush any
 * leftover rules from the inactive queue, bump the inactive ticket and
 * mark the queue open.  The new ticket is returned through *ticket.
 * Returns EINVAL for an out-of-range ruleset number or when the anchor
 * cannot be found or created.
 */
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_ruleset       *rs;
	struct pf_rule          *rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		return EINVAL;
	}
	rs = pf_find_or_create_ruleset(anchor);
	if (rs == NULL) {
		return EINVAL;
	}
	/* discard any rules left over from a previous transaction */
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	pf_release_ruleset(rs);
	rs = NULL;
	return 0;
}
934 
935 static int
pf_rollback_rules(u_int32_t ticket,int rs_num,char * anchor)936 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
937 {
938 	struct pf_ruleset       *rs = NULL;
939 	struct pf_rule          *rule;
940 	int                     err = 0;
941 
942 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
943 		err = EINVAL;
944 		goto done;
945 	}
946 	rs = pf_find_ruleset(anchor);
947 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
948 	    rs->rules[rs_num].inactive.ticket != ticket) {
949 		goto done;
950 	}
951 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
952 		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
953 		rs->rules[rs_num].inactive.rcount--;
954 	}
955 	rs->rules[rs_num].inactive.open = 0;
956 
957 done:
958 	if (rs) {
959 		pf_release_ruleset(rs);
960 		rs = NULL;
961 	}
962 	return err;
963 }
964 
/*
 * Helpers for folding rule fields into an MD5 digest (used to compute
 * pf_status.pf_chksum in pf_setup_pfsync_matching()).  All of them
 * expand a reference to a local `MD5_CTX *ctx' that must be in scope
 * at the expansion site.
 */
#define PF_MD5_UPD(st, elm)                                             \
	MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

/* Hash a NUL-terminated string field; the terminator is excluded. */
#define PF_MD5_UPD_STR(st, elm)                                         \
	MD5Update(ctx, (u_int8_t *)(st)->elm, (unsigned int)strlen((st)->elm))

/*
 * Hash a 32-bit field in network byte order so the digest is
 * byte-order independent; `stor' names a caller-provided temporary.
 */
#define PF_MD5_UPD_HTONL(st, elm, stor) do {                            \
	(stor) = htonl((st)->elm);                                      \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t));        \
} while (0)

/* 16-bit variant of PF_MD5_UPD_HTONL. */
#define PF_MD5_UPD_HTONS(st, elm, stor) do {                            \
	(stor) = htons((st)->elm);                                      \
	MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t));        \
} while (0)
980 
/*
 * Fold the significant fields of one rule address (src or dst) into
 * the MD5 context: only the union members valid for the address type
 * are hashed, plus the port range/op for TCP/UDP and the negation
 * flag.  The exact sequence of MD5Update() calls is part of the
 * checksum format -- do not reorder.
 */
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	/* ports only exist for protocols that carry them */
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		PF_MD5_UPD(pfr, xport.range.port[0]);
		PF_MD5_UPD(pfr, xport.range.port[1]);
		PF_MD5_UPD(pfr, xport.range.op);
		break;

	default:
		break;
	}

	PF_MD5_UPD(pfr, neg);
}
1017 
/*
 * Fold the match-relevant fields of one rule into the MD5 context.
 * Multi-byte numeric fields go through the HTONL/HTONS helpers so the
 * digest is byte-order independent.  The field order is part of the
 * checksum format -- do not reorder.
 */
static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;    /* scratch for PF_MD5_UPD_HTONS */
	u_int32_t y;    /* scratch for PF_MD5_UPD_HTONL */

	pf_hash_rule_addr(ctx, &rule->src, rule->proto);
	pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}
1056 
/*
 * Commit a rule transaction: swap the inactive ruleset (populated
 * under `ticket' after pf_begin_rules()) into the active slot for
 * `rs_num' in the ruleset named by `anchor', then destroy the
 * previously active rules.
 *
 * Returns EINVAL for an out-of-range rs_num, EBUSY when no open
 * transaction matches `ticket', or an error from
 * pf_setup_pfsync_matching() (main ruleset only).  Caller must hold
 * pf_lock.
 */
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_ruleset       *rs = NULL;
	struct pf_rule          *rule, **old_array, *r;
	struct pf_rulequeue     *old_rules;
	int                      error = 0;
	u_int32_t                old_rcount;
	u_int32_t                old_rsize;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
		error = EINVAL;
		goto done;
	}
	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket) {
		error = EBUSY;
		goto done;
	}

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0) {
			goto done;
		}
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_rsize  = rs->rules[rs_num].active.rsize;
	old_array = rs->rules[rs_num].active.ptr_array;

	/* the outgoing PFM-flagged rules no longer count toward pffwrules */
	if (old_rcount != 0) {
		r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
		while (r) {
			if (r->rule_flag & PFRULE_PFM) {
				pffwrules--;
			}
			r = TAILQ_NEXT(r, entries);
		}
	}


	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.rsize =
	    rs->rules[rs_num].inactive.rsize;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.rcount = old_rcount;
	rs->rules[rs_num].inactive.rsize = old_rsize;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
		pf_rm_rule(old_rules, rule);
	}
	/* free the old lookup array and leave the inactive side closed/empty */
	kfree_type(struct pf_rule *, rs->rules[rs_num].inactive.rsize,
	    rs->rules[rs_num].inactive.ptr_array);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.rsize = 0;
	rs->rules[rs_num].inactive.open = 0;

done:
	if (rs) {
		pf_release_ruleset(rs);
	}
	return error;
}
1140 
/*
 * Sanitize a rule copied in from user space: force NUL-termination on
 * every string field, stamp the creating uid/pid, clear all
 * kernel-pointer fields (anchor, kif, overload table, pool, list
 * linkage) and reset the state/src-node refcounts.  Rules submitted
 * through the PFDEV_PFM minor device are flagged PFRULE_PFM.
 */
static void
pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
    int minordev)
{
	bcopy(src, dst, sizeof(struct pf_rule));

	/* user space cannot be trusted to terminate its strings */
	dst->label[sizeof(dst->label) - 1] = '\0';
	dst->ifname[sizeof(dst->ifname) - 1] = '\0';
	dst->qname[sizeof(dst->qname) - 1] = '\0';
	dst->pqname[sizeof(dst->pqname) - 1] = '\0';
	dst->tagname[sizeof(dst->tagname) - 1] = '\0';
	dst->match_tagname[sizeof(dst->match_tagname) - 1] = '\0';
	dst->overload_tblname[sizeof(dst->overload_tblname) - 1] = '\0';
	dst->owner[sizeof(dst->owner) - 1] = '\0';

	dst->cuid = kauth_cred_getuid(kauth_cred_get());
	dst->cpid = proc_getpid(p);

	/* kernel pointers must never come from user space */
	dst->anchor = NULL;
	dst->kif = NULL;
	dst->overload_tbl = NULL;

	TAILQ_INIT(&dst->rpool.list);
	dst->rpool.cur = NULL;

	/* initialize refcounting */
	dst->states = 0;
	dst->src_nodes = 0;

	dst->entries.tqe_prev = NULL;
	dst->entries.tqe_next = NULL;
	if ((uint8_t)minordev == PFDEV_PFM) {
		dst->rule_flag |= PFRULE_PFM;
	}
}
1176 
1177 static void
pf_rule_copyout(struct pf_rule * src,struct pf_rule * dst)1178 pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
1179 {
1180 	bcopy(src, dst, sizeof(struct pf_rule));
1181 
1182 	dst->anchor = NULL;
1183 	dst->kif = NULL;
1184 	dst->overload_tbl = NULL;
1185 
1186 	dst->rpool.list.tqh_first = NULL;
1187 	dst->rpool.list.tqh_last = NULL;
1188 	dst->rpool.cur = NULL;
1189 
1190 	dst->entries.tqe_prev = NULL;
1191 	dst->entries.tqe_next = NULL;
1192 }
1193 
/*
 * Serialize state key `sk' and state `s' into the pfsync wire
 * representation `sp' for export to user space.  Rule references are
 * flattened to rule numbers ((unsigned)-1 when absent) and `expire'
 * is converted to seconds-from-now, clamped at 0.
 */
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	uint64_t secs = pf_time_second();
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->lan.addr = sk->lan.addr;
	sp->lan.xport = sk->lan.xport;
	sp->gwy.addr = sk->gwy.addr;
	sp->gwy.xport = sk->gwy.xport;
	sp->ext_lan.addr = sk->ext_lan.addr;
	sp->ext_lan.xport = sk->ext_lan.xport;
	sp->ext_gwy.addr = sk->ext_gwy.addr;
	sp->ext_gwy.xport = sk->ext_gwy.xport;
	sp->proto_variant = sk->proto_variant;
	sp->tag = s->tag;
	sp->proto = sk->proto;
	sp->af_lan = sk->af_lan;
	sp->af_gwy = sk->af_gwy;
	sp->direction = sk->direction;
	sp->flowhash = sk->flowhash;

	/* copy from state */
	memcpy(&sp->id, &s->id, sizeof(sp->id));
	sp->creatorid = s->creatorid;
	strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
	pf_state_peer_to_pfsync(&s->src, &sp->src);
	pf_state_peer_to_pfsync(&s->dst, &sp->dst);

	/* flatten rule pointers to rule numbers; -1 means "no rule" */
	sp->rule = s->rule.ptr->nr;
	sp->nat_rule = (s->nat_rule.ptr == NULL) ?
	    (unsigned)-1 : s->nat_rule.ptr->nr;
	sp->anchor = (s->anchor.ptr == NULL) ?
	    (unsigned)-1 : s->anchor.ptr->nr;

	pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
	pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
	pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
	pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
	sp->creation = secs - s->creation;  /* age in seconds */
	sp->expire = pf_state_expires(s);
	sp->log = s->log;
	sp->allow_opts = s->allow_opts;
	sp->timeout = s->timeout;

	if (s->src_node) {
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	}
	if (s->nat_src_node) {
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
	}

	/* absolute expiry -> relative seconds-from-now, clamped at 0 */
	if (sp->expire > secs) {
		sp->expire -= secs;
	} else {
		sp->expire = 0;
	}
}
1254 
/*
 * Inverse of pf_state_export(): populate kernel state key `sk' and
 * state `s' from the user-supplied pfsync representation `sp'.
 * Rule references are reset (default rule / NULL), counters are
 * zeroed, and `expire' is rebased against the default-rule timeout
 * for the supplied timeout class.
 */
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
	/* copy to state key */
	sk->lan.addr = sp->lan.addr;
	sk->lan.xport = sp->lan.xport;
	sk->gwy.addr = sp->gwy.addr;
	sk->gwy.xport = sp->gwy.xport;
	sk->ext_lan.addr = sp->ext_lan.addr;
	sk->ext_lan.xport = sp->ext_lan.xport;
	sk->ext_gwy.addr = sp->ext_gwy.addr;
	sk->ext_gwy.xport = sp->ext_gwy.xport;
	sk->proto_variant = sp->proto_variant;
	s->tag = sp->tag;
	sk->proto = sp->proto;
	sk->af_lan = sp->af_lan;
	sk->af_gwy = sp->af_gwy;
	sk->direction = sp->direction;
	ASSERT(sk->flowsrc == FLOWSRC_PF);
	ASSERT(sk->flowhash != 0);

	/* copy to state */
	memcpy(&s->id, &sp->id, sizeof(sp->id));
	s->creatorid = sp->creatorid;
	pf_state_peer_from_pfsync(&sp->src, &s->src);
	pf_state_peer_from_pfsync(&sp->dst, &s->dst);

	s->rule.ptr = &pf_default_rule;
	s->nat_rule.ptr = NULL;
	s->anchor.ptr = NULL;
	s->rt_kif = NULL;
	s->creation = pf_time_second();
	s->expire = pf_time_second();
	/*
	 * NOTE(review): sp->timeout indexes pf_default_rule.timeout[]
	 * without a visible bounds check here -- presumably validated by
	 * the caller; confirm.
	 */
	if (sp->expire > 0) {
		s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
	}
	s->pfsync_time = 0;
	s->packets[0] = s->packets[1] = 0;
	s->bytes[0] = s->bytes[1] = 0;
}
1296 
1297 static void
pf_pooladdr_copyin(struct pf_pooladdr * src,struct pf_pooladdr * dst)1298 pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1299 {
1300 	bcopy(src, dst, sizeof(struct pf_pooladdr));
1301 
1302 	dst->entries.tqe_prev = NULL;
1303 	dst->entries.tqe_next = NULL;
1304 	dst->ifname[sizeof(dst->ifname) - 1] = '\0';
1305 	dst->kif = NULL;
1306 }
1307 
1308 static void
pf_pooladdr_copyout(struct pf_pooladdr * src,struct pf_pooladdr * dst)1309 pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
1310 {
1311 	bcopy(src, dst, sizeof(struct pf_pooladdr));
1312 
1313 	dst->entries.tqe_prev = NULL;
1314 	dst->entries.tqe_next = NULL;
1315 	dst->kif = NULL;
1316 }
1317 
/*
 * Recompute the ruleset checksum used for pfsync matching: hash every
 * rule of each inactive ruleset (scrub excluded) into an MD5 digest
 * stored in pf_status.pf_chksum.  As a side effect, (re)builds each
 * inactive ptr_array table mapping rule number -> rule.  Returns
 * ENOMEM when a lookup array cannot be (re)allocated.
 */
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX                  ctx;
	struct pf_rule          *rule;
	int                      rs_cnt;
	u_int8_t                 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB) {
			continue;
		}

		/* Z_REALLOCF frees the old array if the resize fails */
		rs->rules[rs_cnt].inactive.ptr_array = krealloc_type(struct pf_rule *,
		    rs->rules[rs_cnt].inactive.rsize, rs->rules[rs_cnt].inactive.rcount,
		    rs->rules[rs_cnt].inactive.ptr_array, Z_WAITOK | Z_REALLOCF);

		if (rs->rules[rs_cnt].inactive.rcount &&
		    !rs->rules[rs_cnt].inactive.ptr_array) {
			/* keep rsize consistent with the now-NULL array */
			rs->rules[rs_cnt].inactive.rsize = 0;
			return ENOMEM;
		}
		rs->rules[rs_cnt].inactive.rsize =
		    rs->rules[rs_cnt].inactive.rcount;

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return 0;
}
1356 
/*
 * Enable pf.  Caller must hold pf_lock and pf must currently be
 * disabled.  Marks pf running, records the enable time, lazily seeds
 * the state-id generator from the clock, and wakes the purge thread.
 */
static void
pf_start(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled == 0);

	pf_is_enabled = 1;
	pf_status.running = 1;
	pf_status.since = pf_calendar_time_second();
	if (pf_status.stateid == 0) {
		/* seed once: time in the upper 32 bits of the id space */
		pf_status.stateid = pf_time_second();
		pf_status.stateid = pf_status.stateid << 32;
	}
	wakeup(pf_purge_thread_fn);
#if SKYWALK && defined(XNU_TARGET_OS_OSX)
	pf_process_compatibilities();
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
	DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}
1377 
/*
 * Disable pf.  Caller must hold pf_lock and pf must currently be
 * enabled.  Clears the running flags, records the time, and wakes
 * the purge thread so it can notice the state change.
 */
static void
pf_stop(void)
{
	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(pf_is_enabled);

	pf_status.running = 0;
	pf_is_enabled = 0;
	pf_status.since = pf_calendar_time_second();
	wakeup(pf_purge_thread_fn);
#if SKYWALK && defined(XNU_TARGET_OS_OSX)
	pf_process_compatibilities();
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
	DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
1394 
/*
 * Character-device ioctl entry point for /dev/pf.
 *
 * Access control happens in three stages before the command switch:
 *   1. the caller must be superuser (EPERM otherwise);
 *   2. at securelevel > 1, only a whitelist of read/maintenance
 *      commands (plus PFR_FLAG_DUMMY table operations) is allowed;
 *   3. without FWRITE on the descriptor, mutating commands return
 *      EACCES (dummy table operations are promoted to FWRITE instead).
 *
 * The command switch runs with pf_perim_lock held (exclusive iff
 * FWRITE) and pf_lock held; most cases delegate to pfioctl_ioc_*()
 * helpers through the PFIOC/PFIOCX copy-in/copy-out macros.
 */
static int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
#pragma unused(dev)
	/* presumably consumed by the PFIOCX_STRUCT_* macros below */
	int p64 = proc_is64bit(p);
	int error = 0;
	/* NOTE(review): dev is in fact used here; the unused pragma looks stale */
	int minordev = minor(dev);

	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
		return EPERM;
	}

	/* XXX keep in sync with switch() below */
	if (securelevel > 1) {
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				break; /* dummy operation ok */
			}
			return EPERM;
		}
		default:
			return EPERM;
		}
	}

	if (!(flags & FWRITE)) {
		switch (cmd) {
		case DIOCSTART:
		case DIOCSTARTREF:
		case DIOCSTOP:
		case DIOCSTOPREF:
		case DIOCGETSTARTERS:
		case DIOCGETRULES:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case DIOCGETSTATES:
		case DIOCINSERTRULE:
		case DIOCDELETERULE:
		case DIOCGETTIMEOUT:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCNATLOOK:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRGETADDRS:
		case DIOCRGETASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCIGETIFACES:
		case DIOCGIFSPEED:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRSETTFLAGS: {
			int pfrio_flags;

			bcopy(&((struct pfioc_table *)(void *)addr)->
			    pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

			if (pfrio_flags & PFR_FLAG_DUMMY) {
				flags |= FWRITE; /* need write lock for dummy */
				break; /* dummy operation ok */
			}
			return EACCES;
		}
		case DIOCGETRULE: {
			u_int32_t action;

			bcopy(&((struct pfioc_rule *)(void *)addr)->action,
			    &action, sizeof(action));

			if (action == PF_GET_CLR_CNTR) {
				return EACCES;
			}
			break;
		}
		default:
			return EACCES;
		}
	}

	if (flags & FWRITE) {
		lck_rw_lock_exclusive(&pf_perim_lock);
	} else {
		lck_rw_lock_shared(&pf_perim_lock);
	}

	lck_mtx_lock(&pf_lock);

	switch (cmd) {
	case DIOCSTART:
		if (pf_status.running) {
			/*
			 * Increment the reference for a simple -e enable, so
			 * that even if other processes drop their references,
			 * pf will still be available to processes that turned
			 * it on without taking a reference
			 */
			if (nr_tokens == pf_enabled_ref_count) {
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			}
			error = EEXIST;
		} else if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			pf_start();
			pf_enabled_ref_count++;
			VERIFY(pf_enabled_ref_count != 0);
		}
		break;

	case DIOCSTARTREF:              /* u_int64_t */
		if (pf_purge_thread == NULL) {
			error = ENOMEM;
		} else {
			u_int64_t token;

			/* small enough to be on stack */
			if ((token = generate_token(p)) != 0) {
				if (pf_is_enabled == 0) {
					pf_start();
				}
				pf_enabled_ref_count++;
				VERIFY(pf_enabled_ref_count != 0);
			} else {
				error = ENOMEM;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: unable to generate token\n"));
			}
			bcopy(&token, addr, sizeof(token));
		}
		break;

	case DIOCSTOP:
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			pf_stop();
			pf_enabled_ref_count = 0;
			invalidate_all_tokens();
		}
		break;

	case DIOCSTOPREF:               /* struct pfioc_remove_token */
		if (!pf_status.running) {
			error = ENOENT;
		} else {
			struct pfioc_remove_token pfrt;

			/* small enough to be on stack */
			bcopy(addr, &pfrt, sizeof(pfrt));
			if ((error = remove_token(&pfrt)) == 0) {
				VERIFY(pf_enabled_ref_count != 0);
				pf_enabled_ref_count--;
				/* return currently held references */
				pfrt.refcount = pf_enabled_ref_count;
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: enabled refcount decremented\n"));
			} else {
				error = EINVAL;
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf: token mismatch\n"));
			}
			bcopy(&pfrt, addr, sizeof(pfrt));

			/* last reference gone: actually stop pf */
			if (error == 0 && pf_enabled_ref_count == 0) {
				pf_stop();
			}
		}
		break;

	case DIOCGETSTARTERS: {         /* struct pfioc_tokens */
		PFIOCX_STRUCT_DECL(pfioc_tokens);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens);
		error = pfioctl_ioc_tokens(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_tokens),
		    PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
		PFIOCX_STRUCT_END(pfioc_tokens, addr);
		break;
	}

	case DIOCADDRULE:               /* struct pfioc_rule */
	case DIOCGETRULES:              /* struct pfioc_rule */
	case DIOCGETRULE:               /* struct pfioc_rule */
	case DIOCCHANGERULE:            /* struct pfioc_rule */
	case DIOCINSERTRULE:            /* struct pfioc_rule */
	case DIOCDELETERULE: {          /* struct pfioc_rule */
		struct pfioc_rule *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr);
		error = pfioctl_ioc_rule(cmd, minordev, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCCLRSTATES:             /* struct pfioc_state_kill */
	case DIOCKILLSTATES: {          /* struct pfioc_state_kill */
		struct pfioc_state_kill *psk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psk);
		error = pfioctl_ioc_state_kill(cmd, psk, p);
		PFIOC_STRUCT_END(psk, addr);
		break;
	}

	case DIOCADDSTATE:              /* struct pfioc_state */
	case DIOCGETSTATE: {            /* struct pfioc_state */
		struct pfioc_state *ps = NULL;

		PFIOC_STRUCT_BEGIN(addr, ps);
		error = pfioctl_ioc_state(cmd, ps, p);
		PFIOC_STRUCT_END(ps, addr);
		break;
	}

	case DIOCGETSTATES: {           /* struct pfioc_states */
		PFIOCX_STRUCT_DECL(pfioc_states);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_states);
		error = pfioctl_ioc_states(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_states),
		    PFIOCX_STRUCT_ADDR64(pfioc_states), p);
		PFIOCX_STRUCT_END(pfioc_states, addr);
		break;
	}

	case DIOCGETSTATUS: {           /* struct pf_status */
		struct pf_status *s = NULL;

		PFIOC_STRUCT_BEGIN(&pf_status, s);
		pfi_update_status(s->ifname, s);
		PFIOC_STRUCT_END(s, addr);
		break;
	}

	case DIOCSETSTATUSIF: {         /* struct pfioc_if */
		struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;

		/* OK for unaligned accesses */
		if (pi->ifname[0] == 0) {
			bzero(pf_status.ifname, IFNAMSIZ);
			break;
		}
		strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
		break;
	}

	case DIOCCLRSTATUS: {
		bzero(pf_status.counters, sizeof(pf_status.counters));
		bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
		bzero(pf_status.scounters, sizeof(pf_status.scounters));
		pf_status.since = pf_calendar_time_second();
		if (*pf_status.ifname) {
			pfi_update_status(pf_status.ifname, NULL);
		}
		break;
	}

	case DIOCNATLOOK: {             /* struct pfioc_natlook */
		struct pfioc_natlook *pnl = NULL;

		PFIOC_STRUCT_BEGIN(addr, pnl);
		error = pfioctl_ioc_natlook(cmd, pnl, p);
		PFIOC_STRUCT_END(pnl, addr);
		break;
	}

	case DIOCSETTIMEOUT:            /* struct pfioc_tm */
	case DIOCGETTIMEOUT: {          /* struct pfioc_tm */
		struct pfioc_tm pt;

		/* small enough to be on stack */
		bcopy(addr, &pt, sizeof(pt));
		error = pfioctl_ioc_tm(cmd, &pt, p);
		bcopy(&pt, addr, sizeof(pt));
		break;
	}

	case DIOCGETLIMIT:              /* struct pfioc_limit */
	case DIOCSETLIMIT: {            /* struct pfioc_limit */
		struct pfioc_limit pl;

		/* small enough to be on stack */
		bcopy(addr, &pl, sizeof(pl));
		error = pfioctl_ioc_limit(cmd, &pl, p);
		bcopy(&pl, addr, sizeof(pl));
		break;
	}

	case DIOCSETDEBUG: {            /* u_int32_t */
		bcopy(addr, &pf_status.debug, sizeof(u_int32_t));
		break;
	}

	case DIOCCLRRULECTRS: {
		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
		struct pf_ruleset       *ruleset = &pf_main_ruleset;
		struct pf_rule          *rule;

		TAILQ_FOREACH(rule,
		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCGIFSPEED: {
		struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
		struct pf_ifspeed ps;
		struct ifnet *ifp;
		u_int64_t baudrate;

		if (psp->ifname[0] != '\0') {
			/* Can we completely trust user-land? */
			strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
			ps.ifname[IFNAMSIZ - 1] = '\0';
			ifp = ifunit(ps.ifname);
			if (ifp != NULL) {
				baudrate = ifp->if_output_bw.max_bw;
				bcopy(&baudrate, &psp->baudrate,
				    sizeof(baudrate));
			} else {
				error = EINVAL;
			}
		} else {
			error = EINVAL;
		}
		break;
	}

	case DIOCBEGINADDRS:            /* struct pfioc_pooladdr */
	case DIOCADDADDR:               /* struct pfioc_pooladdr */
	case DIOCGETADDRS:              /* struct pfioc_pooladdr */
	case DIOCGETADDR:               /* struct pfioc_pooladdr */
	case DIOCCHANGEADDR: {          /* struct pfioc_pooladdr */
		struct pfioc_pooladdr *pp = NULL;

		PFIOC_STRUCT_BEGIN(addr, pp);
		error = pfioctl_ioc_pooladdr(cmd, pp, p);
		PFIOC_STRUCT_END(pp, addr);
		break;
	}

	case DIOCGETRULESETS:           /* struct pfioc_ruleset */
	case DIOCGETRULESET: {          /* struct pfioc_ruleset */
		struct pfioc_ruleset *pr = NULL;

		PFIOC_STRUCT_BEGIN(addr, pr);
		error = pfioctl_ioc_ruleset(cmd, pr, p);
		PFIOC_STRUCT_END(pr, addr);
		break;
	}

	case DIOCRCLRTABLES:            /* struct pfioc_table */
	case DIOCRADDTABLES:            /* struct pfioc_table */
	case DIOCRDELTABLES:            /* struct pfioc_table */
	case DIOCRGETTABLES:            /* struct pfioc_table */
	case DIOCRGETTSTATS:            /* struct pfioc_table */
	case DIOCRCLRTSTATS:            /* struct pfioc_table */
	case DIOCRSETTFLAGS:            /* struct pfioc_table */
	case DIOCRCLRADDRS:             /* struct pfioc_table */
	case DIOCRADDADDRS:             /* struct pfioc_table */
	case DIOCRDELADDRS:             /* struct pfioc_table */
	case DIOCRSETADDRS:             /* struct pfioc_table */
	case DIOCRGETADDRS:             /* struct pfioc_table */
	case DIOCRGETASTATS:            /* struct pfioc_table */
	case DIOCRCLRASTATS:            /* struct pfioc_table */
	case DIOCRTSTADDRS:             /* struct pfioc_table */
	case DIOCRINADEFINE: {          /* struct pfioc_table */
		PFIOCX_STRUCT_DECL(pfioc_table);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_table);
		error = pfioctl_ioc_table(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_table),
		    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
		PFIOCX_STRUCT_END(pfioc_table, addr);
		break;
	}

	case DIOCOSFPADD:               /* struct pf_osfp_ioctl */
	case DIOCOSFPGET: {             /* struct pf_osfp_ioctl */
		struct pf_osfp_ioctl *io = NULL;

		PFIOC_STRUCT_BEGIN(addr, io);
		if (cmd == DIOCOSFPADD) {
			error = pf_osfp_add(io);
		} else {
			VERIFY(cmd == DIOCOSFPGET);
			error = pf_osfp_get(io);
		}
		PFIOC_STRUCT_END(io, addr);
		break;
	}

	case DIOCXBEGIN:                /* struct pfioc_trans */
	case DIOCXROLLBACK:             /* struct pfioc_trans */
	case DIOCXCOMMIT: {             /* struct pfioc_trans */
		PFIOCX_STRUCT_DECL(pfioc_trans);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_trans);
		error = pfioctl_ioc_trans(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_trans),
		    PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
		PFIOCX_STRUCT_END(pfioc_trans, addr);
		break;
	}

	case DIOCGETSRCNODES: {         /* struct pfioc_src_nodes */
		PFIOCX_STRUCT_DECL(pfioc_src_nodes);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes);
		error = pfioctl_ioc_src_nodes(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
		    PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
		PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
		break;
	}

	case DIOCCLRSRCNODES: {
		struct pf_src_node      *n;
		struct pf_state         *state;

		/* detach every state from its source nodes ... */
		RB_FOREACH(state, pf_state_tree_id, &tree_id) {
			state->src_node = NULL;
			state->nat_src_node = NULL;
		}
		/* ... then mark all source nodes expired and purge them */
		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
			n->expire = 1;
			n->states = 0;
		}
		pf_purge_expired_src_nodes();
		pf_status.src_nodes = 0;
		break;
	}

	case DIOCKILLSRCNODES: {        /* struct pfioc_src_node_kill */
		struct pfioc_src_node_kill *psnk = NULL;

		PFIOC_STRUCT_BEGIN(addr, psnk);
		error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
		PFIOC_STRUCT_END(psnk, addr);
		break;
	}

	case DIOCSETHOSTID: {           /* u_int32_t */
		u_int32_t hid;

		/* small enough to be on stack */
		bcopy(addr, &hid, sizeof(hid));
		if (hid == 0) {
			pf_status.hostid = random();
		} else {
			pf_status.hostid = hid;
		}
		break;
	}

	case DIOCOSFPFLUSH:
		pf_osfp_flush();
		break;

	case DIOCIGETIFACES:            /* struct pfioc_iface */
	case DIOCSETIFFLAG:             /* struct pfioc_iface */
	case DIOCCLRIFFLAG: {           /* struct pfioc_iface */
		PFIOCX_STRUCT_DECL(pfioc_iface);

		PFIOCX_STRUCT_BEGIN(addr, pfioc_iface);
		error = pfioctl_ioc_iface(cmd,
		    PFIOCX_STRUCT_ADDR32(pfioc_iface),
		    PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
		PFIOCX_STRUCT_END(pfioc_iface, addr);
		break;
	}

	default:
		error = ENODEV;
		break;
	}

	lck_mtx_unlock(&pf_lock);
	lck_rw_done(&pf_perim_lock);

	return error;
}
1945 
/*
 * Handler for the DIOCR* table ioctls (clear/add/delete/get tables,
 * addresses, stats, flags, and inactive-set definition).
 *
 * The user-visible pfioc_table structure has different layouts for
 * 32-bit and 64-bit callers, so the same command switch is instantiated
 * twice: once against io64 and once against io32.  Exactly one of the
 * two is used per call, selected by proc_is64bit(p).
 *
 * Every case first validates pfrio_esize (the caller's notion of the
 * element size) against the kernel structure size and fails with ENODEV
 * on mismatch, which guards the subsequent copyin/copyout sizing.
 * PFR_FLAG_USERIOCTL is OR'ed into the flags so the pfr_* backends know
 * the request originated in userland.  Returns 0 or an errno from the
 * pfr_* backend.
 */
static int
pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
    struct pfioc_table_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	/* 32-bit processes always take the 32-bit layout below */
	if (!p64) {
		goto struct32;
	}

#ifdef __LP64__
	/*
	 * 64-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
		    io64->pfrio_setflag, io64->pfrio_clrflag,
		    &io64->pfrio_nchange, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
		    &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
		    io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		/* dispatcher in pfioctl only routes DIOCR* commands here */
		VERIFY(0);
		/* NOTREACHED */
	}
	goto done;
#else
	/* a 32-bit kernel never runs a 64-bit process; io64 is dead here */
#pragma unused(io64)
#endif /* __LP64__ */

struct32:
	/*
	 * 32-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
		    io32->pfrio_setflag, io32->pfrio_clrflag,
		    &io32->pfrio_nchange, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
		    &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
		    io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
#ifdef __LP64__
done:
#endif
	return error;
}
2315 
/*
 * Handler for DIOCGETSTARTERS: report the list of PF "starter" tokens
 * (one per client that enabled PF) back to userland.
 *
 * Protocol: if the caller passes size == 0 this is a size probe and we
 * only write back the number of bytes required.  Otherwise we copy out
 * as many complete pfioc_token entries as fit in the caller's buffer
 * and write back the number of bytes actually produced.
 *
 * As with the other ioctl helpers, tok32/tok64 are the two user-layout
 * views of the same argument; p64 selects which one is live.
 */
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	char *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		/*
		 * Guard the multiplication: if size truncated/overflowed,
		 * dividing back won't reproduce the element size.
		 */
		size = sizeof(struct pfioc_token) * nr_tokens;
		if (size / nr_tokens != sizeof(struct pfioc_token)) {
			os_log_error(OS_LOG_DEFAULT, "%s: size overflows", __func__);
			error = ERANGE;
			break;
		}
		/* ocnt: caller's buffer size; cnt: bytes still available */
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			/* size probe only: report required bytes */
			if (p64) {
				tok64->size = size;
			} else {
				tok32->size = size;
			}
			break;
		}

#ifdef __LP64__
		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
#else
		token_buf = tok32->pgt_buf;
#endif
		tokens = (struct pfioc_token *)kalloc_data(size, Z_WAITOK | Z_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		/* serialize kernel tokens into the staging buffer */
		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof(*tokens)) {
				break;    /* no more buffer space left */
			}
			t = (struct pfioc_token *)(void *)ptr;
			t->token_value  = entry->token.token_value;
			t->timestamp    = entry->token.timestamp;
			t->pid          = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof(struct pfioc_token);

			cnt -= sizeof(struct pfioc_token);
		}

		/* ocnt - cnt == bytes actually filled in */
		if (cnt < ocnt) {
			error = copyout(tokens, token_buf, ocnt - cnt);
		}

		if (p64) {
			tok64->size = ocnt - cnt;
		} else {
			tok32->size = ocnt - cnt;
		}

		kfree_data(tokens, size);
		break;
	}

	default:
		/* dispatcher only routes DIOCGETSTARTERS here */
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
2401 
2402 static void
pf_expire_states_and_src_nodes(struct pf_rule * rule)2403 pf_expire_states_and_src_nodes(struct pf_rule *rule)
2404 {
2405 	struct pf_state         *state;
2406 	struct pf_src_node      *sn;
2407 	int                      killed = 0;
2408 
2409 	/* expire the states */
2410 	state = TAILQ_FIRST(&state_list);
2411 	while (state) {
2412 		if (state->rule.ptr == rule) {
2413 			state->timeout = PFTM_PURGE;
2414 		}
2415 		state = TAILQ_NEXT(state, entry_list);
2416 	}
2417 	pf_purge_expired_states(pf_status.states);
2418 
2419 	/* expire the src_nodes */
2420 	RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2421 		if (sn->rule.ptr != rule) {
2422 			continue;
2423 		}
2424 		if (sn->states != 0) {
2425 			RB_FOREACH(state, pf_state_tree_id,
2426 			    &tree_id) {
2427 				if (state->src_node == sn) {
2428 					state->src_node = NULL;
2429 				}
2430 				if (state->nat_src_node == sn) {
2431 					state->nat_src_node = NULL;
2432 				}
2433 			}
2434 			sn->states = 0;
2435 		}
2436 		sn->expire = 1;
2437 		killed++;
2438 	}
2439 	if (killed) {
2440 		pf_purge_expired_src_nodes();
2441 	}
2442 }
2443 
2444 static void
pf_delete_rule_from_ruleset(struct pf_ruleset * ruleset,int rs_num,struct pf_rule * rule)2445 pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
2446     struct pf_rule *rule)
2447 {
2448 	struct pf_rule *r;
2449 	int nr = 0;
2450 
2451 	pf_expire_states_and_src_nodes(rule);
2452 
2453 	pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
2454 	if (ruleset->rules[rs_num].active.rcount-- == 0) {
2455 		panic("%s: rcount value broken!", __func__);
2456 	}
2457 	r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2458 
2459 	while (r) {
2460 		r->nr = nr++;
2461 		r = TAILQ_NEXT(r, entries);
2462 	}
2463 }
2464 
2465 
2466 static void
pf_ruleset_cleanup(struct pf_ruleset * ruleset,int rs)2467 pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
2468 {
2469 	pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
2470 	ruleset->rules[rs].active.ticket =
2471 	    ++ruleset->rules[rs].inactive.ticket;
2472 }
2473 
2474 /*
2475  * req_dev encodes the PF interface. Currently, possible values are
2476  * 0 or PFRULE_PFM
2477  */
/*
 * Delete the rule whose ticket matches pr->rule.ticket from the
 * anchor/ruleset named in pr, provided the caller's owner string and
 * requesting device (req_dev, 0 or PFRULE_PFM) match the rule's.
 * When deleting the last rule of an unowned sub-anchor, the deletion
 * walks up to the parent and removes the now-empty anchor call rule as
 * well, repeating until a non-empty level is reached.
 * Returns 0, ENOENT (no such ticket), or EACCES (owner/device mismatch).
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
	struct pf_ruleset       *ruleset;
	struct pf_rule          *rule = NULL;
	int                      is_anchor;
	int                      error = 0;
	int                      i;

	is_anchor = (pr->anchor_call[0] != '\0');
	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
	    pr->rule.owner, is_anchor, &error)) == NULL) {
		goto done;
	}

	/* search every rule set for the ticket */
	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		error = ENOENT;
		goto done;
	} else {
		/* undo the loop's final increment: i is the matching set */
		i--;
	}

	if (strcmp(rule->owner, pr->rule.owner)) {
		error = EACCES;
		goto done;
	}

delete_rule:
	/*
	 * If this is the last rule of an ownerless sub-anchor, delete it
	 * and then loop to delete the parent's anchor-call rule too.
	 */
	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
		/* set rule & ruleset to parent and repeat */
		struct pf_rule *delete_rule = rule;
		struct pf_ruleset *delete_ruleset = ruleset;

		/* NOTE(review): macro intentionally left defined past this scope */
#define parent_ruleset          ruleset->anchor->parent->ruleset
		if (ruleset->anchor->parent == NULL) {
			ruleset = &pf_main_ruleset;
		} else {
			ruleset = &parent_ruleset;
		}

		/* find the parent rule that calls into the doomed anchor */
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule &&
		    (rule->anchor != delete_ruleset->anchor)) {
			rule = TAILQ_NEXT(rule, entries);
		}
		if (rule == NULL) {
			panic("%s: rule not found!", __func__);
		}

		/*
		 * if reqest device != rule's device, bail :
		 * with error if ticket matches;
		 * without error if ticket doesn't match (i.e. its just cleanup)
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			if (rule->ticket != pr->rule.ticket) {
				goto done;
			} else {
				error = EACCES;
				goto done;
			}
		}

		if (delete_rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}

		pf_delete_rule_from_ruleset(delete_ruleset,
		    i, delete_rule);
		delete_ruleset->rules[i].active.ticket =
		    ++delete_ruleset->rules[i].inactive.ticket;
		goto delete_rule;
	} else {
		/*
		 * process deleting rule only if device that added the
		 * rule matches device that issued the request
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			error = EACCES;
			goto done;
		}
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}
		pf_delete_rule_from_ruleset(ruleset, i,
		    rule);
		pf_ruleset_cleanup(ruleset, i);
	}

done:
	/* drop the reference taken by pf_find_ruleset_with_owner() */
	if (ruleset) {
		pf_release_ruleset(ruleset);
		ruleset = NULL;
	}
	return error;
}
2582 
2583 /*
2584  * req_dev encodes the PF interface. Currently, possible values are
2585  * 0 or PFRULE_PFM
2586  */
/*
 * Delete every rule belonging to `owner` whose originating device
 * matches req_dev (0 or PFRULE_PFM), across all rule sets, descending
 * into anchors owned by `owner` (or unowned ones) and stepping back
 * out when a level is exhausted.  `deleted` batches the per-level
 * pf_ruleset_cleanup() so skip steps/tickets are refreshed once per
 * affected ruleset rather than per rule.
 */
static void
pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
{
	struct pf_ruleset       *ruleset;
	struct pf_rule          *rule, *next;
	int                      deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			/* grab the successor first: `rule` may be freed below */
			next = TAILQ_NEXT(rule, entries);
			/*
			 * process deleting rule only if device that added the
			 * rule matches device that issued the request
			 */
			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
				rule = next;
			} else if (rule->anchor) {
				if (((strcmp(rule->owner, owner)) == 0) ||
				    ((strcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						/* empty anchor: delete its call rule */
						if (rule->rule_flag &
						    PFRULE_PFM) {
							pffwrules--;
						}
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else {
					rule = next;
				}
			} else {
				if (((strcmp(rule->owner, owner)) == 0)) {
					/* delete rule */
					if (rule->rule_flag & PFRULE_PFM) {
						pffwrules--;
					}
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			/* end of this level: clean up, then pop out of anchor */
			if (rule == NULL) {
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset) {
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
				}
			}
		}
	}
}
2655 
2656 static void
pf_deleterule_anchor_step_out(struct pf_ruleset ** ruleset_ptr,int rs,struct pf_rule ** rule_ptr)2657 pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
2658     int rs, struct pf_rule **rule_ptr)
2659 {
2660 	struct pf_ruleset *ruleset = *ruleset_ptr;
2661 	struct pf_rule *rule = *rule_ptr;
2662 
2663 	/* step out of anchor */
2664 	struct pf_ruleset *rs_copy = ruleset;
2665 	ruleset = ruleset->anchor->parent?
2666 	    &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
2667 
2668 	rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
2669 	while (rule && (rule->anchor != rs_copy->anchor)) {
2670 		rule = TAILQ_NEXT(rule, entries);
2671 	}
2672 	if (rule == NULL) {
2673 		panic("%s: parent rule of anchor not found!", __func__);
2674 	}
2675 	if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
2676 		rule = TAILQ_NEXT(rule, entries);
2677 	}
2678 
2679 	*ruleset_ptr = ruleset;
2680 	*rule_ptr = rule;
2681 }
2682 
2683 static void
pf_addrwrap_setup(struct pf_addr_wrap * aw)2684 pf_addrwrap_setup(struct pf_addr_wrap *aw)
2685 {
2686 	VERIFY(aw);
2687 	bzero(&aw->p, sizeof aw->p);
2688 }
2689 
/*
 * Resolve a freshly copied-in rule's references (interface, tags,
 * dynamic/table addresses, anchor, overload table, address pool)
 * against `ruleset` before it is inserted.
 *
 * Error handling is accumulate-then-teardown: after the initial kif
 * lookup, failures only set `error` and setup continues, so that every
 * reference the rule may hold is attached before pf_rm_rule() releases
 * them all on failure.  Do not convert these to early returns.
 *
 * Returns 0 on success; EINVAL/EBUSY on failure, in which case `rule`
 * has already been freed (via pool_put or pf_rm_rule).
 */
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset)
{
	struct pf_pooladdr      *apa;
	int                      error = 0;

	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			/* nothing attached yet: plain pool_put suffices */
			pool_put(&pf_rule_pl, rule);
			return EINVAL;
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
	if (rule->tagname[0]) {
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) {
			error = EBUSY;
		}
	}
	if (rule->match_tagname[0]) {
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0) {
			error = EBUSY;
		}
	}
	/* route-to et al. require an explicit direction */
	if (rule->rt && !rule->direction) {
		error = EINVAL;
	}
#if PFLOG
	if (!rule->log) {
		rule->logif = 0;
	}
	if (rule->logif >= PFLOGIFS_MAX) {
		error = EINVAL;
	}
#endif /* PFLOG */
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr)) {
		error = EBUSY;
	}
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) {
		error = EINVAL;
	}
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->src.addr)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) {
		error = EINVAL;
	}
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) {
		error = EINVAL;
	}
	/* resolve table addresses staged in the pool address buffer */
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
	if (pf_tbladdr_setup(ruleset, &apa->addr)) {
		error = EINVAL;
	}

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL) {
			error = EINVAL;
		} else {
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
		}
	}

	/* move staged pool addresses into the rule's redirection pool */
	pf_mv_pool(&pf_pabuf, &rule->rpool.list);

	/* translation/route rules (outside anchors) need a non-empty pool */
	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
	    rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL)) {
		error = EINVAL;
	}

	if (error) {
		/* releases kif, tags, tables, pool entries, and the rule */
		pf_rm_rule(NULL, rule);
		return error;
	}
	/* For a NAT64 rule the rule's address family is AF_INET6 whereas
	 * the address pool's family will be AF_INET
	 */
	rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET: rule->af;
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return 0;
}
2787 
2788 static int
pfioctl_ioc_rule(u_long cmd,int minordev,struct pfioc_rule * pr,struct proc * p)2789 pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
2790 {
2791 	int error = 0;
2792 	u_int32_t req_dev = 0;
2793 	struct pf_ruleset *ruleset = NULL;
2794 
2795 	switch (cmd) {
2796 	case DIOCADDRULE: {
2797 		struct pf_rule          *rule, *tail;
2798 		int                     rs_num;
2799 
2800 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2801 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2802 		ruleset = pf_find_ruleset(pr->anchor);
2803 		if (ruleset == NULL) {
2804 			error = EINVAL;
2805 			break;
2806 		}
2807 		rs_num = pf_get_ruleset_number(pr->rule.action);
2808 		if (rs_num >= PF_RULESET_MAX) {
2809 			error = EINVAL;
2810 			break;
2811 		}
2812 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
2813 			error = EINVAL;
2814 			break;
2815 		}
2816 		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
2817 			error = EBUSY;
2818 			break;
2819 		}
2820 		if (pr->pool_ticket != ticket_pabuf) {
2821 			error = EBUSY;
2822 			break;
2823 		}
2824 		rule = pool_get(&pf_rule_pl, PR_WAITOK);
2825 		if (rule == NULL) {
2826 			error = ENOMEM;
2827 			break;
2828 		}
2829 		pf_rule_copyin(&pr->rule, rule, p, minordev);
2830 #if !INET
2831 		if (rule->af == AF_INET) {
2832 			pool_put(&pf_rule_pl, rule);
2833 			error = EAFNOSUPPORT;
2834 			break;
2835 		}
2836 #endif /* INET */
2837 		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2838 		    pf_rulequeue);
2839 		if (tail) {
2840 			rule->nr = tail->nr + 1;
2841 		} else {
2842 			rule->nr = 0;
2843 		}
2844 
2845 		if ((error = pf_rule_setup(pr, rule, ruleset))) {
2846 			break;
2847 		}
2848 
2849 		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2850 		    rule, entries);
2851 		ruleset->rules[rs_num].inactive.rcount++;
2852 		if (rule->rule_flag & PFRULE_PFM) {
2853 			pffwrules++;
2854 		}
2855 
2856 		if (rule->action == PF_NAT64) {
2857 			atomic_add_16(&pf_nat64_configured, 1);
2858 		}
2859 
2860 		if (pr->anchor_call[0] == '\0') {
2861 			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
2862 			if (rule->rule_flag & PFRULE_PFM) {
2863 				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
2864 			}
2865 		}
2866 
2867 #if DUMMYNET
2868 		if (rule->action == PF_DUMMYNET) {
2869 			struct dummynet_event dn_event;
2870 			uint32_t direction = DN_INOUT;
2871 			bzero(&dn_event, sizeof(dn_event));
2872 
2873 			dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;
2874 
2875 			if (rule->direction == PF_IN) {
2876 				direction = DN_IN;
2877 			} else if (rule->direction == PF_OUT) {
2878 				direction = DN_OUT;
2879 			}
2880 
2881 			dn_event.dn_event_rule_config.dir = direction;
2882 			dn_event.dn_event_rule_config.af = rule->af;
2883 			dn_event.dn_event_rule_config.proto = rule->proto;
2884 			dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
2885 			dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
2886 			strlcpy(dn_event.dn_event_rule_config.ifname, rule->ifname,
2887 			    sizeof(dn_event.dn_event_rule_config.ifname));
2888 
2889 			dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2890 		}
2891 #endif
2892 		break;
2893 	}
2894 
2895 	case DIOCGETRULES: {
2896 		struct pf_rule          *tail;
2897 		int                      rs_num;
2898 
2899 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2900 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2901 		ruleset = pf_find_ruleset(pr->anchor);
2902 		if (ruleset == NULL) {
2903 			error = EINVAL;
2904 			break;
2905 		}
2906 		rs_num = pf_get_ruleset_number(pr->rule.action);
2907 		if (rs_num >= PF_RULESET_MAX) {
2908 			error = EINVAL;
2909 			break;
2910 		}
2911 		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2912 		    pf_rulequeue);
2913 		if (tail) {
2914 			pr->nr = tail->nr + 1;
2915 		} else {
2916 			pr->nr = 0;
2917 		}
2918 		pr->ticket = ruleset->rules[rs_num].active.ticket;
2919 		break;
2920 	}
2921 
2922 	case DIOCGETRULE: {
2923 		struct pf_rule          *rule;
2924 		int                      rs_num, i;
2925 
2926 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
2927 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
2928 		ruleset = pf_find_ruleset(pr->anchor);
2929 		if (ruleset == NULL) {
2930 			error = EINVAL;
2931 			break;
2932 		}
2933 		rs_num = pf_get_ruleset_number(pr->rule.action);
2934 		if (rs_num >= PF_RULESET_MAX) {
2935 			error = EINVAL;
2936 			break;
2937 		}
2938 		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
2939 			error = EBUSY;
2940 			break;
2941 		}
2942 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
2943 		while ((rule != NULL) && (rule->nr != pr->nr)) {
2944 			rule = TAILQ_NEXT(rule, entries);
2945 		}
2946 		if (rule == NULL) {
2947 			error = EBUSY;
2948 			break;
2949 		}
2950 		pf_rule_copyout(rule, &pr->rule);
2951 		if (pf_anchor_copyout(ruleset, rule, pr)) {
2952 			error = EBUSY;
2953 			break;
2954 		}
2955 		pfi_dynaddr_copyout(&pr->rule.src.addr);
2956 		pfi_dynaddr_copyout(&pr->rule.dst.addr);
2957 		pf_tbladdr_copyout(&pr->rule.src.addr);
2958 		pf_tbladdr_copyout(&pr->rule.dst.addr);
2959 		pf_rtlabel_copyout(&pr->rule.src.addr);
2960 		pf_rtlabel_copyout(&pr->rule.dst.addr);
2961 		for (i = 0; i < PF_SKIP_COUNT; ++i) {
2962 			if (rule->skip[i].ptr == NULL) {
2963 				pr->rule.skip[i].nr = -1;
2964 			} else {
2965 				pr->rule.skip[i].nr =
2966 				    rule->skip[i].ptr->nr;
2967 			}
2968 		}
2969 
2970 		if (pr->action == PF_GET_CLR_CNTR) {
2971 			rule->evaluations = 0;
2972 			rule->packets[0] = rule->packets[1] = 0;
2973 			rule->bytes[0] = rule->bytes[1] = 0;
2974 		}
2975 		break;
2976 	}
2977 
2978 	case DIOCCHANGERULE: {
2979 		struct pfioc_rule       *pcr = pr;
2980 		struct pf_rule          *oldrule = NULL, *newrule = NULL;
2981 		struct pf_pooladdr      *pa;
2982 		u_int32_t                nr = 0;
2983 		int                      rs_num;
2984 
2985 		if (!(pcr->action == PF_CHANGE_REMOVE ||
2986 		    pcr->action == PF_CHANGE_GET_TICKET) &&
2987 		    pcr->pool_ticket != ticket_pabuf) {
2988 			error = EBUSY;
2989 			break;
2990 		}
2991 
2992 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
2993 		    pcr->action > PF_CHANGE_GET_TICKET) {
2994 			error = EINVAL;
2995 			break;
2996 		}
2997 		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
2998 		pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0';
2999 		ruleset = pf_find_ruleset(pcr->anchor);
3000 		if (ruleset == NULL) {
3001 			error = EINVAL;
3002 			break;
3003 		}
3004 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3005 		if (rs_num >= PF_RULESET_MAX) {
3006 			error = EINVAL;
3007 			break;
3008 		}
3009 
3010 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3011 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3012 			break;
3013 		} else {
3014 			if (pcr->ticket !=
3015 			    ruleset->rules[rs_num].active.ticket) {
3016 				error = EINVAL;
3017 				break;
3018 			}
3019 			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3020 				error = EINVAL;
3021 				break;
3022 			}
3023 		}
3024 
3025 		if (pcr->action != PF_CHANGE_REMOVE) {
3026 			newrule = pool_get(&pf_rule_pl, PR_WAITOK);
3027 			if (newrule == NULL) {
3028 				error = ENOMEM;
3029 				break;
3030 			}
3031 			pf_rule_copyin(&pcr->rule, newrule, p, minordev);
3032 #if !INET
3033 			if (newrule->af == AF_INET) {
3034 				pool_put(&pf_rule_pl, newrule);
3035 				error = EAFNOSUPPORT;
3036 				break;
3037 			}
3038 #endif /* INET */
3039 			if (newrule->ifname[0]) {
3040 				newrule->kif = pfi_kif_get(newrule->ifname);
3041 				if (newrule->kif == NULL) {
3042 					pool_put(&pf_rule_pl, newrule);
3043 					error = EINVAL;
3044 					break;
3045 				}
3046 				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
3047 			} else {
3048 				newrule->kif = NULL;
3049 			}
3050 
3051 			if (newrule->tagname[0]) {
3052 				if ((newrule->tag =
3053 				    pf_tagname2tag(newrule->tagname)) == 0) {
3054 					error = EBUSY;
3055 				}
3056 			}
3057 			if (newrule->match_tagname[0]) {
3058 				if ((newrule->match_tag = pf_tagname2tag(
3059 					    newrule->match_tagname)) == 0) {
3060 					error = EBUSY;
3061 				}
3062 			}
3063 			if (newrule->rt && !newrule->direction) {
3064 				error = EINVAL;
3065 			}
3066 #if PFLOG
3067 			if (!newrule->log) {
3068 				newrule->logif = 0;
3069 			}
3070 			if (newrule->logif >= PFLOGIFS_MAX) {
3071 				error = EINVAL;
3072 			}
3073 #endif /* PFLOG */
3074 			pf_addrwrap_setup(&newrule->src.addr);
3075 			pf_addrwrap_setup(&newrule->dst.addr);
3076 			if (pf_rtlabel_add(&newrule->src.addr) ||
3077 			    pf_rtlabel_add(&newrule->dst.addr)) {
3078 				error = EBUSY;
3079 			}
3080 			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) {
3081 				error = EINVAL;
3082 			}
3083 			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) {
3084 				error = EINVAL;
3085 			}
3086 			if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) {
3087 				error = EINVAL;
3088 			}
3089 			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) {
3090 				error = EINVAL;
3091 			}
3092 			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) {
3093 				error = EINVAL;
3094 			}
3095 			TAILQ_FOREACH(pa, &pf_pabuf, entries)
3096 			if (pf_tbladdr_setup(ruleset, &pa->addr)) {
3097 				error = EINVAL;
3098 			}
3099 
3100 			if (newrule->overload_tblname[0]) {
3101 				if ((newrule->overload_tbl = pfr_attach_table(
3102 					    ruleset, newrule->overload_tblname)) ==
3103 				    NULL) {
3104 					error = EINVAL;
3105 				} else {
3106 					newrule->overload_tbl->pfrkt_flags |=
3107 					    PFR_TFLAG_ACTIVE;
3108 				}
3109 			}
3110 
3111 			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
3112 			if (((((newrule->action == PF_NAT) ||
3113 			    (newrule->action == PF_RDR) ||
3114 			    (newrule->action == PF_BINAT) ||
3115 			    (newrule->rt > PF_FASTROUTE)) &&
3116 			    !newrule->anchor)) &&
3117 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL)) {
3118 				error = EINVAL;
3119 			}
3120 
3121 			if (error) {
3122 				pf_rm_rule(NULL, newrule);
3123 				break;
3124 			}
3125 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3126 			newrule->evaluations = 0;
3127 			newrule->packets[0] = newrule->packets[1] = 0;
3128 			newrule->bytes[0] = newrule->bytes[1] = 0;
3129 		}
3130 		pf_empty_pool(&pf_pabuf);
3131 
3132 		if (pcr->action == PF_CHANGE_ADD_HEAD) {
3133 			oldrule = TAILQ_FIRST(
3134 				ruleset->rules[rs_num].active.ptr);
3135 		} else if (pcr->action == PF_CHANGE_ADD_TAIL) {
3136 			oldrule = TAILQ_LAST(
3137 				ruleset->rules[rs_num].active.ptr, pf_rulequeue);
3138 		} else {
3139 			oldrule = TAILQ_FIRST(
3140 				ruleset->rules[rs_num].active.ptr);
3141 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) {
3142 				oldrule = TAILQ_NEXT(oldrule, entries);
3143 			}
3144 			if (oldrule == NULL) {
3145 				if (newrule != NULL) {
3146 					pf_rm_rule(NULL, newrule);
3147 				}
3148 				error = EINVAL;
3149 				break;
3150 			}
3151 		}
3152 
3153 		if (pcr->action == PF_CHANGE_REMOVE) {
3154 			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
3155 			ruleset->rules[rs_num].active.rcount--;
3156 		} else {
3157 			if (oldrule == NULL) {
3158 				TAILQ_INSERT_TAIL(
3159 					ruleset->rules[rs_num].active.ptr,
3160 					newrule, entries);
3161 			} else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3162 			    pcr->action == PF_CHANGE_ADD_BEFORE) {
3163 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3164 			} else {
3165 				TAILQ_INSERT_AFTER(
3166 					ruleset->rules[rs_num].active.ptr,
3167 					oldrule, newrule, entries);
3168 			}
3169 			ruleset->rules[rs_num].active.rcount++;
3170 		}
3171 
3172 		nr = 0;
3173 		TAILQ_FOREACH(oldrule,
3174 		    ruleset->rules[rs_num].active.ptr, entries)
3175 		oldrule->nr = nr++;
3176 
3177 		ruleset->rules[rs_num].active.ticket++;
3178 
3179 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3180 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
3181 		pf_process_compatibilities();
3182 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
3183 		break;
3184 	}
3185 
3186 	case DIOCINSERTRULE: {
3187 		struct pf_rule          *rule, *tail, *r;
3188 		int                     rs_num;
3189 		int                     is_anchor;
3190 
3191 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3192 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3193 		is_anchor = (pr->anchor_call[0] != '\0');
3194 
3195 		if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
3196 		    pr->rule.owner, is_anchor, &error)) == NULL) {
3197 			break;
3198 		}
3199 
3200 		rs_num = pf_get_ruleset_number(pr->rule.action);
3201 		if (rs_num >= PF_RULESET_MAX) {
3202 			error = EINVAL;
3203 			break;
3204 		}
3205 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3206 			error = EINVAL;
3207 			break;
3208 		}
3209 
3210 		/* make sure this anchor rule doesn't exist already */
3211 		if (is_anchor) {
3212 			r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3213 			while (r) {
3214 				if (r->anchor &&
3215 				    ((strcmp(r->anchor->name,
3216 				    pr->anchor_call)) == 0)) {
3217 					if (((strcmp(pr->rule.owner,
3218 					    r->owner)) == 0) ||
3219 					    ((strcmp(r->owner, "")) == 0)) {
3220 						error = EEXIST;
3221 					} else {
3222 						error = EPERM;
3223 					}
3224 					break;
3225 				}
3226 				r = TAILQ_NEXT(r, entries);
3227 			}
3228 			if (error != 0) {
3229 				break;
3230 			}
3231 		}
3232 
3233 		rule = pool_get(&pf_rule_pl, PR_WAITOK);
3234 		if (rule == NULL) {
3235 			error = ENOMEM;
3236 			break;
3237 		}
3238 		pf_rule_copyin(&pr->rule, rule, p, minordev);
3239 #if !INET
3240 		if (rule->af == AF_INET) {
3241 			pool_put(&pf_rule_pl, rule);
3242 			error = EAFNOSUPPORT;
3243 			break;
3244 		}
3245 #endif /* INET */
3246 		r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3247 		while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) {
3248 			r = TAILQ_NEXT(r, entries);
3249 		}
3250 		if (r == NULL) {
3251 			if ((tail =
3252 			    TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3253 			    pf_rulequeue)) != NULL) {
3254 				rule->nr = tail->nr + 1;
3255 			} else {
3256 				rule->nr = 0;
3257 			}
3258 		} else {
3259 			rule->nr = r->nr;
3260 		}
3261 
3262 		if ((error = pf_rule_setup(pr, rule, ruleset))) {
3263 			break;
3264 		}
3265 
3266 		if (rule->anchor != NULL) {
3267 			strlcpy(rule->anchor->owner, rule->owner,
3268 			    PF_OWNER_NAME_SIZE);
3269 		}
3270 
3271 		if (r) {
3272 			TAILQ_INSERT_BEFORE(r, rule, entries);
3273 			while (r && ++r->nr) {
3274 				r = TAILQ_NEXT(r, entries);
3275 			}
3276 		} else {
3277 			TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
3278 			    rule, entries);
3279 		}
3280 		ruleset->rules[rs_num].active.rcount++;
3281 
3282 		/* Calculate checksum for the main ruleset */
3283 		if (ruleset == &pf_main_ruleset) {
3284 			error = pf_setup_pfsync_matching(ruleset);
3285 		}
3286 
3287 		pf_ruleset_cleanup(ruleset, rs_num);
3288 		rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);
3289 
3290 		pr->rule.ticket = rule->ticket;
3291 		pf_rule_copyout(rule, &pr->rule);
3292 		if (rule->rule_flag & PFRULE_PFM) {
3293 			pffwrules++;
3294 		}
3295 		if (rule->action == PF_NAT64) {
3296 			atomic_add_16(&pf_nat64_configured, 1);
3297 		}
3298 
3299 		if (pr->anchor_call[0] == '\0') {
3300 			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
3301 			if (rule->rule_flag & PFRULE_PFM) {
3302 				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
3303 			}
3304 		}
3305 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
3306 		pf_process_compatibilities();
3307 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
3308 		break;
3309 	}
3310 
3311 	case DIOCDELETERULE: {
3312 		ASSERT(ruleset == NULL);
3313 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3314 		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
3315 
3316 		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3317 			error = EINVAL;
3318 			break;
3319 		}
3320 
3321 		/* get device through which request is made */
3322 		if ((uint8_t)minordev == PFDEV_PFM) {
3323 			req_dev |= PFRULE_PFM;
3324 		}
3325 
3326 		if (pr->rule.ticket) {
3327 			if ((error = pf_delete_rule_by_ticket(pr, req_dev))) {
3328 				break;
3329 			}
3330 		} else {
3331 			pf_delete_rule_by_owner(pr->rule.owner, req_dev);
3332 		}
3333 		pr->nr = pffwrules;
3334 		if (pr->rule.action == PF_NAT64) {
3335 			atomic_add_16(&pf_nat64_configured, -1);
3336 		}
3337 #if SKYWALK && defined(XNU_TARGET_OS_OSX)
3338 		pf_process_compatibilities();
3339 #endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
3340 		break;
3341 	}
3342 
3343 	default:
3344 		VERIFY(0);
3345 		/* NOTREACHED */
3346 	}
3347 	if (ruleset != NULL) {
3348 		pf_release_ruleset(ruleset);
3349 		ruleset = NULL;
3350 	}
3351 
3352 	return error;
3353 }
3354 
/*
 * Handle the state-kill ioctls:
 *   DIOCCLRSTATES  - unlink all states, optionally restricted to those
 *                    matching the given interface name and/or rule owner.
 *   DIOCKILLSTATES - unlink states matching address/mask, port, protocol,
 *                    address family, interface, and owner criteria.
 * On return, psk->psk_af is reused as an out-parameter carrying the number
 * of states killed (note: narrowed to sa_family_t).
 */
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	/* Defensively NUL-terminate the user-supplied filter strings. */
	psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state         *s, *nexts;
		int                      killed = 0;

		/*
		 * Fetch the successor before possibly unlinking the current
		 * node so the RB-tree walk survives removal.
		 */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			/* A state with no rule pointer can never match an owner. */
			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
#if NPFSYNC
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the returned kill count. */
		psk->psk_af = (sa_family_t)killed;
#if NPFSYNC
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
#endif
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state         *s, *nexts;
		struct pf_state_key     *sk;
		struct pf_state_host    *src, *dst;
		int                      killed = 0;

		/* Same removal-safe walk as DIOCCLRSTATES above. */
		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			/*
			 * Orient src/dst by the state's direction so the
			 * caller-supplied src/dst filters compare against
			 * the right endpoints.
			 */
			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			/*
			 * Zero af/proto filters are wildcards; addresses are
			 * matched under the supplied masks (with optional
			 * negation) and ports via pf_match_xport().
			 */
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
#if NPFSYNC
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
#endif
				pf_unlink_state(s);
				killed++;
			}
		}
		/* psk_af doubles as the returned kill count. */
		psk->psk_af = (sa_family_t)killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3481 
/*
 * Handle single-state ioctls:
 *   DIOCADDSTATE - import a user-supplied pfsync state record into the
 *                  state table (used e.g. to restore synced states).
 *   DIOCGETSTATE - export one state, looked up by (id, creatorid).
 */
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state     *sp = &ps->state;
		struct pf_state         *s;
		struct pf_state_key     *sk;
		struct pfi_kif          *kif;

		/* Reject out-of-range timeout classes from userland. */
		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		/* Allocate a state key; presumably attached to s — each
		 * failure path below unwinds the steps taken so far. */
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		/* Resolve the interface this state is bound to. */
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pf_detach_state(s, 0);
			pool_put(&pf_state_pl, s);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		/* Insertion fails if an equivalent state already exists. */
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		/* Guard against counter wrap. */
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state         *s;
		struct pf_state_cmp      id_key;

		/* Build the lookup key from the caller's id/creatorid. */
		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3555 
3556 static int
pfioctl_ioc_states(u_long cmd,struct pfioc_states_32 * ps32,struct pfioc_states_64 * ps64,struct proc * p)3557 pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
3558     struct pfioc_states_64 *ps64, struct proc *p)
3559 {
3560 	int p64 = proc_is64bit(p);
3561 	int error = 0;
3562 
3563 	switch (cmd) {
3564 	case DIOCGETSTATES: {           /* struct pfioc_states */
3565 		struct pf_state         *state;
3566 		struct pfsync_state     *pstore;
3567 		user_addr_t              buf;
3568 		u_int32_t                nr = 0;
3569 		int                      len, size;
3570 
3571 		len = (p64 ? ps64->ps_len : ps32->ps_len);
3572 		if (len == 0) {
3573 			size = sizeof(struct pfsync_state) * pf_status.states;
3574 			if (p64) {
3575 				ps64->ps_len = size;
3576 			} else {
3577 				ps32->ps_len = size;
3578 			}
3579 			break;
3580 		}
3581 
3582 		pstore = kalloc_type(struct pfsync_state,
3583 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
3584 #ifdef __LP64__
3585 		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);
3586 #else
3587 		buf = ps32->ps_buf;
3588 #endif
3589 
3590 		state = TAILQ_FIRST(&state_list);
3591 		while (state) {
3592 			if (state->timeout != PFTM_UNLINKED) {
3593 				if ((nr + 1) * sizeof(*pstore) > (unsigned)len) {
3594 					break;
3595 				}
3596 
3597 				pf_state_export(pstore,
3598 				    state->state_key, state);
3599 				error = copyout(pstore, buf, sizeof(*pstore));
3600 				if (error) {
3601 					kfree_type(struct pfsync_state, pstore);
3602 					goto fail;
3603 				}
3604 				buf += sizeof(*pstore);
3605 				nr++;
3606 			}
3607 			state = TAILQ_NEXT(state, entry_list);
3608 		}
3609 
3610 		size = sizeof(struct pfsync_state) * nr;
3611 		if (p64) {
3612 			ps64->ps_len = size;
3613 		} else {
3614 			ps32->ps_len = size;
3615 		}
3616 
3617 		kfree_type(struct pfsync_state, pstore);
3618 		break;
3619 	}
3620 
3621 	default:
3622 		VERIFY(0);
3623 		/* NOTREACHED */
3624 	}
3625 fail:
3626 	return error;
3627 }
3628 
/*
 * DIOCNATLOOK: given the source/destination of a connection as seen by the
 * caller, find the matching state and report the translated (NATed)
 * addresses and ports back to userland.
 */
static int
pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCNATLOOK: {
		struct pf_state_key     *sk;
		struct pf_state         *state;
		struct pf_state_key_cmp  key;
		int                      m = 0, direction = pnl->direction;

		key.proto = pnl->proto;
		key.proto_variant = pnl->proto_variant;

		/*
		 * Require protocol and both addresses; TCP/UDP lookups
		 * additionally require both ports.
		 */
		if (!pnl->proto ||
		    PF_AZERO(&pnl->saddr, pnl->af) ||
		    PF_AZERO(&pnl->daddr, pnl->af) ||
		    ((pnl->proto == IPPROTO_TCP ||
		    pnl->proto == IPPROTO_UDP) &&
		    (!pnl->dxport.port || !pnl->sxport.port))) {
			error = EINVAL;
		} else {
			/*
			 * userland gives us source and dest of connection,
			 * reverse the lookup so we ask for what happens with
			 * the return traffic, enabling us to find it in the
			 * state tree.
			 */
			if (direction == PF_IN) {
				/* Inbound: look up on the gateway side. */
				key.af_gwy = pnl->af;
				PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
				    pnl->af);
				memcpy(&key.ext_gwy.xport, &pnl->dxport,
				    sizeof(key.ext_gwy.xport));
				PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
				memcpy(&key.gwy.xport, &pnl->sxport,
				    sizeof(key.gwy.xport));
				state = pf_find_state_all(&key, PF_IN, &m);
			} else {
				/* Outbound: look up on the LAN side. */
				key.af_lan = pnl->af;
				PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
				memcpy(&key.lan.xport, &pnl->dxport,
				    sizeof(key.lan.xport));
				PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
				    pnl->af);
				memcpy(&key.ext_lan.xport, &pnl->sxport,
				    sizeof(key.ext_lan.xport));
				state = pf_find_state_all(&key, PF_OUT, &m);
			}
			if (m > 1) {
				error = E2BIG;  /* more than one state */
			} else if (state != NULL) {
				/*
				 * Fill in the r* (translated) fields from the
				 * matched state key; the untranslated side is
				 * echoed back from the caller's input.
				 */
				sk = state->state_key;
				if (direction == PF_IN) {
					PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
					    sk->af_lan);
					memcpy(&pnl->rsxport, &sk->lan.xport,
					    sizeof(pnl->rsxport));
					PF_ACPY(&pnl->rdaddr, &pnl->daddr,
					    pnl->af);
					memcpy(&pnl->rdxport, &pnl->dxport,
					    sizeof(pnl->rdxport));
				} else {
					PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
					    sk->af_gwy);
					memcpy(&pnl->rdxport, &sk->gwy.xport,
					    sizeof(pnl->rdxport));
					PF_ACPY(&pnl->rsaddr, &pnl->saddr,
					    pnl->af);
					memcpy(&pnl->rsxport, &pnl->sxport,
					    sizeof(pnl->rsxport));
				}
			} else {
				error = ENOENT;
			}
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
3717 
3718 static int
pfioctl_ioc_tm(u_long cmd,struct pfioc_tm * pt,struct proc * p)3719 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3720 {
3721 #pragma unused(p)
3722 	int error = 0;
3723 
3724 	switch (cmd) {
3725 	case DIOCSETTIMEOUT: {
3726 		int old;
3727 
3728 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3729 		    pt->seconds < 0) {
3730 			error = EINVAL;
3731 			goto fail;
3732 		}
3733 		old = pf_default_rule.timeout[pt->timeout];
3734 		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) {
3735 			pt->seconds = 1;
3736 		}
3737 		pf_default_rule.timeout[pt->timeout] = pt->seconds;
3738 		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) {
3739 			wakeup(pf_purge_thread_fn);
3740 		}
3741 		pt->seconds = old;
3742 		break;
3743 	}
3744 
3745 	case DIOCGETTIMEOUT: {
3746 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3747 			error = EINVAL;
3748 			goto fail;
3749 		}
3750 		pt->seconds = pf_default_rule.timeout[pt->timeout];
3751 		break;
3752 	}
3753 
3754 	default:
3755 		VERIFY(0);
3756 		/* NOTREACHED */
3757 	}
3758 fail:
3759 	return error;
3760 }
3761 
3762 static int
pfioctl_ioc_limit(u_long cmd,struct pfioc_limit * pl,struct proc * p)3763 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3764 {
3765 #pragma unused(p)
3766 	int error = 0;
3767 
3768 	switch (cmd) {
3769 	case DIOCGETLIMIT: {
3770 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3771 			error = EINVAL;
3772 			goto fail;
3773 		}
3774 		pl->limit = pf_pool_limits[pl->index].limit;
3775 		break;
3776 	}
3777 
3778 	case DIOCSETLIMIT: {
3779 		int old_limit;
3780 
3781 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3782 		    pf_pool_limits[pl->index].pp == NULL) {
3783 			error = EINVAL;
3784 			goto fail;
3785 		}
3786 		pool_sethardlimit(pf_pool_limits[pl->index].pp,
3787 		    pl->limit, NULL, 0);
3788 		old_limit = pf_pool_limits[pl->index].limit;
3789 		pf_pool_limits[pl->index].limit = pl->limit;
3790 		pl->limit = old_limit;
3791 		break;
3792 	}
3793 
3794 	default:
3795 		VERIFY(0);
3796 		/* NOTREACHED */
3797 	}
3798 fail:
3799 	return error;
3800 }
3801 
3802 static int
pfioctl_ioc_pooladdr(u_long cmd,struct pfioc_pooladdr * pp,struct proc * p)3803 pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
3804 {
3805 #pragma unused(p)
3806 	struct pf_pooladdr *pa = NULL;
3807 	struct pf_pool *pool = NULL;
3808 	int error = 0;
3809 	struct pf_ruleset *ruleset = NULL;
3810 
3811 	switch (cmd) {
3812 	case DIOCBEGINADDRS: {
3813 		pf_empty_pool(&pf_pabuf);
3814 		pp->ticket = ++ticket_pabuf;
3815 		break;
3816 	}
3817 
3818 	case DIOCADDADDR: {
3819 		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3820 		if (pp->ticket != ticket_pabuf) {
3821 			error = EBUSY;
3822 			break;
3823 		}
3824 #if !INET
3825 		if (pp->af == AF_INET) {
3826 			error = EAFNOSUPPORT;
3827 			break;
3828 		}
3829 #endif /* INET */
3830 		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
3831 		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
3832 		    pp->addr.addr.type != PF_ADDR_TABLE) {
3833 			error = EINVAL;
3834 			break;
3835 		}
3836 		pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3837 		if (pa == NULL) {
3838 			error = ENOMEM;
3839 			break;
3840 		}
3841 		pf_pooladdr_copyin(&pp->addr, pa);
3842 		if (pa->ifname[0]) {
3843 			pa->kif = pfi_kif_get(pa->ifname);
3844 			if (pa->kif == NULL) {
3845 				pool_put(&pf_pooladdr_pl, pa);
3846 				error = EINVAL;
3847 				break;
3848 			}
3849 			pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
3850 		}
3851 		pf_addrwrap_setup(&pa->addr);
3852 		if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
3853 			pfi_dynaddr_remove(&pa->addr);
3854 			pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
3855 			pool_put(&pf_pooladdr_pl, pa);
3856 			error = EINVAL;
3857 			break;
3858 		}
3859 		TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
3860 		break;
3861 	}
3862 
3863 	case DIOCGETADDRS: {
3864 		pp->nr = 0;
3865 		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3866 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
3867 		    pp->r_num, 0, 1, 0);
3868 		if (pool == NULL) {
3869 			error = EBUSY;
3870 			break;
3871 		}
3872 		TAILQ_FOREACH(pa, &pool->list, entries)
3873 		pp->nr++;
3874 		break;
3875 	}
3876 
3877 	case DIOCGETADDR: {
3878 		u_int32_t                nr = 0;
3879 
3880 		pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3881 		pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
3882 		    pp->r_num, 0, 1, 1);
3883 		if (pool == NULL) {
3884 			error = EBUSY;
3885 			break;
3886 		}
3887 		pa = TAILQ_FIRST(&pool->list);
3888 		while ((pa != NULL) && (nr < pp->nr)) {
3889 			pa = TAILQ_NEXT(pa, entries);
3890 			nr++;
3891 		}
3892 		if (pa == NULL) {
3893 			error = EBUSY;
3894 			break;
3895 		}
3896 		pf_pooladdr_copyout(pa, &pp->addr);
3897 		pfi_dynaddr_copyout(&pp->addr.addr);
3898 		pf_tbladdr_copyout(&pp->addr.addr);
3899 		pf_rtlabel_copyout(&pp->addr.addr);
3900 		break;
3901 	}
3902 
3903 	case DIOCCHANGEADDR: {
3904 		struct pfioc_pooladdr   *pca = pp;
3905 		struct pf_pooladdr      *oldpa = NULL, *newpa = NULL;
3906 
3907 		if (pca->action < PF_CHANGE_ADD_HEAD ||
3908 		    pca->action > PF_CHANGE_REMOVE) {
3909 			error = EINVAL;
3910 			break;
3911 		}
3912 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
3913 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
3914 		    pca->addr.addr.type != PF_ADDR_TABLE) {
3915 			error = EINVAL;
3916 			break;
3917 		}
3918 
3919 		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
3920 		ruleset = pf_find_ruleset(pca->anchor);
3921 		if (ruleset == NULL) {
3922 			error = EBUSY;
3923 			break;
3924 		}
3925 		pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
3926 		    pca->r_num, pca->r_last, 1, 1);
3927 		if (pool == NULL) {
3928 			error = EBUSY;
3929 			break;
3930 		}
3931 		if (pca->action != PF_CHANGE_REMOVE) {
3932 			newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3933 			if (newpa == NULL) {
3934 				error = ENOMEM;
3935 				break;
3936 			}
3937 			pf_pooladdr_copyin(&pca->addr, newpa);
3938 #if !INET
3939 			if (pca->af == AF_INET) {
3940 				pool_put(&pf_pooladdr_pl, newpa);
3941 				error = EAFNOSUPPORT;
3942 				break;
3943 			}
3944 #endif /* INET */
3945 			if (newpa->ifname[0]) {
3946 				newpa->kif = pfi_kif_get(newpa->ifname);
3947 				if (newpa->kif == NULL) {
3948 					pool_put(&pf_pooladdr_pl, newpa);
3949 					error = EINVAL;
3950 					break;
3951 				}
3952 				pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
3953 			} else {
3954 				newpa->kif = NULL;
3955 			}
3956 			pf_addrwrap_setup(&newpa->addr);
3957 			if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
3958 			    pf_tbladdr_setup(ruleset, &newpa->addr)) {
3959 				pfi_dynaddr_remove(&newpa->addr);
3960 				pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
3961 				pool_put(&pf_pooladdr_pl, newpa);
3962 				error = EINVAL;
3963 				break;
3964 			}
3965 		}
3966 
3967 		if (pca->action == PF_CHANGE_ADD_HEAD) {
3968 			oldpa = TAILQ_FIRST(&pool->list);
3969 		} else if (pca->action == PF_CHANGE_ADD_TAIL) {
3970 			oldpa = TAILQ_LAST(&pool->list, pf_palist);
3971 		} else {
3972 			int     i = 0;
3973 
3974 			oldpa = TAILQ_FIRST(&pool->list);
3975 			while ((oldpa != NULL) && (i < (int)pca->nr)) {
3976 				oldpa = TAILQ_NEXT(oldpa, entries);
3977 				i++;
3978 			}
3979 			if (oldpa == NULL) {
3980 				error = EINVAL;
3981 				break;
3982 			}
3983 		}
3984 
3985 		if (pca->action == PF_CHANGE_REMOVE) {
3986 			TAILQ_REMOVE(&pool->list, oldpa, entries);
3987 			pfi_dynaddr_remove(&oldpa->addr);
3988 			pf_tbladdr_remove(&oldpa->addr);
3989 			pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
3990 			pool_put(&pf_pooladdr_pl, oldpa);
3991 		} else {
3992 			if (oldpa == NULL) {
3993 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
3994 			} else if (pca->action == PF_CHANGE_ADD_HEAD ||
3995 			    pca->action == PF_CHANGE_ADD_BEFORE) {
3996 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
3997 			} else {
3998 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
3999 				    newpa, entries);
4000 			}
4001 		}
4002 
4003 		pool->cur = TAILQ_FIRST(&pool->list);
4004 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
4005 		    pca->af);
4006 		break;
4007 	}
4008 
4009 	default:
4010 		VERIFY(0);
4011 		/* NOTREACHED */
4012 	}
4013 
4014 	if (ruleset) {
4015 		pf_release_ruleset(ruleset);
4016 		ruleset = NULL;
4017 	}
4018 
4019 	return error;
4020 }
4021 
/*
 * Handle the anchor/ruleset enumeration ioctls:
 *
 *   DIOCGETRULESETS - count the child anchors of the ruleset named by
 *                     pr->path and return the count in pr->nr.
 *   DIOCGETRULESET  - return in pr->name the name of the pr->nr'th
 *                     child anchor of the ruleset named by pr->path.
 *
 * Returns 0 on success, EINVAL if the path does not name a ruleset,
 * or EBUSY if the requested index is out of range.
 */
static int
pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
{
#pragma unused(p)
	int error = 0;
	struct pf_ruleset *ruleset = NULL;

	switch (cmd) {
	case DIOCGETRULESETS: {
		struct pf_anchor        *anchor;

		/* defensively NUL-terminate the user-supplied strings */
		pr->path[sizeof(pr->path) - 1] = '\0';
		pr->name[sizeof(pr->name) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->nr = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			/* main ruleset: count only top-level anchors */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL) {
				pr->nr++;
			}
		} else {
			/* count the direct children of this anchor */
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			pr->nr++;
		}
		break;
	}

	case DIOCGETRULESET: {
		struct pf_anchor        *anchor;
		u_int32_t                nr = 0;

		pr->path[sizeof(pr->path) - 1] = '\0';
		if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
			error = EINVAL;
			break;
		}
		pr->name[0] = 0;
		if (ruleset->anchor == NULL) {
			/* XXX kludge for pf_main_ruleset */
			/* main ruleset: index over top-level anchors only */
			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
			if (anchor->parent == NULL && nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
		} else {
			RB_FOREACH(anchor, pf_anchor_node,
			    &ruleset->anchor->children)
			if (nr++ == pr->nr) {
				strlcpy(pr->name, anchor->name,
				    sizeof(pr->name));
				break;
			}
		}
		/* name still empty: no anchor exists at index pr->nr */
		if (!pr->name[0]) {
			error = EBUSY;
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	/* release the ruleset obtained via pf_find_ruleset() above */
	if (ruleset) {
		pf_release_ruleset(ruleset);
		ruleset = NULL;
	}
	return error;
}
4098 
/*
 * Handle the transaction ioctls DIOCXBEGIN, DIOCXROLLBACK and
 * DIOCXCOMMIT, each of which operates on a user-supplied array of
 * 'size' pfioc_trans_e elements at 'buf'.  io32/io64 are the 32-bit
 * and 64-bit user-space layouts of the request; the one matching the
 * calling process's ABI is used.
 *
 * Returns 0 on success or an errno value (ENODEV on element-size
 * mismatch, EFAULT on copyin/copyout failure, EBUSY/EINVAL on ticket
 * or ruleset validation failure, or whatever the pf/pfr helpers
 * return).
 */
static int
pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
    struct pfioc_trans_64 *io64, struct proc *p)
{
	int error = 0, esize, size;
	user_addr_t buf;
	struct pf_ruleset *rs = NULL;

#ifdef __LP64__
	int p64 = proc_is64bit(p);

	/* select the layout matching the caller's ABI */
	esize = (p64 ? io64->esize : io32->esize);
	size = (p64 ? io64->size : io32->size);
	buf = (p64 ? io64->array : io32->array);
#else
#pragma unused(io64, p)
	esize = io32->esize;
	size = io32->size;
	buf = io32->array;
#endif

	switch (cmd) {
	case DIOCXBEGIN: {
		struct pfioc_trans_e    *ioe;
		struct pfr_table        *table;
		int                      i;

		/* user/kernel disagreement on the element layout */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			/* defensively NUL-terminate the anchor path */
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				/* ALTQ is a no-op here */
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_begin(table,
				    &ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			default:
				if ((error = pf_begin_rules(&ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			}
			/* hand the freshly issued ticket back to user space */
			if (copyout(ioe, buf, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	case DIOCXROLLBACK: {
		struct pfioc_trans_e    *ioe;
		struct pfr_table        *table;
		int                      i;

		/* user/kernel disagreement on the element layout */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			/* defensively NUL-terminate the anchor path */
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_rollback(table,
				    ioe->ticket, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			default:
				if ((error = pf_rollback_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail; /* really bad */
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
		break;
	}

	case DIOCXCOMMIT: {
		struct pfioc_trans_e    *ioe;
		struct pfr_table        *table;
		user_addr_t              _buf = buf;	/* saved for second pass */
		int                      i;

		/* user/kernel disagreement on the element layout */
		if (esize != sizeof(*ioe)) {
			error = ENODEV;
			goto fail;
		}
		ioe = kalloc_type(struct pfioc_trans_e, Z_WAITOK);
		table = kalloc_type(struct pfr_table, Z_WAITOK);
		/* first makes sure everything will succeed */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			/* defensively NUL-terminate the anchor path */
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				/*
				 * NOTE(review): rs is overwritten on every
				 * iteration and released only once at fail: —
				 * verify pf_find_ruleset()/pf_release_ruleset()
				 * reference counting stays balanced here.
				 */
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL || !rs->topen || ioe->ticket !=
				    rs->tticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			default:
				/* reject out-of-range ruleset numbers */
				if (ioe->rs_num < 0 || ioe->rs_num >=
				    PF_RULESET_MAX) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EINVAL;
					goto fail;
				}
				rs = pf_find_ruleset(ioe->anchor);
				if (rs == NULL ||
				    !rs->rules[ioe->rs_num].inactive.open ||
				    rs->rules[ioe->rs_num].inactive.ticket !=
				    ioe->ticket) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					error = EBUSY;
					goto fail;
				}
				break;
			}
		}
		/* rewind to the start of the user array for the second pass */
		buf = _buf;
		/* now do the commit - no errors should happen here */
		for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
			if (copyin(buf, ioe, sizeof(*ioe))) {
				kfree_type(struct pfr_table, table);
				kfree_type(struct pfioc_trans_e, ioe);
				error = EFAULT;
				goto fail;
			}
			/* defensively NUL-terminate the anchor path */
			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
			switch (ioe->rs_num) {
			case PF_RULESET_ALTQ:
				break;
			case PF_RULESET_TABLE:
				bzero(table, sizeof(*table));
				strlcpy(table->pfrt_anchor, ioe->anchor,
				    sizeof(table->pfrt_anchor));
				if ((error = pfr_ina_commit(table, ioe->ticket,
				    NULL, NULL, 0))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			default:
				if ((error = pf_commit_rules(ioe->ticket,
				    ioe->rs_num, ioe->anchor))) {
					kfree_type(struct pfr_table, table);
					kfree_type(struct pfioc_trans_e, ioe);
					goto fail;
				}
				break;
			}
		}
		kfree_type(struct pfr_table, table);
		kfree_type(struct pfioc_trans_e, ioe);
#if SKYWALK && defined(XNU_TARGET_OS_OSX)
		/* rule set changed: re-evaluate compatibility markers */
		pf_process_compatibilities();
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
fail:
	/* release the last ruleset looked up in the commit pre-check */
	if (rs) {
		pf_release_ruleset(rs);
		rs = NULL;
	}
	return error;
}
4331 
4332 static int
pfioctl_ioc_src_nodes(u_long cmd,struct pfioc_src_nodes_32 * psn32,struct pfioc_src_nodes_64 * psn64,struct proc * p)4333 pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
4334     struct pfioc_src_nodes_64 *psn64, struct proc *p)
4335 {
4336 	int p64 = proc_is64bit(p);
4337 	int error = 0;
4338 
4339 	switch (cmd) {
4340 	case DIOCGETSRCNODES: {
4341 		struct pf_src_node      *n, *pstore;
4342 		user_addr_t              buf;
4343 		u_int32_t                nr = 0;
4344 		int                      space, size;
4345 
4346 		space = (p64 ? psn64->psn_len : psn32->psn_len);
4347 		if (space == 0) {
4348 			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
4349 			nr++;
4350 
4351 			size = sizeof(struct pf_src_node) * nr;
4352 			if (p64) {
4353 				psn64->psn_len = size;
4354 			} else {
4355 				psn32->psn_len = size;
4356 			}
4357 			break;
4358 		}
4359 
4360 		pstore = kalloc_type(struct pf_src_node, Z_WAITOK | Z_NOFAIL);
4361 #ifdef __LP64__
4362 		buf = (p64 ? psn64->psn_buf : psn32->psn_buf);
4363 #else
4364 		buf = psn32->psn_buf;
4365 #endif
4366 
4367 		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
4368 			uint64_t secs = pf_time_second(), diff;
4369 
4370 			if ((nr + 1) * sizeof(*pstore) > (unsigned)space) {
4371 				break;
4372 			}
4373 
4374 			bcopy(n, pstore, sizeof(*pstore));
4375 			if (n->rule.ptr != NULL) {
4376 				pstore->rule.nr = n->rule.ptr->nr;
4377 			}
4378 			pstore->creation = secs - pstore->creation;
4379 			if (pstore->expire > secs) {
4380 				pstore->expire -= secs;
4381 			} else {
4382 				pstore->expire = 0;
4383 			}
4384 
4385 			/* adjust the connection rate estimate */
4386 			diff = secs - n->conn_rate.last;
4387 			if (diff >= n->conn_rate.seconds) {
4388 				pstore->conn_rate.count = 0;
4389 			} else {
4390 				pstore->conn_rate.count -=
4391 				    n->conn_rate.count * diff /
4392 				    n->conn_rate.seconds;
4393 			}
4394 
4395 			_RB_PARENT(pstore, entry) = NULL;
4396 			RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
4397 			pstore->kif = NULL;
4398 
4399 			error = copyout(pstore, buf, sizeof(*pstore));
4400 			if (error) {
4401 				kfree_type(struct pf_src_node, pstore);
4402 				goto fail;
4403 			}
4404 			buf += sizeof(*pstore);
4405 			nr++;
4406 		}
4407 
4408 		size = sizeof(struct pf_src_node) * nr;
4409 		if (p64) {
4410 			psn64->psn_len = size;
4411 		} else {
4412 			psn32->psn_len = size;
4413 		}
4414 
4415 		kfree_type(struct pf_src_node, pstore);
4416 		break;
4417 	}
4418 
4419 	default:
4420 		VERIFY(0);
4421 		/* NOTREACHED */
4422 	}
4423 fail:
4424 	return error;
4425 }
4426 
/*
 * Handle DIOCKILLSRCNODES: mark for expiry every source node whose
 * src/dst addresses match the (possibly negated) address/mask pairs in
 * psnk, first detaching any states that still point at the node.  The
 * number of nodes killed is returned to user space in psnk->psnk_af.
 * Always returns 0.
 */
static int
pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
    struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCKILLSRCNODES: {
		struct pf_src_node      *sn;
		struct pf_state         *s;
		int                     killed = 0;

		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
			if (PF_MATCHA(psnk->psnk_src.neg,
			    &psnk->psnk_src.addr.v.a.addr,
			    &psnk->psnk_src.addr.v.a.mask,
			    &sn->addr, sn->af) &&
			    PF_MATCHA(psnk->psnk_dst.neg,
			    &psnk->psnk_dst.addr.v.a.addr,
			    &psnk->psnk_dst.addr.v.a.mask,
			    &sn->raddr, sn->af)) {
				/* Handle state to src_node linkage */
				if (sn->states != 0) {
					/* detach every state referencing sn */
					RB_FOREACH(s, pf_state_tree_id,
					    &tree_id) {
						if (s->src_node == sn) {
							s->src_node = NULL;
						}
						if (s->nat_src_node == sn) {
							s->nat_src_node = NULL;
						}
					}
					sn->states = 0;
				}
				/* expire = 1: eligible for immediate purge */
				sn->expire = 1;
				killed++;
			}
		}

		if (killed > 0) {
			pf_purge_expired_src_nodes();
		}

		/* psnk_af is reused to report the kill count back */
		psnk->psnk_af = (sa_family_t)killed;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
4482 
4483 static int
pfioctl_ioc_iface(u_long cmd,struct pfioc_iface_32 * io32,struct pfioc_iface_64 * io64,struct proc * p)4484 pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
4485     struct pfioc_iface_64 *io64, struct proc *p)
4486 {
4487 	int p64 = proc_is64bit(p);
4488 	int error = 0;
4489 
4490 	switch (cmd) {
4491 	case DIOCIGETIFACES: {
4492 		user_addr_t buf;
4493 		int esize;
4494 
4495 #ifdef __LP64__
4496 		buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
4497 		esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
4498 #else
4499 		buf = io32->pfiio_buffer;
4500 		esize = io32->pfiio_esize;
4501 #endif
4502 
4503 		/* esize must be that of the user space version of pfi_kif */
4504 		if (esize != sizeof(struct pfi_uif)) {
4505 			error = ENODEV;
4506 			break;
4507 		}
4508 		if (p64) {
4509 			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4510 		} else {
4511 			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4512 		}
4513 		error = pfi_get_ifaces(
4514 			p64 ? io64->pfiio_name : io32->pfiio_name, buf,
4515 			p64 ? &io64->pfiio_size : &io32->pfiio_size);
4516 		break;
4517 	}
4518 
4519 	case DIOCSETIFFLAG: {
4520 		if (p64) {
4521 			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4522 		} else {
4523 			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4524 		}
4525 
4526 		error = pfi_set_flags(
4527 			p64 ? io64->pfiio_name : io32->pfiio_name,
4528 			p64 ? io64->pfiio_flags : io32->pfiio_flags);
4529 		break;
4530 	}
4531 
4532 	case DIOCCLRIFFLAG: {
4533 		if (p64) {
4534 			io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4535 		} else {
4536 			io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4537 		}
4538 
4539 		error = pfi_clear_flags(
4540 			p64 ? io64->pfiio_name : io32->pfiio_name,
4541 			p64 ? io64->pfiio_flags : io32->pfiio_flags);
4542 		break;
4543 	}
4544 
4545 	default:
4546 		VERIFY(0);
4547 		/* NOTREACHED */
4548 	}
4549 
4550 	return error;
4551 }
4552 
/*
 * Per-address-family PF hook invoked from the IP input/output paths.
 * Runs a single packet (*mp, detached from any chain) through PF via
 * pf_inet_hook()/pf_inet6_hook(), then fixes up the m_nextpkt chain
 * linkage (mppn is the previous packet's m_nextpkt slot, if any).
 * Returns 0 when the packet passes, or the errno from the AF hook.
 */
int
pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
    unsigned int af, int input, struct ip_fw_args *fwa)
{
	int error = 0;
	struct mbuf *nextpkt;
	net_thread_marks_t marks;
	struct ifnet * pf_ifp = ifp;

	/* Always allow traffic on co-processor interfaces. */
	if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp)) {
		return 0;
	}

	marks = net_thread_marks_push(NET_THREAD_HELD_PF);

	if (marks != net_thread_marks_none) {
		lck_rw_lock_shared(&pf_perim_lock);
		/* PF disabled: skip filtering, but drop the perim lock below */
		if (!pf_is_enabled) {
			goto done;
		}
		lck_mtx_lock(&pf_lock);
	}

	if (mppn != NULL && *mppn != NULL) {
		VERIFY(*mppn == *mp);
	}
	/* detach this packet from the chain while PF examines it */
	if ((nextpkt = (*mp)->m_nextpkt) != NULL) {
		(*mp)->m_nextpkt = NULL;
	}

	/*
	 * For packets destined to locally hosted IP address
	 * ip_output_list sets Mbuf's pkt header's rcvif to
	 * the interface hosting the IP address.
	 * While on the output path ifp passed to pf_af_hook
	 * to such local communication is the loopback interface,
	 * the input path derives ifp from mbuf packet header's
	 * rcvif.
	 * This asymmetry causes issues with PF.
	 * To handle that case, we have a limited change here to
	 * pass interface as loopback if packets are looped in.
	 */
	if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		pf_ifp = lo_ifp;
	}

	switch (af) {
#if INET
	case AF_INET: {
		error = pf_inet_hook(pf_ifp, mp, input, fwa);
		break;
	}
#endif /* INET */
	case AF_INET6:
		error = pf_inet6_hook(pf_ifp, mp, input, fwa);
		break;
	default:
		/* unsupported AF: pass the packet through unmodified */
		break;
	}

	/* When packet valid, link to the next packet */
	if (*mp != NULL && nextpkt != NULL) {
		struct mbuf *m = *mp;
		while (m->m_nextpkt != NULL) {
			m = m->m_nextpkt;
		}
		m->m_nextpkt = nextpkt;
	}
	/* Fix up linkage of previous packet in the chain */
	if (mppn != NULL) {
		if (*mp != NULL) {
			*mppn = *mp;
		} else {
			/* packet was consumed: splice the chain past it */
			*mppn = nextpkt;
		}
	}

	if (marks != net_thread_marks_none) {
		lck_mtx_unlock(&pf_lock);
	}

done:
	if (marks != net_thread_marks_none) {
		lck_rw_done(&pf_perim_lock);
	}

	net_thread_marks_pop(marks);
	return error;
}
4643 
4644 
#if INET
/*
 * IPv4 PF hook: finalize any delayed checksums for locally originated
 * outbound packets, byte-swap ip_len/ip_off to network order around the
 * pf_test_mbuf() call (PF expects them swapped on little-endian hosts),
 * and translate the PF verdict into an errno.  On a non-PASS verdict
 * the mbuf is freed (EHOSTUNREACH) or already consumed (EJUSTRETURN).
 */
static __attribute__((noinline)) int
pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	struct mbuf *m = *mp;
#if BYTE_ORDER != BIG_ENDIAN
	struct ip *ip = mtod(m, struct ip *);
#endif
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && m->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_DATA;
		const int flags = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			in_delayed_cksum(m);
			m->m_pkthdr.csum_flags &= ~mask;
		}
	}

#if BYTE_ORDER != BIG_ENDIAN
	/* PF expects ip_len/ip_off in network byte order */
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* PF consumed the mbuf (e.g. rerouted it) */
			error = EJUSTRETURN;
		}
	}
#if BYTE_ORDER != BIG_ENDIAN
	else {
		/* passed: restore host byte order for the caller */
		if (*mp != NULL) {
			/* re-fetch; PF may have replaced the mbuf */
			ip = mtod(*mp, struct ip *);
			NTOHS(ip->ip_len);
			NTOHS(ip->ip_off);
		}
	}
#endif
	return error;
}
#endif /* INET */
4699 
/*
 * IPv6 PF hook: finalize any delayed checksums for locally originated
 * outbound packets, run the packet through pf_test6_mbuf(), and
 * translate the PF verdict into an errno.  On a non-PASS verdict the
 * mbuf is freed (EHOSTUNREACH) or already consumed (EJUSTRETURN).
 */
int __attribute__((noinline))
pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
    struct ip_fw_args *fwa)
{
	int error = 0;

	/*
	 * If the packet is outbound, is originated locally, is flagged for
	 * delayed UDP/TCP checksum calculation, and is about to be processed
	 * for an interface that doesn't support the appropriate checksum
	 * offloading, then calculate the checksum here so that PF can adjust
	 * it properly.
	 */
	if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
		static const int mask = CSUM_DELAY_IPV6_DATA;
		const int flags = (*mp)->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

		if (flags & mask) {
			/*
			 * Checksum offload should not have been enabled
			 * when extension headers exist, thus 0 for optlen.
			 */
			in6_delayed_cksum(*mp);
			(*mp)->m_pkthdr.csum_flags &= ~mask;
		}
	}

	if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
			error = EHOSTUNREACH;
		} else {
			/* PF consumed the mbuf (e.g. rerouted it) */
			error = EJUSTRETURN;
		}
	}
	return error;
}
4739 
4740 int
pf_ifaddr_hook(struct ifnet * ifp)4741 pf_ifaddr_hook(struct ifnet *ifp)
4742 {
4743 	struct pfi_kif *kif = ifp->if_pf_kif;
4744 
4745 	if (kif != NULL) {
4746 		lck_rw_lock_shared(&pf_perim_lock);
4747 		lck_mtx_lock(&pf_lock);
4748 
4749 		pfi_kifaddr_update(kif);
4750 
4751 		lck_mtx_unlock(&pf_lock);
4752 		lck_rw_done(&pf_perim_lock);
4753 	}
4754 	return 0;
4755 }
4756 
4757 /*
4758  * Caller acquires dlil lock as writer (exclusive)
4759  */
4760 void
pf_ifnet_hook(struct ifnet * ifp,int attach)4761 pf_ifnet_hook(struct ifnet *ifp, int attach)
4762 {
4763 	lck_rw_lock_shared(&pf_perim_lock);
4764 	lck_mtx_lock(&pf_lock);
4765 	if (attach) {
4766 		pfi_attach_ifnet(ifp);
4767 	} else {
4768 		pfi_detach_ifnet(ifp);
4769 	}
4770 	lck_mtx_unlock(&pf_lock);
4771 	lck_rw_done(&pf_perim_lock);
4772 }
4773 
4774 static void
pf_attach_hooks(void)4775 pf_attach_hooks(void)
4776 {
4777 	ifnet_head_lock_shared();
4778 	/*
4779 	 * Check against ifnet_addrs[] before proceeding, in case this
4780 	 * is called very early on, e.g. during dlil_init() before any
4781 	 * network interface is attached.
4782 	 */
4783 	if (ifnet_addrs != NULL) {
4784 		int i;
4785 
4786 		for (i = 0; i <= if_index; i++) {
4787 			struct ifnet *ifp = ifindex2ifnet[i];
4788 			if (ifp != NULL) {
4789 				pfi_attach_ifnet(ifp);
4790 			}
4791 		}
4792 	}
4793 	ifnet_head_done();
4794 }
4795 
#if 0
/* currently unused along with pfdetach() */
/*
 * Detach PF state from every interface registered in ifindex2ifnet[],
 * under the ifnet head lock.  Mirror image of pf_attach_hooks().
 *
 * Fix: the loop index 'i' was declared *inside* the loop body after
 * being used in the for-clause; it must be declared before the loop
 * (matching pf_attach_hooks()) or this would fail to compile when
 * re-enabled.
 */
static void
pf_detach_hooks(void)
{
	ifnet_head_lock_shared();
	if (ifnet_addrs != NULL) {
		int i;

		for (i = 0; i <= if_index; i++) {
			struct ifnet *ifp = ifindex2ifnet[i];
			if (ifp != NULL && ifp->if_pf_kif != NULL) {
				pfi_detach_ifnet(ifp);
			}
		}
	}
	ifnet_head_done();
}
#endif
4815 
4816 /*
4817  * 'D' group ioctls.
4818  *
4819  * The switch statement below does nothing at runtime, as it serves as a
4820  * compile time check to ensure that all of the socket 'D' ioctls (those
4821  * in the 'D' group going thru soo_ioctl) that are made available by the
4822  * networking stack is unique.  This works as long as this routine gets
4823  * updated each time a new interface ioctl gets added.
4824  *
4825  * Any failures at compile time indicates duplicated ioctl values.
4826  */
4827 static __attribute__((unused)) void
pfioctl_cassert(void)4828 pfioctl_cassert(void)
4829 {
4830 	/*
4831 	 * This is equivalent to _CASSERT() and the compiler wouldn't
4832 	 * generate any instructions, thus for compile time only.
4833 	 */
4834 	switch ((u_long)0) {
4835 	case 0:
4836 
4837 	/* bsd/net/pfvar.h */
4838 	case DIOCSTART:
4839 	case DIOCSTOP:
4840 	case DIOCADDRULE:
4841 	case DIOCGETSTARTERS:
4842 	case DIOCGETRULES:
4843 	case DIOCGETRULE:
4844 	case DIOCSTARTREF:
4845 	case DIOCSTOPREF:
4846 	case DIOCCLRSTATES:
4847 	case DIOCGETSTATE:
4848 	case DIOCSETSTATUSIF:
4849 	case DIOCGETSTATUS:
4850 	case DIOCCLRSTATUS:
4851 	case DIOCNATLOOK:
4852 	case DIOCSETDEBUG:
4853 	case DIOCGETSTATES:
4854 	case DIOCCHANGERULE:
4855 	case DIOCINSERTRULE:
4856 	case DIOCDELETERULE:
4857 	case DIOCSETTIMEOUT:
4858 	case DIOCGETTIMEOUT:
4859 	case DIOCADDSTATE:
4860 	case DIOCCLRRULECTRS:
4861 	case DIOCGETLIMIT:
4862 	case DIOCSETLIMIT:
4863 	case DIOCKILLSTATES:
4864 	case DIOCSTARTALTQ:
4865 	case DIOCSTOPALTQ:
4866 	case DIOCADDALTQ:
4867 	case DIOCGETALTQS:
4868 	case DIOCGETALTQ:
4869 	case DIOCCHANGEALTQ:
4870 	case DIOCGETQSTATS:
4871 	case DIOCBEGINADDRS:
4872 	case DIOCADDADDR:
4873 	case DIOCGETADDRS:
4874 	case DIOCGETADDR:
4875 	case DIOCCHANGEADDR:
4876 	case DIOCGETRULESETS:
4877 	case DIOCGETRULESET:
4878 	case DIOCRCLRTABLES:
4879 	case DIOCRADDTABLES:
4880 	case DIOCRDELTABLES:
4881 	case DIOCRGETTABLES:
4882 	case DIOCRGETTSTATS:
4883 	case DIOCRCLRTSTATS:
4884 	case DIOCRCLRADDRS:
4885 	case DIOCRADDADDRS:
4886 	case DIOCRDELADDRS:
4887 	case DIOCRSETADDRS:
4888 	case DIOCRGETADDRS:
4889 	case DIOCRGETASTATS:
4890 	case DIOCRCLRASTATS:
4891 	case DIOCRTSTADDRS:
4892 	case DIOCRSETTFLAGS:
4893 	case DIOCRINADEFINE:
4894 	case DIOCOSFPFLUSH:
4895 	case DIOCOSFPADD:
4896 	case DIOCOSFPGET:
4897 	case DIOCXBEGIN:
4898 	case DIOCXCOMMIT:
4899 	case DIOCXROLLBACK:
4900 	case DIOCGETSRCNODES:
4901 	case DIOCCLRSRCNODES:
4902 	case DIOCSETHOSTID:
4903 	case DIOCIGETIFACES:
4904 	case DIOCSETIFFLAG:
4905 	case DIOCCLRIFFLAG:
4906 	case DIOCKILLSRCNODES:
4907 	case DIOCGIFSPEED:
4908 		;
4909 	}
4910 }
4911 
#if SKYWALK && defined(XNU_TARGET_OS_OSX)
/*
 * Re-evaluate the loaded PF rules for Skywalk compatibility and update
 * the net_filter_event marks accordingly.  Called after DIOCXCOMMIT
 * changes the rule set.
 */
static void
pf_process_compatibilities(void)
{
	uint32_t compat_bitmap = pf_check_compatible_rules();

	/* compatible only when no custom anchors or rules are present */
	net_filter_event_mark(NET_FILTER_EVENT_PF,
	    (compat_bitmap &
	    (PF_COMPATIBLE_FLAGS_CUSTOM_ANCHORS_PRESENT |
	    PF_COMPATIBLE_FLAGS_CUSTOM_RULES_PRESENT)) == 0);

	/* proxy mark: PF disabled, or enabled without custom rules */
	net_filter_event_mark(NET_FILTER_EVENT_PF_PRIVATE_PROXY,
	    ((compat_bitmap & PF_COMPATIBLE_FLAGS_PF_ENABLED) == 0) ||
	    (compat_bitmap & PF_COMPATIBLE_FLAGS_CUSTOM_RULES_PRESENT) == 0);
}
#endif // SKYWALK && defined(XNU_TARGET_OS_OSX)
4928