1 /*
2 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: pf_table.c,v 1.4 2008/08/27 00:01:32 jhw Exp $ */
30 /* $OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $ */
31
32 /*
33 * Copyright (c) 2002 Cedric Berger
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * - Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * - Redistributions in binary form must reproduce the above
43 * copyright notice, this list of conditions and the following
44 * disclaimer in the documentation and/or other materials provided
45 * with the distribution.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
48 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
49 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
50 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
51 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
52 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
53 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
54 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
55 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
57 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
58 * POSSIBILITY OF SUCH DAMAGE.
59 *
60 */
61
62 #include <sys/param.h>
63 #include <sys/systm.h>
64 #include <sys/socket.h>
65 #include <sys/mbuf.h>
66 #include <sys/kernel.h>
67 #include <sys/malloc.h>
68
69 #include <net/if.h>
70 #include <net/route.h>
71 #include <netinet/in.h>
72 #include <net/radix.h>
73 #include <net/pfvar.h>
74
/* Fail the enclosing ioctl handler with EINVAL if the caller passed any
 * public flag bit that is not in `oklist'. */
#define ACCEPT_FLAGS(flags, oklist) \
	do { \
		if ((flags & ~(oklist)) & \
		    PFR_FLAG_ALLMASK) \
			return (EINVAL); \
	} while (0)

/* Copy from user space on the ioctl path (PFR_FLAG_USERIOCTL), otherwise
 * from kernel memory; the kernel bcopy branch cannot fail and yields 0. */
#define COPYIN(from, to, size, flags) \
	((flags & PFR_FLAG_USERIOCTL) ? \
	copyin((from), (to), (size)) : \
	(bcopy(__unsafe_forge_bidi_indexable(void *, (from), (size)), (to), (size)), 0))

/* Mirror of COPYIN for the outbound direction. */
#define COPYOUT(from, to, size, flags) \
	((flags & PFR_FLAG_USERIOCTL) ? \
	copyout((from), (to), (size)) : \
	(bcopy((from), __unsafe_forge_bidi_indexable(void *, (to), (size)), (size)), 0))

/* Populate a struct sockaddr_in in place from an in_addr. */
#define FILLIN_SIN(sin, addr) \
	do { \
		(sin).sin_len = sizeof (sin); \
		(sin).sin_family = AF_INET; \
		(sin).sin_addr = (addr); \
	} while (0)

/* Populate a struct sockaddr_in6 in place from an in6_addr. */
#define FILLIN_SIN6(sin6, addr) \
	do { \
		(sin6).sin6_len = sizeof (sin6); \
		(sin6).sin6_family = AF_INET6; \
		(sin6).sin6_addr = (addr); \
	} while (0)

/* Exchange two values of the given type. */
#define SWAP(type, a1, a2) \
	do { \
		type tmp = a1; \
		a1 = a2; \
		a2 = tmp; \
	} while (0)

/* View the address inside a sockaddr_union as a pf_addr for family `af'. */
#define SUNION2PF(su, af) (((af) == AF_INET) ? \
	(struct pf_addr *)&(su)->sin.sin_addr : \
	(struct pf_addr *)&(su)->sin6.sin6_addr)

/* Width in bits of a full host address for the family. */
#define AF_BITS(af)		(((af) == AF_INET) ? 32 : 128)
/* True when the pfr_addr describes a network prefix, not a single host. */
#define ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
/* Same test for an in-kernel table entry. */
#define KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
/* True when the radix node is one of the tree's internal root nodes
 * (which must be skipped during walks and lookups). */
#define KENTRY_RNF_ROOT(ke) \
	((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)
126
/*
 * Context handed to pfr_walktree() while walking a table's radix trees;
 * pfrw_op selects what is done with each visited entry and the union
 * carries that operation's input/output.
 */
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,		/* clear pfrke_mark on every entry */
		PFRW_SWEEP,		/* collect entries for deletion */
		PFRW_ENQUEUE,		/* collect every entry on a workq */
		PFRW_GET_ADDRS,		/* copy addresses out to the caller */
		PFRW_GET_ASTATS,	/* copy address stats out to the caller */
		PFRW_POOL_GET,		/* fetch the n-th entry */
		PFRW_DYNADDR_UPDATE	/* refresh a dynamic address */
	} pfrw_op;
	union {
		user_addr_t pfrw1_addr;		/* PFRW_GET_ADDRS destination */
		user_addr_t pfrw1_astats;	/* PFRW_GET_ASTATS destination */
		struct pfr_kentryworkq *pfrw1_workq;	/* enqueue target */
		struct pfr_kentry *pfrw1_kentry;	/* PFRW_POOL_GET result */
		struct pfi_dynaddr *pfrw1_dyn;	/* PFRW_DYNADDR_UPDATE state */
	} pfrw_1;
	int pfrw_free;		/* remaining output slots / visit counter */
	int pfrw_flags;		/* PFR_FLAG_* of the originating request */
};
/* Convenience accessors for the union members above. */
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

/* Record an error and jump to the function's shared cleanup label. */
#define senderr(e) do { rv = (e); goto _bad; } while (0)
155
156 struct pool pfr_ktable_pl;
157 struct pool pfr_kentry_pl;
158
159 static struct pool pfr_kentry_pl2;
160 static struct sockaddr_in pfr_sin;
161 static struct sockaddr_in6 pfr_sin6;
162 static union sockaddr_union pfr_mask;
163 static struct pf_addr pfr_ffaddr;
164
165 static void pfr_copyout_addr(struct pfr_addr *, struct pfr_kentry *ke);
166 static int pfr_validate_addr(struct pfr_addr *);
167 static void pfr_enqueue_addrs(struct pfr_ktable *, struct pfr_kentryworkq *,
168 int *, int);
169 static void pfr_mark_addrs(struct pfr_ktable *);
170 static struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
171 struct pfr_addr *, int);
172 static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, boolean_t);
173 static void pfr_destroy_kentries(struct pfr_kentryworkq *);
174 static void pfr_destroy_kentry(struct pfr_kentry *);
175 static void pfr_insert_kentries(struct pfr_ktable *,
176 struct pfr_kentryworkq *, u_int64_t);
177 static void pfr_remove_kentries(struct pfr_ktable *, struct pfr_kentryworkq *);
178 static void pfr_clstats_kentries(struct pfr_kentryworkq *, u_int64_t, int);
179 static void pfr_reset_feedback(user_addr_t, int, int);
180 static void pfr_prepare_network(union sockaddr_union *, int, int);
181 static int pfr_route_kentry(struct pfr_ktable *, struct pfr_kentry *);
182 static int pfr_unroute_kentry(struct pfr_ktable *, struct pfr_kentry *);
183 static int pfr_walktree(struct radix_node *, void *);
184 static int pfr_validate_table(struct pfr_table *, int, int);
185 static int pfr_fix_anchor(char *__counted_by(size), size_t size);
186 static void pfr_commit_ktable(struct pfr_ktable *, u_int64_t);
187 static void pfr_insert_ktables(struct pfr_ktableworkq *);
188 static void pfr_insert_ktable(struct pfr_ktable *);
189 static void pfr_setflags_ktables(struct pfr_ktableworkq *);
190 static void pfr_setflags_ktable(struct pfr_ktable *, int);
191 static void pfr_clstats_ktables(struct pfr_ktableworkq *, u_int64_t, int);
192 static void pfr_clstats_ktable(struct pfr_ktable *, u_int64_t, int);
193 static struct pfr_ktable *pfr_create_ktable(struct pfr_table *, u_int64_t, int);
194 static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
195 static void pfr_destroy_ktable(struct pfr_ktable *, int);
196 static int pfr_ktable_compare(struct pfr_ktable *, struct pfr_ktable *);
197 static struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
198 static void pfr_clean_node_mask(struct pfr_ktable *, struct pfr_kentryworkq *);
199 static int pfr_table_count(struct pfr_table *, int);
200 static int pfr_skip_table(struct pfr_table *, struct pfr_ktable *, int);
201 static struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
202
203 RB_PROTOTYPE_SC(static, pfr_ktablehead, pfr_ktable, pfrkt_tree,
204 pfr_ktable_compare);
205 RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
206
207 static struct pfr_ktablehead pfr_ktables;
208 static struct pfr_table pfr_nulltable;
209 static int pfr_ktable_cnt;
210
211 void
pfr_initialize(void)212 pfr_initialize(void)
213 {
214 pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
215 "pfrktable", NULL);
216 pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
217 "pfrkentry", NULL);
218 pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0,
219 "pfrkentry2", NULL);
220
221 pfr_sin.sin_len = sizeof(pfr_sin);
222 pfr_sin.sin_family = AF_INET;
223 pfr_sin6.sin6_len = sizeof(pfr_sin6);
224 pfr_sin6.sin6_family = AF_INET6;
225
226 memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
227 }
228
229 #if 0
230 void
231 pfr_destroy(void)
232 {
233 pool_destroy(&pfr_ktable_pl);
234 pool_destroy(&pfr_kentry_pl);
235 pool_destroy(&pfr_kentry_pl2);
236 }
237 #endif
238
/*
 * Remove every address from table `tbl'.  On success *ndel holds the
 * number of removed entries.  PFR_FLAG_DUMMY counts without deleting.
 * Returns EINVAL/ESRCH/EPERM for a bad, missing, or constant table.
 */
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		/* constant tables may not be modified at runtime */
		return EPERM;
	}
	/* collect all entries; *ndel is set as a side effect */
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		/* both trees are now empty, so the counter must be zero */
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return 0;
}
268
/*
 * Add the `size' pfr_addr records at `_addr' to table `tbl'.  On success
 * *nadd holds the number of addresses actually added.  PFR_FLAG_DUMMY
 * validates without committing; PFR_FLAG_FEEDBACK writes a per-address
 * PFR_FB_* result code back into the caller's buffer.
 */
int
pfr_add_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable *kt, *tmpkt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p, *q;
	struct pfr_addr ad;
	int i, rv, xadd = 0;
	user_addr_t addr = _addr;
	u_int64_t tzero = pf_calendar_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		return EPERM;
	}
	/*
	 * Scratch table used to detect duplicates within the input batch
	 * itself, before anything is committed to the real table.
	 */
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL) {
		return ENOMEM;
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		/* p: match in the real table; q: duplicate within the batch */
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
			} else if (p == NULL) {
				ad.pfra_fback = PFR_FB_ADDED;
			} else if (p->pfrke_not != ad.pfra_not) {
				/* same address, opposite negation flag */
				ad.pfra_fback = PFR_FB_CONFLICT;
			} else {
				ad.pfra_fback = PFR_FB_NONE;
			}
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL) {
				senderr(ENOMEM);
			}
			/* stage into the scratch table first */
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK) {
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
		}
	}
	/* detach staged entries from the scratch table before commit/free */
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &workq, tzero);
	} else {
		pfr_destroy_kentries(&workq);
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	pfr_destroy_ktable(tmpkt, 0);
	return 0;
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK) {
		pfr_reset_feedback(_addr, size, flags);
	}
	pfr_destroy_ktable(tmpkt, 0);
	return rv;
}
358
/*
 * Delete the `size' pfr_addr records at `_addr' from table `tbl'.  On
 * success *ndel holds the number of addresses removed.  PFR_FLAG_DUMMY
 * and PFR_FLAG_FEEDBACK behave as in pfr_add_addrs().
 */
int
pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	user_addr_t addr = _addr;
	int i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		return EPERM;
	}
	/*
	 * There are two algorithms to choose from here.
	 * With:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*LOG(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
	/* log ends up as roughly log2(N) + 1 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1) {
		log++;
	}
	if (size > kt->pfrkt_cnt / log) {
		/* full table scan: mark everything, then unmark targets */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete, unmarking each one */
		for (i = 0; i < size; i++, addr += sizeof(ad)) {
			if (COPYIN(addr, &ad, sizeof(ad), flags)) {
				return EFAULT;
			}
			if (pfr_validate_addr(&ad)) {
				return EINVAL;
			}
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL) {
				p->pfrke_mark = 0;
			}
		}
	}
	/* second pass: collect the (unmarked) entries to delete */
	SLIST_INIT(&workq);
	for (addr = _addr, i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL) {
				ad.pfra_fback = PFR_FB_NONE;
			} else if (p->pfrke_not != ad.pfra_not) {
				/* same address, opposite negation flag */
				ad.pfra_fback = PFR_FB_CONFLICT;
			} else if (p->pfrke_mark) {
				/* already collected earlier in this batch */
				ad.pfra_fback = PFR_FB_DUPLICATE;
			} else {
				ad.pfra_fback = PFR_FB_DELETED;
			}
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			/* mark doubles as a "queued once" guard */
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK) {
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
_bad:
	if (flags & PFR_FLAG_FEEDBACK) {
		pfr_reset_feedback(_addr, size, flags);
	}
	return rv;
}
459
/*
 * Replace the contents of table `tbl' with the `size' pfr_addr records
 * at `_addr': entries not in the input are deleted, new ones are added,
 * and entries whose negation flag differs are toggled.  *nadd/*ndel/
 * *nchange report the respective counts; with PFR_FLAG_FEEDBACK, *size2
 * bounds (and receives) the feedback buffer length including deleted
 * entries appended after the input records.
 */
int
pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable *kt, *tmpkt;
	struct pfr_kentryworkq addq, delq, changeq;
	struct pfr_kentry *p, *q;
	struct pfr_addr ad;
	user_addr_t addr = _addr;
	int i, rv, xadd = 0, xdel = 0, xchange = 0;
	u_int64_t tzero = pf_calendar_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		return EPERM;
	}
	/* scratch table to catch duplicates within the input batch */
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL) {
		return ENOMEM;
	}
	/* mark everything; entries still marked at the end get deleted */
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				/* seen earlier in this same batch */
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL) {
				senderr(ENOMEM);
			}
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK) {
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
		}
	}
	/* everything still marked was absent from the input: delete it */
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	/*
	 * NOTE(review): *size2 is dereferenced here while `size2' is only
	 * NULL-checked further below — presumably FEEDBACK callers always
	 * pass a valid pointer; confirm against the callers.
	 */
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size + xdel) {
			/* buffer too small; report needed size */
			*size2 = size + xdel;
			senderr(0);
		}
		i = 0;
		addr = _addr + size;
		/* append the deleted entries after the input records */
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
			addr += sizeof(ad);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else {
		pfr_destroy_kentries(&addq);
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	if (nchange != NULL) {
		*nchange = xchange;
	}
	if ((flags & PFR_FLAG_FEEDBACK) && size2) {
		*size2 = size + xdel;
	}
	pfr_destroy_ktable(tmpkt, 0);
	return 0;
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK) {
		pfr_reset_feedback(_addr, size, flags);
	}
	pfr_destroy_ktable(tmpkt, 0);
	return rv;
}
590
/*
 * Test the `size' host addresses at `addr' against table `tbl' and write
 * a PFR_FB_* result back into each record.  *nmatch counts positive
 * (non-negated) matches.  PFR_FLAG_REPLACE rewrites each record with the
 * matching table entry.  Network prefixes are rejected with EINVAL.
 */
int
pfr_tst_addrs(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	int i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}

	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			return EFAULT;
		}
		if (pfr_validate_addr(&ad)) {
			return EINVAL;
		}
		if (ADDR_NETWORK(&ad)) {
			/* only single host addresses may be tested */
			return EINVAL;
		}
		/* non-exact lookup: longest-prefix match */
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE) {
			pfr_copyout_addr(&ad, p);
		}
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not) {
			xmatch++;
		}
		if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
			return EFAULT;
		}
	}
	if (nmatch != NULL) {
		*nmatch = xmatch;
	}
	return 0;
}
637
/*
 * Copy all addresses of table `tbl' out to `addr'.  *size is the buffer
 * capacity in records on entry and the table's entry count on return;
 * if the buffer is too small, only *size is updated and 0 is returned.
 */
int
pfr_get_addrs(struct pfr_table *tbl, user_addr_t addr, int *size,
    int flags)
{
	struct pfr_ktable *kt;
	struct pfr_walktree w;
	int rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_cnt > *size) {
		/* caller's buffer too small: report required size only */
		*size = kt->pfrkt_cnt;
		return 0;
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	/* walk both families; pfr_walktree copies entries out */
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv) {
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	}
	if (rv) {
		return rv;
	}

	/* pfrw_free must hit exactly zero if pfrkt_cnt was accurate */
	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return ENOTTY;
	}
	*size = kt->pfrkt_cnt;
	return 0;
}
681
/*
 * Copy all addresses of table `tbl' with their statistics out to `addr'.
 * Buffer-size handling matches pfr_get_addrs().  With PFR_FLAG_CLSTATS
 * the per-entry counters are reset after a successful copy.
 */
int
pfr_get_astats(struct pfr_table *tbl, user_addr_t addr, int *size,
    int flags)
{
	struct pfr_ktable *kt;
	struct pfr_walktree w;
	struct pfr_kentryworkq workq;
	int rv;
	u_int64_t tzero = pf_calendar_time_second();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	if (kt->pfrkt_cnt > *size) {
		/* caller's buffer too small: report required size only */
		*size = kt->pfrkt_cnt;
		return 0;
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv) {
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	}
	/* optional stats reset, only after a fully successful walk */
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv) {
		return rv;
	}

	/* pfrw_free must hit exactly zero if pfrkt_cnt was accurate */
	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return ENOTTY;
	}
	*size = kt->pfrkt_cnt;
	return 0;
}
732
/*
 * Reset the statistics of the `size' addresses at `_addr' in table
 * `tbl'.  *nzero counts the entries actually cleared.  PFR_FLAG_DUMMY
 * and PFR_FLAG_FEEDBACK behave as in pfr_add_addrs().
 */
int
pfr_clr_astats(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable *kt;
	struct pfr_kentryworkq workq;
	struct pfr_kentry *p;
	struct pfr_addr ad;
	user_addr_t addr = _addr;
	int i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0)) {
		return EINVAL;
	}
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return ESRCH;
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
				senderr(EFAULT);
			}
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_kentries(&workq, 0, 0);
	}
	if (nzero != NULL) {
		*nzero = xzero;
	}
	return 0;
_bad:
	if (flags & PFR_FLAG_FEEDBACK) {
		pfr_reset_feedback(_addr, size, flags);
	}
	return rv;
}
788
789 static int
pfr_validate_addr(struct pfr_addr * ad)790 pfr_validate_addr(struct pfr_addr *ad)
791 {
792 switch (ad->pfra_af) {
793 #if INET
794 case AF_INET:
795 if (ad->pfra_net > 32) {
796 return -1;
797 }
798 break;
799 #endif /* INET */
800 case AF_INET6:
801 if (ad->pfra_net > 128) {
802 return -1;
803 }
804 break;
805 default:
806 return -1;
807 }
808 uint8_t * adp = ad->pfra_af == AF_INET ? ((uint8_t *) &ad->pfra_ip4addr) : ((uint8_t *) &ad->pfra_ip6addr);
809 if ((ad->pfra_af == AF_INET &&
810 ad->pfra_net < 32) ||
811 (ad->pfra_af == AF_INET6 &&
812 ad->pfra_net < 128)) {
813 if (adp[ad->pfra_net / 8] & (0xFF >> (ad->pfra_net % 8))) {
814 return -1;
815 }
816 size_t limit = ad->pfra_af == AF_INET ? sizeof(struct in_addr) : sizeof(struct in6_addr);
817 for (size_t i = (ad->pfra_net + 7) / 8; i < limit; i++) {
818 if (adp[i]) {
819 return -1;
820 }
821 }
822 }
823 if (ad->pfra_not && ad->pfra_not != 1) {
824 return -1;
825 }
826 if (ad->pfra_fback) {
827 return -1;
828 }
829 return 0;
830 }
831
/*
 * Collect table entries onto `workq'.  With `sweep' set only entries
 * still marked (see pfr_mark_addrs) are collected; otherwise all are.
 * *naddr, if non-NULL, receives the number of entries enqueued.
 */
static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL) {
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w)) {
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
		}
	}
	if (kt->pfrkt_ip6 != NULL) {
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w)) {
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
		}
	}
	if (naddr != NULL) {
		/* pfrw_cnt was incremented once per enqueued entry */
		*naddr = w.pfrw_cnt;
	}
}
858
859 static void
pfr_mark_addrs(struct pfr_ktable * kt)860 pfr_mark_addrs(struct pfr_ktable *kt)
861 {
862 struct pfr_walktree w;
863
864 bzero(&w, sizeof(w));
865 w.pfrw_op = PFRW_MARK;
866 if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) {
867 printf("pfr_mark_addrs: IPv4 walktree failed.\n");
868 }
869 if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w)) {
870 printf("pfr_mark_addrs: IPv6 walktree failed.\n");
871 }
872 }
873
874
/*
 * Look up `ad' in table `kt'.  Networks are matched exactly against
 * (address, mask); hosts use longest-prefix matching unless `exact' is
 * set, in which case a network entry covering the host does not count.
 * Returns the matching entry or NULL.
 */
static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union sa, mask;
	struct radix_node_head *head;
	struct pfr_kentry *ke;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&sa, sizeof(sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	} else {
		return NULL;
	}
	if (ADDR_NETWORK(ad)) {
		/* exact (address, mask) lookup for network entries */
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			/* internal radix root node, not a real entry */
			ke = NULL;
		}
	} else {
		/* longest-prefix match for host addresses */
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		if (exact && ke && KENTRY_NETWORK(ke)) {
			/* covering network does not satisfy an exact lookup */
			ke = NULL;
		}
	}
	return ke;
}
911
912 static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr * ad,boolean_t intr)913 pfr_create_kentry(struct pfr_addr *ad, boolean_t intr)
914 {
915 struct pfr_kentry *__single ke;
916
917 if (intr) {
918 ke = pool_get(&pfr_kentry_pl2, PR_WAITOK);
919 } else {
920 ke = pool_get(&pfr_kentry_pl, PR_WAITOK);
921 }
922 if (ke == NULL) {
923 return NULL;
924 }
925 bzero(ke, sizeof(*ke));
926
927 if (ad->pfra_af == AF_INET) {
928 FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
929 } else if (ad->pfra_af == AF_INET6) {
930 FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
931 }
932 ke->pfrke_af = ad->pfra_af;
933 ke->pfrke_net = ad->pfra_net;
934 ke->pfrke_not = ad->pfra_not;
935 ke->pfrke_intrpool = (u_int8_t)intr;
936 return ke;
937 }
938
939 static void
pfr_destroy_kentries(struct pfr_kentryworkq * workq)940 pfr_destroy_kentries(struct pfr_kentryworkq *workq)
941 {
942 struct pfr_kentry *p, *q;
943
944 for (p = SLIST_FIRST(workq); p != NULL; p = q) {
945 q = SLIST_NEXT(p, pfrke_workq);
946 pfr_destroy_kentry(p);
947 }
948 }
949
950 static void
pfr_destroy_kentry(struct pfr_kentry * ke)951 pfr_destroy_kentry(struct pfr_kentry *ke)
952 {
953 if (ke->pfrke_intrpool) {
954 pool_put(&pfr_kentry_pl2, ke);
955 } else {
956 pool_put(&pfr_kentry_pl, ke);
957 }
958 }
959
/*
 * Route every entry on `workq' into table `kt' and stamp it with
 * `tzero'.  Stops at the first routing failure; the table counter is
 * advanced only by the number of entries actually inserted.
 */
static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, u_int64_t tzero)
{
	struct pfr_kentry *p;
	int rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}
979
980 int
pfr_insert_kentry(struct pfr_ktable * kt,struct pfr_addr * ad,u_int64_t tzero)981 pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, u_int64_t tzero)
982 {
983 struct pfr_kentry *p;
984 int rv;
985
986 p = pfr_lookup_addr(kt, ad, 1);
987 if (p != NULL) {
988 return 0;
989 }
990 p = pfr_create_kentry(ad, TRUE);
991 if (p == NULL) {
992 return EINVAL;
993 }
994
995 rv = pfr_route_kentry(kt, p);
996 if (rv) {
997 return rv;
998 }
999
1000 p->pfrke_tzero = tzero;
1001 kt->pfrkt_cnt++;
1002
1003 return 0;
1004 }
1005
/*
 * Unroute every entry on `workq' from table `kt', decrement the table
 * counter accordingly, then free the entries.
 */
static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry *p;
	int n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	/* entries are off the tree now; release their memory */
	pfr_destroy_kentries(workq);
}
1020
1021 static void
pfr_clean_node_mask(struct pfr_ktable * kt,struct pfr_kentryworkq * workq)1022 pfr_clean_node_mask(struct pfr_ktable *kt,
1023 struct pfr_kentryworkq *workq)
1024 {
1025 struct pfr_kentry *p;
1026
1027 SLIST_FOREACH(p, workq, pfrke_workq)
1028 pfr_unroute_kentry(kt, p);
1029 }
1030
/*
 * Reset the packet/byte counters of every entry on `workq' and stamp
 * them with `tzero'.  With `negchange' set each entry's negation flag
 * is also toggled (INVERT_NEG_FLAG path of pfr_set_addrs).
 */
static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero,
    int negchange)
{
	struct pfr_kentry *p;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange) {
			p->pfrke_not = !p->pfrke_not;
		}
		bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
		p->pfrke_tzero = tzero;
	}
}
1048
/*
 * Clear the pfra_fback field of the `size' records at `addr' — used on
 * error paths so the caller does not see stale per-address feedback
 * from a partially processed batch.  Copy errors abort the loop
 * silently (best effort).
 */
static void
pfr_reset_feedback(user_addr_t addr, int size, int flags)
{
	struct pfr_addr ad;
	int i;

	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			break;
		}
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr, sizeof(ad), flags)) {
			break;
		}
	}
}
1065
1066 static void
pfr_prepare_network(union sockaddr_union * sa,int af,int net)1067 pfr_prepare_network(union sockaddr_union *sa, int af, int net)
1068 {
1069 int i;
1070
1071 bzero(sa, sizeof(*sa));
1072 if (af == AF_INET) {
1073 sa->sin.sin_len = sizeof(sa->sin);
1074 sa->sin.sin_family = AF_INET;
1075 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32 - net)) : 0;
1076 } else if (af == AF_INET6) {
1077 sa->sin6.sin6_len = sizeof(sa->sin6);
1078 sa->sin6.sin6_family = AF_INET6;
1079 for (i = 0; i < 4; i++) {
1080 if (net <= 32) {
1081 sa->sin6.sin6_addr.s6_addr32[i] =
1082 net ? htonl(-1 << (32 - net)) : 0;
1083 break;
1084 }
1085 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
1086 net -= 32;
1087 }
1088 }
1089 }
1090
/*
 * Insert entry `ke' into the radix tree of its address family in table
 * `kt'.  Network entries carry an explicit mask; host entries use a
 * NULL mask.  Returns 0 on success, -1 on unknown family or when
 * rn_addroute() rejects the node (e.g. duplicate key).
 */
static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union mask;
	struct radix_node *rn;
	struct radix_node_head *head;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	/* node storage must be clean before rn_addroute links it in */
	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	if (ke->pfrke_af == AF_INET) {
		head = kt->pfrkt_ip4;
	} else if (ke->pfrke_af == AF_INET6) {
		head = kt->pfrkt_ip6;
	} else {
		return -1;
	}

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else {
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
	}

	return rn == NULL ? -1 : 0;
}
1118
1119 static int
pfr_unroute_kentry(struct pfr_ktable * kt,struct pfr_kentry * ke)1120 pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1121 {
1122 union sockaddr_union mask;
1123 struct radix_node *rn;
1124 struct radix_node_head *head;
1125
1126 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
1127
1128 if (ke->pfrke_af == AF_INET) {
1129 head = kt->pfrkt_ip4;
1130 } else if (ke->pfrke_af == AF_INET6) {
1131 head = kt->pfrkt_ip6;
1132 } else {
1133 return -1;
1134 }
1135
1136 if (KENTRY_NETWORK(ke)) {
1137 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1138 rn = rn_delete(&ke->pfrke_sa, &mask, head);
1139 } else {
1140 rn = rn_delete(&ke->pfrke_sa, NULL, head);
1141 }
1142
1143 if (rn == NULL) {
1144 printf("pfr_unroute_kentry: delete failed.\n");
1145 return -1;
1146 }
1147 return 0;
1148 }
1149
1150 static void
pfr_copyout_addr(struct pfr_addr * ad,struct pfr_kentry * ke)1151 pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
1152 {
1153 bzero(ad, sizeof(*ad));
1154 if (ke == NULL) {
1155 return;
1156 }
1157 ad->pfra_af = ke->pfrke_af;
1158 ad->pfra_net = ke->pfrke_net;
1159 ad->pfra_not = ke->pfrke_not;
1160 if (ad->pfra_af == AF_INET) {
1161 ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
1162 } else if (ad->pfra_af == AF_INET6) {
1163 ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
1164 }
1165 }
1166
/*
 * Radix-tree walk callback.  Dispatches on w->pfrw_op:
 *  - PFRW_MARK:   clear each entry's mark (pairs with a later SWEEP)
 *  - PFRW_SWEEP:  enqueue entries left unmarked by MARK
 *  - PFRW_ENQUEUE: enqueue every entry onto w->pfrw_workq
 *  - PFRW_GET_ADDRS / PFRW_GET_ASTATS: copy addresses / address stats
 *    out to the user buffer, bounded by w->pfrw_free
 *  - PFRW_POOL_GET: select the w->pfrw_cnt-th non-negated entry
 *  - PFRW_DYNADDR_UPDATE: record the first v4/v6 entry into the dynaddr
 * Returns 0 to continue the walk, non-zero to stop it (EFAULT on a
 * failed copyout, 1 when a pool entry was found).  pf_lock held.
 */
static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry *__single ke = (struct pfr_kentry *)rn;
	struct pfr_walktree *__single w = arg;
	int flags = w->pfrw_flags;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	VERIFY(ke != NULL);
	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark) {
			break;
		}
		OS_FALLTHROUGH;
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			/* NOTE(review): plain copyout here (no COPYIN/COPYOUT
			 * flags dispatch as in GET_ASTATS below) — presumably
			 * this op only ever runs on user ioctl paths. */
			if (copyout(&ad, w->pfrw_addr, sizeof(ad))) {
				return EFAULT;
			}
			w->pfrw_addr += sizeof(ad);
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			bzero(&as, sizeof(as));

			pfr_copyout_addr(&as.pfras_a, ke);

			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof(as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof(as.pfras_bytes));
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags)) {
				return EFAULT;
			}
			w->pfrw_astats += sizeof(as);
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not) {
			break; /* negative entries are ignored */
		}
		if (!w->pfrw_cnt--) {
			/* this is the requested n-th entry; stop the walk */
			w->pfrw_kentry = ke;
			return 1; /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			/* only the first IPv4 entry updates the dynaddr */
			if (w->pfrw_dyn->pfid_acnt4++ > 0) {
				break;
			}
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
				&ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
				&pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			/* only the first IPv6 entry updates the dynaddr */
			if (w->pfrw_dyn->pfid_acnt6++ > 0) {
				break;
			}
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
				&ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
				&pfr_mask, AF_INET6);
		}
		break;
	}
	return 0;
}
1254
/*
 * ioctl backend: deactivate every active table matching `filter`,
 * except those in the reserved anchor.  *ndel receives the number of
 * tables queued for deactivation.  With PFR_FLAG_DUMMY the operation is
 * only simulated.  Returns 0 or an errno value.  pf_lock held.
 */
int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p;
	int xdel = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor, sizeof(filter->pfrt_anchor))) {
		return EINVAL;
	}
	if (pfr_table_count(filter, flags) < 0) {
		return ENOENT;
	}

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags)) {
			continue;
		}
		/* tables in the reserved anchor are never cleared */
		if (strlcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR, sizeof(p->pfrkt_anchor)) == 0) {
			continue;
		}
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			continue;
		}
		/* stage the flag change; applied by pfr_setflags_ktables() */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}
1296
/*
 * ioctl backend: add `size` tables read from the buffer `tbl`.  New
 * tables (plus, for anchored tables, their root table) are created and
 * queued on addq; existing-but-inactive tables are queued on changeq
 * for reactivation with the caller's user flags.  *nadd receives the
 * number of tables added or reactivated.  PFR_FLAG_DUMMY simulates
 * without committing.  Returns 0 or an errno value.  pf_lock held.
 */
int
pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq addq, changeq;
	struct pfr_ktable *p, *q, *r, key;
	int i, rv, xadd = 0;
	u_int64_t tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
			senderr(EFAULT);
		}
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL)) {
			senderr(EINVAL);
		}
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			/* unknown table: create it */
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL) {
				senderr(ENOMEM);
			}
			/* drop duplicates already queued in this request */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q)) {
					pfr_destroy_ktable(p, 0);
					goto _skip;
				}
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0]) {
				goto _skip;
			}

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			/* the root may already be queued by a prior iteration */
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL) {
				senderr(ENOMEM);
			}
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* table exists but is inactive: stage reactivation */
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
			if (!pfr_ktable_compare(&key, q)) {
				goto _skip;
			}
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else {
		pfr_destroy_ktables(&addq, 0);
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	return 0;
_bad:
	/* senderr() jumps here: tear down anything created so far */
	pfr_destroy_ktables(&addq, 0);
	return rv;
}
1385
/*
 * ioctl backend: deactivate the `size` tables named in the buffer
 * `tbl`.  *ndel receives the number of tables queued.  PFR_FLAG_DUMMY
 * simulates without committing.  Returns 0 or an errno value.
 * pf_lock held.
 */
int
pfr_del_tables(user_addr_t tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, *q, key;
	int i, xdel = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
			return EFAULT;
		}
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL)) {
			return EINVAL;
		}
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* ignore duplicates within this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
			if (!pfr_ktable_compare(p, q)) {
				goto _skip;
			}
			/* stage the flag change for pfr_setflags_ktables() */
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
		;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}
1428
/*
 * ioctl backend: copy the pfr_table headers of all tables matching
 * `filter` out to `tbl`.  On entry *size is the caller's capacity in
 * tables; when too small it is updated to the required count and 0 is
 * returned without copying.  Returns 0 or an errno value.
 */
int
pfr_get_tables(struct pfr_table *filter, user_addr_t tbl, int *size,
    int flags)
{
	struct pfr_ktable *p;
	int n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor, sizeof(filter->pfrt_anchor))) {
		return EINVAL;
	}
	n = nn = pfr_table_count(filter, flags);
	if (n < 0) {
		return ENOENT;
	}
	if (n > *size) {
		/* buffer too small: report required size only */
		*size = n;
		return 0;
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags)) {
			continue;
		}
		if (n-- <= 0) {
			continue;
		}
		if (COPYOUT(&p->pfrkt_t, tbl, sizeof(p->pfrkt_t), flags)) {
			return EFAULT;
		}
		tbl += sizeof(p->pfrkt_t);
	}
	if (n) {
		/* walk produced a different count than pfr_table_count() */
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return ENOTTY;
	}
	*size = nn;
	return 0;
}
1467
/*
 * ioctl backend: copy the statistics records of all tables matching
 * `filter` out to `tbl`.  *size works like in pfr_get_tables().  With
 * PFR_FLAG_CLSTATS the copied tables' stats are cleared afterwards.
 * Returns 0 or an errno value.  pf_lock held.
 */
int
pfr_get_tstats(struct pfr_table *filter, user_addr_t tbl, int *size,
    int flags)
{
	struct pfr_ktable *p;
	struct pfr_ktableworkq workq;
	int n, nn;
	u_int64_t tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor, sizeof(filter->pfrt_anchor))) {
		return EINVAL;
	}
	n = nn = pfr_table_count(filter, flags);
	if (n < 0) {
		return ENOENT;
	}
	if (n > *size) {
		/* buffer too small: report required size only */
		*size = n;
		return 0;
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags)) {
			continue;
		}
		if (n-- <= 0) {
			continue;
		}
		if (COPYOUT(&p->pfrkt_ts, tbl, sizeof(p->pfrkt_ts), flags)) {
			return EFAULT;
		}
		tbl += sizeof(p->pfrkt_ts);
		/* remember the table in case stats must be cleared below */
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS) {
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	}
	if (n) {
		/* walk produced a different count than pfr_table_count() */
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return ENOTTY;
	}
	*size = nn;
	return 0;
}
1517
/*
 * ioctl backend: zero the statistics of the `size` tables named in
 * `tbl`.  With PFR_FLAG_ADDRSTOO, per-address statistics are cleared as
 * well.  *nzero receives the number of tables found.  PFR_FLAG_DUMMY
 * simulates.  Returns 0 or an errno value.  pf_lock held.
 */
int
pfr_clr_tstats(user_addr_t tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, key;
	int i, xzero = 0;
	u_int64_t tzero = pf_calendar_time_second();

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
			return EFAULT;
		}
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0)) {
			return EINVAL;
		}
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	}
	if (nzero != NULL) {
		*nzero = xzero;
	}
	return 0;
}
1553
/*
 * ioctl backend: set `setflag` and clear `clrflag` (both restricted to
 * PFR_TFLAG_USRMASK, and disjoint) on the `size` tables named in `tbl`.
 * *nchange counts modified tables; *ndel counts tables that will be
 * destroyed because clearing PERSIST leaves them unreferenced.
 * PFR_FLAG_DUMMY simulates.  Returns 0 or an errno value.  pf_lock held.
 */
int
pfr_set_tflags(user_addr_t tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *p, *q, key;
	int i, xchange = 0, xdel = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag)) {
		return EINVAL;
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) {
			return EFAULT;
		}
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL)) {
			return EINVAL;
		}
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* stage the new flag word; applied below */
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags) {
				goto _skip;
			}
			/* ignore duplicates within this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
			if (!pfr_ktable_compare(p, q)) {
				goto _skip;
			}
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			/* dropping PERSIST on an unreferenced table deletes it */
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) {
				xdel++;
			} else {
				xchange++;
			}
		}
_skip:
		;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (nchange != NULL) {
		*nchange = xchange;
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}
1614
/*
 * ioctl backend: open a table transaction on the ruleset anchored at
 * trs->pfrt_anchor (created on demand).  Tables left INACTIVE by a
 * previous uncommitted transaction are cleaned up first; *ndel receives
 * their count.  *ticket receives the new transaction ticket.  Returns
 * 0 or an errno value.  pf_lock held.
 */
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *__single p;
	struct pf_ruleset *__single rs;
	int xdel = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(__unsafe_null_terminated_from_indexable(trs->pfrt_anchor));
	if (rs == NULL) {
		return ENOMEM;
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0)) {
			continue;
		}
		/* stale INACTIVE leftovers from an aborted transaction */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL) {
			*ticket = ++rs->tticket;
		}
		rs->topen = 1;
	} else {
		/* dummy run: drop the ruleset reference taken above */
		pf_release_ruleset(rs);
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
	return 0;
}
1654
/*
 * ioctl backend: define table `tbl` — and, with PFR_FLAG_ADDRSTOO,
 * the `size` addresses at `addr` — inside the open transaction
 * identified by `ticket`.  The addresses are staged in a shadow table
 * that replaces the live contents on commit.  *nadd counts tables,
 * *naddr counts staged addresses.  Returns 0 or an errno value.
 * pf_lock held.
 */
int
pfr_ina_define(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq tableq;
	struct pfr_kentryworkq addrq;
	struct pfr_ktable *__single kt, *__single rt, *__single shadow, key;
	struct pfr_kentry *__single p;
	struct pfr_addr ad;
	struct pf_ruleset *__single rs;
	int i, rv, xadd = 0, xaddr = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO)) {
		return EINVAL;
	}
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL)) {
		return EINVAL;
	}
	/* the transaction must be open and the ticket current */
	rs = pf_find_ruleset(__unsafe_null_terminated_from_indexable(tbl->pfrt_anchor));
	if (rs == NULL || !rs->topen || ticket != rs->tticket) {
		return EBUSY;
	}
	pf_release_ruleset(rs);
	rs = NULL;
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)(void *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL) {
			return ENOMEM;
		}
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0]) {
			goto _skip;
		}

		/* find or create root table */
		bzero(&key, sizeof(key));
		strbufcpy(key.pfrkt_name, tbl->pfrt_name);
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return ENOMEM;
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE)) {
		xadd++;
	}
_skip:
	/* stage all addresses in a fresh shadow table */
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return ENOMEM;
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++, addr += sizeof(ad)) {
		if (COPYIN(addr, &ad, sizeof(ad), flags)) {
			senderr(EFAULT);
		}
		if (pfr_validate_addr(&ad)) {
			senderr(EINVAL);
		}
		/* skip duplicates already staged */
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL) {
			continue;
		}
		p = pfr_create_kentry(&ad, FALSE);
		if (p == NULL) {
			senderr(ENOMEM);
		}
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* replace any previously staged shadow for this table */
		if (kt->pfrkt_shadow != NULL) {
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		}
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	if (naddr != NULL) {
		*naddr = xaddr;
	}
	return 0;
_bad:
	/* senderr() jumps here: undo all staging */
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return rv;
}
1771
/*
 * ioctl backend: abort the open transaction on the given anchor by
 * dropping the INACTIVE flag (and any shadow table) of all affected
 * tables.  A missing ruleset or stale ticket is silently ignored.
 * *ndel receives the number of tables rolled back.  Always returns 0.
 * pf_lock held.
 */
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq workq;
	struct pfr_ktable *__single p;
	struct pf_ruleset *__single rs;
	int xdel = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(__unsafe_null_terminated_from_indexable(trs->pfrt_anchor));
	if (rs == NULL || !rs->topen || ticket != rs->tticket) {
		goto done;
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0)) {
			continue;
		}
		/* clearing INACTIVE discards the staged shadow in
		 * pfr_setflags_ktable() */
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
	}
	if (ndel != NULL) {
		*ndel = xdel;
	}
done:
	if (rs) {
		pf_release_ruleset(rs);
		rs = NULL;
	}
	return 0;
}
1811
/*
 * ioctl backend: commit the open transaction on the given anchor,
 * swapping each affected table's staged shadow contents live via
 * pfr_commit_ktable().  *nadd counts newly-activated tables, *nchange
 * counts tables whose contents were replaced.  Returns EBUSY on a
 * closed transaction or ticket mismatch, else 0.  pf_lock held.
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable *__single p, *__single q;
	struct pfr_ktableworkq workq;
	struct pf_ruleset *__single rs;
	int xadd = 0, xchange = 0;
	u_int64_t tzero = pf_calendar_time_second();
	int err = 0;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(__unsafe_null_terminated_from_indexable(trs->pfrt_anchor));
	if (rs == NULL || !rs->topen || ticket != rs->tticket) {
		err = EBUSY;
		goto done;
	}

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0)) {
			continue;
		}
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) {
			xchange++;
		} else {
			xadd++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		/* save the next pointer: pfr_commit_ktable() may re-flag p */
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
	}
	if (nadd != NULL) {
		*nadd = xadd;
	}
	if (nchange != NULL) {
		*nchange = xchange;
	}

done:
	if (rs != NULL) {
		pf_release_ruleset(rs);
		rs = NULL;
	}
	return err;
}
1867
/*
 * Make a table's staged shadow contents live.  Three cases:
 *  - shadow has NO_ADDRESSES (table defined without an address list):
 *    only reset stats if the table was not yet active;
 *  - table is ACTIVE: merge entry-by-entry — add new entries, delete
 *    entries absent from the shadow, flip negation on changed ones;
 *  - table is inactive: simply swap radix heads with the shadow.
 * The shadow is then destroyed and the table's flags recomputed to
 * ACTIVE (dropping INACTIVE).  pf_lock held.
 */
static void
pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero)
{
	struct pfr_ktable *__single shadow = kt->pfrkt_shadow;
	int nflags;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			pfr_clstats_ktable(kt, tzero, 1);
		}
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry *p, *q, *next;
		struct pfr_addr ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		/* mark live entries; those still unmarked after the merge
		 * loop were not in the shadow and get deleted */
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
			next = SLIST_NEXT(p, pfrke_workq); /* XXX */
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/* already present: keep the live entry,
				 * flag a negation change if any */
				if (q->pfrke_not != p->pfrke_not) {
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				}
				q->pfrke_mark = 1;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) &
	    ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
1930
1931 void
pfr_table_copyin_cleanup(struct pfr_table * tbl)1932 pfr_table_copyin_cleanup(struct pfr_table *tbl)
1933 {
1934 tbl->pfrt_anchor[sizeof(tbl->pfrt_anchor) - 1] = '\0';
1935 tbl->pfrt_name[sizeof(tbl->pfrt_name) - 1] = '\0';
1936 }
1937
/*
 * Validate a pfr_table copied in from user space: the name must be
 * non-empty, NUL-terminated and zero-padded to its full width; the
 * anchor must be well-formed (see pfr_fix_anchor()); no flags outside
 * `allowedflags` may be set.  With `no_reserved`, tables in the
 * reserved anchor are rejected.  Returns 0 if valid, -1 otherwise.
 */
static int
pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
{
	size_t i;

	if (!tbl->pfrt_name[0]) {
		return -1;
	}
	if (no_reserved && strlcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR, sizeof(tbl->pfrt_anchor)) == 0) {
		return -1;
	}
	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE - 1]) {
		return -1;
	}
	/* reject garbage bytes after the terminating NUL */
	for (i = strbuflen(tbl->pfrt_name, sizeof(tbl->pfrt_name)); i < PF_TABLE_NAME_SIZE; i++) {
		if (tbl->pfrt_name[i]) {
			return -1;
		}
	}
	if (pfr_fix_anchor(tbl->pfrt_anchor, sizeof(tbl->pfrt_anchor))) {
		return -1;
	}
	if (tbl->pfrt_flags & ~allowedflags) {
		return -1;
	}
	return 0;
}
1965
1966 /*
1967 * Rewrite anchors referenced by tables to remove slashes
1968 * and check for validity.
1969 */
1970 static int
pfr_fix_anchor(char * __counted_by (size)anchor,size_t size)1971 pfr_fix_anchor(char *__counted_by(size)anchor, size_t size)
1972 {
1973 size_t i;
1974
1975 if (anchor[0] == '/') {
1976 char *path;
1977 int off;
1978
1979 path = anchor;
1980 off = 1;
1981 while (*++path == '/') {
1982 off++;
1983 }
1984 bcopy(path, anchor, size - off);
1985 memset(anchor + size - off, 0, off);
1986 }
1987 if (anchor[size - 1]) {
1988 return -1;
1989 }
1990 for (i = strnlen(anchor, size); i < size; i++) {
1991 if (anchor[i]) {
1992 return -1;
1993 }
1994 }
1995 return 0;
1996 }
1997
/*
 * Return the number of tables visible through `filter`: all tables
 * (PFR_FLAG_ALLRSETS), the table count of one anchored ruleset, or the
 * main ruleset's table count.  Returns -1 when the anchored ruleset
 * does not exist.
 */
static int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *__single rs;

	if (flags & PFR_FLAG_ALLRSETS) {
		return pfr_ktable_cnt;
	}
	if (filter->pfrt_anchor[0]) {
		int r = -1;
		rs = pf_find_ruleset(__unsafe_null_terminated_from_indexable(filter->pfrt_anchor));
		r = (rs != NULL) ? rs->tables : -1;
		/* drop the lookup reference before returning */
		if (rs) {
			pf_release_ruleset(rs);
			rs = NULL;
		}
		return r;
	}
	return pf_main_ruleset.tables;
}
2018
2019 static int
pfr_skip_table(struct pfr_table * filter,struct pfr_ktable * kt,int flags)2020 pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
2021 {
2022 if (flags & PFR_FLAG_ALLRSETS) {
2023 return 0;
2024 }
2025 if (strbufcmp(filter->pfrt_anchor, kt->pfrkt_anchor)) {
2026 return 1;
2027 }
2028 return 0;
2029 }
2030
2031 static void
pfr_insert_ktables(struct pfr_ktableworkq * workq)2032 pfr_insert_ktables(struct pfr_ktableworkq *workq)
2033 {
2034 struct pfr_ktable *__single p;
2035
2036 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2037
2038 SLIST_FOREACH(p, workq, pfrkt_workq)
2039 pfr_insert_ktable(p);
2040 }
2041
2042 static void
pfr_insert_ktable(struct pfr_ktable * kt)2043 pfr_insert_ktable(struct pfr_ktable *kt)
2044 {
2045 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2046
2047 RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
2048 pfr_ktable_cnt++;
2049 if (kt->pfrkt_root != NULL) {
2050 if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) {
2051 pfr_setflags_ktable(kt->pfrkt_root,
2052 kt->pfrkt_root->pfrkt_flags | PFR_TFLAG_REFDANCHOR);
2053 }
2054 }
2055 }
2056
2057 static void
pfr_setflags_ktables(struct pfr_ktableworkq * workq)2058 pfr_setflags_ktables(struct pfr_ktableworkq *workq)
2059 {
2060 struct pfr_ktable *__single p, *__single q;
2061
2062 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2063
2064 for (p = SLIST_FIRST(workq); p; p = q) {
2065 q = SLIST_NEXT(p, pfrkt_workq);
2066 pfr_setflags_ktable(p, p->pfrkt_nflags);
2067 }
2068 }
2069
/*
 * Apply a new flag word to table `kt`, performing the resulting state
 * transitions: a table with no remaining references (REFERENCED,
 * REFDANCHOR or PERSIST) loses ACTIVE; a table with no SETMASK flags
 * left is removed from the tree and destroyed, recursively dropping
 * its root table's anchor refcount; deactivating a populated table
 * flushes its addresses; clearing INACTIVE discards any staged shadow.
 * Caller must hold pf_lock.
 */
static void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq addrq;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_REFDANCHOR) &&
	    !(newf & PFR_TFLAG_PERSIST)) {
		/* nothing keeps the table alive */
		newf &= ~PFR_TFLAG_ACTIVE;
	}
	if (!(newf & PFR_TFLAG_ACTIVE)) {
		newf &= ~PFR_TFLAG_USRMASK;
	}
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* table is dead: unlink, release root anchor ref, free */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL) {
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) {
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
				    ~PFR_TFLAG_REFDANCHOR);
			}
		}
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		/* deactivation flushes all addresses */
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		/* leaving a transaction discards the staged shadow */
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
2108
2109 static void
pfr_clstats_ktables(struct pfr_ktableworkq * workq,u_int64_t tzero,int recurse)2110 pfr_clstats_ktables(struct pfr_ktableworkq *workq, u_int64_t tzero, int recurse)
2111 {
2112 struct pfr_ktable *__single p;
2113
2114 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2115
2116 SLIST_FOREACH(p, workq, pfrkt_workq)
2117 pfr_clstats_ktable(p, tzero, recurse);
2118 }
2119
2120 static void
pfr_clstats_ktable(struct pfr_ktable * kt,u_int64_t tzero,int recurse)2121 pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse)
2122 {
2123 struct pfr_kentryworkq addrq;
2124
2125 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2126
2127 if (recurse) {
2128 pfr_enqueue_addrs(kt, &addrq, NULL, 0);
2129 pfr_clstats_kentries(&addrq, tzero, 0);
2130 }
2131 bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
2132 bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
2133 kt->pfrkt_match = kt->pfrkt_nomatch = 0;
2134 kt->pfrkt_tzero = tzero;
2135 }
2136
/*
 * Allocate and initialize a new kernel table from `tbl`, including its
 * IPv4 and IPv6 radix heads.  With `attachruleset`, the table is
 * attached to (and counted in) its anchor's ruleset, creating the
 * ruleset on demand.  Returns NULL on allocation failure.  Caller must
 * hold pf_lock.
 */
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset)
{
	struct pfr_ktable *__single kt;
	struct pf_ruleset *__single rs;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	kt = pool_get(&pfr_ktable_pl, PR_WAITOK);
	if (kt == NULL) {
		return NULL;
	}
	bzero(kt, sizeof(*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(__unsafe_null_terminated_from_indexable(tbl->pfrt_anchor));
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return NULL;
		}
		/* pfr_destroy_ktable() releases this ruleset reference */
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return NULL;
	}
	kt->pfrkt_tzero = tzero;

	return kt;
}
2173
2174 static void
pfr_destroy_ktables(struct pfr_ktableworkq * workq,int flushaddr)2175 pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2176 {
2177 struct pfr_ktable *__single p, *__single q;
2178
2179 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2180
2181 for (p = SLIST_FIRST(workq); p; p = q) {
2182 q = SLIST_NEXT(p, pfrkt_workq);
2183 pfr_destroy_ktable(p, flushaddr);
2184 }
2185 }
2186
/*
 * Free a kernel table: with `flushaddr`, first destroy all of its
 * address entries; then release the radix heads, recursively destroy
 * any staged shadow table, drop the ruleset reference (and table
 * count), and return the table to its pool.  Caller must hold pf_lock.
 */
static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq addrq;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL) {
		zfree(radix_node_head_zone, kt->pfrkt_ip4);
	}
	if (kt->pfrkt_ip6 != NULL) {
		zfree(radix_node_head_zone, kt->pfrkt_ip6);
	}
	if (kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	}
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_release_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
2214
2215 static int
pfr_ktable_compare(struct pfr_ktable * p,struct pfr_ktable * q)2216 pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2217 {
2218 int d;
2219
2220 if ((d = strbufcmp(p->pfrkt_name, q->pfrkt_name))) {
2221 return d;
2222 }
2223 return strbufcmp(p->pfrkt_anchor, q->pfrkt_anchor);
2224 }
2225
2226 static struct pfr_ktable *
pfr_lookup_table(struct pfr_table * tbl)2227 pfr_lookup_table(struct pfr_table *tbl)
2228 {
2229 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2230
2231 /* struct pfr_ktable start like a struct pfr_table */
2232 return RB_FIND(pfr_ktablehead, &pfr_ktables,
2233 (struct pfr_ktable *)(void *)tbl);
2234 }
2235
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry *__single ke = NULL;
	int match;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	/* an inactive table defers to its root table, if it has one */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
		kt = kt->pfrkt_root;
	}
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return 0;
	}

	/*
	 * Longest-prefix lookup of 'a' in the table's radix tree.
	 * pfr_sin/pfr_sin6 are global scratch sockaddrs; using them is
	 * safe only because pf_lock is held (asserted above).
	 */
	switch (af) {
#if INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			/* skip the radix tree's internal root nodes */
			ke = NULL;
		}
		break;
#endif /* INET */
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		break;
	}
	/* a negated entry ("!addr") counts as an explicit non-match */
	match = (ke && !ke->pfrke_not);
	if (match) {
		kt->pfrkt_match++;
	} else {
		kt->pfrkt_nomatch++;
	}
	return match;
}
2277
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry *__single ke = NULL;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	/* an inactive table defers to its root table, if it has one */
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
		kt = kt->pfrkt_root;
	}
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		return;
	}

	/*
	 * Re-lookup 'a' so the matching entry's per-entry counters can be
	 * charged.  Uses the global scratch sockaddrs, safe under pf_lock.
	 */
	switch (af) {
#if INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			/* skip the radix tree's internal root nodes */
			ke = NULL;
		}
		break;
#endif /* INET */
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke)) {
			ke = NULL;
		}
		break;
	default:
		;
	}
	/*
	 * The fresh lookup should agree with the match/negate outcome the
	 * rule recorded in 'notrule'.  If it doesn't (table changed since
	 * the rule matched), account the traffic under XPASS instead.
	 */
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS) {
			printf("pfr_update_stats: assertion failed.\n");
		}
		op_pass = PFR_OP_XPASS;
	}
	/* table-level counters always; entry-level only for a real match */
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}
2326
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char const *name)
{
	struct pfr_ktable *__single kt, *__single rt;
	struct pfr_table tbl;
	struct pf_anchor *__single ac = rs->anchor;

	LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);

	/* build a lookup key from the name and the ruleset's anchor path */
	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL) {
		strbufcpy(tbl.pfrt_anchor, ac->path);
	}
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, pf_calendar_time_second(), 1);
		if (kt == NULL) {
			return NULL;
		}
		if (ac != NULL) {
			/*
			 * An anchored table is backed by a root table of the
			 * same name in the main (empty-anchor) namespace;
			 * create that root on demand and link it.
			 */
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return NULL;
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	/* first rule reference flags the table as REFERENCED */
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++) {
		pfr_setflags_ktable(kt, kt->pfrkt_flags | PFR_TFLAG_REFERENCED);
	}
	return kt;
}
2367
2368 void
pfr_detach_table(struct pfr_ktable * kt)2369 pfr_detach_table(struct pfr_ktable *kt)
2370 {
2371 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2372
2373 if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0) {
2374 printf("pfr_detach_table: refcount = %d.\n",
2375 kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2376 } else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) {
2377 pfr_setflags_ktable(kt, kt->pfrkt_flags & ~PFR_TFLAG_REFERENCED);
2378 }
2379 }
2380
2381 int
pfr_pool_get(struct pfr_ktable * kt,int * pidx,struct pf_addr * counter,struct pf_addr ** raddr,struct pf_addr ** rmask,sa_family_t af)2382 pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
2383 struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
2384 {
2385 struct pfr_kentry *__single ke, *__single ke2;
2386 struct pf_addr *__single addr;
2387 union sockaddr_union mask;
2388 int idx = -1, use_counter = 0;
2389
2390 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2391
2392 if (af == AF_INET) {
2393 addr = (struct pf_addr *)&pfr_sin.sin_addr;
2394 } else if (af == AF_INET6) {
2395 addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
2396 } else {
2397 return -1;
2398 }
2399
2400 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
2401 kt = kt->pfrkt_root;
2402 }
2403 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
2404 return -1;
2405 }
2406
2407 if (pidx != NULL) {
2408 idx = *pidx;
2409 }
2410 if (counter != NULL && idx >= 0) {
2411 use_counter = 1;
2412 }
2413 if (idx < 0) {
2414 idx = 0;
2415 }
2416
2417 _next_block:
2418 ke = pfr_kentry_byidx(kt, idx, af);
2419 if (ke == NULL) {
2420 kt->pfrkt_nomatch++;
2421 return 1;
2422 }
2423 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2424 *raddr = SUNION2PF(&ke->pfrke_sa, af);
2425 *rmask = SUNION2PF(&pfr_mask, af);
2426
2427 if (use_counter) {
2428 /* is supplied address within block? */
2429 if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
2430 /* no, go to next block in table */
2431 idx++;
2432 use_counter = 0;
2433 goto _next_block;
2434 }
2435 PF_ACPY(addr, counter, af);
2436 } else {
2437 /* use first address of block */
2438 PF_ACPY(addr, *raddr, af);
2439 }
2440
2441 if (!KENTRY_NETWORK(ke)) {
2442 /* this is a single IP address - no possible nested block */
2443 PF_ACPY(counter, addr, af);
2444 *pidx = idx;
2445 kt->pfrkt_match++;
2446 return 0;
2447 }
2448 for (;;) {
2449 /* we don't want to use a nested block */
2450 if (af == AF_INET) {
2451 ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
2452 kt->pfrkt_ip4);
2453 } else if (af == AF_INET6) {
2454 ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
2455 kt->pfrkt_ip6);
2456 } else {
2457 return -1; /* never happens */
2458 }
2459 /* no need to check KENTRY_RNF_ROOT() here */
2460 if (ke2 == ke) {
2461 /* lookup return the same block - perfect */
2462 PF_ACPY(counter, addr, af);
2463 *pidx = idx;
2464 kt->pfrkt_match++;
2465 return 0;
2466 }
2467
2468 /* we need to increase the counter past the nested block */
2469 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2470 PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2471 PF_AINC(addr, af);
2472 if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
2473 /* ok, we reached the end of our main block */
2474 /* go to next block in table */
2475 idx++;
2476 use_counter = 0;
2477 goto _next_block;
2478 }
2479 }
2480 }
2481
2482 static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable * kt,int idx,int af)2483 pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2484 {
2485 struct pfr_walktree w;
2486
2487 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2488
2489 bzero(&w, sizeof(w));
2490 w.pfrw_op = PFRW_POOL_GET;
2491 w.pfrw_cnt = idx;
2492
2493 switch (af) {
2494 #if INET
2495 case AF_INET:
2496 (void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
2497 pfr_walktree, &w);
2498 return w.pfrw_kentry;
2499 #endif /* INET */
2500 case AF_INET6:
2501 (void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
2502 pfr_walktree, &w);
2503 return w.pfrw_kentry;
2504 default:
2505 return NULL;
2506 }
2507 }
2508
2509 void
pfr_dynaddr_update(struct pfr_ktable * kt,struct pfi_dynaddr * dyn)2510 pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
2511 {
2512 struct pfr_walktree w;
2513
2514 LCK_MTX_ASSERT(&pf_lock, LCK_MTX_ASSERT_OWNED);
2515
2516 bzero(&w, sizeof(w));
2517 w.pfrw_op = PFRW_DYNADDR_UPDATE;
2518 w.pfrw_dyn = dyn;
2519
2520 dyn->pfid_acnt4 = 0;
2521 dyn->pfid_acnt6 = 0;
2522 if (!dyn->pfid_af || dyn->pfid_af == AF_INET) {
2523 (void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
2524 pfr_walktree, &w);
2525 }
2526 if (!dyn->pfid_af || dyn->pfid_af == AF_INET6) {
2527 (void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
2528 pfr_walktree, &w);
2529 }
2530 }
2531