Bug Summary

File: net/pf_ioctl.c
Warning: line 1129, column 5
Value stored to 'flags' is never read
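
This is the analyzer's dead-store check (the deadcode checker package is enabled in the invocation below). In pfioctl(), 'flags' is a plain by-value int parameter; at line 1129 the read-only ioctl path does "flags |= FWRITE" before breaking out of the permission pre-check, and, according to the analyzer, nothing later in the function reads 'flags' again, so the assignment has no observable effect. A minimal sketch of the flagged pattern, illustrative only (the helper is_dummy_op() is hypothetical, not something from this file):

int
example_ioctl(u_long cmd, int flags)
{
	if (is_dummy_op(cmd))
		flags |= FWRITE;	/* dead store: nothing reads 'flags' after this */
	/* ... the rest of the function never consults 'flags' ... */
	return (0);
}

Reports like this are usually resolved either by dropping the assignment or by actually consulting the updated flag later; which is appropriate here depends on whether the "need write lock for dummy" comment still reflects a real requirement.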

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name pf_ioctl.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/net/pf_ioctl.c
1/* $OpenBSD: pf_ioctl.c,v 1.370 2022/01/11 09:00:17 sashan Exp $ */
2
3/*
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002 - 2018 Henning Brauer <henning@openbsd.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * - Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * - Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 *
32 * Effort sponsored in part by the Defense Advanced Research Projects
33 * Agency (DARPA) and Air Force Research Laboratory, Air Force
34 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35 *
36 */
37
38#include "pfsync.h"
39#include "pflog.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/sysctl.h>
44#include <sys/mbuf.h>
45#include <sys/filio.h>
46#include <sys/fcntl.h>
47#include <sys/socket.h>
48#include <sys/socketvar.h>
49#include <sys/kernel.h>
50#include <sys/time.h>
51#include <sys/timeout.h>
52#include <sys/pool.h>
53#include <sys/malloc.h>
54#include <sys/proc.h>
55#include <sys/rwlock.h>
56#include <sys/syslog.h>
57#include <uvm/uvm_extern.h>
58
59#include <crypto/md5.h>
60
61#include <net/if.h>
62#include <net/if_var.h>
63#include <net/route.h>
64#include <net/hfsc.h>
65#include <net/fq_codel.h>
66
67#include <netinet/in.h>
68#include <netinet/ip.h>
69#include <netinet/in_pcb.h>
70#include <netinet/ip_var.h>
71#include <netinet/ip_icmp.h>
72#include <netinet/tcp.h>
73#include <netinet/udp.h>
74
75#ifdef INET6
76#include <netinet/ip6.h>
77#include <netinet/icmp6.h>
78#endif /* INET6 */
79
80#include <net/pfvar.h>
81#include <net/pfvar_priv.h>
82
83#if NPFSYNC > 0
84#include <netinet/ip_ipsp.h>
85#include <net/if_pfsync.h>
86#endif /* NPFSYNC > 0 */
87
88struct pool pf_tag_pl;
89
90void pfattach(int);
91void pf_thread_create(void *);
92int pfopen(dev_t, int, int, struct proc *);
93int pfclose(dev_t, int, int, struct proc *);
94int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
95int pf_begin_rules(u_int32_t *, const char *);
96int pf_rollback_rules(u_int32_t, char *);
97void pf_remove_queues(void);
98int pf_commit_queues(void);
99void pf_free_queues(struct pf_queuehead *);
100void pf_calc_chksum(struct pf_ruleset *);
101void pf_hash_rule(MD5_CTX *, struct pf_rule *);
102void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
103int pf_commit_rules(u_int32_t, char *);
104int pf_addr_setup(struct pf_ruleset *,
105 struct pf_addr_wrap *, sa_family_t);
106struct pfi_kif *pf_kif_setup(struct pfi_kif *);
107void pf_addr_copyout(struct pf_addr_wrap *);
108void pf_trans_set_commit(void);
109void pf_pool_copyin(struct pf_pool *, struct pf_pool *);
110int pf_validate_range(u_int8_t, u_int16_t[2]);
111int pf_rule_copyin(struct pf_rule *, struct pf_rule *);
112u_int16_t pf_qname2qid(char *, int);
113void pf_qid2qname(u_int16_t, char *);
114void pf_qid_unref(u_int16_t);
115int pf_states_clr(struct pfioc_state_kill *);
116int pf_states_get(struct pfioc_states *);
117
118struct pf_rule pf_default_rule, pf_default_rule_new;
119
120struct {
121 char statusif[IFNAMSIZ];
122 u_int32_t debug;
123 u_int32_t hostid;
124 u_int32_t reass;
125 u_int32_t mask;
126} pf_trans_set;
127
128#define PF_TSET_STATUSIF 0x01
129#define PF_TSET_DEBUG 0x02
130#define PF_TSET_HOSTID 0x04
131#define PF_TSET_REASS 0x08
132
133#define TAGID_MAX 50000
134TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
135 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
136
137/*
138 * pf_lock protects consistency of PF data structures, which don't have
139 * their dedicated lock yet. The pf_lock currently protects:
140 * - rules,
141 * - radix tables,
142 * - source nodes
143 * All callers must grab pf_lock exclusively.
144 *
145 * pf_state_lock protects consistency of state table. Packets, which do state
146 * look up grab the lock as readers. If packet must create state, then it must
147 * grab the lock as writer. Whenever packet creates state it grabs pf_lock
148 * first then it locks pf_state_lock as the writer.
149 */
150struct rwlock pf_lock = RWLOCK_INITIALIZER("pf_lock");
151struct rwlock pf_state_lock = RWLOCK_INITIALIZER("pf_state_lock");
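
As a concrete illustration of the ordering documented in the comment above, state creation is expected to take pf_lock before pf_state_lock; the same order appears later in this file (for example in pf_states_clr()) via the wrapper macros. A minimal sketch, not code from this file:

	PF_LOCK();			/* take pf_lock exclusively */
	PF_STATE_ENTER_WRITE();		/* then pf_state_lock as writer */
	/* ... create and insert the new state ... */
	PF_STATE_EXIT_WRITE();
	PF_UNLOCK();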
152
153#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
154#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
155#endif
156u_int16_t tagname2tag(struct pf_tags *, char *, int);
157void tag2tagname(struct pf_tags *, u_int16_t, char *);
158void tag_unref(struct pf_tags *, u_int16_t);
159int pf_rtlabel_add(struct pf_addr_wrap *);
160void pf_rtlabel_remove(struct pf_addr_wrap *);
161void pf_rtlabel_copyout(struct pf_addr_wrap *);
162
163
164void
165pfattach(int num)
166{
167 u_int32_t *timeout = pf_default_rule.timeout;
168
169 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0,
170 IPL_SOFTNET0x5, 0, "pfrule", NULL((void *)0));
171 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0,
172 IPL_SOFTNET0x5, 0, "pfsrctr", NULL((void *)0));
173 pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0,
174 IPL_SOFTNET0x5, 0, "pfsnitem", NULL((void *)0));
175 pool_init(&pf_state_pl, sizeof(struct pf_state), 0,
176 IPL_SOFTNET0x5, 0, "pfstate", NULL((void *)0));
177 pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0,
178 IPL_SOFTNET0x5, 0, "pfstkey", NULL((void *)0));
179 pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0,
180 IPL_SOFTNET0x5, 0, "pfstitem", NULL((void *)0));
181 pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0,
182 IPL_SOFTNET0x5, 0, "pfruleitem", NULL((void *)0));
183 pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0,
184 IPL_SOFTNET0x5, 0, "pfqueue", NULL((void *)0));
185 pool_init(&pf_tag_pl, sizeof(struct pf_tagname), 0,
186 IPL_SOFTNET0x5, 0, "pftag", NULL((void *)0));
187 pool_init(&pf_pktdelay_pl, sizeof(struct pf_pktdelay), 0,
188 IPL_SOFTNET0x5, 0, "pfpktdelay", NULL((void *)0));
189
190 hfsc_initialize();
191 pfr_initialize();
192 pfi_initialize();
193 pf_osfp_initialize();
194 pf_syncookies_init();
195
196 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
197 pf_pool_limits[PF_LIMIT_STATES].limit, NULL((void *)0), 0);
198
199 if (physmem <= atop(100*1024*1024)((100*1024*1024) >> 12))
200 pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
201 PFR_KENTRY_HIWAT_SMALL100000;
202
203 RB_INIT(&tree_src_tracking);
204 RB_INIT(&pf_anchors);
205 pf_init_ruleset(&pf_main_ruleset);
206 TAILQ_INIT(&pf_queues[0]);
207 TAILQ_INIT(&pf_queues[1]);
208 pf_queues_active = &pf_queues[0];
209 pf_queues_inactive = &pf_queues[1];
210
211 /* default rule should never be garbage collected */
212 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
213 pf_default_rule.action = PF_PASS;
214 pf_default_rule.nr = (u_int32_t)-1;
215 pf_default_rule.rtableid = -1;
216
217 /* initialize default timeouts */
218 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL120;
219 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL30;
220 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL24*60*60;
221 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL15 * 60;
222 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL45;
223 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL90;
224 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL60;
225 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL30;
226 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL60;
227 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL20;
228 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL10;
229 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL60;
230 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL30;
231 timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL60;
232 timeout[PFTM_FRAG] = PFTM_FRAG_VAL60;
233 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL10;
234 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL0;
235 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL30;
236 timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START60000;
237 timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END120000;
238
239 pf_default_rule.src.addr.type = PF_ADDR_ADDRMASK;
240 pf_default_rule.dst.addr.type = PF_ADDR_ADDRMASK;
241 pf_default_rule.rdr.addr.type = PF_ADDR_NONE;
242 pf_default_rule.nat.addr.type = PF_ADDR_NONE;
243 pf_default_rule.route.addr.type = PF_ADDR_NONE;
244
245 pf_normalize_init();
246 memset(&pf_status, 0, sizeof(pf_status))__builtin_memset((&pf_status), (0), (sizeof(pf_status)));
247 pf_status.debug = LOG_ERR3;
248 pf_status.reass = PF_REASS_ENABLED0x01;
249
250 /* XXX do our best to avoid a conflict */
251 pf_status.hostid = arc4random();
252}
253
254int
255pfopen(dev_t dev, int flags, int fmt, struct proc *p)
256{
257 if (minor(dev) >= 1)
258 return (ENXIO);
259 return (0);
260}
261
262int
263pfclose(dev_t dev, int flags, int fmt, struct proc *p)
264{
265 if (minor(dev) >= 1)
266 return (ENXIO);
267 return (0);
268}
269
270void
271pf_rule_free(struct pf_rule *rule)
272{
273 if (rule == NULL((void *)0))
274 return;
275
276 pfi_kif_free(rule->kif);
277 pfi_kif_free(rule->rcv_kif);
278 pfi_kif_free(rule->rdr.kif);
279 pfi_kif_free(rule->nat.kif);
280 pfi_kif_free(rule->route.kif);
281
282 pool_put(&pf_rule_pl, rule);
283}
284
285void
286pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
287{
288 if (rulequeue != NULL((void *)0)) {
289 if (rule->states_cur == 0 && rule->src_nodes == 0) {
290 /*
291 * XXX - we need to remove the table *before* detaching
292 * the rule to make sure the table code does not delete
293 * the anchor under our feet.
294 */
295 pf_tbladdr_remove(&rule->src.addr);
296 pf_tbladdr_remove(&rule->dst.addr);
297 pf_tbladdr_remove(&rule->rdr.addr);
298 pf_tbladdr_remove(&rule->nat.addr);
299 pf_tbladdr_remove(&rule->route.addr);
300 if (rule->overload_tbl)
301 pfr_detach_table(rule->overload_tbl);
302 }
303 TAILQ_REMOVE(rulequeue, rule, entries);
304 rule->entries.tqe_prev = NULL;
305 rule->nr = (u_int32_t)-1;
306 }
307
308 if (rule->states_cur > 0 || rule->src_nodes > 0 ||
309 rule->entries.tqe_prev != NULL((void *)0))
310 return;
311 pf_tag_unref(rule->tag);
312 pf_tag_unref(rule->match_tag);
313 pf_rtlabel_remove(&rule->src.addr);
314 pf_rtlabel_remove(&rule->dst.addr);
315 pfi_dynaddr_remove(&rule->src.addr);
316 pfi_dynaddr_remove(&rule->dst.addr);
317 pfi_dynaddr_remove(&rule->rdr.addr);
318 pfi_dynaddr_remove(&rule->nat.addr);
319 pfi_dynaddr_remove(&rule->route.addr);
320 if (rulequeue == NULL((void *)0)) {
321 pf_tbladdr_remove(&rule->src.addr);
322 pf_tbladdr_remove(&rule->dst.addr);
323 pf_tbladdr_remove(&rule->rdr.addr);
324 pf_tbladdr_remove(&rule->nat.addr);
325 pf_tbladdr_remove(&rule->route.addr);
326 if (rule->overload_tbl)
327 pfr_detach_table(rule->overload_tbl);
328 }
329 pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
330 pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
331 pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
332 pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
333 pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
334 pf_remove_anchor(rule);
335 pool_put(&pf_rule_pl, rule);
336}
337
338void
339pf_purge_rule(struct pf_rule *rule)
340{
341 u_int32_t nr = 0;
342 struct pf_ruleset *ruleset;
343
344 KASSERT((rule != NULL) && (rule->ruleset != NULL));
345 ruleset = rule->ruleset;
346
347 pf_rm_rule(ruleset->rules.active.ptr, rule);
348 ruleset->rules.active.rcount--;
349 TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
350 rule->nr = nr++;
351 ruleset->rules.active.ticket++;
352 pf_calc_skip_steps(ruleset->rules.active.ptr);
353 pf_remove_if_empty_ruleset(ruleset);
354
355 if (ruleset == &pf_main_ruleset)
356 pf_calc_chksum(ruleset);
357}
358
359u_int16_t
360tagname2tag(struct pf_tags *head, char *tagname, int create)
361{
362 struct pf_tagname *tag, *p = NULL((void *)0);
363 u_int16_t new_tagid = 1;
364
365 TAILQ_FOREACH(tag, head, entries)for((tag) = ((head)->tqh_first); (tag) != ((void *)0); (tag
) = ((tag)->entries.tqe_next))
366 if (strcmp(tagname, tag->name) == 0) {
367 tag->ref++;
368 return (tag->tag);
369 }
370
371 if (!create)
372 return (0);
373
374 /*
375 * to avoid fragmentation, we do a linear search from the beginning
376 * and take the first free slot we find. if there is none or the list
377 * is empty, append a new entry at the end.
378 */
379
380 /* new entry */
381 TAILQ_FOREACH(p, head, entries)for((p) = ((head)->tqh_first); (p) != ((void *)0); (p) = (
(p)->entries.tqe_next))
{
382 if (p->tag != new_tagid)
383 break;
384 new_tagid = p->tag + 1;
385 }
386
387 if (new_tagid > TAGID_MAX50000)
388 return (0);
389
390 /* allocate and fill new struct pf_tagname */
391 tag = pool_get(&pf_tag_pl, PR_NOWAIT0x0002 | PR_ZERO0x0008);
392 if (tag == NULL((void *)0))
393 return (0);
394 strlcpy(tag->name, tagname, sizeof(tag->name));
395 tag->tag = new_tagid;
396 tag->ref++;
397
398 if (p != NULL((void *)0)) /* insert new entry before p */
399 TAILQ_INSERT_BEFORE(p, tag, entries)do { (tag)->entries.tqe_prev = (p)->entries.tqe_prev; (
tag)->entries.tqe_next = (p); *(p)->entries.tqe_prev = (
tag); (p)->entries.tqe_prev = &(tag)->entries.tqe_next
; } while (0)
;
400 else /* either list empty or no free slot in between */
401 TAILQ_INSERT_TAIL(head, tag, entries)do { (tag)->entries.tqe_next = ((void *)0); (tag)->entries
.tqe_prev = (head)->tqh_last; *(head)->tqh_last = (tag)
; (head)->tqh_last = &(tag)->entries.tqe_next; } while
(0)
;
402
403 return (tag->tag);
404}
405
406void
407tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
408{
409 struct pf_tagname *tag;
410
411 TAILQ_FOREACH(tag, head, entries)for((tag) = ((head)->tqh_first); (tag) != ((void *)0); (tag
) = ((tag)->entries.tqe_next))
412 if (tag->tag == tagid) {
413 strlcpy(p, tag->name, PF_TAG_NAME_SIZE64);
414 return;
415 }
416}
417
418void
419tag_unref(struct pf_tags *head, u_int16_t tag)
420{
421 struct pf_tagname *p, *next;
422
423 if (tag == 0)
424 return;
425
426 TAILQ_FOREACH_SAFE(p, head, entries, next)for ((p) = ((head)->tqh_first); (p) != ((void *)0) &&
((next) = ((p)->entries.tqe_next), 1); (p) = (next))
{
427 if (tag == p->tag) {
428 if (--p->ref == 0) {
429 TAILQ_REMOVE(head, p, entries)do { if (((p)->entries.tqe_next) != ((void *)0)) (p)->entries
.tqe_next->entries.tqe_prev = (p)->entries.tqe_prev; else
(head)->tqh_last = (p)->entries.tqe_prev; *(p)->entries
.tqe_prev = (p)->entries.tqe_next; ((p)->entries.tqe_prev
) = ((void *)-1); ((p)->entries.tqe_next) = ((void *)-1); }
while (0)
;
430 pool_put(&pf_tag_pl, p);
431 }
432 break;
433 }
434 }
435}
436
437u_int16_t
438pf_tagname2tag(char *tagname, int create)
439{
440 return (tagname2tag(&pf_tags, tagname, create));
441}
442
443void
444pf_tag2tagname(u_int16_t tagid, char *p)
445{
446 tag2tagname(&pf_tags, tagid, p);
447}
448
449void
450pf_tag_ref(u_int16_t tag)
451{
452 struct pf_tagname *t;
453
454 TAILQ_FOREACH(t, &pf_tags, entries)for((t) = ((&pf_tags)->tqh_first); (t) != ((void *)0);
(t) = ((t)->entries.tqe_next))
455 if (t->tag == tag)
456 break;
457 if (t != NULL((void *)0))
458 t->ref++;
459}
460
461void
462pf_tag_unref(u_int16_t tag)
463{
464 tag_unref(&pf_tags, tag);
465}
466
467int
468pf_rtlabel_add(struct pf_addr_wrap *a)
469{
470 if (a->type == PF_ADDR_RTLABEL &&
471 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
472 return (-1);
473 return (0);
474}
475
476void
477pf_rtlabel_remove(struct pf_addr_wrap *a)
478{
479 if (a->type == PF_ADDR_RTLABEL)
480 rtlabel_unref(a->v.rtlabel);
481}
482
483void
484pf_rtlabel_copyout(struct pf_addr_wrap *a)
485{
486 const char *name;
487
488 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
489 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL((void *)0))
490 strlcpy(a->v.rtlabelname, "?",
491 sizeof(a->v.rtlabelname));
492 else
493 strlcpy(a->v.rtlabelname, name,
494 sizeof(a->v.rtlabelname));
495 }
496}
497
498u_int16_t
499pf_qname2qid(char *qname, int create)
500{
501 return (tagname2tag(&pf_qids, qname, create));
502}
503
504void
505pf_qid2qname(u_int16_t qid, char *p)
506{
507 tag2tagname(&pf_qids, qid, p);
508}
509
510void
511pf_qid_unref(u_int16_t qid)
512{
513 tag_unref(&pf_qids, (u_int16_t)qid);
514}
515
516int
517pf_begin_rules(u_int32_t *ticket, const char *anchor)
518{
519 struct pf_ruleset *rs;
520 struct pf_rule *rule;
521
522 if ((rs = pf_find_or_create_ruleset(anchor)) == NULL((void *)0))
523 return (EINVAL22);
524 while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)((rs->rules.inactive.ptr)->tqh_first)) != NULL((void *)0)) {
525 pf_rm_rule(rs->rules.inactive.ptr, rule);
526 rs->rules.inactive.rcount--;
527 }
528 *ticket = ++rs->rules.inactive.ticket;
529 rs->rules.inactive.open = 1;
530 return (0);
531}
532
533int
534pf_rollback_rules(u_int32_t ticket, char *anchor)
535{
536 struct pf_ruleset *rs;
537 struct pf_rule *rule;
538
539 rs = pf_find_ruleset(anchor);
540 if (rs == NULL((void *)0) || !rs->rules.inactive.open ||
541 rs->rules.inactive.ticket != ticket)
542 return (0);
543 while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)((rs->rules.inactive.ptr)->tqh_first)) != NULL((void *)0)) {
544 pf_rm_rule(rs->rules.inactive.ptr, rule);
545 rs->rules.inactive.rcount--;
546 }
547 rs->rules.inactive.open = 0;
548
549 /* queue defs only in the main ruleset */
550 if (anchor[0])
551 return (0);
552
553 pf_free_queues(pf_queues_inactive);
554
555 return (0);
556}
557
558void
559pf_free_queues(struct pf_queuehead *where)
560{
561 struct pf_queuespec *q, *qtmp;
562
563 TAILQ_FOREACH_SAFE(q, where, entries, qtmp)for ((q) = ((where)->tqh_first); (q) != ((void *)0) &&
((qtmp) = ((q)->entries.tqe_next), 1); (q) = (qtmp))
{
564 TAILQ_REMOVE(where, q, entries)do { if (((q)->entries.tqe_next) != ((void *)0)) (q)->entries
.tqe_next->entries.tqe_prev = (q)->entries.tqe_prev; else
(where)->tqh_last = (q)->entries.tqe_prev; *(q)->entries
.tqe_prev = (q)->entries.tqe_next; ((q)->entries.tqe_prev
) = ((void *)-1); ((q)->entries.tqe_next) = ((void *)-1); }
while (0)
;
565 pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
566 pool_put(&pf_queue_pl, q);
567 }
568}
569
570void
571pf_remove_queues(void)
572{
573 struct pf_queuespec *q;
574 struct ifnet *ifp;
575
576 /* put back interfaces in normal queueing mode */
577 TAILQ_FOREACH(q, pf_queues_active, entries)for((q) = ((pf_queues_active)->tqh_first); (q) != ((void *
)0); (q) = ((q)->entries.tqe_next))
{
578 if (q->parent_qid != 0)
579 continue;
580
581 ifp = q->kif->pfik_ifp;
582 if (ifp == NULL((void *)0))
583 continue;
584
585 ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL((void *)0));
586 }
587}
588
589struct pf_queue_if {
590 struct ifnet *ifp;
591 const struct ifq_ops *ifqops;
592 const struct pfq_ops *pfqops;
593 void *disc;
594 struct pf_queue_if *next;
595};
596
597static inline struct pf_queue_if *
598pf_ifp2q(struct pf_queue_if *list, struct ifnet *ifp)
599{
600 struct pf_queue_if *qif = list;
601
602 while (qif != NULL((void *)0)) {
603 if (qif->ifp == ifp)
604 return (qif);
605
606 qif = qif->next;
607 }
608
609 return (qif);
610}
611
612int
613pf_create_queues(void)
614{
615 struct pf_queuespec *q;
616 struct ifnet *ifp;
617 struct pf_queue_if *list = NULL((void *)0), *qif;
618 int error;
619
620 /*
621 * Find root queues and allocate traffic conditioner
622 * private data for these interfaces
623 */
624 TAILQ_FOREACH(q, pf_queues_active, entries)for((q) = ((pf_queues_active)->tqh_first); (q) != ((void *
)0); (q) = ((q)->entries.tqe_next))
{
625 if (q->parent_qid != 0)
626 continue;
627
628 ifp = q->kif->pfik_ifp;
629 if (ifp == NULL((void *)0))
630 continue;
631
632 qif = malloc(sizeof(*qif), M_TEMP127, M_WAITOK0x0001);
633 qif->ifp = ifp;
634
635 if (q->flags & PFQS_ROOTCLASS0x0002) {
636 qif->ifqops = ifq_hfsc_ops;
637 qif->pfqops = pfq_hfsc_ops;
638 } else {
639 qif->ifqops = ifq_fqcodel_ops;
640 qif->pfqops = pfq_fqcodel_ops;
641 }
642
643 qif->disc = qif->pfqops->pfq_alloc(ifp);
644
645 qif->next = list;
646 list = qif;
647 }
648
649 /* and now everything */
650 TAILQ_FOREACH(q, pf_queues_active, entries)for((q) = ((pf_queues_active)->tqh_first); (q) != ((void *
)0); (q) = ((q)->entries.tqe_next))
{
651 ifp = q->kif->pfik_ifp;
652 if (ifp == NULL((void *)0))
653 continue;
654
655 qif = pf_ifp2q(list, ifp);
656 KASSERT(qif != NULL)((qif != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/pf_ioctl.c"
, 656, "qif != NULL"))
;
657
658 error = qif->pfqops->pfq_addqueue(qif->disc, q);
659 if (error != 0)
660 goto error;
661 }
662
663 /* find root queues in old list to disable them if necessary */
664 TAILQ_FOREACH(q, pf_queues_inactive, entries)for((q) = ((pf_queues_inactive)->tqh_first); (q) != ((void
*)0); (q) = ((q)->entries.tqe_next))
{
665 if (q->parent_qid != 0)
666 continue;
667
668 ifp = q->kif->pfik_ifp;
669 if (ifp == NULL((void *)0))
670 continue;
671
672 qif = pf_ifp2q(list, ifp);
673 if (qif != NULL((void *)0))
674 continue;
675
676 ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL((void *)0));
677 }
678
679 /* commit the new queues */
680 while (list != NULL((void *)0)) {
681 qif = list;
682 list = qif->next;
683
684 ifp = qif->ifp;
685
686 ifq_attach(&ifp->if_snd, qif->ifqops, qif->disc);
687 free(qif, M_TEMP127, sizeof(*qif));
688 }
689
690 return (0);
691
692error:
693 while (list != NULL((void *)0)) {
694 qif = list;
695 list = qif->next;
696
697 qif->pfqops->pfq_free(qif->disc);
698 free(qif, M_TEMP127, sizeof(*qif));
699 }
700
701 return (error);
702}
703
704int
705pf_commit_queues(void)
706{
707 struct pf_queuehead *qswap;
708 int error;
709
710 /* swap */
711 qswap = pf_queues_active;
712 pf_queues_active = pf_queues_inactive;
713 pf_queues_inactive = qswap;
714
715 error = pf_create_queues();
716 if (error != 0) {
717 pf_queues_inactive = pf_queues_active;
718 pf_queues_active = qswap;
719 return (error);
720 }
721
722 pf_free_queues(pf_queues_inactive);
723
724 return (0);
725}
726
727const struct pfq_ops *
728pf_queue_manager(struct pf_queuespec *q)
729{
730 if (q->flags & PFQS_FLOWQUEUE0x0001)
731 return pfq_fqcodel_ops;
732 return (/* pfq_default_ops */ NULL((void *)0));
733}
734
735#define PF_MD5_UPD(st, elm)MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->
elm))
\
736 MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
737
738#define PF_MD5_UPD_STR(st, elm)MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm
))
\
739 MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
740
741#define PF_MD5_UPD_HTONL(st, elm, stor)do { (stor) = (__uint32_t)(__builtin_constant_p((st)->elm)
? (__uint32_t)(((__uint32_t)((st)->elm) & 0xff) <<
24 | ((__uint32_t)((st)->elm) & 0xff00) << 8 | (
(__uint32_t)((st)->elm) & 0xff0000) >> 8 | ((__uint32_t
)((st)->elm) & 0xff000000) >> 24) : __swap32md((
st)->elm)); MD5Update(ctx, (u_int8_t *) &(stor), sizeof
(u_int32_t));} while (0)
do { \
742 (stor) = htonl((st)->elm)(__uint32_t)(__builtin_constant_p((st)->elm) ? (__uint32_t
)(((__uint32_t)((st)->elm) & 0xff) << 24 | ((__uint32_t
)((st)->elm) & 0xff00) << 8 | ((__uint32_t)((st)
->elm) & 0xff0000) >> 8 | ((__uint32_t)((st)->
elm) & 0xff000000) >> 24) : __swap32md((st)->elm
))
; \
743 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
744} while (0)
745
746#define PF_MD5_UPD_HTONS(st, elm, stor)do { (stor) = (__uint16_t)(__builtin_constant_p((st)->elm)
? (__uint16_t)(((__uint16_t)((st)->elm) & 0xffU) <<
8 | ((__uint16_t)((st)->elm) & 0xff00U) >> 8) :
__swap16md((st)->elm)); MD5Update(ctx, (u_int8_t *) &
(stor), sizeof(u_int16_t));} while (0)
do { \
747 (stor) = htons((st)->elm)(__uint16_t)(__builtin_constant_p((st)->elm) ? (__uint16_t
)(((__uint16_t)((st)->elm) & 0xffU) << 8 | ((__uint16_t
)((st)->elm) & 0xff00U) >> 8) : __swap16md((st)->
elm))
; \
748 MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
749} while (0)
750
751void
752pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
753{
754 PF_MD5_UPD(pfr, addr.type)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.type, sizeof(
(pfr)->addr.type))
;
755 switch (pfr->addr.type) {
756 case PF_ADDR_DYNIFTL:
757 PF_MD5_UPD(pfr, addr.v.ifname)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.v.ifname, sizeof
((pfr)->addr.v.ifname))
;
758 PF_MD5_UPD(pfr, addr.iflags)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.iflags, sizeof
((pfr)->addr.iflags))
;
759 break;
760 case PF_ADDR_TABLE:
761 if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX"__automatic_",
762 strlen(PF_OPTIMIZER_TABLE_PFX"__automatic_")))
763 PF_MD5_UPD(pfr, addr.v.tblname)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.v.tblname, sizeof
((pfr)->addr.v.tblname))
;
764 break;
765 case PF_ADDR_ADDRMASK:
766 /* XXX ignore af? */
767 PF_MD5_UPD(pfr, addr.v.a.addr.addr32)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.v.a.addr.pfa.
addr32, sizeof((pfr)->addr.v.a.addr.pfa.addr32))
;
768 PF_MD5_UPD(pfr, addr.v.a.mask.addr32)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.v.a.mask.pfa.
addr32, sizeof((pfr)->addr.v.a.mask.pfa.addr32))
;
769 break;
770 case PF_ADDR_RTLABEL:
771 PF_MD5_UPD(pfr, addr.v.rtlabelname)MD5Update(ctx, (u_int8_t *) &(pfr)->addr.v.rtlabelname
, sizeof((pfr)->addr.v.rtlabelname))
;
772 break;
773 }
774
775 PF_MD5_UPD(pfr, port[0])MD5Update(ctx, (u_int8_t *) &(pfr)->port[0], sizeof((pfr
)->port[0]))
;
776 PF_MD5_UPD(pfr, port[1])MD5Update(ctx, (u_int8_t *) &(pfr)->port[1], sizeof((pfr
)->port[1]))
;
777 PF_MD5_UPD(pfr, neg)MD5Update(ctx, (u_int8_t *) &(pfr)->neg, sizeof((pfr)->
neg))
;
778 PF_MD5_UPD(pfr, port_op)MD5Update(ctx, (u_int8_t *) &(pfr)->port_op, sizeof((pfr
)->port_op))
;
779}
780
781void
782pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
783{
784 u_int16_t x;
785 u_int32_t y;
786
787 pf_hash_rule_addr(ctx, &rule->src);
788 pf_hash_rule_addr(ctx, &rule->dst);
789 PF_MD5_UPD_STR(rule, label)MD5Update(ctx, (u_int8_t *) (rule)->label, strlen((rule)->
label))
;
790 PF_MD5_UPD_STR(rule, ifname)MD5Update(ctx, (u_int8_t *) (rule)->ifname, strlen((rule)->
ifname))
;
791 PF_MD5_UPD_STR(rule, rcv_ifname)MD5Update(ctx, (u_int8_t *) (rule)->rcv_ifname, strlen((rule
)->rcv_ifname))
;
792 PF_MD5_UPD_STR(rule, match_tagname)MD5Update(ctx, (u_int8_t *) (rule)->match_tagname, strlen(
(rule)->match_tagname))
;
793 PF_MD5_UPD_HTONS(rule, match_tag, x)do { (x) = (__uint16_t)(__builtin_constant_p((rule)->match_tag
) ? (__uint16_t)(((__uint16_t)((rule)->match_tag) & 0xffU
) << 8 | ((__uint16_t)((rule)->match_tag) & 0xff00U
) >> 8) : __swap16md((rule)->match_tag)); MD5Update(
ctx, (u_int8_t *) &(x), sizeof(u_int16_t));} while (0)
; /* dup? */
794 PF_MD5_UPD_HTONL(rule, os_fingerprint, y)do { (y) = (__uint32_t)(__builtin_constant_p((rule)->os_fingerprint
) ? (__uint32_t)(((__uint32_t)((rule)->os_fingerprint) &
0xff) << 24 | ((__uint32_t)((rule)->os_fingerprint)
& 0xff00) << 8 | ((__uint32_t)((rule)->os_fingerprint
) & 0xff0000) >> 8 | ((__uint32_t)((rule)->os_fingerprint
) & 0xff000000) >> 24) : __swap32md((rule)->os_fingerprint
)); MD5Update(ctx, (u_int8_t *) &(y), sizeof(u_int32_t));
} while (0)
;
795 PF_MD5_UPD_HTONL(rule, prob, y)do { (y) = (__uint32_t)(__builtin_constant_p((rule)->prob)
? (__uint32_t)(((__uint32_t)((rule)->prob) & 0xff) <<
24 | ((__uint32_t)((rule)->prob) & 0xff00) << 8
| ((__uint32_t)((rule)->prob) & 0xff0000) >> 8 |
((__uint32_t)((rule)->prob) & 0xff000000) >> 24
) : __swap32md((rule)->prob)); MD5Update(ctx, (u_int8_t *)
&(y), sizeof(u_int32_t));} while (0)
;
796 PF_MD5_UPD_HTONL(rule, uid.uid[0], y)do { (y) = (__uint32_t)(__builtin_constant_p((rule)->uid.uid
[0]) ? (__uint32_t)(((__uint32_t)((rule)->uid.uid[0]) &
0xff) << 24 | ((__uint32_t)((rule)->uid.uid[0]) &
0xff00) << 8 | ((__uint32_t)((rule)->uid.uid[0]) &
0xff0000) >> 8 | ((__uint32_t)((rule)->uid.uid[0]) &
0xff000000) >> 24) : __swap32md((rule)->uid.uid[0])
); MD5Update(ctx, (u_int8_t *) &(y), sizeof(u_int32_t));}
while (0)
;
797 PF_MD5_UPD_HTONL(rule, uid.uid[1], y)do { (y) = (__uint32_t)(__builtin_constant_p((rule)->uid.uid
[1]) ? (__uint32_t)(((__uint32_t)((rule)->uid.uid[1]) &
0xff) << 24 | ((__uint32_t)((rule)->uid.uid[1]) &
0xff00) << 8 | ((__uint32_t)((rule)->uid.uid[1]) &
0xff0000) >> 8 | ((__uint32_t)((rule)->uid.uid[1]) &
0xff000000) >> 24) : __swap32md((rule)->uid.uid[1])
); MD5Update(ctx, (u_int8_t *) &(y), sizeof(u_int32_t));}
while (0)
;
798 PF_MD5_UPD(rule, uid.op)MD5Update(ctx, (u_int8_t *) &(rule)->uid.op, sizeof((rule
)->uid.op))
;
799 PF_MD5_UPD_HTONL(rule, gid.gid[0], y)do { (y) = (__uint32_t)(__builtin_constant_p((rule)->gid.gid
[0]) ? (__uint32_t)(((__uint32_t)((rule)->gid.gid[0]) &
0xff) << 24 | ((__uint32_t)((rule)->gid.gid[0]) &
0xff00) << 8 | ((__uint32_t)((rule)->gid.gid[0]) &
0xff0000) >> 8 | ((__uint32_t)((rule)->gid.gid[0]) &
0xff000000) >> 24) : __swap32md((rule)->gid.gid[0])
); MD5Update(ctx, (u_int8_t *) &(y), sizeof(u_int32_t));}
while (0)
;
800 PF_MD5_UPD_HTONL(rule, gid.gid[1], y)do { (y) = (__uint32_t)(__builtin_constant_p((rule)->gid.gid
[1]) ? (__uint32_t)(((__uint32_t)((rule)->gid.gid[1]) &
0xff) << 24 | ((__uint32_t)((rule)->gid.gid[1]) &
0xff00) << 8 | ((__uint32_t)((rule)->gid.gid[1]) &
0xff0000) >> 8 | ((__uint32_t)((rule)->gid.gid[1]) &
0xff000000) >> 24) : __swap32md((rule)->gid.gid[1])
); MD5Update(ctx, (u_int8_t *) &(y), sizeof(u_int32_t));}
while (0)
;
801 PF_MD5_UPD(rule, gid.op)MD5Update(ctx, (u_int8_t *) &(rule)->gid.op, sizeof((rule
)->gid.op))
;
802 PF_MD5_UPD_HTONL(rule, rule_flag, y)do { (y) = (__uint32_t)(__builtin_constant_p((rule)->rule_flag
) ? (__uint32_t)(((__uint32_t)((rule)->rule_flag) & 0xff
) << 24 | ((__uint32_t)((rule)->rule_flag) & 0xff00
) << 8 | ((__uint32_t)((rule)->rule_flag) & 0xff0000
) >> 8 | ((__uint32_t)((rule)->rule_flag) & 0xff000000
) >> 24) : __swap32md((rule)->rule_flag)); MD5Update
(ctx, (u_int8_t *) &(y), sizeof(u_int32_t));} while (0)
;
803 PF_MD5_UPD(rule, action)MD5Update(ctx, (u_int8_t *) &(rule)->action, sizeof((rule
)->action))
;
804 PF_MD5_UPD(rule, direction)MD5Update(ctx, (u_int8_t *) &(rule)->direction, sizeof
((rule)->direction))
;
805 PF_MD5_UPD(rule, af)MD5Update(ctx, (u_int8_t *) &(rule)->af, sizeof((rule)
->af))
;
806 PF_MD5_UPD(rule, quick)MD5Update(ctx, (u_int8_t *) &(rule)->quick, sizeof((rule
)->quick))
;
807 PF_MD5_UPD(rule, ifnot)MD5Update(ctx, (u_int8_t *) &(rule)->ifnot, sizeof((rule
)->ifnot))
;
808 PF_MD5_UPD(rule, rcvifnot)MD5Update(ctx, (u_int8_t *) &(rule)->rcvifnot, sizeof(
(rule)->rcvifnot))
;
809 PF_MD5_UPD(rule, match_tag_not)MD5Update(ctx, (u_int8_t *) &(rule)->match_tag_not, sizeof
((rule)->match_tag_not))
;
810 PF_MD5_UPD(rule, keep_state)MD5Update(ctx, (u_int8_t *) &(rule)->keep_state, sizeof
((rule)->keep_state))
;
811 PF_MD5_UPD(rule, proto)MD5Update(ctx, (u_int8_t *) &(rule)->proto, sizeof((rule
)->proto))
;
812 PF_MD5_UPD(rule, type)MD5Update(ctx, (u_int8_t *) &(rule)->type, sizeof((rule
)->type))
;
813 PF_MD5_UPD(rule, code)MD5Update(ctx, (u_int8_t *) &(rule)->code, sizeof((rule
)->code))
;
814 PF_MD5_UPD(rule, flags)MD5Update(ctx, (u_int8_t *) &(rule)->flags, sizeof((rule
)->flags))
;
815 PF_MD5_UPD(rule, flagset)MD5Update(ctx, (u_int8_t *) &(rule)->flagset, sizeof((
rule)->flagset))
;
816 PF_MD5_UPD(rule, allow_opts)MD5Update(ctx, (u_int8_t *) &(rule)->allow_opts, sizeof
((rule)->allow_opts))
;
817 PF_MD5_UPD(rule, rt)MD5Update(ctx, (u_int8_t *) &(rule)->rt, sizeof((rule)
->rt))
;
818 PF_MD5_UPD(rule, tos)MD5Update(ctx, (u_int8_t *) &(rule)->tos, sizeof((rule
)->tos))
;
819}
820
821int
822pf_commit_rules(u_int32_t ticket, char *anchor)
823{
824 struct pf_ruleset *rs;
825 struct pf_rule *rule;
826 struct pf_rulequeue *old_rules;
827 u_int32_t old_rcount;
828
829 /* Make sure any expired rules get removed from active rules first. */
830 pf_purge_expired_rules();
831
832 rs = pf_find_ruleset(anchor);
833 if (rs == NULL((void *)0) || !rs->rules.inactive.open ||
834 ticket != rs->rules.inactive.ticket)
835 return (EBUSY16);
836
837 if (rs == &pf_main_rulesetpf_main_anchor.ruleset)
838 pf_calc_chksum(rs);
839
840 /* Swap rules, keep the old. */
841 old_rules = rs->rules.active.ptr;
842 old_rcount = rs->rules.active.rcount;
843
844 rs->rules.active.ptr = rs->rules.inactive.ptr;
845 rs->rules.active.rcount = rs->rules.inactive.rcount;
846 rs->rules.inactive.ptr = old_rules;
847 rs->rules.inactive.rcount = old_rcount;
848
849 rs->rules.active.ticket = rs->rules.inactive.ticket;
850 pf_calc_skip_steps(rs->rules.active.ptr);
851
852
853 /* Purge the old rule list. */
854 while ((rule = TAILQ_FIRST(old_rules)((old_rules)->tqh_first)) != NULL((void *)0))
855 pf_rm_rule(old_rules, rule);
856 rs->rules.inactive.rcount = 0;
857 rs->rules.inactive.open = 0;
858 pf_remove_if_empty_ruleset(rs);
859
860 /* queue defs only in the main ruleset */
861 if (anchor[0])
862 return (0);
863 return (pf_commit_queues());
864}
865
866void
867pf_calc_chksum(struct pf_ruleset *rs)
868{
869 MD5_CTX ctx;
870 struct pf_rule *rule;
871 u_int8_t digest[PF_MD5_DIGEST_LENGTH16];
872
873 MD5Init(&ctx);
874
875 if (rs->rules.inactive.rcount) {
876 TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries)for((rule) = ((rs->rules.inactive.ptr)->tqh_first); (rule
) != ((void *)0); (rule) = ((rule)->entries.tqe_next))
{
877 pf_hash_rule(&ctx, rule);
878 }
879 }
880
881 MD5Final(digest, &ctx);
882 memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum))__builtin_memcpy((pf_status.pf_chksum), (digest), (sizeof(pf_status
.pf_chksum)))
;
883}
884
885int
886pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
887 sa_family_t af)
888{
889 if (pfi_dynaddr_setup(addr, af) ||
890 pf_tbladdr_setup(ruleset, addr) ||
891 pf_rtlabel_add(addr))
892 return (EINVAL22);
893
894 return (0);
895}
896
897struct pfi_kif *
898pf_kif_setup(struct pfi_kif *kif_buf)
899{
900 struct pfi_kif *kif;
901
902 if (kif_buf == NULL((void *)0))
903 return (NULL((void *)0));
904
905 KASSERT(kif_buf->pfik_name[0] != '\0')((kif_buf->pfik_name[0] != '\0') ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/net/pf_ioctl.c", 905, "kif_buf->pfik_name[0] != '\\0'"
))
;
906
907 kif = pfi_kif_get(kif_buf->pfik_name, &kif_buf);
908 if (kif_buf != NULL((void *)0))
909 pfi_kif_free(kif_buf);
910 pfi_kif_ref(kif, PFI_KIF_REF_RULE);
911
912 return (kif);
913}
914
915void
916pf_addr_copyout(struct pf_addr_wrap *addr)
917{
918 pfi_dynaddr_copyout(addr);
919 pf_tbladdr_copyout(addr);
920 pf_rtlabel_copyout(addr);
921}
922
923int
924pf_states_clr(struct pfioc_state_kill *psk)
925{
926 struct pf_state *s, *nexts;
927 struct pf_state *head, *tail;
928 u_int killed = 0;
929 int error;
930
931 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
932
933 /* lock against the gc removing an item from the list */
934 error = rw_enter(&pf_state_list.pfs_rwl, RW_READ0x0002UL|RW_INTR0x0010UL);
935 if (error != 0)
936 goto unlock;
937
938 /* get a snapshot view of the ends of the list to traverse between */
939 mtx_enter(&pf_state_list.pfs_mtx);
940 head = TAILQ_FIRST(&pf_state_list.pfs_list)((&pf_state_list.pfs_list)->tqh_first);
941 tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue)(*(((struct pf_state_queue *)((&pf_state_list.pfs_list)->
tqh_last))->tqh_last))
;
942 mtx_leave(&pf_state_list.pfs_mtx);
943
944 s = NULL((void *)0);
945 nexts = head;
946
947 PF_LOCK()do { do { int _s = rw_status(&netlock); if ((splassert_ctl
> 0) && (_s != 0x0001UL && _s != 0x0002UL
)) splassert_fail(0x0002UL, _s, __func__); } while (0); rw_enter_write
(&pf_lock); } while (0)
;
948 PF_STATE_ENTER_WRITE()do { rw_enter_write(&pf_state_lock); } while (0);
949
950 while (s != tail) {
951 s = nexts;
952 nexts = TAILQ_NEXT(s, entry_list)((s)->entry_list.tqe_next);
953
954 if (s->timeout == PFTM_UNLINKED)
955 continue;
956
957 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
958 s->kif->pfik_name)) {
959#if NPFSYNC1 > 0
960 /* don't send out individual delete messages */
961 SET(s->state_flags, PFSTATE_NOSYNC)((s->state_flags) |= (0x0008));
962#endif /* NPFSYNC > 0 */
963 pf_remove_state(s);
964 killed++;
965 }
966 }
967
968 PF_STATE_EXIT_WRITE()do { do { if (rw_status(&pf_state_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_state_lock), __func__); } while (
0); rw_exit_write(&pf_state_lock); } while (0)
;
969#if NPFSYNC1 > 0
970 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
971#endif /* NPFSYNC > 0 */
972 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
973 rw_exit(&pf_state_list.pfs_rwl);
974
975 psk->psk_killed = killed;
976unlock:
977 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
978
979 return (error);
980}
981
982int
983pf_states_get(struct pfioc_states *ps)
984{
985 struct pf_state *head, *tail;
986 struct pf_state *next, *state;
987 struct pfsync_state *p, pstore;
988 u_int32_t nr = 0;
989 int error;
990
991 if (ps->ps_len == 0) {
992 nr = pf_status.states;
993 ps->ps_len = sizeof(struct pfsync_state) * nr;
994 return (0);
995 }
996
997 p = ps->ps_statesps_u.psu_states;
998
999 /* lock against the gc removing an item from the list */
1000 error = rw_enter(&pf_state_list.pfs_rwl, RW_READ0x0002UL|RW_INTR0x0010UL);
1001 if (error != 0)
1002 return (error);
1003
1004 /* get a snapshot view of the ends of the list to traverse between */
1005 mtx_enter(&pf_state_list.pfs_mtx);
1006 head = TAILQ_FIRST(&pf_state_list.pfs_list)((&pf_state_list.pfs_list)->tqh_first);
1007 tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue)(*(((struct pf_state_queue *)((&pf_state_list.pfs_list)->
tqh_last))->tqh_last))
;
1008 mtx_leave(&pf_state_list.pfs_mtx);
1009
1010 state = NULL((void *)0);
1011 next = head;
1012
1013 while (state != tail) {
1014 state = next;
1015 next = TAILQ_NEXT(state, entry_list)((state)->entry_list.tqe_next);
1016
1017 if (state->timeout == PFTM_UNLINKED)
1018 continue;
1019
1020 if ((nr+1) * sizeof(*p) > ps->ps_len)
1021 break;
1022
1023 pf_state_export(&pstore, state);
1024 error = copyout(&pstore, p, sizeof(*p));
1025 if (error)
1026 goto fail;
1027
1028 p++;
1029 nr++;
1030 }
1031 ps->ps_len = sizeof(struct pfsync_state) * nr;
1032
1033fail:
1034 rw_exit(&pf_state_list.pfs_rwl);
1035
1036 return (error);
1037}
1038
1039int
1040pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1041{
1042 int error = 0;
1043
1044 /* XXX keep in sync with switch() below */
1045 if (securelevel > 1)
1046 switch (cmd) {
1047 case DIOCGETRULES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_rule) & 0x1fff) << 16) | ((('D')) <<
8) | ((6)))
:
1048 case DIOCGETRULE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_rule) & 0x1fff) << 16) | ((('D')) <<
8) | ((7)))
:
1049 case DIOCGETSTATE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_state) & 0x1fff) << 16) | ((('D')) <<
8) | ((19)))
:
1050 case DIOCSETSTATUSIF(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_iface) & 0x1fff) << 16) | ((('D')) <<
8) | ((20)))
:
1051 case DIOCGETSTATUS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pf_status) & 0x1fff) << 16) | ((('D')) <<
8) | ((21)))
:
1052 case DIOCCLRSTATUS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_iface) & 0x1fff) << 16) | ((('D')) <<
8) | ((22)))
:
1053 case DIOCNATLOOK(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_natlook) & 0x1fff) << 16) | ((('D')) <<
8) | ((23)))
:
1054 case DIOCSETDEBUG(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(u_int32_t) & 0x1fff) << 16) | ((('D')) << 8)
| ((24)))
:
1055 case DIOCGETSTATES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_states) & 0x1fff) << 16) | ((('D')) <<
8) | ((25)))
:
1056 case DIOCGETTIMEOUT(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_tm) & 0x1fff) << 16) | ((('D')) <<
8) | ((30)))
:
1057 case DIOCGETLIMIT(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_limit) & 0x1fff) << 16) | ((('D')) <<
8) | ((39)))
:
1058 case DIOCGETRULESETS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_ruleset) & 0x1fff) << 16) | ((('D')) <<
8) | ((58)))
:
1059 case DIOCGETRULESET(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_ruleset) & 0x1fff) << 16) | ((('D')) <<
8) | ((59)))
:
1060 case DIOCGETQUEUES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_queue) & 0x1fff) << 16) | ((('D')) <<
8) | ((94)))
:
1061 case DIOCGETQUEUE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_queue) & 0x1fff) << 16) | ((('D')) <<
8) | ((95)))
:
1062 case DIOCGETQSTATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_qstats) & 0x1fff) << 16) | ((('D')) <<
8) | ((96)))
:
1063 case DIOCRGETTABLES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((63)))
:
1064 case DIOCRGETTSTATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((64)))
:
1065 case DIOCRCLRTSTATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((65)))
:
1066 case DIOCRCLRADDRS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((66)))
:
1067 case DIOCRADDADDRS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((67)))
:
1068 case DIOCRDELADDRS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((68)))
:
1069 case DIOCRSETADDRS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((69)))
:
1070 case DIOCRGETADDRS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((70)))
:
1071 case DIOCRGETASTATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((71)))
:
1072 case DIOCRCLRASTATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((72)))
:
1073 case DIOCRTSTADDRS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((73)))
:
1074 case DIOCOSFPGET(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pf_osfp_ioctl) & 0x1fff) << 16) | ((('D')) <<
8) | ((80)))
:
1075 case DIOCGETSRCNODES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_src_nodes) & 0x1fff) << 16) | ((('D')
) << 8) | ((84)))
:
1076 case DIOCCLRSRCNODES((unsigned long)0x20000000 | ((0 & 0x1fff) << 16) |
((('D')) << 8) | ((85)))
:
1077 case DIOCIGETIFACES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_iface) & 0x1fff) << 16) | ((('D')) <<
8) | ((87)))
:
1078 case DIOCSETIFFLAG(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_iface) & 0x1fff) << 16) | ((('D')) <<
8) | ((89)))
:
1079 case DIOCCLRIFFLAG(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_iface) & 0x1fff) << 16) | ((('D')) <<
8) | ((90)))
:
1080 case DIOCGETSYNFLWATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_synflwats) & 0x1fff) << 16) | ((('D')
) << 8) | ((99)))
:
1081 break;
1082 case DIOCRCLRTABLES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((60)))
:
1083 case DIOCRADDTABLES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((61)))
:
1084 case DIOCRDELTABLES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((62)))
:
1085 case DIOCRSETTFLAGS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((74)))
:
1086 if (((struct pfioc_table *)addr)->pfrio_flags &
1087 PFR_FLAG_DUMMY0x00000002)
1088 break; /* dummy operation ok */
1089 return (EPERM1);
1090 default:
1091 return (EPERM1);
1092 }
1093
1094 if (!(flags & FWRITE0x0002))
1095 switch (cmd) {
1096 case DIOCGETRULES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_rule) & 0x1fff) << 16) | ((('D')) <<
8) | ((6)))
:
1097 case DIOCGETSTATE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_state) & 0x1fff) << 16) | ((('D')) <<
8) | ((19)))
:
1098 case DIOCGETSTATUS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pf_status) & 0x1fff) << 16) | ((('D')) <<
8) | ((21)))
:
1099 case DIOCGETSTATES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_states) & 0x1fff) << 16) | ((('D')) <<
8) | ((25)))
:
1100 case DIOCGETTIMEOUT(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_tm) & 0x1fff) << 16) | ((('D')) <<
8) | ((30)))
:
1101 case DIOCGETLIMIT(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_limit) & 0x1fff) << 16) | ((('D')) <<
8) | ((39)))
:
1102 case DIOCGETRULESETS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_ruleset) & 0x1fff) << 16) | ((('D')) <<
8) | ((58)))
:
1103 case DIOCGETRULESET(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_ruleset) & 0x1fff) << 16) | ((('D')) <<
8) | ((59)))
:
1104 case DIOCGETQUEUES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_queue) & 0x1fff) << 16) | ((('D')) <<
8) | ((94)))
:
1105 case DIOCGETQUEUE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_queue) & 0x1fff) << 16) | ((('D')) <<
8) | ((95)))
:
1106 case DIOCGETQSTATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_qstats) & 0x1fff) << 16) | ((('D')) <<
8) | ((96)))
:
1107 case DIOCNATLOOK(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_natlook) & 0x1fff) << 16) | ((('D')) <<
8) | ((23)))
:
1108 case DIOCRGETTABLES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((63)))
:
1109 case DIOCRGETTSTATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((64)))
:
1110 case DIOCRGETADDRS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((70)))
:
1111 case DIOCRGETASTATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((71)))
:
1112 case DIOCRTSTADDRS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((73)))
:
1113 case DIOCOSFPGET(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pf_osfp_ioctl) & 0x1fff) << 16) | ((('D')) <<
8) | ((80)))
:
1114 case DIOCGETSRCNODES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_src_nodes) & 0x1fff) << 16) | ((('D')
) << 8) | ((84)))
:
1115 case DIOCIGETIFACES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_iface) & 0x1fff) << 16) | ((('D')) <<
8) | ((87)))
:
1116 case DIOCGETSYNFLWATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_synflwats) & 0x1fff) << 16) | ((('D')
) << 8) | ((99)))
:
1117 break;
1118 case DIOCRCLRTABLES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((60)))
:
1119 case DIOCRADDTABLES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((61)))
:
1120 case DIOCRDELTABLES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((62)))
:
1121 case DIOCRCLRTSTATS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((65)))
:
1122 case DIOCRCLRADDRS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((66)))
:
1123 case DIOCRADDADDRS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((67)))
:
1124 case DIOCRDELADDRS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((68)))
:
1125 case DIOCRSETADDRS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((69)))
:
1126 case DIOCRSETTFLAGS:
1127 if (((struct pfioc_table *)addr)->pfrio_flags &
1128 PFR_FLAG_DUMMY) {
1129 flags |= FWRITE; /* need write lock for dummy */
Value stored to 'flags' is never read
1130 break; /* dummy operation ok */
1131 }
1132 return (EACCES);
1133 case DIOCGETRULE:
1134 if (((struct pfioc_rule *)addr)->action ==
1135 PF_GET_CLR_CNTR)
1136 return (EACCES);
1137 break;
1138 default:
1139 return (EACCES);
1140 }
1141
1142	switch (cmd) {
1143
1144	case DIOCSTART:
1145		NET_LOCK();
1146		PF_LOCK();
1147		if (pf_status.running)
1148			error = EEXIST;
1149		else {
1150			pf_status.running = 1;
1151			pf_status.since = getuptime();
1152			if (pf_status.stateid == 0) {
1153				pf_status.stateid = gettime();
1154				pf_status.stateid = pf_status.stateid << 32;
1155			}
1156			timeout_add_sec(&pf_purge_to, 1);
1157			pf_create_queues();
1158			DPFPRINTF(LOG_NOTICE, "pf: started");
1159		}
1160		PF_UNLOCK();
1161		NET_UNLOCK();
1162		break;
1163
1164	case DIOCSTOP:
1165		NET_LOCK();
1166		PF_LOCK();
1167		if (!pf_status.running)
1168			error = ENOENT;
1169		else {
1170			pf_status.running = 0;
1171			pf_status.since = getuptime();
1172			pf_remove_queues();
1173			DPFPRINTF(LOG_NOTICE, "pf: stopped");
1174		}
1175		PF_UNLOCK();
1176		NET_UNLOCK();
1177		break;
1178
1179	case DIOCGETQUEUES: {
1180		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
1181		struct pf_queuespec	*qs;
1182		u_int32_t		 nr = 0;
1183
1184		NET_LOCK();
1185		PF_LOCK();
1186		pq->ticket = pf_main_ruleset.rules.active.ticket;
1187
1188		/* save state to not run over them all each time? */
1189		qs = TAILQ_FIRST(pf_queues_active);
1190		while (qs != NULL) {
1191			qs = TAILQ_NEXT(qs, entries);
1192			nr++;
1193		}
1194		pq->nr = nr;
1195		PF_UNLOCK();
1196		NET_UNLOCK();
1197		break;
1198	}
1199
1200	case DIOCGETQUEUE: {
1201		struct pfioc_queue	*pq = (struct pfioc_queue *)addr;
1202		struct pf_queuespec	*qs;
1203		u_int32_t		 nr = 0;
1204
1205		NET_LOCK();
1206		PF_LOCK();
1207		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
1208			error = EBUSY;
1209			PF_UNLOCK();
1210			NET_UNLOCK();
1211			break;
1212		}
1213
1214		/* save state to not run over them all each time? */
1215		qs = TAILQ_FIRST(pf_queues_active);
1216		while ((qs != NULL) && (nr++ < pq->nr))
1217			qs = TAILQ_NEXT(qs, entries);
1218		if (qs == NULL) {
1219			error = EBUSY;
1220			PF_UNLOCK();
1221			NET_UNLOCK();
1222			break;
1223		}
1224		memcpy(&pq->queue, qs, sizeof(pq->queue));
1225		PF_UNLOCK();
1226		NET_UNLOCK();
1227		break;
1228	}
1229
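DIOCGETQUEUES and DIOCGETQUEUE work as a pair: the first returns the active ruleset ticket and the number of queues, the second is then issued once per index with that same ticket, and the kernel answers EBUSY if the ruleset changed in between. The following user-space sketch is illustrative only (roughly the sequence pfctl uses to list queues, but not copied from it) and assumes the standard <net/pfvar.h> definitions.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct pfioc_queue	 pq;
	u_int32_t		 i, nr, ticket;
	int			 dev;

	if ((dev = open("/dev/pf", O_RDONLY)) == -1)
		err(1, "open(/dev/pf)");

	memset(&pq, 0, sizeof(pq));
	if (ioctl(dev, DIOCGETQUEUES, &pq) == -1)
		err(1, "DIOCGETQUEUES");
	nr = pq.nr;
	ticket = pq.ticket;

	for (i = 0; i < nr; i++) {
		pq.ticket = ticket;	/* must match the active ruleset */
		pq.nr = i;		/* index of the queue to fetch */
		if (ioctl(dev, DIOCGETQUEUE, &pq) == -1)
			err(1, "DIOCGETQUEUE");	/* EBUSY if rules changed */
		printf("queue %u: %s\n", i, pq.queue.qname);
	}
	return (0);
}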
1230	case DIOCGETQSTATS: {
1231		struct pfioc_qstats	*pq = (struct pfioc_qstats *)addr;
1232		struct pf_queuespec	*qs;
1233		u_int32_t		 nr;
1234		int			 nbytes;
1235
1236		NET_LOCK();
1237		PF_LOCK();
1238		if (pq->ticket != pf_main_ruleset.rules.active.ticket) {
1239			error = EBUSY;
1240			PF_UNLOCK();
1241			NET_UNLOCK();
1242			break;
1243		}
1244		nbytes = pq->nbytes;
1245		nr = 0;
1246
1247		/* save state to not run over them all each time? */
1248		qs = TAILQ_FIRST(pf_queues_active);
1249		while ((qs != NULL) && (nr++ < pq->nr))
1250			qs = TAILQ_NEXT(qs, entries);
1251		if (qs == NULL) {
1252			error = EBUSY;
1253			PF_UNLOCK();
1254			NET_UNLOCK();
1255			break;
1256		}
1257		memcpy(&pq->queue, qs, sizeof(pq->queue));
1258		/* It's a root flow queue but is not an HFSC root class */
1259		if ((qs->flags & PFQS_FLOWQUEUE) && qs->parent_qid == 0 &&
1260		    !(qs->flags & PFQS_ROOTCLASS))
1261			error = pfq_fqcodel_ops->pfq_qstats(qs, pq->buf,
1262			    &nbytes);
1263		else
1264			error = pfq_hfsc_ops->pfq_qstats(qs, pq->buf,
1265			    &nbytes);
1266		if (error == 0)
1267			pq->nbytes = nbytes;
1268		PF_UNLOCK();
1269		NET_UNLOCK();
1270		break;
1271	}
1272
1273	case DIOCADDQUEUE: {
1274		struct pfioc_queue	*q = (struct pfioc_queue *)addr;
1275		struct pf_queuespec	*qs;
1276
1277		qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1278		if (qs == NULL) {
1279			error = ENOMEM;
1280			break;
1281		}
1282
1283		NET_LOCK();
1284		PF_LOCK();
1285		if (q->ticket != pf_main_ruleset.rules.inactive.ticket) {
1286			error = EBUSY;
1287			PF_UNLOCK();
1288			NET_UNLOCK();
1289			pool_put(&pf_queue_pl, qs);
1290			break;
1291		}
1292		memcpy(qs, &q->queue, sizeof(*qs));
1293		qs->qid = pf_qname2qid(qs->qname, 1);
1294		if (qs->qid == 0) {
1295			error = EBUSY;
1296			PF_UNLOCK();
1297			NET_UNLOCK();
1298			pool_put(&pf_queue_pl, qs);
1299			break;
1300		}
1301		if (qs->parent[0] && (qs->parent_qid =
1302		    pf_qname2qid(qs->parent, 0)) == 0) {
1303			error = ESRCH;
1304			PF_UNLOCK();
1305			NET_UNLOCK();
1306			pool_put(&pf_queue_pl, qs);
1307			break;
1308		}
1309		qs->kif = pfi_kif_get(qs->ifname, NULL);
1310		if (qs->kif == NULL) {
1311			error = ESRCH;
1312			PF_UNLOCK();
1313			NET_UNLOCK();
1314			pool_put(&pf_queue_pl, qs);
1315			break;
1316		}
1317		/* XXX resolve bw percentage specs */
1318		pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE);
1319
1320		TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries);
1321		PF_UNLOCK();
1322		NET_UNLOCK();
1323
1324		break;
1325	}
1326
1327	case DIOCADDRULE: {
1328		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1329		struct pf_ruleset	*ruleset;
1330		struct pf_rule		*rule, *tail;
1331
1332		rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1333		if (rule == NULL) {
1334			error = ENOMEM;
1335			break;
1336		}
1337
1338		if ((error = pf_rule_copyin(&pr->rule, rule))) {
1339			pf_rule_free(rule);
1340			rule = NULL;
1341			break;
1342		}
1343
1344		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1345			error = EINVAL;
1346			pf_rule_free(rule);
1347			rule = NULL;
1348			break;
1349		}
1350		switch (rule->af) {
1351		case 0:
1352			break;
1353		case AF_INET:
1354			break;
1355	#ifdef INET6
1356		case AF_INET6:
1357			break;
1358	#endif /* INET6 */
1359		default:
1360			pf_rule_free(rule);
1361			rule = NULL;
1362			error = EAFNOSUPPORT;
1363			goto fail;
1364		}
1365
1366		if (rule->src.addr.type == PF_ADDR_NONE ||
1367		    rule->dst.addr.type == PF_ADDR_NONE) {
1368			error = EINVAL;
1369			pf_rule_free(rule);
1370			rule = NULL;
1371			break;
1372		}
1373
1374		if (rule->rt && !rule->direction) {
1375			error = EINVAL;
1376			pf_rule_free(rule);
1377			rule = NULL;
1378			break;
1379		}
1380
1381		if (rule->scrub_flags & PFSTATE_SETPRIO &&
1382		    (rule->set_prio[0] > IFQ_MAXPRIO - 1 ||
1383		    rule->set_prio[1] > IFQ_MAXPRIO - 1)) {
1384			error = EINVAL;
1385			pf_rule_free(rule);
1386			rule = NULL;
1387			break;
1388		}
1389
1390		NET_LOCK();
1391		PF_LOCK();
1392		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
1393		ruleset = pf_find_ruleset(pr->anchor);
1394		if (ruleset == NULL) {
1395			error = EINVAL;
1396			PF_UNLOCK();
1397			NET_UNLOCK();
1398			pf_rule_free(rule);
1399			break;
1400		}
1401		if (pr->ticket != ruleset->rules.inactive.ticket) {
1402			error = EBUSY;
1403			PF_UNLOCK();
1404			NET_UNLOCK();
1405			pf_rule_free(rule);
1406			break;
1407		}
1408		rule->cuid = p->p_ucred->cr_ruid;
1409		rule->cpid = p->p_p->ps_pid;
1410
1411		tail = TAILQ_LAST(ruleset->rules.inactive.ptr,
1412		    pf_rulequeue);
1413		if (tail)
1414			rule->nr = tail->nr + 1;
1415		else
1416			rule->nr = 0;
1417
1418		rule->kif = pf_kif_setup(rule->kif);
1419		rule->rcv_kif = pf_kif_setup(rule->rcv_kif);
1420		rule->rdr.kif = pf_kif_setup(rule->rdr.kif);
1421		rule->nat.kif = pf_kif_setup(rule->nat.kif);
1422		rule->route.kif = pf_kif_setup(rule->route.kif);
1423
1424		if (rule->overload_tblname[0]) {
1425			if ((rule->overload_tbl = pfr_attach_table(ruleset,
1426			    rule->overload_tblname, 0)) == NULL)
1427				error = EINVAL;
1428			else
1429				rule->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE;
1430		}
1431
1432		if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
1433			error = EINVAL;
1434		if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
1435			error = EINVAL;
1436		if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af))
1437			error = EINVAL;
1438		if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af))
1439			error = EINVAL;
1440		if (pf_addr_setup(ruleset, &rule->route.addr, rule->af))
1441			error = EINVAL;
1442		if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1443			error = EINVAL;
1444
1445		if (error) {
1446			pf_rm_rule(NULL, rule);
1447			PF_UNLOCK();
1448			NET_UNLOCK();
1449			break;
1450		}
1451		TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr,
1452		    rule, entries);
1453		rule->ruleset = ruleset;
1454		ruleset->rules.inactive.rcount++;
1455		PF_UNLOCK();
1456		NET_UNLOCK();
1457		break;
1458	}
1459
1460	case DIOCGETRULES: {
1461		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1462		struct pf_ruleset	*ruleset;
1463		struct pf_rule		*tail;
1464
1465		NET_LOCK();
1466		PF_LOCK();
1467		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
1468		ruleset = pf_find_ruleset(pr->anchor);
1469		if (ruleset == NULL) {
1470			error = EINVAL;
1471			PF_UNLOCK();
1472			NET_UNLOCK();
1473			break;
1474		}
1475		tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue);
1476		if (tail)
1477			pr->nr = tail->nr + 1;
1478		else
1479			pr->nr = 0;
1480		pr->ticket = ruleset->rules.active.ticket;
1481		PF_UNLOCK();
1482		NET_UNLOCK();
1483		break;
1484	}
1485
1486	case DIOCGETRULE: {
1487		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
1488		struct pf_ruleset	*ruleset;
1489		struct pf_rule		*rule;
1490		int			 i;
1491
1492		NET_LOCK();
1493		PF_LOCK();
1494		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
1495		ruleset = pf_find_ruleset(pr->anchor);
1496		if (ruleset == NULL) {
1497			error = EINVAL;
1498			PF_UNLOCK();
1499			NET_UNLOCK();
1500			break;
1501		}
1502		if (pr->ticket != ruleset->rules.active.ticket) {
1503			error = EBUSY;
1504			PF_UNLOCK();
1505			NET_UNLOCK();
1506			break;
1507		}
1508		rule = TAILQ_FIRST(ruleset->rules.active.ptr);
1509		while ((rule != NULL) && (rule->nr != pr->nr))
1510			rule = TAILQ_NEXT(rule, entries);
1511		if (rule == NULL) {
1512			error = EBUSY;
1513			PF_UNLOCK();
1514			NET_UNLOCK();
1515			break;
1516		}
1517		memcpy(&pr->rule, rule, sizeof(struct pf_rule));
1518		memset(&pr->rule.entries, 0, sizeof(pr->rule.entries));
1519		pr->rule.kif = NULL;
1520		pr->rule.nat.kif = NULL;
1521		pr->rule.rdr.kif = NULL;
1522		pr->rule.route.kif = NULL;
1523		pr->rule.rcv_kif = NULL;
1524		pr->rule.anchor = NULL;
1525		pr->rule.overload_tbl = NULL;
1526		pr->rule.pktrate.limit /= PF_THRESHOLD_MULT;
1527		memset(&pr->rule.gcle, 0, sizeof(pr->rule.gcle));
1528		pr->rule.ruleset = NULL;
1529		if (pf_anchor_copyout(ruleset, rule, pr)) {
1530			error = EBUSY;
1531			PF_UNLOCK();
1532			NET_UNLOCK();
1533			break;
1534		}
1535		pf_addr_copyout(&pr->rule.src.addr);
1536		pf_addr_copyout(&pr->rule.dst.addr);
1537		pf_addr_copyout(&pr->rule.rdr.addr);
1538		pf_addr_copyout(&pr->rule.nat.addr);
1539		pf_addr_copyout(&pr->rule.route.addr);
1540		for (i = 0; i < PF_SKIP_COUNT; ++i)
1541			if (rule->skip[i].ptr == NULL)
1542				pr->rule.skip[i].nr = (u_int32_t)-1;
1543			else
1544				pr->rule.skip[i].nr =
1545				    rule->skip[i].ptr->nr;
1546
1547		if (pr->action == PF_GET_CLR_CNTR) {
1548			rule->evaluations = 0;
1549			rule->packets[0] = rule->packets[1] = 0;
1550			rule->bytes[0] = rule->bytes[1] = 0;
1551			rule->states_tot = 0;
1552		}
1553		PF_UNLOCK();
1554		NET_UNLOCK();
1555		break;
1556	}
1557
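DIOCGETRULES and DIOCGETRULE follow the same ticket protocol for the active ruleset, and the pre-check at lines 1133-1136 refuses PF_GET_CLR_CNTR (get and clear counters) on a read-only descriptor. Below is an illustrative sketch of walking the main anchor's rules; print_rule_labels and the dev descriptor are hypothetical, and the snippet assumes the same headers and open("/dev/pf") call as the queue example above. Leaving pr.action zeroed means counters are not cleared, so the call also works on a read-only fd.

/* Illustrative only: walk the active rules of the main anchor. */
int
print_rule_labels(int dev)
{
	struct pfioc_rule	 pr;
	u_int32_t		 i, nr, ticket;

	memset(&pr, 0, sizeof(pr));	/* empty anchor == main ruleset */
	if (ioctl(dev, DIOCGETRULES, &pr) == -1)
		return (-1);
	nr = pr.nr;
	ticket = pr.ticket;

	for (i = 0; i < nr; i++) {
		pr.ticket = ticket;
		pr.nr = i;
		if (ioctl(dev, DIOCGETRULE, &pr) == -1)
			return (-1);	/* EBUSY once the ruleset changes */
		printf("rule %u: label \"%s\"\n", i, pr.rule.label);
	}
	return (0);
}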
1558	case DIOCCHANGERULE: {
1559		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
1560		struct pf_ruleset	*ruleset;
1561		struct pf_rule		*oldrule = NULL, *newrule = NULL;
1562		u_int32_t		 nr = 0;
1563
1564		if (pcr->action < PF_CHANGE_ADD_HEAD ||
1565		    pcr->action > PF_CHANGE_GET_TICKET) {
1566			error = EINVAL;
1567			break;
1568		}
1569
1570		if (pcr->action == PF_CHANGE_GET_TICKET) {
1571			NET_LOCK();
1572			PF_LOCK();
1573
1574			ruleset = pf_find_ruleset(pcr->anchor);
1575			if (ruleset == NULL)
1576				error = EINVAL;
1577			else
1578				pcr->ticket = ++ruleset->rules.active.ticket;
1579
1580			PF_UNLOCK();
1581			NET_UNLOCK();
1582			break;
1583		}
1584
1585		if (pcr->action != PF_CHANGE_REMOVE) {
1586			newrule = pool_get(&pf_rule_pl,
1587			    PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1588			if (newrule == NULL) {
1589				error = ENOMEM;
1590				break;
1591			}
1592
1593			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1594				error = EINVAL;
1595				pool_put(&pf_rule_pl, newrule);
1596				break;
1597			}
1598			error = pf_rule_copyin(&pcr->rule, newrule);
1599			if (error != 0) {
1600				pf_rule_free(newrule);
1601				newrule = NULL;
1602				break;
1603			}
1604
1605			switch (newrule->af) {
1606			case 0:
1607				break;
1608			case AF_INET:
1609				break;
1610	#ifdef INET6
1611			case AF_INET6:
1612				break;
1613	#endif /* INET6 */
1614			default:
1615				error = EAFNOSUPPORT;
1616				pf_rule_free(newrule);
1617				newrule = NULL;
1618				goto fail;
1619			}
1620
1621			if (newrule->rt && !newrule->direction) {
1622				pf_rule_free(newrule);
1623				error = EINVAL;
1624				newrule = NULL;
1625				break;
1626			}
1627		}
1628
1629		NET_LOCK();
1630		PF_LOCK();
1631		ruleset = pf_find_ruleset(pcr->anchor);
1632		if (ruleset == NULL) {
1633			error = EINVAL;
1634			PF_UNLOCK();
1635			NET_UNLOCK();
1636			pf_rule_free(newrule);
1637			break;
1638		}
1639
1640		if (pcr->ticket != ruleset->rules.active.ticket) {
1641			error = EINVAL;
1642			PF_UNLOCK();
1643			NET_UNLOCK();
1644			pf_rule_free(newrule);
1645			break;
1646		}
1647
1648		if (pcr->action != PF_CHANGE_REMOVE) {
1649			KASSERT(newrule != NULL);
1650			newrule->cuid = p->p_ucred->cr_ruid;
1651			newrule->cpid = p->p_p->ps_pid;
1652
1653			newrule->kif = pf_kif_setup(newrule->kif);
1654			newrule->rcv_kif = pf_kif_setup(newrule->rcv_kif);
1655			newrule->rdr.kif = pf_kif_setup(newrule->rdr.kif);
1656			newrule->nat.kif = pf_kif_setup(newrule->nat.kif);
1657			newrule->route.kif = pf_kif_setup(newrule->route.kif);
1658
1659			if (newrule->overload_tblname[0]) {
1660				newrule->overload_tbl = pfr_attach_table(
1661				    ruleset, newrule->overload_tblname, 0);
1662				if (newrule->overload_tbl == NULL)
1663					error = EINVAL;
1664				else
1665					newrule->overload_tbl->pfrkt_flags |=
1666					    PFR_TFLAG_ACTIVE;
1667			}
1668
1669			if (pf_addr_setup(ruleset, &newrule->src.addr,
1670			    newrule->af))
1671				error = EINVAL;
1672			if (pf_addr_setup(ruleset, &newrule->dst.addr,
1673			    newrule->af))
1674				error = EINVAL;
1675			if (pf_addr_setup(ruleset, &newrule->rdr.addr,
1676			    newrule->af))
1677				error = EINVAL;
1678			if (pf_addr_setup(ruleset, &newrule->nat.addr,
1679			    newrule->af))
1680				error = EINVAL;
1681			if (pf_addr_setup(ruleset, &newrule->route.addr,
1682			    newrule->af))
1683				error = EINVAL;
1684			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1685				error = EINVAL;
1686
1687			if (error) {
1688				pf_rm_rule(NULL, newrule);
1689				PF_UNLOCK();
1690				NET_UNLOCK();
1691				break;
1692			}
1693		}
1694
1695		if (pcr->action == PF_CHANGE_ADD_HEAD)
1696			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1697		else if (pcr->action == PF_CHANGE_ADD_TAIL)
1698			oldrule = TAILQ_LAST(ruleset->rules.active.ptr,
1699			    pf_rulequeue);
1700		else {
1701			oldrule = TAILQ_FIRST(ruleset->rules.active.ptr);
1702			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1703				oldrule = TAILQ_NEXT(oldrule, entries);
1704			if (oldrule == NULL) {
1705				if (newrule != NULL)
1706					pf_rm_rule(NULL, newrule);
1707				error = EINVAL;
1708				PF_UNLOCK();
1709				NET_UNLOCK();
1710				break;
1711			}
1712		}
1713
1714		if (pcr->action == PF_CHANGE_REMOVE) {
1715			pf_rm_rule(ruleset->rules.active.ptr, oldrule);
1716			ruleset->rules.active.rcount--;
1717		} else {
1718			if (oldrule == NULL)
1719				TAILQ_INSERT_TAIL(
1720				    ruleset->rules.active.ptr,
1721				    newrule, entries);
1722			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1723			    pcr->action == PF_CHANGE_ADD_BEFORE)
1724				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1725			else
1726				TAILQ_INSERT_AFTER(
1727				    ruleset->rules.active.ptr,
1728				    oldrule, newrule, entries);
1729			ruleset->rules.active.rcount++;
1730			newrule->ruleset = ruleset;
1731		}
1732
1733		nr = 0;
1734		TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries)
1735			oldrule->nr = nr++;
1736
1737		ruleset->rules.active.ticket++;
1738
1739		pf_calc_skip_steps(ruleset->rules.active.ptr);
1740		pf_remove_if_empty_ruleset(ruleset);
1741
1742		PF_UNLOCK();
1743		NET_UNLOCK();
1744		break;
1745	}
1746
1747	case DIOCCLRSTATES:
1748		error = pf_states_clr((struct pfioc_state_kill *)addr);
1749		break;
1750
1751	case DIOCKILLSTATES: {
1752		struct pf_state		*s, *nexts;
1753		struct pf_state_item	*si, *sit;
1754		struct pf_state_key	*sk, key;
1755		struct pf_addr		*srcaddr, *dstaddr;
1756		u_int16_t		 srcport, dstport;
1757		struct pfioc_state_kill	*psk = (struct pfioc_state_kill *)addr;
1758		u_int			 i, killed = 0;
1759		const int		 dirs[] = { PF_IN, PF_OUT };
1760		int			 sidx, didx;
1761
1762		if (psk->psk_pfcmp.id) {
1763			if (psk->psk_pfcmp.creatorid == 0)
1764				psk->psk_pfcmp.creatorid = pf_status.hostid;
1765			NET_LOCK();
1766			PF_LOCK();
1767			PF_STATE_ENTER_WRITE();
1768			if ((s = pf_find_state_byid(&psk->psk_pfcmp))) {
1769				pf_remove_state(s);
1770				psk->psk_killed = 1;
1771			}
1772			PF_STATE_EXIT_WRITE();
1773			PF_UNLOCK();
1774			NET_UNLOCK();
1775			break;
1776		}
1777
1778		if (psk->psk_af && psk->psk_proto &&
1779		    psk->psk_src.port_op == PF_OP_EQ &&
1780		    psk->psk_dst.port_op == PF_OP_EQ) {
1781
1782			key.af = psk->psk_af;
1783			key.proto = psk->psk_proto;
1784			key.rdomain = psk->psk_rdomain;
1785
1786			NET_LOCK();
1787			PF_LOCK();
1788			PF_STATE_ENTER_WRITE();
1789			for (i = 0; i < nitems(dirs); i++) {
1790				if (dirs[i] == PF_IN) {
1791					sidx = 0;
1792					didx = 1;
1793				} else {
1794					sidx = 1;
1795					didx = 0;
1796				}
1797				pf_addrcpy(&key.addr[sidx],
1798				    &psk->psk_src.addr.v.a.addr, key.af);
1799				pf_addrcpy(&key.addr[didx],
1800				    &psk->psk_dst.addr.v.a.addr, key.af);
1801				key.port[sidx] = psk->psk_src.port[0];
1802				key.port[didx] = psk->psk_dst.port[0];
1803
1804				sk = RB_FIND(pf_state_tree, &pf_statetbl, &key);
1805				if (sk == NULL)
1806					continue;
1807
1808				TAILQ_FOREACH_SAFE(si, &sk->states, entry, sit)
1809					if (((si->s->key[PF_SK_WIRE]->af ==
1810					    si->s->key[PF_SK_STACK]->af &&
1811					    sk == (dirs[i] == PF_IN ?
1812					    si->s->key[PF_SK_WIRE] :
1813					    si->s->key[PF_SK_STACK])) ||
1814					    (si->s->key[PF_SK_WIRE]->af !=
1815					    si->s->key[PF_SK_STACK]->af &&
1816					    dirs[i] == PF_IN &&
1817					    (sk == si->s->key[PF_SK_STACK] ||
1818					    sk == si->s->key[PF_SK_WIRE]))) &&
1819					    (!psk->psk_ifname[0] ||
1820					    (si->s->kif != pfi_all &&
1821					    !strcmp(psk->psk_ifname,
1822					    si->s->kif->pfik_name)))) {
1823						pf_remove_state(si->s);
1824						killed++;
1825					}
1826			}
1827			if (killed)
1828				psk->psk_killed = killed;
1829			PF_STATE_EXIT_WRITE();
1830			PF_UNLOCK();
1831			NET_UNLOCK();
1832			break;
1833		}
1834
1835		NET_LOCK();
1836		PF_LOCK();
1837		PF_STATE_ENTER_WRITE();
1838		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
1839		    s = nexts) {
1840			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
1841
1842			if (s->direction == PF_OUT) {
1843				sk = s->key[PF_SK_STACK];
1844				srcaddr = &sk->addr[1];
1845				dstaddr = &sk->addr[0];
1846				srcport = sk->port[1];
1847				dstport = sk->port[0];
1848			} else {
1849				sk = s->key[PF_SK_WIRE];
1850				srcaddr = &sk->addr[0];
1851				dstaddr = &sk->addr[1];
1852				srcport = sk->port[0];
1853				dstport = sk->port[1];
1854			}
1855			if ((!psk->psk_af || sk->af == psk->psk_af)
1856			    && (!psk->psk_proto || psk->psk_proto ==
1857			    sk->proto) && psk->psk_rdomain == sk->rdomain &&
1858			    pf_match_addr(psk->psk_src.neg,
1859			    &psk->psk_src.addr.v.a.addr,
1860			    &psk->psk_src.addr.v.a.mask,
1861			    srcaddr, sk->af) &&
1862			    pf_match_addr(psk->psk_dst.neg,
1863			    &psk->psk_dst.addr.v.a.addr,
1864			    &psk->psk_dst.addr.v.a.mask,
1865			    dstaddr, sk->af) &&
1866			    (psk->psk_src.port_op == 0 ||
1867			    pf_match_port(psk->psk_src.port_op,
1868			    psk->psk_src.port[0], psk->psk_src.port[1],
1869			    srcport)) &&
1870			    (psk->psk_dst.port_op == 0 ||
1871			    pf_match_port(psk->psk_dst.port_op,
1872			    psk->psk_dst.port[0], psk->psk_dst.port[1],
1873			    dstport)) &&
1874			    (!psk->psk_label[0] || (s->rule.ptr->label[0] &&
1875			    !strcmp(psk->psk_label, s->rule.ptr->label))) &&
1876			    (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1877			    s->kif->pfik_name))) {
1878				pf_remove_state(s);
1879				killed++;
1880			}
1881		}
1882		psk->psk_killed = killed;
1883		PF_STATE_EXIT_WRITE();
1884		PF_UNLOCK();
1885		NET_UNLOCK();
1886		break;
1887	}
1888
1889	#if NPFSYNC > 0
1890	case DIOCADDSTATE: {
1891		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1892		struct pfsync_state	*sp = &ps->state;
1893
1894		if (sp->timeout >= PFTM_MAX) {
1895			error = EINVAL;
1896			break;
1897		}
1898		NET_LOCK();
1899		PF_LOCK();
1900		error = pfsync_state_import(sp, PFSYNC_SI_IOCTL);
1901		PF_UNLOCK();
1902		NET_UNLOCK();
1903		break;
1904	}
1905	#endif /* NPFSYNC > 0 */
1906
1907	case DIOCGETSTATE: {
1908		struct pfioc_state	*ps = (struct pfioc_state *)addr;
1909		struct pf_state		*s;
1910		struct pf_state_cmp	 id_key;
1911
1912		memset(&id_key, 0, sizeof(id_key));
1913		id_key.id = ps->state.id;
1914		id_key.creatorid = ps->state.creatorid;
1915
1916		NET_LOCK();
1917		PF_STATE_ENTER_READ();
1918		s = pf_find_state_byid(&id_key);
1919		s = pf_state_ref(s);
1920		PF_STATE_EXIT_READ();
1921		NET_UNLOCK();
1922		if (s == NULL) {
1923			error = ENOENT;
1924			break;
1925		}
1926
1927		pf_state_export(&ps->state, s);
1928		pf_state_unref(s);
1929		break;
1930	}
1931
1932	case DIOCGETSTATES:
1933		error = pf_states_get((struct pfioc_states *)addr);
1934		break;
1935
1936	case DIOCGETSTATUS: {
1937		struct pf_status *s = (struct pf_status *)addr;
1938		NET_LOCK();
1939		PF_LOCK();
1940		memcpy(s, &pf_status, sizeof(struct pf_status));
1941		pfi_update_status(s->ifname, s);
1942		PF_UNLOCK();
1943		NET_UNLOCK();
1944		break;
1945	}
1946
1947	case DIOCSETSTATUSIF: {
1948		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;
1949
1950		NET_LOCK();
1951		PF_LOCK();
1952		if (pi->pfiio_name[0] == 0) {
1953			memset(pf_status.ifname, 0, IFNAMSIZ);
1954			PF_UNLOCK();
1955			NET_UNLOCK();
1956			break;
1957		}
1958		strlcpy(pf_trans_set.statusif, pi->pfiio_name, IFNAMSIZ);
1959		pf_trans_set.mask |= PF_TSET_STATUSIF;
1960		PF_UNLOCK();
1961		NET_UNLOCK();
1962		break;
1963	}
1964
1965	case DIOCCLRSTATUS: {
1966		struct pfioc_iface	*pi = (struct pfioc_iface *)addr;
1967
1968		NET_LOCK();
1969		PF_LOCK();
1970		/* if ifname is specified, clear counters there only */
1971		if (pi->pfiio_name[0]) {
1972			pfi_update_status(pi->pfiio_name, NULL);
1973			PF_UNLOCK();
1974			NET_UNLOCK();
1975			break;
1976		}
1977
1978		memset(pf_status.counters, 0, sizeof(pf_status.counters));
1979		memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters));
1980		memset(pf_status.scounters, 0, sizeof(pf_status.scounters));
1981		pf_status.since = getuptime();
1982
1983		PF_UNLOCK();
1984		NET_UNLOCK();
1985		break;
1986	}
1987
1988	case DIOCNATLOOK: {
1989		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
1990		struct pf_state_key	*sk;
1991		struct pf_state		*state;
1992		struct pf_state_key_cmp	 key;
1993		int			 m = 0, direction = pnl->direction;
1994		int			 sidx, didx;
1995
1996		switch (pnl->af) {
1997		case AF_INET:
1998			break;
1999	#ifdef INET6
2000		case AF_INET6:
2001			break;
2002	#endif /* INET6 */
2003		default:
2004			error = EAFNOSUPPORT;
2005			goto fail;
2006		}
2007
2008		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
2009		sidx = (direction == PF_IN) ? 1 : 0;
2010		didx = (direction == PF_IN) ? 0 : 1;
2011
2012		if (!pnl->proto ||
2013		    PF_AZERO(&pnl->saddr, pnl->af) ||
2014		    PF_AZERO(&pnl->daddr, pnl->af) ||
2015		    ((pnl->proto == IPPROTO_TCP ||
2016		    pnl->proto == IPPROTO_UDP) &&
2017		    (!pnl->dport || !pnl->sport)) ||
2018		    pnl->rdomain > RT_TABLEID_MAX)
2019			error = EINVAL;
2020		else {
2021			key.af = pnl->af;
2022			key.proto = pnl->proto;
2023			key.rdomain = pnl->rdomain;
2024			pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
2025			key.port[sidx] = pnl->sport;
2026			pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
2027			key.port[didx] = pnl->dport;
2028
2029			NET_LOCK();
2030			PF_STATE_ENTER_READ();
2031			state = pf_find_state_all(&key, direction, &m);
2032			state = pf_state_ref(state);
2033			PF_STATE_EXIT_READ();
2034			NET_UNLOCK();
2035
2036			if (m > 1)
2037				error = E2BIG;	/* more than one state */
2038			else if (state != NULL) {
2039				sk = state->key[sidx];
2040				pf_addrcpy(&pnl->rsaddr, &sk->addr[sidx],
2041				    sk->af);
2042				pnl->rsport = sk->port[sidx];
2043				pf_addrcpy(&pnl->rdaddr, &sk->addr[didx],
2044				    sk->af);
2045				pnl->rdport = sk->port[didx];
2046				pnl->rrdomain = sk->rdomain;
2047			} else
2048				error = ENOENT;
2049			pf_state_unref(state);
2050		}
2051		break;
2052	}
2053
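DIOCNATLOOK is the lookup that transparent proxies use to recover the pre-rdr destination of an intercepted connection: the caller fills in af, proto, direction and the addresses and ports as seen on its side, and the kernel copies back the other state key. The sketch below is illustrative only; natlook_v4 and its client/local/orig_dst parameters are hypothetical, it assumes the same headers as the earlier examples plus <netinet/in.h>, and the PF_OUT direction mirrors what existing rdr-to proxies such as ftp-proxy pass, so verify it against a known user before relying on it.

/* Illustrative only: ask pf for the original (pre-rdr) destination of a
 * redirected IPv4 TCP connection; dev is an open /dev/pf descriptor,
 * client/local are the accepted socket's peer and local addresses. */
int
natlook_v4(int dev, const struct sockaddr_in *client,
    const struct sockaddr_in *local, struct sockaddr_in *orig_dst)
{
	struct pfioc_natlook	 pnl;

	memset(&pnl, 0, sizeof(pnl));
	pnl.direction = PF_OUT;		/* see caveat in the text above */
	pnl.af = AF_INET;
	pnl.proto = IPPROTO_TCP;
	pnl.saddr.v4 = client->sin_addr;
	pnl.sport = client->sin_port;
	pnl.daddr.v4 = local->sin_addr;
	pnl.dport = local->sin_port;

	if (ioctl(dev, DIOCNATLOOK, &pnl) == -1)
		return (-1);		/* ENOENT if no matching state */

	memset(orig_dst, 0, sizeof(*orig_dst));
	orig_dst->sin_family = AF_INET;
	orig_dst->sin_len = sizeof(*orig_dst);
	orig_dst->sin_addr = pnl.rdaddr.v4;
	orig_dst->sin_port = pnl.rdport;
	return (0);
}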
2054 case DIOCSETTIMEOUT(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_tm) & 0x1fff) << 16) | ((('D')) <<
8) | ((29)))
: {
2055 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2056
2057 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
2058 pt->seconds < 0) {
2059 error = EINVAL22;
2060 goto fail;
2061 }
2062 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2063 PF_LOCK()do { do { int _s = rw_status(&netlock); if ((splassert_ctl
> 0) && (_s != 0x0001UL && _s != 0x0002UL
)) splassert_fail(0x0002UL, _s, __func__); } while (0); rw_enter_write
(&pf_lock); } while (0)
;
2064 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
2065 pt->seconds = 1;
2066 pf_default_rule_new.timeout[pt->timeout] = pt->seconds;
2067 pt->seconds = pf_default_rule.timeout[pt->timeout];
2068 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2069 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2070 break;
2071 }
2072
2073 case DIOCGETTIMEOUT(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_tm) & 0x1fff) << 16) | ((('D')) <<
8) | ((30)))
: {
2074 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2075
2076 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
2077 error = EINVAL22;
2078 goto fail;
2079 }
2080 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2081 PF_LOCK()do { do { int _s = rw_status(&netlock); if ((splassert_ctl
> 0) && (_s != 0x0001UL && _s != 0x0002UL
)) splassert_fail(0x0002UL, _s, __func__); } while (0); rw_enter_write
(&pf_lock); } while (0)
;
2082 pt->seconds = pf_default_rule.timeout[pt->timeout];
2083 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2084 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2085 break;
2086 }
2087
2088 case DIOCGETLIMIT(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_limit) & 0x1fff) << 16) | ((('D')) <<
8) | ((39)))
: {
2089 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2090
2091 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2092 error = EINVAL22;
2093 goto fail;
2094 }
2095 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2096 PF_LOCK()do { do { int _s = rw_status(&netlock); if ((splassert_ctl
> 0) && (_s != 0x0001UL && _s != 0x0002UL
)) splassert_fail(0x0002UL, _s, __func__); } while (0); rw_enter_write
(&pf_lock); } while (0)
;
2097 pl->limit = pf_pool_limits[pl->index].limit;
2098 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2099 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2100 break;
2101 }
2102
2103 case DIOCSETLIMIT(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_limit) & 0x1fff) << 16) | ((('D')) <<
8) | ((40)))
: {
2104 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2105
2106 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2107 PF_LOCK()do { do { int _s = rw_status(&netlock); if ((splassert_ctl
> 0) && (_s != 0x0001UL && _s != 0x0002UL
)) splassert_fail(0x0002UL, _s, __func__); } while (0); rw_enter_write
(&pf_lock); } while (0)
;
2108 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2109 pf_pool_limits[pl->index].pp == NULL((void *)0)) {
2110 error = EINVAL22;
2111 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2112 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2113 goto fail;
2114 }
2115 if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout >
2116 pl->limit) {
2117 error = EBUSY16;
2118 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2119 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2120 goto fail;
2121 }
2122 /* Fragments reference mbuf clusters. */
2123 if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) {
2124 error = EINVAL22;
2125 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2126 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2127 goto fail;
2128 }
2129
2130 pf_pool_limits[pl->index].limit_new = pl->limit;
2131 pl->limit = pf_pool_limits[pl->index].limit;
2132 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2133 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2134 break;
2135 }
2136
2137 case DIOCSETDEBUG(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(u_int32_t) & 0x1fff) << 16) | ((('D')) << 8)
| ((24)))
: {
2138 u_int32_t *level = (u_int32_t *)addr;
2139
2140 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2141 PF_LOCK()do { do { int _s = rw_status(&netlock); if ((splassert_ctl
> 0) && (_s != 0x0001UL && _s != 0x0002UL
)) splassert_fail(0x0002UL, _s, __func__); } while (0); rw_enter_write
(&pf_lock); } while (0)
;
2142 pf_trans_set.debug = *level;
2143 pf_trans_set.mask |= PF_TSET_DEBUG0x02;
2144 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2145 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2146 break;
2147 }
2148
2149 case DIOCGETRULESETS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_ruleset) & 0x1fff) << 16) | ((('D')) <<
8) | ((58)))
: {
2150 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2151 struct pf_ruleset *ruleset;
2152 struct pf_anchor *anchor;
2153
2154 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2155 PF_LOCK()do { do { int _s = rw_status(&netlock); if ((splassert_ctl
> 0) && (_s != 0x0001UL && _s != 0x0002UL
)) splassert_fail(0x0002UL, _s, __func__); } while (0); rw_enter_write
(&pf_lock); } while (0)
;
2156 pr->path[sizeof(pr->path) - 1] = '\0';
2157 if ((ruleset = pf_find_ruleset(pr->path)) == NULL((void *)0)) {
2158 error = EINVAL22;
2159 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2160 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2161 break;
2162 }
2163 pr->nr = 0;
2164 if (ruleset == &pf_main_rulesetpf_main_anchor.ruleset) {
2165 /* XXX kludge for pf_main_ruleset */
2166 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)for ((anchor) = pf_anchor_global_RB_MINMAX(&pf_anchors, -
1); (anchor) != ((void *)0); (anchor) = pf_anchor_global_RB_NEXT
(anchor))
2167 if (anchor->parent == NULL((void *)0))
2168 pr->nr++;
2169 } else {
2170 RB_FOREACH(anchor, pf_anchor_node,for ((anchor) = pf_anchor_node_RB_MINMAX(&ruleset->anchor
->children, -1); (anchor) != ((void *)0); (anchor) = pf_anchor_node_RB_NEXT
(anchor))
2171 &ruleset->anchor->children)for ((anchor) = pf_anchor_node_RB_MINMAX(&ruleset->anchor
->children, -1); (anchor) != ((void *)0); (anchor) = pf_anchor_node_RB_NEXT
(anchor))
2172 pr->nr++;
2173 }
2174 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2175 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2176 break;
2177 }
2178
2179 case DIOCGETRULESET(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_ruleset) & 0x1fff) << 16) | ((('D')) <<
8) | ((59)))
: {
2180 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2181 struct pf_ruleset *ruleset;
2182 struct pf_anchor *anchor;
2183 u_int32_t nr = 0;
2184
2185 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2186 PF_LOCK()do { do { int _s = rw_status(&netlock); if ((splassert_ctl
> 0) && (_s != 0x0001UL && _s != 0x0002UL
)) splassert_fail(0x0002UL, _s, __func__); } while (0); rw_enter_write
(&pf_lock); } while (0)
;
2187 pr->path[sizeof(pr->path) - 1] = '\0';
2188 if ((ruleset = pf_find_ruleset(pr->path)) == NULL((void *)0)) {
2189 error = EINVAL22;
2190 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2191 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2192 break;
2193 }
2194 pr->name[0] = '\0';
2195 if (ruleset == &pf_main_rulesetpf_main_anchor.ruleset) {
2196 /* XXX kludge for pf_main_ruleset */
2197 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)for ((anchor) = pf_anchor_global_RB_MINMAX(&pf_anchors, -
1); (anchor) != ((void *)0); (anchor) = pf_anchor_global_RB_NEXT
(anchor))
2198 if (anchor->parent == NULL((void *)0) && nr++ == pr->nr) {
2199 strlcpy(pr->name, anchor->name,
2200 sizeof(pr->name));
2201 break;
2202 }
2203 } else {
2204 RB_FOREACH(anchor, pf_anchor_node,for ((anchor) = pf_anchor_node_RB_MINMAX(&ruleset->anchor
->children, -1); (anchor) != ((void *)0); (anchor) = pf_anchor_node_RB_NEXT
(anchor))
2205 &ruleset->anchor->children)for ((anchor) = pf_anchor_node_RB_MINMAX(&ruleset->anchor
->children, -1); (anchor) != ((void *)0); (anchor) = pf_anchor_node_RB_NEXT
(anchor))
2206 if (nr++ == pr->nr) {
2207 strlcpy(pr->name, anchor->name,
2208 sizeof(pr->name));
2209 break;
2210 }
2211 }
2212 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2213 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2214 if (!pr->name[0])
2215 error = EBUSY16;
2216 break;
2217 }
2218
2219 case DIOCRCLRTABLES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((60)))
: {
2220 struct pfioc_table *io = (struct pfioc_table *)addr;
2221
2222 if (io->pfrio_esize != 0) {
2223 error = ENODEV19;
2224 break;
2225 }
2226 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2227 PF_LOCK()do { do { int _s = rw_status(&netlock); if ((splassert_ctl
> 0) && (_s != 0x0001UL && _s != 0x0002UL
)) splassert_fail(0x0002UL, _s, __func__); } while (0); rw_enter_write
(&pf_lock); } while (0)
;
2228 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2229 io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2230 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2231 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2232 break;
2233 }
2234
2235 case DIOCRADDTABLES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((61)))
: {
2236 struct pfioc_table *io = (struct pfioc_table *)addr;
2237
2238 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2239 error = ENODEV19;
2240 break;
2241 }
2242 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2243 PF_LOCK()do { do { int _s = rw_status(&netlock); if ((splassert_ctl
> 0) && (_s != 0x0001UL && _s != 0x0002UL
)) splassert_fail(0x0002UL, _s, __func__); } while (0); rw_enter_write
(&pf_lock); } while (0)
;
2244 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2245 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2246 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2247 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2248 break;
2249 }
2250
2251 case DIOCRDELTABLES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_table) & 0x1fff) << 16) | ((('D')) <<
8) | ((62)))
: {
2252 struct pfioc_table *io = (struct pfioc_table *)addr;
2253
2254 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2255 error = ENODEV19;
2256 break;
2257 }
2258 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
2259 PF_LOCK()do { do { int _s = rw_status(&netlock); if ((splassert_ctl
> 0) && (_s != 0x0001UL && _s != 0x0002UL
)) splassert_fail(0x0002UL, _s, __func__); } while (0); rw_enter_write
(&pf_lock); } while (0)
;
2260 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2261 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL0x10000000);
2262 PF_UNLOCK()do { do { if (rw_status(&pf_lock) != 0x0001UL) splassert_fail
(0x0001UL, rw_status(&pf_lock),__func__); } while (0); rw_exit_write
(&pf_lock); } while (0)
;
2263 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
2264 break;
2265 }
2266
2267 case DIOCRGETTABLES: {
2268 struct pfioc_table *io = (struct pfioc_table *)addr;
2269
2270 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2271 error = ENODEV;
2272 break;
2273 }
2274 NET_LOCK();
2275 PF_LOCK();
2276 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2277 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2278 PF_UNLOCK();
2279 NET_UNLOCK();
2280 break;
2281 }
2282
2283 case DIOCRGETTSTATS: {
2284 struct pfioc_table *io = (struct pfioc_table *)addr;
2285
2286 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2287 error = ENODEV;
2288 break;
2289 }
2290 NET_LOCK();
2291 PF_LOCK();
2292 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2293 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2294 PF_UNLOCK();
2295 NET_UNLOCK();
2296 break;
2297 }
2298
2299 case DIOCRCLRTSTATS: {
2300 struct pfioc_table *io = (struct pfioc_table *)addr;
2301
2302 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2303 error = ENODEV;
2304 break;
2305 }
2306 NET_LOCK();
2307 PF_LOCK();
2308 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2309 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2310 PF_UNLOCK();
2311 NET_UNLOCK();
2312 break;
2313 }
2314
2315 case DIOCRSETTFLAGS: {
2316 struct pfioc_table *io = (struct pfioc_table *)addr;
2317
2318 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2319 error = ENODEV;
2320 break;
2321 }
2322 NET_LOCK();
2323 PF_LOCK();
2324 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2325 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2326 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2327 PF_UNLOCK();
2328 NET_UNLOCK();
2329 break;
2330 }
2331
2332 case DIOCRCLRADDRS: {
2333 struct pfioc_table *io = (struct pfioc_table *)addr;
2334
2335 if (io->pfrio_esize != 0) {
2336 error = ENODEV;
2337 break;
2338 }
2339 NET_LOCK();
2340 PF_LOCK();
2341 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2342 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2343 PF_UNLOCK();
2344 NET_UNLOCK();
2345 break;
2346 }
2347
2348 case DIOCRADDADDRS: {
2349 struct pfioc_table *io = (struct pfioc_table *)addr;
2350
2351 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2352 error = ENODEV;
2353 break;
2354 }
2355 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2356 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2357 PFR_FLAG_USERIOCTL);
2358 break;
2359 }
2360
2361 case DIOCRDELADDRS: {
2362 struct pfioc_table *io = (struct pfioc_table *)addr;
2363
2364 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2365 error = ENODEV;
2366 break;
2367 }
2368 NET_LOCK();
2369 PF_LOCK();
2370 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2371 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2372 PFR_FLAG_USERIOCTL);
2373 PF_UNLOCK();
2374 NET_UNLOCK();
2375 break;
2376 }
2377
2378 case DIOCRSETADDRS: {
2379 struct pfioc_table *io = (struct pfioc_table *)addr;
2380
2381 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2382 error = ENODEV;
2383 break;
2384 }
2385 NET_LOCK();
2386 PF_LOCK();
2387 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2388 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2389 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2390 PFR_FLAG_USERIOCTL, 0);
2391 PF_UNLOCK();
2392 NET_UNLOCK();
2393 break;
2394 }
2395
2396 case DIOCRGETADDRS: {
2397 struct pfioc_table *io = (struct pfioc_table *)addr;
2398
2399 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2400 error = ENODEV;
2401 break;
2402 }
2403 NET_LOCK();
2404 PF_LOCK();
2405 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2406 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2407 PF_UNLOCK();
2408 NET_UNLOCK();
2409 break;
2410 }
2411
2412 case DIOCRGETASTATS: {
2413 struct pfioc_table *io = (struct pfioc_table *)addr;
2414
2415 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2416 error = ENODEV;
2417 break;
2418 }
2419 NET_LOCK();
2420 PF_LOCK();
2421 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2422 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2423 PF_UNLOCK();
2424 NET_UNLOCK();
2425 break;
2426 }
2427
2428 case DIOCRCLRASTATS: {
2429 struct pfioc_table *io = (struct pfioc_table *)addr;
2430
2431 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2432 error = ENODEV;
2433 break;
2434 }
2435 NET_LOCK();
2436 PF_LOCK();
2437 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2438 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2439 PFR_FLAG_USERIOCTL);
2440 PF_UNLOCK();
2441 NET_UNLOCK();
2442 break;
2443 }
2444
2445 case DIOCRTSTADDRS: {
2446 struct pfioc_table *io = (struct pfioc_table *)addr;
2447
2448 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2449 error = ENODEV;
2450 break;
2451 }
2452 NET_LOCK();
2453 PF_LOCK();
2454 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2455 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2456 PFR_FLAG_USERIOCTL);
2457 PF_UNLOCK();
2458 NET_UNLOCK();
2459 break;
2460 }
2461
2462 case DIOCRINADEFINE: {
2463 struct pfioc_table *io = (struct pfioc_table *)addr;
2464
2465 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2466 error = ENODEV;
2467 break;
2468 }
2469 NET_LOCK();
2470 PF_LOCK();
2471 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2472 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2473 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2474 PF_UNLOCK();
2475 NET_UNLOCK();
2476 break;
2477 }
2478
2479 case DIOCOSFPADD: {
2480 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2481 error = pf_osfp_add(io);
2482 break;
2483 }
2484
2485 case DIOCOSFPGET: {
2486 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2487 error = pf_osfp_get(io);
2488 break;
2489 }
2490
2491 case DIOCXBEGIN: {
2492 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2493 struct pfioc_trans_e *ioe;
2494 struct pfr_table *table;
2495 int i;
2496
2497 if (io->esize != sizeof(*ioe)) {
2498 error = ENODEV;
2499 goto fail;
2500 }
2501 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2502 table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2503 NET_LOCK();
2504 PF_LOCK();
2505 pf_default_rule_new = pf_default_rule;
2506 memset(&pf_trans_set, 0, sizeof(pf_trans_set));
2507 for (i = 0; i < io->size; i++) {
2508 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2509 PF_UNLOCK();
2510 NET_UNLOCK();
2511 free(table, M_TEMP, sizeof(*table));
2512 free(ioe, M_TEMP, sizeof(*ioe));
2513 error = EFAULT;
2514 goto fail;
2515 }
2516 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2517 sizeof(ioe->anchor)) {
2518 PF_UNLOCK();
2519 NET_UNLOCK();
2520 free(table, M_TEMP, sizeof(*table));
2521 free(ioe, M_TEMP, sizeof(*ioe));
2522 error = ENAMETOOLONG;
2523 goto fail;
2524 }
2525 switch (ioe->type) {
2526 case PF_TRANS_TABLE:
2527 memset(table, 0, sizeof(*table));
2528 strlcpy(table->pfrt_anchor, ioe->anchor,
2529 sizeof(table->pfrt_anchor));
2530 if ((error = pfr_ina_begin(table,
2531 &ioe->ticket, NULL, 0))) {
2532 PF_UNLOCK();
2533 NET_UNLOCK();
2534 free(table, M_TEMP, sizeof(*table));
2535 free(ioe, M_TEMP, sizeof(*ioe));
2536 goto fail;
2537 }
2538 break;
2539 case PF_TRANS_RULESET:
2540 if ((error = pf_begin_rules(&ioe->ticket,
2541 ioe->anchor))) {
2542 PF_UNLOCK();
2543 NET_UNLOCK();
2544 free(table, M_TEMP, sizeof(*table));
2545 free(ioe, M_TEMP, sizeof(*ioe));
2546 goto fail;
2547 }
2548 break;
2549 default:
2550 PF_UNLOCK();
2551 NET_UNLOCK();
2552 free(table, M_TEMP, sizeof(*table));
2553 free(ioe, M_TEMP, sizeof(*ioe));
2554 error = EINVAL;
2555 goto fail;
2556 }
2557 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) {
2558 PF_UNLOCK();
2559 NET_UNLOCK();
2560 free(table, M_TEMP, sizeof(*table));
2561 free(ioe, M_TEMP, sizeof(*ioe));
2562 error = EFAULT;
2563 goto fail;
2564 }
2565 }
2566 PF_UNLOCK();
2567 NET_UNLOCK();
2568 free(table, M_TEMP, sizeof(*table));
2569 free(ioe, M_TEMP, sizeof(*ioe));
2570 break;
2571 }
2572
2573 case DIOCXROLLBACK: {
2574 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2575 struct pfioc_trans_e *ioe;
2576 struct pfr_table *table;
2577 int i;
2578
2579 if (io->esize != sizeof(*ioe)) {
2580 error = ENODEV;
2581 goto fail;
2582 }
2583 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2584 table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2585 NET_LOCK();
2586 PF_LOCK();
2587 for (i = 0; i < io->size; i++) {
2588 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2589 PF_UNLOCK();
2590 NET_UNLOCK();
2591 free(table, M_TEMP, sizeof(*table));
2592 free(ioe, M_TEMP, sizeof(*ioe));
2593 error = EFAULT;
2594 goto fail;
2595 }
2596 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2597 sizeof(ioe->anchor)) {
2598 PF_UNLOCK();
2599 NET_UNLOCK();
2600 free(table, M_TEMP, sizeof(*table));
2601 free(ioe, M_TEMP, sizeof(*ioe));
2602 error = ENAMETOOLONG;
2603 goto fail;
2604 }
2605 switch (ioe->type) {
2606 case PF_TRANS_TABLE:
2607 memset(table, 0, sizeof(*table));
2608 strlcpy(table->pfrt_anchor, ioe->anchor,
2609 sizeof(table->pfrt_anchor));
2610 if ((error = pfr_ina_rollback(table,
2611 ioe->ticket, NULL, 0))) {
2612 PF_UNLOCK();
2613 NET_UNLOCK();
2614 free(table, M_TEMP, sizeof(*table));
2615 free(ioe, M_TEMP, sizeof(*ioe));
2616 goto fail; /* really bad */
2617 }
2618 break;
2619 case PF_TRANS_RULESET:
2620 if ((error = pf_rollback_rules(ioe->ticket,
2621 ioe->anchor))) {
2622 PF_UNLOCK();
2623 NET_UNLOCK();
2624 free(table, M_TEMP, sizeof(*table));
2625 free(ioe, M_TEMP, sizeof(*ioe));
2626 goto fail; /* really bad */
2627 }
2628 break;
2629 default:
2630 PF_UNLOCK();
2631 NET_UNLOCK();
2632 free(table, M_TEMP, sizeof(*table));
2633 free(ioe, M_TEMP, sizeof(*ioe));
2634 error = EINVAL;
2635 goto fail; /* really bad */
2636 }
2637 }
2638 PF_UNLOCK();
2639 NET_UNLOCK();
2640 free(table, M_TEMP, sizeof(*table));
2641 free(ioe, M_TEMP, sizeof(*ioe));
2642 break;
2643 }
2644
2645 case DIOCXCOMMIT: {
2646 struct pfioc_trans *io = (struct pfioc_trans *)addr;
2647 struct pfioc_trans_e *ioe;
2648 struct pfr_table *table;
2649 struct pf_ruleset *rs;
2650 int i;
2651
2652 if (io->esize != sizeof(*ioe)) {
2653 error = ENODEV;
2654 goto fail;
2655 }
2656 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK);
2657 table = malloc(sizeof(*table), M_TEMP, M_WAITOK);
2658 NET_LOCK();
2659 PF_LOCK();
2660 /* first makes sure everything will succeed */
2661 for (i = 0; i < io->size; i++) {
2662 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2663 PF_UNLOCK();
2664 NET_UNLOCK();
2665 free(table, M_TEMP, sizeof(*table));
2666 free(ioe, M_TEMP, sizeof(*ioe));
2667 error = EFAULT;
2668 goto fail;
2669 }
2670 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2671 sizeof(ioe->anchor)) {
2672 PF_UNLOCK();
2673 NET_UNLOCK();
2674 free(table, M_TEMP, sizeof(*table));
2675 free(ioe, M_TEMP, sizeof(*ioe));
2676 error = ENAMETOOLONG;
2677 goto fail;
2678 }
2679 switch (ioe->type) {
2680 case PF_TRANS_TABLE:
2681 rs = pf_find_ruleset(ioe->anchor);
2682 if (rs == NULL || !rs->topen || ioe->ticket !=
2683 rs->tticket) {
2684 PF_UNLOCK();
2685 NET_UNLOCK();
2686 free(table, M_TEMP, sizeof(*table));
2687 free(ioe, M_TEMP, sizeof(*ioe));
2688 error = EBUSY;
2689 goto fail;
2690 }
2691 break;
2692 case PF_TRANS_RULESET:
2693 rs = pf_find_ruleset(ioe->anchor);
2694 if (rs == NULL ||
2695 !rs->rules.inactive.open ||
2696 rs->rules.inactive.ticket !=
2697 ioe->ticket) {
2698 PF_UNLOCK();
2699 NET_UNLOCK();
2700 free(table, M_TEMP, sizeof(*table));
2701 free(ioe, M_TEMP, sizeof(*ioe));
2702 error = EBUSY;
2703 goto fail;
2704 }
2705 break;
2706 default:
2707 PF_UNLOCK();
2708 NET_UNLOCK();
2709 free(table, M_TEMP, sizeof(*table));
2710 free(ioe, M_TEMP, sizeof(*ioe));
2711 error = EINVAL;
2712 goto fail;
2713 }
2714 }
2715
2716 /*
2717 * Checked already in DIOCSETLIMIT, but check again as the
2718 * situation might have changed.
2719 */
2720 for (i = 0; i < PF_LIMIT_MAX; i++) {
2721 if (((struct pool *)pf_pool_limits[i].pp)->pr_nout >
2722 pf_pool_limits[i].limit_new) {
2723 PF_UNLOCK();
2724 NET_UNLOCK();
2725 free(table, M_TEMP, sizeof(*table));
2726 free(ioe, M_TEMP, sizeof(*ioe));
2727 error = EBUSY;
2728 goto fail;
2729 }
2730 }
2731 /* now do the commit - no errors should happen here */
2732 for (i = 0; i < io->size; i++) {
2733 if (copyin(io->array+i, ioe, sizeof(*ioe))) {
2734 PF_UNLOCK();
2735 NET_UNLOCK();
2736 free(table, M_TEMP, sizeof(*table));
2737 free(ioe, M_TEMP, sizeof(*ioe));
2738 error = EFAULT;
2739 goto fail;
2740 }
2741 if (strnlen(ioe->anchor, sizeof(ioe->anchor)) ==
2742 sizeof(ioe->anchor)) {
2743 PF_UNLOCK();
2744 NET_UNLOCK();
2745 free(table, M_TEMP, sizeof(*table));
2746 free(ioe, M_TEMP, sizeof(*ioe));
2747 error = ENAMETOOLONG;
2748 goto fail;
2749 }
2750 switch (ioe->type) {
2751 case PF_TRANS_TABLE:
2752 memset(table, 0, sizeof(*table));
2753 strlcpy(table->pfrt_anchor, ioe->anchor,
2754 sizeof(table->pfrt_anchor));
2755 if ((error = pfr_ina_commit(table, ioe->ticket,
2756 NULL, NULL, 0))) {
2757 PF_UNLOCK();
2758 NET_UNLOCK();
2759 free(table, M_TEMP, sizeof(*table));
2760 free(ioe, M_TEMP, sizeof(*ioe));
2761 goto fail; /* really bad */
2762 }
2763 break;
2764 case PF_TRANS_RULESET:
2765 if ((error = pf_commit_rules(ioe->ticket,
2766 ioe->anchor))) {
2767 PF_UNLOCK();
2768 NET_UNLOCK();
2769 free(table, M_TEMP, sizeof(*table));
2770 free(ioe, M_TEMP, sizeof(*ioe));
2771 goto fail; /* really bad */
2772 }
2773 break;
2774 default:
2775 PF_UNLOCK();
2776 NET_UNLOCK();
2777 free(table, M_TEMP, sizeof(*table));
2778 free(ioe, M_TEMP, sizeof(*ioe));
2779 error = EINVAL;
2780 goto fail; /* really bad */
2781 }
2782 }
2783 for (i = 0; i < PF_LIMIT_MAX; i++) {
2784 if (pf_pool_limits[i].limit_new !=
2785 pf_pool_limits[i].limit &&
2786 pool_sethardlimit(pf_pool_limits[i].pp,
2787 pf_pool_limits[i].limit_new, NULL, 0) != 0) {
2788 PF_UNLOCK();
2789 NET_UNLOCK();
2790 free(table, M_TEMP, sizeof(*table));
2791 free(ioe, M_TEMP, sizeof(*ioe));
2792 error = EBUSY;
2793 goto fail; /* really bad */
2794 }
2795 pf_pool_limits[i].limit = pf_pool_limits[i].limit_new;
2796 }
2797 for (i = 0; i < PFTM_MAX; i++) {
2798 int old = pf_default_rule.timeout[i];
2799
2800 pf_default_rule.timeout[i] =
2801 pf_default_rule_new.timeout[i];
2802 if (pf_default_rule.timeout[i] == PFTM_INTERVAL &&
2803 pf_default_rule.timeout[i] < old)
2804 task_add(net_tq(0), &pf_purge_task);
2805 }
2806 pfi_xcommit();
2807 pf_trans_set_commit();
2808 PF_UNLOCK();
2809 NET_UNLOCK();
2810 free(table, M_TEMP, sizeof(*table));
2811 free(ioe, M_TEMP, sizeof(*ioe));
2812 break;
2813 }
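Editorial aside, not part of pf_ioctl.c: the three DIOCX* cases above implement pf's begin/commit/rollback transaction, which is how pfctl replaces a ruleset atomically. A hypothetical userland sketch of that flow, assuming `dev` is a descriptor obtained from open("/dev/pf", O_RDWR) and the usual <sys/ioctl.h>, <net/pfvar.h>, <string.h>, and <err.h> includes, might look like this:

/* Hypothetical sketch of the DIOCXBEGIN/DIOCXCOMMIT flow; not from this file. */
struct pfioc_trans_e ioe;
struct pfioc_trans io;

memset(&io, 0, sizeof(io));
memset(&ioe, 0, sizeof(ioe));
ioe.type = PF_TRANS_RULESET;	/* main ruleset: anchor left empty */
io.size = 1;
io.esize = sizeof(ioe);		/* must equal sizeof(*ioe) or the kernel returns ENODEV */
io.array = &ioe;

if (ioctl(dev, DIOCXBEGIN, &io) == -1)	/* kernel fills in ioe.ticket */
	err(1, "DIOCXBEGIN");
/* ... load rules against ioe.ticket here ... */
if (ioctl(dev, DIOCXCOMMIT, &io) == -1)	/* or DIOCXROLLBACK to abandon */
	err(1, "DIOCXCOMMIT");

The commit handler re-validates every ticket before touching anything, which is why a stale or concurrent transaction fails with EBUSY rather than partially applying.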
2814
2815 case DIOCGETSRCNODES: {
2816 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
2817 struct pf_src_node *n, *p, *pstore;
2818 u_int32_t nr = 0;
2819 size_t space = psn->psn_len;
2820
2821 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);
2822
2823 NET_LOCK();
2824 PF_LOCK();
2825 if (space == 0) {
2826 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2827 nr++;
2828 psn->psn_len = sizeof(struct pf_src_node) * nr;
2829 PF_UNLOCK();
2830 NET_UNLOCK();
2831 free(pstore, M_TEMP, sizeof(*pstore));
2832 break;
2833 }
2834
2835 p = psn->psn_src_nodes;
2836 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
2837 int secs = getuptime(), diff;
2838
2839 if ((nr + 1) * sizeof(*p) > psn->psn_len)
2840 break;
2841
2842 memcpy(pstore, n, sizeof(*pstore));
2843 memset(&pstore->entry, 0, sizeof(pstore->entry));
2844 pstore->rule.ptr = NULL;
2845 pstore->kif = NULL;
2846 pstore->rule.nr = n->rule.ptr->nr;
2847 pstore->creation = secs - pstore->creation;
2848 if (pstore->expire > secs)
2849 pstore->expire -= secs;
2850 else
2851 pstore->expire = 0;
2852
2853 /* adjust the connection rate estimate */
2854 diff = secs - n->conn_rate.last;
2855 if (diff >= n->conn_rate.seconds)
2856 pstore->conn_rate.count = 0;
2857 else
2858 pstore->conn_rate.count -=
2859 n->conn_rate.count * diff /
2860 n->conn_rate.seconds;
2861
2862 error = copyout(pstore, p, sizeof(*p));
2863 if (error) {
2864 PF_UNLOCK();
2865 NET_UNLOCK();
2866 free(pstore, M_TEMP, sizeof(*pstore));
2867 goto fail;
2868 }
2869 p++;
2870 nr++;
2871 }
2872 psn->psn_len = sizeof(struct pf_src_node) * nr;
2873
2874 PF_UNLOCK();
2875 NET_UNLOCK();
2876 free(pstore, M_TEMP, sizeof(*pstore));
2877 break;
2878 }
2879
2880 case DIOCCLRSRCNODES: {
2881 struct pf_src_node *n;
2882 struct pf_state *state;
2883
2884 NET_LOCK();
2885 PF_LOCK();
2886 PF_STATE_ENTER_WRITE();
2887 RB_FOREACH(state, pf_state_tree_id, &tree_id)
2888 pf_src_tree_remove_state(state);
2889 PF_STATE_EXIT_WRITE();
2890 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
2891 n->expire = 1;
2892 pf_purge_expired_src_nodes();
2893 PF_UNLOCK();
2894 NET_UNLOCK();
2895 break;
2896 }
2897
2898 case DIOCKILLSRCNODES: {
2899 struct pf_src_node *sn;
2900 struct pf_state *s;
2901 struct pfioc_src_node_kill *psnk =
2902 (struct pfioc_src_node_kill *)addr;
2903 u_int killed = 0;
2904
2905 NET_LOCK();
2906 PF_LOCK();
2907 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
2908 if (pf_match_addr(psnk->psnk_src.neg,
2909 &psnk->psnk_src.addr.v.a.addr,
2910 &psnk->psnk_src.addr.v.a.mask,
2911 &sn->addr, sn->af) &&
2912 pf_match_addr(psnk->psnk_dst.neg,
2913 &psnk->psnk_dst.addr.v.a.addr,
2914 &psnk->psnk_dst.addr.v.a.mask,
2915 &sn->raddr, sn->af)) {
2916 /* Handle state to src_node linkage */
2917 if (sn->states != 0) {
2918 PF_ASSERT_LOCKED();
2919 PF_STATE_ENTER_WRITE();
2920 RB_FOREACH(s, pf_state_tree_id,
2921 &tree_id)
2922 pf_state_rm_src_node(s, sn);
2923 PF_STATE_EXIT_WRITE();
2924 }
2925 sn->expire = 1;
2926 killed++;
2927 }
2928 }
2929
2930 if (killed > 0)
2931 pf_purge_expired_src_nodes();
2932
2933 psnk->psnk_killed = killed;
2934 PF_UNLOCK();
2935 NET_UNLOCK();
2936 break;
2937 }
2938
2939 case DIOCSETHOSTID: {
2940 u_int32_t *hostid = (u_int32_t *)addr;
2941
2942 NET_LOCK();
2943 PF_LOCK();
2944 if (*hostid == 0)
2945 pf_trans_set.hostid = arc4random();
2946 else
2947 pf_trans_set.hostid = *hostid;
2948 pf_trans_set.mask |= PF_TSET_HOSTID;
2949 PF_UNLOCK();
2950 NET_UNLOCK();
2951 break;
2952 }
2953
2954 case DIOCOSFPFLUSH:
2955 pf_osfp_flush();
2956 break;
2957
2958 case DIOCIGETIFACES: {
2959 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2960
2961 if (io->pfiio_esize != sizeof(struct pfi_kif)) {
2962 error = ENODEV;
2963 break;
2964 }
2965 NET_LOCK();
2966 PF_LOCK();
2967 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
2968 &io->pfiio_size);
2969 PF_UNLOCK();
2970 NET_UNLOCK();
2971 break;
2972 }
2973
2974 case DIOCSETIFFLAG: {
2975 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2976
2977 if (io == NULL)
2978 return (EINVAL);
2979
2980 NET_LOCK();
2981 PF_LOCK();
2982 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
2983 PF_UNLOCK();
2984 NET_UNLOCK();
2985 break;
2986 }
2987
2988 case DIOCCLRIFFLAG: {
2989 struct pfioc_iface *io = (struct pfioc_iface *)addr;
2990
2991 if (io == NULL)
2992 return (EINVAL);
2993
2994 NET_LOCK();
2995 PF_LOCK();
2996 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
2997 PF_UNLOCK();
2998 NET_UNLOCK();
2999 break;
3000 }
3001
3002 case DIOCSETREASS: {
3003 u_int32_t *reass = (u_int32_t *)addr;
3004
3005 NET_LOCK();
3006 PF_LOCK();
3007 pf_trans_set.reass = *reass;
3008 pf_trans_set.mask |= PF_TSET_REASS;
3009 PF_UNLOCK();
3010 NET_UNLOCK();
3011 break;
3012 }
3013
3014 case DIOCSETSYNFLWATS: {
3015 struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
3016
3017 NET_LOCK();
3018 PF_LOCK();
3019 error = pf_syncookies_setwats(io->hiwat, io->lowat);
3020 PF_UNLOCK();
3021 NET_UNLOCK();
3022 break;
3023 }
3024
3025 case DIOCGETSYNFLWATS: {
3026 struct pfioc_synflwats *io = (struct pfioc_synflwats *)addr;
3027
3028 NET_LOCK();
3029 PF_LOCK();
3030 error = pf_syncookies_getwats(io);
3031 PF_UNLOCK();
3032 NET_UNLOCK();
3033 break;
3034 }
3035
3036 case DIOCSETSYNCOOKIES: {
3037 u_int8_t *mode = (u_int8_t *)addr;
3038
3039 NET_LOCK();
3040 PF_LOCK();
3041 error = pf_syncookies_setmode(*mode);
3042 PF_UNLOCK();
3043 NET_UNLOCK();
3044 break;
3045 }
3046
3047 default:
3048 error = ENODEV;
3049 break;
3050 }
3051 fail:
3052 return (error);
3053}
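For context, and not part of the listing: the DIOC* commands dispatched above are normally issued against /dev/pf by pfctl(8). A small, self-contained hypothetical example of driving one of the table commands from userland, assuming only what the DIOCRCLRTABLES handler above requires (pfrio_esize must be 0), could look like this:

/* Hypothetical userland example; not part of pf_ioctl.c. */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <err.h>

int
main(void)
{
	struct pfioc_table io;
	int dev;

	if ((dev = open("/dev/pf", O_RDWR)) == -1)
		err(1, "open(/dev/pf)");

	memset(&io, 0, sizeof(io));
	io.pfrio_esize = 0;	/* DIOCRCLRTABLES rejects anything else with ENODEV */
	if (ioctl(dev, DIOCRCLRTABLES, &io) == -1)
		err(1, "DIOCRCLRTABLES");
	/* on success the handler stores the number of deleted tables in pfrio_ndel */
	printf("%d tables deleted\n", io.pfrio_ndel);
	return 0;
}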
3054
3055void
3056pf_trans_set_commit(void)
3057{
3058 if (pf_trans_set.mask & PF_TSET_STATUSIF)
3059 strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
3060 if (pf_trans_set.mask & PF_TSET_DEBUG)
3061 pf_status.debug = pf_trans_set.debug;
3062 if (pf_trans_set.mask & PF_TSET_HOSTID)
3063 pf_status.hostid = pf_trans_set.hostid;
3064 if (pf_trans_set.mask & PF_TSET_REASS)
3065 pf_status.reass = pf_trans_set.reass;
3066}
3067
3068void
3069pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
3070{
3071 memmove(to, from, sizeof(*to));
3072 to->kif = NULL;
3073 to->addr.p.tbl = NULL;
3074}
3075
3076int
3077pf_validate_range(u_int8_t op, u_int16_t port[2])
3078{
3079 u_int16_t a = ntohs(port[0]);
3080 u_int16_t b = ntohs(port[1]);
3081
3082 if ((op == PF_OP_RRG && a > b) || /* 34:12, i.e. none */
3083 (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */
3084 (op == PF_OP_XRG && a > b)) /* 34<>22, i.e. all */
3085 return 1;
3086 return 0;
3087}
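pf_validate_range() returns nonzero for port ranges that would be nonsensical in a rule (an empty range for the ":" and "><" operators, or an all-covering "<>" range), and its callers turn that into EINVAL. The following small, hypothetical userland program is not part of pf_ioctl.c; it merely mirrors the same check so the rejected operator/port combinations are easy to try out:

/*
 * Hypothetical illustration; mirrors pf_validate_range() to show which
 * port ranges pf rejects. OP_RRG/OP_IRG/OP_XRG stand in for the kernel's
 * PF_OP_RRG/PF_OP_IRG/PF_OP_XRG.
 */
#include <stdio.h>
#include <arpa/inet.h>

enum { OP_RRG, OP_IRG, OP_XRG };

static int
range_is_invalid(int op, unsigned short port[2])
{
	unsigned short a = ntohs(port[0]), b = ntohs(port[1]);

	return ((op == OP_RRG && a > b) ||	/* 34:12, i.e. none */
	    (op == OP_IRG && a >= b) ||		/* 34><12, i.e. none */
	    (op == OP_XRG && a > b));		/* 34<>22, i.e. all */
}

int
main(void)
{
	unsigned short inverted[2] = { htons(34), htons(12) };

	printf("port 34:12 rejected: %d\n", range_is_invalid(OP_RRG, inverted));
	return 0;
}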
3088
3089int
3090pf_rule_copyin(struct pf_rule *from, struct pf_rule *to)
3091{
3092 int i;
3093
3094 to->src = from->src;
3095 to->src.addr.p.tbl = NULL;
3096 to->dst = from->dst;
3097 to->dst.addr.p.tbl = NULL;
3098
3099 if (pf_validate_range(to->src.port_op, to->src.port))
3100 return (EINVAL);
3101 if (pf_validate_range(to->dst.port_op, to->dst.port))
3102 return (EINVAL);
3103
3104 /* XXX union skip[] */
3105
3106 strlcpy(to->label, from->label, sizeof(to->label));
3107 strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
3108 strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
3109 strlcpy(to->qname, from->qname, sizeof(to->qname));
3110 strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
3111 strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
3112 strlcpy(to->match_tagname, from->match_tagname,
3113 sizeof(to->match_tagname));
3114 strlcpy(to->overload_tblname, from->overload_tblname,
3115 sizeof(to->overload_tblname));
3116
3117 pf_pool_copyin(&from->nat, &to->nat);
3118 pf_pool_copyin(&from->rdr, &to->rdr);
3119 pf_pool_copyin(&from->route, &to->route);
3120
3121 if (pf_validate_range(to->rdr.port_op, to->rdr.proxy_port))
3122 return (EINVAL);
3123
3124 to->kif = (to->ifname[0]) ?
3125 pfi_kif_alloc(to->ifname, M_WAITOK) : NULL;
3126 to->rcv_kif = (to->rcv_ifname[0]) ?
3127 pfi_kif_alloc(to->rcv_ifname, M_WAITOK) : NULL;
3128 to->rdr.kif = (to->rdr.ifname[0]) ?
3129 pfi_kif_alloc(to->rdr.ifname, M_WAITOK) : NULL;
3130 to->nat.kif = (to->nat.ifname[0]) ?
3131 pfi_kif_alloc(to->nat.ifname, M_WAITOK) : NULL;
3132 to->route.kif = (to->route.ifname[0]) ?
3133 pfi_kif_alloc(to->route.ifname, M_WAITOK) : NULL;
3134
3135 to->os_fingerprint = from->os_fingerprint;
3136
3137 to->rtableid = from->rtableid;
3138 if (to->rtableid >= 0 && !rtable_exists(to->rtableid))
3139 return (EBUSY);
3140 to->onrdomain = from->onrdomain;
3141 if (to->onrdomain != -1 && (to->onrdomain < 0 ||
3142 to->onrdomain > RT_TABLEID_MAX))
3143 return (EINVAL);
3144
3145 for (i = 0; i < PFTM_MAX; i++)
3146 to->timeout[i] = from->timeout[i];
3147 to->states_tot = from->states_tot;
3148 to->max_states = from->max_states;
3149 to->max_src_nodes = from->max_src_nodes;
3150 to->max_src_states = from->max_src_states;
3151 to->max_src_conn = from->max_src_conn;
3152 to->max_src_conn_rate.limit = from->max_src_conn_rate.limit;
3153 to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds;
3154 pf_init_threshold(&to->pktrate, from->pktrate.limit,
3155 from->pktrate.seconds);
3156
3157 if (to->qname[0] != 0) {
3158 if ((to->qid = pf_qname2qid(to->qname, 0)) == 0)
3159 return (EBUSY);
3160 if (to->pqname[0] != 0) {
3161 if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0)
3162 return (EBUSY);
3163 } else
3164 to->pqid = to->qid;
3165 }
3166 to->rt_listid = from->rt_listid;
3167 to->prob = from->prob;
3168 to->return_icmp = from->return_icmp;
3169 to->return_icmp6 = from->return_icmp6;
3170 to->max_mss = from->max_mss;
3171 if (to->tagname[0])
3172 if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0)
3173 return (EBUSY);
3174 if (to->match_tagname[0])
3175 if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0)
3176 return (EBUSY);
3177 to->scrub_flags = from->scrub_flags;
3178 to->delay = from->delay;
3179 to->uid = from->uid;
3180 to->gid = from->gid;
3181 to->rule_flag = from->rule_flag;
3182 to->action = from->action;
3183 to->direction = from->direction;
3184 to->log = from->log;
3185 to->logif = from->logif;
3186 #if NPFLOG > 0
3187 if (!to->log)
3188 to->logif = 0;
3189 #endif /* NPFLOG > 0 */
3190 to->quick = from->quick;
3191 to->ifnot = from->ifnot;
3192 to->rcvifnot = from->rcvifnot;
3193 to->match_tag_not = from->match_tag_not;
3194 to->keep_state = from->keep_state;
3195 to->af = from->af;
3196 to->naf = from->naf;
3197 to->proto = from->proto;
3198 to->type = from->type;
3199 to->code = from->code;
3200 to->flags = from->flags;
3201 to->flagset = from->flagset;
3202 to->min_ttl = from->min_ttl;
3203 to->allow_opts = from->allow_opts;
3204 to->rt = from->rt;
3205 to->return_ttl = from->return_ttl;
3206 to->tos = from->tos;
3207 to->set_tos = from->set_tos;
3208 to->anchor_relative = from->anchor_relative; /* XXX */
3209 to->anchor_wildcard = from->anchor_wildcard; /* XXX */
3210 to->flush = from->flush;
3211 to->divert.addr = from->divert.addr;
3212 to->divert.port = from->divert.port;
3213 to->divert.type = from->divert.type;
3214 to->prio = from->prio;
3215 to->set_prio[0] = from->set_prio[0];
3216 to->set_prio[1] = from->set_prio[1];
3217
3218 return (0);
3219}
3220
3221int
3222pf_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
3223{
3224 struct pf_status pfs;
3225
3226 NET_RLOCK_IN_IOCTL();
3227 PF_LOCK();
3228 memcpy(&pfs, &pf_status, sizeof(struct pf_status));
3229 pfi_update_status(pfs.ifname, &pfs);
3230 PF_UNLOCK();
3231 NET_RUNLOCK_IN_IOCTL();
3232
3233 return sysctl_rdstruct(oldp, oldlenp, newp, &pfs, sizeof(pfs));
3234}