Bug Summary

File: net/pf.c
Warning: line 477, column 6
Access to field 'timeout' results in a dereference of a null pointer (loaded from variable 'st')
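
In pf_update_state_timeout() (line 474 below), mtx_enter(&st->mtx) on line 476 only takes the address of a field, so the first actual load through `st` is `st->timeout` on line 477, which is where the analyzer places the report. Below is a minimal, self-contained sketch of the flagged shape; the caller and the PFTM_UNLINKED value are hypothetical stand-ins, not the analyzer's actual 61-step path through pf.c.

#include <stddef.h>

struct pf_state { int timeout; };
#define PFTM_UNLINKED	24	/* stand-in value for this sketch */

void
update_timeout_sketch(struct pf_state *st, int to)
{
	/*
	 * no NULL check: any path that reaches here with st == NULL
	 * dereferences a null pointer on the next line
	 */
	if (st->timeout != PFTM_UNLINKED)	/* the line 477 pattern */
		st->timeout = to;
}

void
caller_sketch(struct pf_state *maybe_null)
{
	/* the analyzer found some path on which this argument is NULL */
	update_timeout_sketch(maybe_null, PFTM_UNLINKED);
}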

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name pf.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/net/pf.c
1/* $OpenBSD: pf.c,v 1.1193 2024/01/10 16:44:30 bluhm Exp $ */
2
3/*
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002 - 2013 Henning Brauer <henning@openbsd.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * - Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * - Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 *
32 * Effort sponsored in part by the Defense Advanced Research Projects
33 * Agency (DARPA) and Air Force Research Laboratory, Air Force
34 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35 *
36 */
37
38#include "bpfilter.h"
39#include "carp.h"
40#include "pflog.h"
41#include "pfsync.h"
42#include "pflow.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/mbuf.h>
47#include <sys/filio.h>
48#include <sys/socket.h>
49#include <sys/socketvar.h>
50#include <sys/kernel.h>
51#include <sys/time.h>
52#include <sys/pool.h>
53#include <sys/proc.h>
54#include <sys/rwlock.h>
55#include <sys/syslog.h>
56
57#include <crypto/sha2.h>
58
59#include <net/if.h>
60#include <net/if_var.h>
61#include <net/if_types.h>
62#include <net/route.h>
63#include <net/toeplitz.h>
64
65#include <netinet/in.h>
66#include <netinet/in_var.h>
67#include <netinet/ip.h>
68#include <netinet/in_pcb.h>
69#include <netinet/ip_var.h>
70#include <netinet/ip_icmp.h>
71#include <netinet/icmp_var.h>
72#include <netinet/tcp.h>
73#include <netinet/tcp_seq.h>
74#include <netinet/tcp_timer.h>
75#include <netinet/tcp_var.h>
76#include <netinet/tcp_fsm.h>
77#include <netinet/udp.h>
78#include <netinet/udp_var.h>
79#include <netinet/ip_divert.h>
80
 81#ifdef INET6
82#include <netinet6/in6_var.h>
83#include <netinet/ip6.h>
84#include <netinet6/ip6_var.h>
85#include <netinet/icmp6.h>
86#include <netinet6/nd6.h>
87#include <netinet6/ip6_divert.h>
88#endif /* INET6 */
89
90#include <net/pfvar.h>
91#include <net/pfvar_priv.h>
92
 93#if NPFLOG > 0
94#include <net/if_pflog.h>
95#endif /* NPFLOG > 0 */
96
 97#if NPFLOW > 0
98#include <net/if_pflow.h>
99#endif /* NPFLOW > 0 */
100
 101#if NPFSYNC > 0
102#include <net/if_pfsync.h>
103#endif /* NPFSYNC > 0 */
104
105/*
106 * Global variables
107 */
108struct pf_state_tree pf_statetbl;
109struct pf_queuehead pf_queues[2];
110struct pf_queuehead *pf_queues_active;
111struct pf_queuehead *pf_queues_inactive;
112
113struct pf_status pf_status;
114
 115struct mutex pf_inp_mtx = MUTEX_INITIALIZER(IPL_SOFTNET);
116
117int pf_hdr_limit = 20; /* arbitrary limit, tune in ddb */
118
119SHA2_CTX pf_tcp_secret_ctx;
120u_char pf_tcp_secret[16];
121int pf_tcp_secret_init;
122int pf_tcp_iss_off;
123
124enum pf_test_status {
125 PF_TEST_FAIL = -1,
126 PF_TEST_OK,
127 PF_TEST_QUICK
128};
129
130struct pf_test_ctx {
131 struct pf_pdesc *pd;
132 struct pf_rule_actions act;
133 u_int8_t icmpcode;
134 u_int8_t icmptype;
135 int icmp_dir;
136 int state_icmp;
137 int tag;
138 u_short reason;
139 struct pf_rule_item *ri;
140 struct pf_src_node *sns[PF_SN_MAX];
141 struct pf_rule_slist rules;
142 struct pf_rule *nr;
143 struct pf_rule **rm;
144 struct pf_rule *a;
145 struct pf_rule **am;
146 struct pf_ruleset **rsm;
147 struct pf_ruleset *arsm;
148 struct pf_ruleset *aruleset;
149 struct tcphdr *th;
150};
151
152struct pool pf_src_tree_pl, pf_rule_pl, pf_queue_pl;
153struct pool pf_state_pl, pf_state_key_pl, pf_state_item_pl;
154struct pool pf_rule_item_pl, pf_sn_item_pl, pf_pktdelay_pl;
155
156void pf_add_threshold(struct pf_threshold *);
157int pf_check_threshold(struct pf_threshold *);
158int pf_check_tcp_cksum(struct mbuf *, int, int,
159 sa_family_t);
160__inline void pf_cksum_fixup(u_int16_t *, u_int16_t, u_int16_t,
161 u_int8_t);
162void pf_cksum_fixup_a(u_int16_t *, const struct pf_addr *,
163 const struct pf_addr *, sa_family_t, u_int8_t);
164int pf_modulate_sack(struct pf_pdesc *,
165 struct pf_state_peer *);
166int pf_icmp_mapping(struct pf_pdesc *, u_int8_t, int *,
167 u_int16_t *, u_int16_t *);
168int pf_change_icmp_af(struct mbuf *, int,
169 struct pf_pdesc *, struct pf_pdesc *,
170 struct pf_addr *, struct pf_addr *, sa_family_t,
171 sa_family_t);
172int pf_translate_a(struct pf_pdesc *, struct pf_addr *,
173 struct pf_addr *);
174void pf_translate_icmp(struct pf_pdesc *, struct pf_addr *,
175 u_int16_t *, struct pf_addr *, struct pf_addr *,
176 u_int16_t);
177int pf_translate_icmp_af(struct pf_pdesc*, int, void *);
178void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t, int,
179 sa_family_t, struct pf_rule *, u_int);
180void pf_detach_state(struct pf_state *);
181struct pf_state_key *pf_state_key_attach(struct pf_state_key *,
182 struct pf_state *, int);
183void pf_state_key_detach(struct pf_state *, int);
184u_int32_t pf_tcp_iss(struct pf_pdesc *);
185void pf_rule_to_actions(struct pf_rule *,
186 struct pf_rule_actions *);
187int pf_test_rule(struct pf_pdesc *, struct pf_rule **,
188 struct pf_state **, struct pf_rule **,
189 struct pf_ruleset **, u_short *);
190static __inline int pf_create_state(struct pf_pdesc *, struct pf_rule *,
191 struct pf_rule *, struct pf_rule *,
192 struct pf_state_key **, struct pf_state_key **,
193 int *, struct pf_state **, int,
194 struct pf_rule_slist *, struct pf_rule_actions *,
195 struct pf_src_node **);
196static __inline int pf_state_key_addr_setup(struct pf_pdesc *, void *,
197 int, struct pf_addr *, int, struct pf_addr *,
198 int, int);
199int pf_state_key_setup(struct pf_pdesc *, struct
200 pf_state_key **, struct pf_state_key **, int);
201int pf_tcp_track_full(struct pf_pdesc *,
202 struct pf_state **, u_short *, int *, int);
203int pf_tcp_track_sloppy(struct pf_pdesc *,
204 struct pf_state **, u_short *);
205static __inline int pf_synproxy(struct pf_pdesc *, struct pf_state **,
206 u_short *);
207int pf_test_state(struct pf_pdesc *, struct pf_state **,
208 u_short *);
209int pf_icmp_state_lookup(struct pf_pdesc *,
210 struct pf_state_key_cmp *, struct pf_state **,
211 u_int16_t, u_int16_t, int, int *, int, int);
212int pf_test_state_icmp(struct pf_pdesc *,
213 struct pf_state **, u_short *);
214u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t, int,
215 u_int16_t);
216static __inline int pf_set_rt_ifp(struct pf_state *, struct pf_addr *,
217 sa_family_t, struct pf_src_node **);
218struct pf_divert *pf_get_divert(struct mbuf *);
219int pf_walk_option(struct pf_pdesc *, struct ip *,
220 int, int, u_short *);
221int pf_walk_header(struct pf_pdesc *, struct ip *,
222 u_short *);
223int pf_walk_option6(struct pf_pdesc *, struct ip6_hdr *,
224 int, int, u_short *);
225int pf_walk_header6(struct pf_pdesc *, struct ip6_hdr *,
226 u_short *);
227void pf_print_state_parts(struct pf_state *,
228 struct pf_state_key *, struct pf_state_key *);
229int pf_addr_wrap_neq(struct pf_addr_wrap *,
230 struct pf_addr_wrap *);
231int pf_compare_state_keys(struct pf_state_key *,
232 struct pf_state_key *, struct pfi_kif *, u_int);
233u_int16_t pf_pkt_hash(sa_family_t, uint8_t,
234 const struct pf_addr *, const struct pf_addr *,
235 uint16_t, uint16_t);
236int pf_find_state(struct pf_pdesc *,
237 struct pf_state_key_cmp *, struct pf_state **);
238int pf_src_connlimit(struct pf_state **);
239int pf_match_rcvif(struct mbuf *, struct pf_rule *);
240int pf_step_into_anchor(struct pf_test_ctx *,
241 struct pf_rule *);
242int pf_match_rule(struct pf_test_ctx *,
243 struct pf_ruleset *);
244void pf_counters_inc(int, struct pf_pdesc *,
245 struct pf_state *, struct pf_rule *,
246 struct pf_rule *);
247
248int pf_state_insert(struct pfi_kif *,
249 struct pf_state_key **, struct pf_state_key **,
250 struct pf_state *);
251
252int pf_state_key_isvalid(struct pf_state_key *);
253struct pf_state_key *pf_state_key_ref(struct pf_state_key *);
254void pf_state_key_unref(struct pf_state_key *);
255void pf_state_key_link_reverse(struct pf_state_key *,
256 struct pf_state_key *);
257void pf_state_key_unlink_reverse(struct pf_state_key *);
258void pf_state_key_link_inpcb(struct pf_state_key *,
259 struct inpcb *);
260void pf_state_key_unlink_inpcb(struct pf_state_key *);
261void pf_pktenqueue_delayed(void *);
262int32_t pf_state_expires(const struct pf_state *, uint8_t);
263
 264#if NPFLOG > 0
265void pf_log_matches(struct pf_pdesc *, struct pf_rule *,
266 struct pf_rule *, struct pf_ruleset *,
267 struct pf_rule_slist *);
268#endif /* NPFLOG > 0 */
269
270extern struct pool pfr_ktable_pl;
271extern struct pool pfr_kentry_pl;
272
273struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
 274	{ &pf_state_pl,		PFSTATE_HIWAT,		PFSTATE_HIWAT },
 275	{ &pf_src_tree_pl,	PFSNODE_HIWAT,		PFSNODE_HIWAT },
 276	{ &pf_frent_pl,		PFFRAG_FRENT_HIWAT,	PFFRAG_FRENT_HIWAT },
 277	{ &pfr_ktable_pl,	PFR_KTABLE_HIWAT,	PFR_KTABLE_HIWAT },
 278	{ &pfr_kentry_pl,	PFR_KENTRY_HIWAT,	PFR_KENTRY_HIWAT },
 279	{ &pf_pktdelay_pl,	PF_PKTDELAY_MAXPKTS,	PF_PKTDELAY_MAXPKTS },
 280	{ &pf_anchor_pl,	PF_ANCHOR_HIWAT,	PF_ANCHOR_HIWAT }
281};
282
 283#define BOUND_IFACE(r, k) \
 284	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
 285
 286#define STATE_INC_COUNTERS(s)					\
 287	do {							\
 288		struct pf_rule_item *mrm;			\
 289		s->rule.ptr->states_cur++;			\
 290		s->rule.ptr->states_tot++;			\
 291		if (s->anchor.ptr != NULL) {			\
 292			s->anchor.ptr->states_cur++;		\
 293			s->anchor.ptr->states_tot++;		\
 294		}						\
 295		SLIST_FOREACH(mrm, &s->match_rules, entry)	\
 296			mrm->r->states_cur++;			\
 297	} while (0)
298
299static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
300static inline int pf_state_compare_key(const struct pf_state_key *,
301 const struct pf_state_key *);
302static inline int pf_state_compare_id(const struct pf_state *,
303 const struct pf_state *);
 304#ifdef INET6
305static __inline void pf_cksum_uncover(u_int16_t *, u_int16_t, u_int8_t);
306static __inline void pf_cksum_cover(u_int16_t *, u_int16_t, u_int8_t);
307#endif /* INET6 */
308static __inline void pf_set_protostate(struct pf_state *, int, u_int8_t);
309
310struct pf_src_tree tree_src_tracking;
311
312struct pf_state_tree_id tree_id;
 313struct pf_state_list pf_state_list = PF_STATE_LIST_INITIALIZER(pf_state_list);
314
 315RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
 316RBT_GENERATE(pf_state_tree, pf_state_key, sk_entry, pf_state_compare_key);
 317RBT_GENERATE(pf_state_tree_id, pf_state, entry_id, pf_state_compare_id);
318
319int
320pf_addr_compare(const struct pf_addr *a, const struct pf_addr *b,
321 sa_family_t af)
322{
323 switch (af) {
 324	case AF_INET:
 325		if (a->addr32[0] > b->addr32[0])
 326			return (1);
 327		if (a->addr32[0] < b->addr32[0])
 328			return (-1);
 329		break;
 330#ifdef INET6
 331	case AF_INET6:
 332		if (a->addr32[3] > b->addr32[3])
 333			return (1);
 334		if (a->addr32[3] < b->addr32[3])
 335			return (-1);
 336		if (a->addr32[2] > b->addr32[2])
 337			return (1);
 338		if (a->addr32[2] < b->addr32[2])
 339			return (-1);
 340		if (a->addr32[1] > b->addr32[1])
 341			return (1);
 342		if (a->addr32[1] < b->addr32[1])
 343			return (-1);
 344		if (a->addr32[0] > b->addr32[0])
 345			return (1);
 346		if (a->addr32[0] < b->addr32[0])
 347			return (-1);
 348		break;
349#endif /* INET6 */
350 }
351 return (0);
352}
353
354static __inline int
355pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
356{
357 int diff;
358
359 if (a->rule.ptr > b->rule.ptr)
360 return (1);
361 if (a->rule.ptr < b->rule.ptr)
362 return (-1);
363 if ((diff = a->type - b->type) != 0)
364 return (diff);
365 if ((diff = a->af - b->af) != 0)
366 return (diff);
367 if ((diff = pf_addr_compare(&a->addr, &b->addr, a->af)) != 0)
368 return (diff);
369 return (0);
370}
371
372static __inline void
373pf_set_protostate(struct pf_state *st, int which, u_int8_t newstate)
374{
375 if (which == PF_PEER_DST || which == PF_PEER_BOTH)
376 st->dst.state = newstate;
377 if (which == PF_PEER_DST)
378 return;
379
380 if (st->src.state == newstate)
381 return;
382 if (st->creatorid == pf_status.hostid &&
 383	    st->key[PF_SK_STACK]->proto == IPPROTO_TCP &&
 384	    !(TCPS_HAVEESTABLISHED(st->src.state) ||
 385	    st->src.state == TCPS_CLOSED) &&
 386	    (TCPS_HAVEESTABLISHED(newstate) || newstate == TCPS_CLOSED))
387 pf_status.states_halfopen--;
388
389 st->src.state = newstate;
390}
391
392void
393pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
394{
395 switch (af) {
 396	case AF_INET:
 397		dst->addr32[0] = src->addr32[0];
 398		break;
 399#ifdef INET6
 400	case AF_INET6:
 401		dst->addr32[0] = src->addr32[0];
 402		dst->addr32[1] = src->addr32[1];
 403		dst->addr32[2] = src->addr32[2];
 404		dst->addr32[3] = src->addr32[3];
405 break;
406#endif /* INET6 */
407 default:
408 unhandled_af(af);
409 }
410}
411
412void
413pf_init_threshold(struct pf_threshold *threshold,
414 u_int32_t limit, u_int32_t seconds)
415{
 416	threshold->limit = limit * PF_THRESHOLD_MULT;
417 threshold->seconds = seconds;
418 threshold->count = 0;
419 threshold->last = getuptime();
420}
421
422void
423pf_add_threshold(struct pf_threshold *threshold)
424{
425 u_int32_t t = getuptime(), diff = t - threshold->last;
426
427 if (diff >= threshold->seconds)
428 threshold->count = 0;
429 else
430 threshold->count -= threshold->count * diff /
431 threshold->seconds;
 432	threshold->count += PF_THRESHOLD_MULT;
433 threshold->last = t;
434}
435
436int
437pf_check_threshold(struct pf_threshold *threshold)
438{
439 return (threshold->count > threshold->limit);
440}
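
As an aside on the arithmetic above: the counter is fixed-point. Each hit adds PF_THRESHOLD_MULT (1000), and pf_add_threshold() first decays the stored count linearly across the configured window before adding the new hit. A userland sketch of the same arithmetic, with hypothetical rule values (3 connections per 10 seconds):

#include <stdint.h>
#include <stdio.h>

#define PF_THRESHOLD_MULT	1000

struct threshold {
	uint32_t limit;		/* max hits * PF_THRESHOLD_MULT */
	uint32_t seconds;	/* averaging window */
	uint32_t count;		/* fixed-point hit counter */
	uint32_t last;		/* uptime of last hit */
};

/* mirrors pf_add_threshold(): linear decay, then add one hit */
static void
add_hit(struct threshold *t, uint32_t now)
{
	uint32_t diff = now - t->last;

	if (diff >= t->seconds)
		t->count = 0;
	else
		t->count -= t->count * diff / t->seconds;
	t->count += PF_THRESHOLD_MULT;
	t->last = now;
}

int
main(void)
{
	/* e.g. "max-src-conn-rate 3/10": 3 hits per 10 seconds */
	struct threshold t = { 3 * PF_THRESHOLD_MULT, 10, 0, 0 };
	uint32_t now;

	for (now = 0; now < 4; now++) {
		add_hit(&t, now);
		/* the comparison is pf_check_threshold() */
		printf("t=%u count=%u over=%d\n", (unsigned)now,
		    (unsigned)t.count, t.count > t.limit);
	}
	return (0);
}

The fourth hit inside the window pushes the count past 3000 and trips the limit.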
441
442void
443pf_state_list_insert(struct pf_state_list *pfs, struct pf_state *st)
444{
445 /*
446 * we can always put states on the end of the list.
447 *
448 * things reading the list should take a read lock, then
449 * the mutex, get the head and tail pointers, release the
450 * mutex, and then they can iterate between the head and tail.
451 */
452
453 pf_state_ref(st); /* get a ref for the list */
454
455 mtx_enter(&pfs->pfs_mtx);
 456	TAILQ_INSERT_TAIL(&pfs->pfs_list, st, entry_list);
457 mtx_leave(&pfs->pfs_mtx);
458}
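
The locking protocol described in the comment above can be made concrete. Here is an editor's sketch of a reader, assuming the pf_state_list layout and the pf_state_queue TAILQ head name from net/pfvar_priv.h; this is an illustration, not a function in pf.c.

void
pf_state_list_reader_sketch(struct pf_state_list *pfs)
{
	struct pf_state *head, *tail, *st;

	rw_enter_read(&pfs->pfs_rwl);

	/* hold the mutex only long enough to snapshot head and tail */
	mtx_enter(&pfs->pfs_mtx);
	head = TAILQ_FIRST(&pfs->pfs_list);
	tail = TAILQ_LAST(&pfs->pfs_list, pf_state_queue);
	mtx_leave(&pfs->pfs_mtx);

	/*
	 * removals require the write lock, so everything between head
	 * and tail stays linked while we hold the read lock
	 */
	for (st = head; st != NULL; st = TAILQ_NEXT(st, entry_list)) {
		/* ... inspect st here ... */
		if (st == tail)
			break;
	}

	rw_exit_read(&pfs->pfs_rwl);
}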
459
460void
461pf_state_list_remove(struct pf_state_list *pfs, struct pf_state *st)
462{
463 /* states can only be removed when the write lock is held */
464 rw_assert_wrlock(&pfs->pfs_rwl);
465
466 mtx_enter(&pfs->pfs_mtx);
 467	TAILQ_REMOVE(&pfs->pfs_list, st, entry_list);
468 mtx_leave(&pfs->pfs_mtx);
469
470 pf_state_unref(st); /* list no longer references the state */
471}
472
473void
474pf_update_state_timeout(struct pf_state *st, int to)
475{
476 mtx_enter(&st->mtx);
 477	if (st->timeout != PFTM_UNLINKED)
	    ^ step 61: Access to field 'timeout' results in a dereference of a null pointer (loaded from variable 'st')
478 st->timeout = to;
479 mtx_leave(&st->mtx);
480}
481
482int
483pf_src_connlimit(struct pf_state **stp)
484{
485 int bad = 0;
486 struct pf_src_node *sn;
487
 488	if ((sn = pf_get_src_node((*stp), PF_SN_NONE)) == NULL)
489 return (0);
490
491 sn->conn++;
492 (*stp)->src.tcp_est = 1;
493 pf_add_threshold(&sn->conn_rate);
494
495 if ((*stp)->rule.ptr->max_src_conn &&
496 (*stp)->rule.ptr->max_src_conn < sn->conn) {
 497		pf_status.lcounters[LCNT_SRCCONN]++;
498 bad++;
499 }
500
501 if ((*stp)->rule.ptr->max_src_conn_rate.limit &&
502 pf_check_threshold(&sn->conn_rate)) {
 503		pf_status.lcounters[LCNT_SRCCONNRATE]++;
504 bad++;
505 }
506
507 if (!bad)
508 return (0);
509
510 if ((*stp)->rule.ptr->overload_tbl) {
511 struct pfr_addr p;
512 u_int32_t killed = 0;
513
 514		pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
 515		if (pf_status.debug >= LOG_NOTICE) {
 516			log(LOG_NOTICE,
517 "pf: pf_src_connlimit: blocking address ");
518 pf_print_host(&sn->addr, 0,
519 (*stp)->key[PF_SK_WIRE]->af);
520 }
521
 522		memset(&p, 0, sizeof(p));
523 p.pfra_af = (*stp)->key[PF_SK_WIRE]->af;
524 switch ((*stp)->key[PF_SK_WIRE]->af) {
 525		case AF_INET:
 526			p.pfra_net = 32;
 527			p.pfra_ip4addr = sn->addr.v4;
 528			break;
 529#ifdef INET6
 530		case AF_INET6:
 531			p.pfra_net = 128;
 532			p.pfra_ip6addr = sn->addr.v6;
533 break;
534#endif /* INET6 */
535 }
536
537 pfr_insert_kentry((*stp)->rule.ptr->overload_tbl,
538 &p, gettime());
539
540 /* kill existing states if that's required. */
541 if ((*stp)->rule.ptr->flush) {
542 struct pf_state_key *sk;
543 struct pf_state *st;
544
 545			pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
 546			RBT_FOREACH(st, pf_state_tree_id, &tree_id) {
547 sk = st->key[PF_SK_WIRE];
548 /*
549 * Kill states from this source. (Only those
550 * from the same rule if PF_FLUSH_GLOBAL is not
551 * set)
552 */
553 if (sk->af ==
554 (*stp)->key[PF_SK_WIRE]->af &&
555 (((*stp)->direction == PF_OUT &&
 556				    PF_AEQ(&sn->addr, &sk->addr[1], sk->af)) ||
 557				    ((*stp)->direction == PF_IN &&
 558				    PF_AEQ(&sn->addr, &sk->addr[0], sk->af))) &&
 559				    ((*stp)->rule.ptr->flush &
 560				    PF_FLUSH_GLOBAL ||
561 (*stp)->rule.ptr == st->rule.ptr)) {
562 pf_update_state_timeout(st, PFTM_PURGE);
563 pf_set_protostate(st, PF_PEER_BOTH,
 564					    TCPS_CLOSED);
565 killed++;
566 }
567 }
 568			if (pf_status.debug >= LOG_NOTICE)
569 addlog(", %u states killed", killed);
570 }
 571		if (pf_status.debug >= LOG_NOTICE)
572 addlog("\n");
573 }
574
575 /* kill this state */
576 pf_update_state_timeout(*stp, PFTM_PURGE);
 577	pf_set_protostate(*stp, PF_PEER_BOTH, TCPS_CLOSED);
578 return (1);
579}
580
581int
582pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
583 enum pf_sn_types type, sa_family_t af, struct pf_addr *src,
584 struct pf_addr *raddr, struct pfi_kif *kif)
585{
586 struct pf_src_node k;
587
 588	if (*sn == NULL) {
589 k.af = af;
590 k.type = type;
591 pf_addrcpy(&k.addr, src, af);
592 k.rule.ptr = rule;
 593		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
 594		*sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
595 }
 596	if (*sn == NULL) {
 597		if (!rule->max_src_nodes ||
 598		    rule->src_nodes < rule->max_src_nodes)
 599			(*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT | PR_ZERO);
 600		else
 601			pf_status.lcounters[LCNT_SRCNODES]++;
 602		if ((*sn) == NULL)
603 return (-1);
604
605 pf_init_threshold(&(*sn)->conn_rate,
606 rule->max_src_conn_rate.limit,
607 rule->max_src_conn_rate.seconds);
608
609 (*sn)->type = type;
610 (*sn)->af = af;
611 (*sn)->rule.ptr = rule;
612 pf_addrcpy(&(*sn)->addr, src, af);
613 if (raddr)
614 pf_addrcpy(&(*sn)->raddr, raddr, af);
 615		if (RB_INSERT(pf_src_tree,
 616		    &tree_src_tracking, *sn) != NULL) {
 617			if (pf_status.debug >= LOG_NOTICE) {
 618				log(LOG_NOTICE,
619 "pf: src_tree insert failed: ");
620 pf_print_host(&(*sn)->addr, 0, af);
621 addlog("\n");
622 }
623 pool_put(&pf_src_tree_pl, *sn);
624 return (-1);
625 }
626 (*sn)->creation = getuptime();
627 (*sn)->rule.ptr->src_nodes++;
 628		if (kif != NULL) {
629 (*sn)->kif = kif;
630 pfi_kif_ref(kif, PFI_KIF_REF_SRCNODE);
631 }
 632		pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
633 pf_status.src_nodes++;
634 } else {
635 if (rule->max_src_states &&
636 (*sn)->states >= rule->max_src_states) {
 637			pf_status.lcounters[LCNT_SRCSTATES]++;
638 return (-1);
639 }
640 }
641 return (0);
642}
643
644void
645pf_remove_src_node(struct pf_src_node *sn)
646{
647 if (sn->states > 0 || sn->expire > getuptime())
648 return;
649
650 sn->rule.ptr->src_nodes--;
651 if (sn->rule.ptr->states_cur == 0 &&
652 sn->rule.ptr->src_nodes == 0)
 653		pf_rm_rule(NULL, sn->rule.ptr);
 654	RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
 655	pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
656 pf_status.src_nodes--;
657 pfi_kif_unref(sn->kif, PFI_KIF_REF_SRCNODE);
658 pool_put(&pf_src_tree_pl, sn);
659}
660
661struct pf_src_node *
662pf_get_src_node(struct pf_state *st, enum pf_sn_types type)
663{
664 struct pf_sn_item *sni;
665
 666	SLIST_FOREACH(sni, &st->src_nodes, next)
 667		if (sni->sn->type == type)
 668			return (sni->sn);
 669	return (NULL);
670}
671
672void
673pf_state_rm_src_node(struct pf_state *st, struct pf_src_node *sn)
674{
 675	struct pf_sn_item *sni, *snin, *snip = NULL;
 676
 677	for (sni = SLIST_FIRST(&st->src_nodes); sni; sni = snin) {
 678		snin = SLIST_NEXT(sni, next);
 679		if (sni->sn == sn) {
 680			if (snip)
 681				SLIST_REMOVE_AFTER(snip, next);
 682			else
 683				SLIST_REMOVE_HEAD(&st->src_nodes, next);
 684			pool_put(&pf_sn_item_pl, sni);
 685			sni = NULL;
 686			sn->states--;
 687		}
 688		if (sni != NULL)
 689			snip = sni;
690 }
691}
692
693/* state table stuff */
694
695static inline int
696pf_state_compare_key(const struct pf_state_key *a,
697 const struct pf_state_key *b)
698{
699 int diff;
700
701 if ((diff = a->hash - b->hash) != 0)
702 return (diff);
703 if ((diff = a->proto - b->proto) != 0)
704 return (diff);
705 if ((diff = a->af - b->af) != 0)
706 return (diff);
707 if ((diff = pf_addr_compare(&a->addr[0], &b->addr[0], a->af)) != 0)
708 return (diff);
709 if ((diff = pf_addr_compare(&a->addr[1], &b->addr[1], a->af)) != 0)
710 return (diff);
711 if ((diff = a->port[0] - b->port[0]) != 0)
712 return (diff);
713 if ((diff = a->port[1] - b->port[1]) != 0)
714 return (diff);
715 if ((diff = a->rdomain - b->rdomain) != 0)
716 return (diff);
717 return (0);
718}
719
720static inline int
721pf_state_compare_id(const struct pf_state *a, const struct pf_state *b)
722{
723 if (a->id > b->id)
724 return (1);
725 if (a->id < b->id)
726 return (-1);
727 if (a->creatorid > b->creatorid)
728 return (1);
729 if (a->creatorid < b->creatorid)
730 return (-1);
731
732 return (0);
733}
734
735/*
736 * on failure, pf_state_key_attach() releases the pf_state_key
737 * reference and returns NULL.
738 */
739struct pf_state_key *
740pf_state_key_attach(struct pf_state_key *sk, struct pf_state *st, int idx)
741{
742 struct pf_state_item *si;
743 struct pf_state_key *cur;
 744	struct pf_state *oldst = NULL;
 745
 746	PF_ASSERT_LOCKED();
 747
 748	KASSERT(st->key[idx] == NULL);
 749	sk->sk_removed = 0;
 750	cur = RBT_INSERT(pf_state_tree, &pf_statetbl, sk);
 751	if (cur != NULL) {
 752		sk->sk_removed = 1;
 753		/* key exists. check for same kif, if none, add to key */
 754		TAILQ_FOREACH(si, &cur->sk_states, si_entry) {
 755			struct pf_state *sist = si->si_st;
 756			if (sist->kif == st->kif &&
 757			    ((sist->key[PF_SK_WIRE]->af == sk->af &&
 758			    sist->direction == st->direction) ||
 759			    (sist->key[PF_SK_WIRE]->af !=
 760			    sist->key[PF_SK_STACK]->af &&
 761			    sk->af == sist->key[PF_SK_STACK]->af &&
 762			    sist->direction != st->direction))) {
 763				int reuse = 0;
 764
 765				if (sk->proto == IPPROTO_TCP &&
 766				    sist->src.state >= TCPS_FIN_WAIT_2 &&
 767				    sist->dst.state >= TCPS_FIN_WAIT_2)
 768					reuse = 1;
 769				if (pf_status.debug >= LOG_NOTICE) {
 770					log(LOG_NOTICE,
 771					    "pf: %s key attach %s on %s: ",
 772					    (idx == PF_SK_WIRE) ?
 773					    "wire" : "stack",
 774					    reuse ? "reuse" : "failed",
 775					    st->kif->pfik_name);
 776					pf_print_state_parts(st,
 777					    (idx == PF_SK_WIRE) ? sk : NULL,
 778					    (idx == PF_SK_STACK) ? sk : NULL);
 779					addlog(", existing: ");
 780					pf_print_state_parts(sist,
 781					    (idx == PF_SK_WIRE) ? sk : NULL,
 782					    (idx == PF_SK_STACK) ? sk : NULL);
 783					addlog("\n");
 784				}
 785				if (reuse) {
 786					pf_set_protostate(sist, PF_PEER_BOTH,
 787					    TCPS_CLOSED);
 788					/* remove late or sks can go away */
 789					oldst = sist;
 790				} else {
 791					pf_state_key_unref(sk);
 792					return (NULL);	/* collision! */
 793				}
 794			}
 795		}
 796
 797		/* reuse the existing state key */
 798		pf_state_key_unref(sk);
 799		sk = cur;
 800	}
 801
 802	if ((si = pool_get(&pf_state_item_pl, PR_NOWAIT)) == NULL) {
 803		if (TAILQ_EMPTY(&sk->sk_states)) {
 804			KASSERT(cur == NULL);
 805			RBT_REMOVE(pf_state_tree, &pf_statetbl, sk);
 806			sk->sk_removed = 1;
 807			pf_state_key_unref(sk);
 808		}
 809
 810		return (NULL);
 811	}
 812
 813	st->key[idx] = pf_state_key_ref(sk);	/* give a ref to state */
 814	si->si_st = pf_state_ref(st);
 815
 816	/* list is sorted, if-bound states before floating */
 817	if (st->kif == pfi_all)
 818		TAILQ_INSERT_TAIL(&sk->sk_states, si, si_entry);
 819	else
 820		TAILQ_INSERT_HEAD(&sk->sk_states, si, si_entry);
821
822 if (oldst)
823 pf_remove_state(oldst);
824
825 /* caller owns the pf_state ref, which owns a pf_state_key ref now */
826 return (sk);
827}
828
829void
830pf_detach_state(struct pf_state *st)
831{
 832	KASSERT(st->key[PF_SK_WIRE] != NULL);
 833	pf_state_key_detach(st, PF_SK_WIRE);
 834
 835	KASSERT(st->key[PF_SK_STACK] != NULL);
836 if (st->key[PF_SK_STACK] != st->key[PF_SK_WIRE])
837 pf_state_key_detach(st, PF_SK_STACK);
838}
839
840void
841pf_state_key_detach(struct pf_state *st, int idx)
842{
843 struct pf_state_item *si;
844 struct pf_state_key *sk;
845
 846	PF_ASSERT_LOCKED();
 847
 848	sk = st->key[idx];
 849	if (sk == NULL)
 850		return;
 851
 852	TAILQ_FOREACH(si, &sk->sk_states, si_entry) {
 853		if (si->si_st == st)
 854			break;
 855	}
 856	if (si == NULL)
 857		return;
 858
 859	TAILQ_REMOVE(&sk->sk_states, si, si_entry);
 860	pool_put(&pf_state_item_pl, si);
 861
 862	if (TAILQ_EMPTY(&sk->sk_states)) {
 863		RBT_REMOVE(pf_state_tree, &pf_statetbl, sk);
864 sk->sk_removed = 1;
865 pf_state_key_unlink_reverse(sk);
866 pf_state_key_unlink_inpcb(sk);
867 pf_state_key_unref(sk);
868 }
869
870 pf_state_unref(st);
871}
872
873struct pf_state_key *
874pf_alloc_state_key(int pool_flags)
875{
876 struct pf_state_key *sk;
877
 878	if ((sk = pool_get(&pf_state_key_pl, pool_flags)) == NULL)
 879		return (NULL);
 880
 881	PF_REF_INIT(sk->sk_refcnt);
 882	TAILQ_INIT(&sk->sk_states);
883 sk->sk_removed = 1;
884
885 return (sk);
886}
887
888static __inline int
889pf_state_key_addr_setup(struct pf_pdesc *pd, void *arg, int sidx,
890 struct pf_addr *saddr, int didx, struct pf_addr *daddr, int af, int multi)
891{
892 struct pf_state_key_cmp *key = arg;
 893#ifdef INET6
 894	struct pf_addr *target;
 895
 896	if (af == AF_INET || pd->proto != IPPROTO_ICMPV6)
 897		goto copy;
 898
 899	switch (pd->hdr.icmp6.icmp6_type) {
 900	case ND_NEIGHBOR_SOLICIT:
 901		if (multi)
 902			return (-1);
 903		target = (struct pf_addr *)&pd->hdr.nd_ns.nd_ns_target;
 904		daddr = target;
 905		break;
 906	case ND_NEIGHBOR_ADVERT:
 907		if (multi)
 908			return (-1);
 909		target = (struct pf_addr *)&pd->hdr.nd_ns.nd_ns_target;
 910		saddr = target;
 911		if (IN6_IS_ADDR_MULTICAST(&pd->dst->v6)) {
 912			key->addr[didx].addr32[0] = 0;
 913			key->addr[didx].addr32[1] = 0;
 914			key->addr[didx].addr32[2] = 0;
 915			key->addr[didx].addr32[3] = 0;
 916			daddr = NULL; /* overwritten */
 917		}
 918		break;
 919	default:
 920		if (multi) {
 921			key->addr[sidx].addr32[0] = __IPV6_ADDR_INT32_MLL;
 922			key->addr[sidx].addr32[1] = 0;
 923			key->addr[sidx].addr32[2] = 0;
 924			key->addr[sidx].addr32[3] = __IPV6_ADDR_INT32_ONE;
 925			saddr = NULL; /* overwritten */
 926		}
 927	}
 928 copy:
929#endif /* INET6 */
930 if (saddr)
931 pf_addrcpy(&key->addr[sidx], saddr, af);
932 if (daddr)
933 pf_addrcpy(&key->addr[didx], daddr, af);
934
935 return (0);
936}
937
938int
939pf_state_key_setup(struct pf_pdesc *pd, struct pf_state_key **skw,
940 struct pf_state_key **sks, int rtableid)
941{
942 /* if returning error we MUST pool_put state keys ourselves */
943 struct pf_state_key *sk1, *sk2;
944 u_int wrdom = pd->rdomain;
945 int afto = pd->af != pd->naf;
946
 947	if ((sk1 = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
 948		return (ENOMEM);
949
950 pf_state_key_addr_setup(pd, sk1, pd->sidx, pd->src, pd->didx, pd->dst,
951 pd->af, 0);
952 sk1->port[pd->sidx] = pd->osport;
953 sk1->port[pd->didx] = pd->odport;
954 sk1->proto = pd->proto;
955 sk1->af = pd->af;
956 sk1->rdomain = pd->rdomain;
957 sk1->hash = pf_pkt_hash(sk1->af, sk1->proto,
958 &sk1->addr[0], &sk1->addr[1], sk1->port[0], sk1->port[1]);
959 if (rtableid >= 0)
960 wrdom = rtable_l2(rtableid);
961
 962	if (PF_ANEQ(&pd->nsaddr, pd->src, pd->af) ||
 963	    PF_ANEQ(&pd->ndaddr, pd->dst, pd->af) ||
964 pd->nsport != pd->osport || pd->ndport != pd->odport ||
965 wrdom != pd->rdomain || afto) { /* NAT/NAT64 */
 966		if ((sk2 = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL) {
 967			pf_state_key_unref(sk1);
 968			return (ENOMEM);
969 }
970 pf_state_key_addr_setup(pd, sk2, afto ? pd->didx : pd->sidx,
971 &pd->nsaddr, afto ? pd->sidx : pd->didx, &pd->ndaddr,
972 pd->naf, 0);
973 sk2->port[afto ? pd->didx : pd->sidx] = pd->nsport;
974 sk2->port[afto ? pd->sidx : pd->didx] = pd->ndport;
975 if (afto) {
976 switch (pd->proto) {
 977			case IPPROTO_ICMP:
 978				sk2->proto = IPPROTO_ICMPV6;
 979				break;
 980			case IPPROTO_ICMPV6:
 981				sk2->proto = IPPROTO_ICMP;
982 break;
983 default:
984 sk2->proto = pd->proto;
985 }
986 } else
987 sk2->proto = pd->proto;
988 sk2->af = pd->naf;
989 sk2->rdomain = wrdom;
990 sk2->hash = pf_pkt_hash(sk2->af, sk2->proto,
991 &sk2->addr[0], &sk2->addr[1], sk2->port[0], sk2->port[1]);
992 } else
993 sk2 = pf_state_key_ref(sk1);
994
995 if (pd->dir == PF_IN) {
996 *skw = sk1;
997 *sks = sk2;
998 } else {
999 *sks = sk1;
1000 *skw = sk2;
1001 }
1002
 1003	if (pf_status.debug >= LOG_DEBUG) {
 1004		log(LOG_DEBUG, "pf: key setup: ");
 1005		pf_print_state_parts(NULL, *skw, *sks);
1006 addlog("\n");
1007 }
1008
1009 return (0);
1010}
1011
1012/*
1013 * pf_state_insert() does the following:
1014 * - links the pf_state up with pf_state_key(s).
1015 * - inserts the pf_state_keys into pf_state_tree.
 1016 * - inserts the pf_state into pf_state_tree_id.
1017 * - tells pfsync about the state.
1018 *
1019 * pf_state_insert() owns the references to the pf_state_key structs
1020 * it is given. on failure to insert, these references are released.
1021 * on success, the caller owns a pf_state reference that allows it
1022 * to access the state keys.
1023 */
1024
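
A hypothetical caller-side sketch of the ownership contract just described (editor's illustration, not in pf.c): a failed insert must not unref the keys again, while a successful one leaves the caller holding a pf_state reference through which the keys stay reachable.

int
state_insert_sketch(struct pfi_kif *kif, struct pf_state *st,
    struct pf_state_key *skw, struct pf_state_key *sks)
{
	if (pf_state_insert(kif, &skw, &sks, st) != 0) {
		/* skw and sks references were released on failure */
		return (-1);
	}
	/* success: st->key[] now reference the (possibly swapped) keys */
	return (0);
}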
1025int
1026pf_state_insert(struct pfi_kif *kif, struct pf_state_key **skwp,
1027 struct pf_state_key **sksp, struct pf_state *st)
1028{
1029 struct pf_state_key *skw = *skwp;
1030 struct pf_state_key *sks = *sksp;
1031 int same = (skw == sks);
1032
 1033	PF_ASSERT_LOCKED();
 1034
 1035	st->kif = kif;
 1036	PF_STATE_ENTER_WRITE();
 1037
 1038	skw = pf_state_key_attach(skw, st, PF_SK_WIRE);
 1039	if (skw == NULL) {
 1040		pf_state_key_unref(sks);
 1041		PF_STATE_EXIT_WRITE();
 1042		return (-1);
 1043	}
 1044
 1045	if (same) {
 1046		/* pf_state_key_attach might have swapped skw */
 1047		pf_state_key_unref(sks);
 1048		st->key[PF_SK_STACK] = sks = pf_state_key_ref(skw);
 1049	} else if (pf_state_key_attach(sks, st, PF_SK_STACK) == NULL) {
 1050		pf_state_key_detach(st, PF_SK_WIRE);
 1051		PF_STATE_EXIT_WRITE();
 1052		return (-1);
 1053	}
 1054
 1055	if (st->id == 0 && st->creatorid == 0) {
 1056		st->id = htobe64(pf_status.stateid++);
 1057		st->creatorid = pf_status.hostid;
 1058	}
 1059	if (RBT_INSERT(pf_state_tree_id, &tree_id, st) != NULL) {
 1060		if (pf_status.debug >= LOG_NOTICE) {
 1061			log(LOG_NOTICE, "pf: state insert failed: "
 1062			    "id: %016llx creatorid: %08x",
 1063			    betoh64(st->id), ntohl(st->creatorid));
 1064			addlog("\n");
 1065		}
 1066		pf_detach_state(st);
 1067		PF_STATE_EXIT_WRITE();
 1068		return (-1);
 1069	}
 1070	pf_state_list_insert(&pf_state_list, st);
 1071	pf_status.fcounters[FCNT_STATE_INSERT]++;
 1072	pf_status.states++;
 1073	pfi_kif_ref(kif, PFI_KIF_REF_STATE);
 1074	PF_STATE_EXIT_WRITE();
 1075
 1076#if NPFSYNC > 0
1077 pfsync_insert_state(st);
1078#endif /* NPFSYNC > 0 */
1079
1080 *skwp = skw;
1081 *sksp = sks;
1082
1083 return (0);
1084}
1085
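The ownership contract in the comment above (pf_state_insert() consumes the key references it is handed, releasing them itself on any failure path) is easy to get wrong from the caller's side. Below is a standalone, hedged sketch of that pattern, not pf.c code: the refcounted object and all names are invented for illustration.

/*
 * Minimal userland model of "insert consumes the references":
 * on failure the caller must NOT unref again.
 */
#include <stdio.h>
#include <stdlib.h>

struct key {
	int refcnt;
};

static struct key *
key_ref(struct key *k)
{
	k->refcnt++;
	return (k);
}

static void
key_unref(struct key *k)
{
	if (--k->refcnt == 0)
		free(k);
}

/* consumes both references on failure, like pf_state_insert() */
static int
insert(struct key *kw, struct key *ks, int fail)
{
	if (fail) {
		key_unref(kw);
		key_unref(ks);
		return (-1);
	}
	/* on success the table now owns the references */
	return (0);
}

int
main(void)
{
	struct key *k = calloc(1, sizeof(*k));

	key_ref(k);	/* caller's reference for the "wire" key */
	key_ref(k);	/* ... and for the "stack" key */
	if (insert(k, k, 1) != 0) {
		/* references already released; touching k now would be a bug */
		printf("insert failed, references released by insert()\n");
		return (1);
	}
	return (0);
}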
1086	struct pf_state *
1087	pf_find_state_byid(struct pf_state_cmp *key)
1088	{
1089		pf_status.fcounters[FCNT_STATE_SEARCH]++;
1090
1091		return (RBT_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
1092	}
1093
1094	int
1095	pf_compare_state_keys(struct pf_state_key *a, struct pf_state_key *b,
1096	    struct pfi_kif *kif, u_int dir)
1097	{
1098		/* a (from hdr) and b (new) must be exact opposites of each other */
1099		if (a->af == b->af && a->proto == b->proto &&
1100		    PF_AEQ(&a->addr[0], &b->addr[1], a->af) &&
1101		    PF_AEQ(&a->addr[1], &b->addr[0], a->af) &&
1102		    a->port[0] == b->port[1] &&
1103		    a->port[1] == b->port[0] && a->rdomain == b->rdomain)
1104			return (0);
1105		else {
1106			/* mismatch. must not happen. */
1107			if (pf_status.debug >= LOG_ERR) {
1108				log(LOG_ERR,
1109				    "pf: state key linking mismatch! dir=%s, "
1110				    "if=%s, stored af=%u, a0: ",
1111				    dir == PF_OUT ? "OUT" : "IN",
1112				    kif->pfik_name, a->af);
1113				pf_print_host(&a->addr[0], a->port[0], a->af);
1114				addlog(", a1: ");
1115				pf_print_host(&a->addr[1], a->port[1], a->af);
1116				addlog(", proto=%u", a->proto);
1117				addlog(", found af=%u, a0: ", b->af);
1118				pf_print_host(&b->addr[0], b->port[0], b->af);
1119				addlog(", a1: ");
1120				pf_print_host(&b->addr[1], b->port[1], b->af);
1121				addlog(", proto=%u", b->proto);
1122				addlog(", proto=%u", b->proto);
1123			}
1124			return (-1);
1125		}
1126	}
1127
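The "exact opposites" requirement above means the key taken from the packet header must mirror the key found in the tree, with both address slots and both port slots swapped. A minimal userland model of just that check (invented types, not pf.c code):

#include <stdio.h>

struct skey {
	unsigned int addr[2];
	unsigned short port[2];
};

static int
keys_are_mirrored(const struct skey *a, const struct skey *b)
{
	return (a->addr[0] == b->addr[1] && a->addr[1] == b->addr[0] &&
	    a->port[0] == b->port[1] && a->port[1] == b->port[0]);
}

int
main(void)
{
	struct skey hdr = { { 0xc0a80001, 0xc0a80002 }, { 12345, 80 } };
	struct skey found = { { 0xc0a80002, 0xc0a80001 }, { 80, 12345 } };

	printf("mirrored: %d\n", keys_are_mirrored(&hdr, &found));
	return (0);
}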
1128	int
1129	pf_find_state(struct pf_pdesc *pd, struct pf_state_key_cmp *key,
1130	    struct pf_state **stp)
1131	{
1132		struct pf_state_key *sk, *pkt_sk;
1133		struct pf_state_item *si;
1134		struct pf_state *st = NULL;
1135
1136		pf_status.fcounters[FCNT_STATE_SEARCH]++;
1137		if (pf_status.debug >= LOG_DEBUG) {
1138			log(LOG_DEBUG, "pf: key search, %s on %s: ",
1139			    pd->dir == PF_OUT ? "out" : "in", pd->kif->pfik_name);
1140			pf_print_state_parts(NULL, (struct pf_state_key *)key, NULL);
1141			addlog("\n");
1142		}
1143
1144		pkt_sk = NULL;
1145		sk = NULL;
1146		if (pd->dir == PF_OUT) {
1147			/* first if block deals with outbound forwarded packet */
1148			pkt_sk = pd->m->m_pkthdr.pf.statekey;
1149
1150			if (!pf_state_key_isvalid(pkt_sk)) {
1151				pf_mbuf_unlink_state_key(pd->m);
1152				pkt_sk = NULL;
1153			}
1154
1155			if (pkt_sk && pf_state_key_isvalid(pkt_sk->sk_reverse))
1156				sk = pkt_sk->sk_reverse;
1157
1158			if (pkt_sk == NULL) {
1159				struct inpcb *inp = pd->m->m_pkthdr.pf.inp;
1160
1161				/* here we deal with local outbound packet */
1162				if (inp != NULL) {
1163					struct pf_state_key *inp_sk;
1164
1165					mtx_enter(&pf_inp_mtx);
1166					inp_sk = inp->inp_pf_sk;
1167					if (pf_state_key_isvalid(inp_sk)) {
1168						sk = inp_sk;
1169						mtx_leave(&pf_inp_mtx);
1170					} else if (inp_sk != NULL) {
1171						KASSERT(inp_sk->sk_inp == inp);
1172						inp_sk->sk_inp = NULL;
1173						inp->inp_pf_sk = NULL;
1174						mtx_leave(&pf_inp_mtx);
1175
1176						pf_state_key_unref(inp_sk);
1177						in_pcbunref(inp);
1178					} else
1179						mtx_leave(&pf_inp_mtx);
1180				}
1181			}
1182		}
1183
1184		if (sk == NULL) {
1185			if ((sk = RBT_FIND(pf_state_tree, &pf_statetbl,
1186			    (struct pf_state_key *)key)) == NULL)
1187				return (PF_DROP);
1188			if (pd->dir == PF_OUT && pkt_sk &&
1189			    pf_compare_state_keys(pkt_sk, sk, pd->kif, pd->dir) == 0)
1190				pf_state_key_link_reverse(sk, pkt_sk);
1191			else if (pd->dir == PF_OUT)
1192				pf_state_key_link_inpcb(sk, pd->m->m_pkthdr.pf.inp);
1193	}
1194
1195		/* remove firewall data from outbound packet */
1196		if (pd->dir == PF_OUT)
1197			pf_pkt_addr_changed(pd->m);
1198
1199		/* list is sorted, if-bound states before floating ones */
1200		TAILQ_FOREACH(si, &sk->sk_states, si_entry) {
1201			struct pf_state *sist = si->si_st;
1202			if (sist->timeout != PFTM_PURGE &&
1203			    (sist->kif == pfi_all || sist->kif == pd->kif) &&
1204			    ((sist->key[PF_SK_WIRE]->af == sist->key[PF_SK_STACK]->af &&
1205			    sk == (pd->dir == PF_IN ? sist->key[PF_SK_WIRE] :
1206			    sist->key[PF_SK_STACK])) ||
1207			    (sist->key[PF_SK_WIRE]->af != sist->key[PF_SK_STACK]->af
1208			    && pd->dir == PF_IN && (sk == sist->key[PF_SK_STACK] ||
1209			    sk == sist->key[PF_SK_WIRE])))) {
1210				st = sist;
1211				break;
1212			}
1213		}
1214
1215		if (st == NULL)
1216			return (PF_DROP);
1217		if (ISSET(st->state_flags, PFSTATE_INP_UNLINKED))
1218			return (PF_DROP);
1219
1220		if (st->rule.ptr->pktrate.limit && pd->dir == st->direction) {
1221			pf_add_threshold(&st->rule.ptr->pktrate);
1222			if (pf_check_threshold(&st->rule.ptr->pktrate))
1223				return (PF_DROP);
1224		}
1225
1226		*stp = st;
1227
1228		return (PF_MATCH);
1229	}
1230
1231	struct pf_state *
1232	pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1233	{
1234		struct pf_state_key *sk;
1235		struct pf_state_item *si, *ret = NULL;
1236
1237		pf_status.fcounters[FCNT_STATE_SEARCH]++;
1238
1239		sk = RBT_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key);
1240
1241		if (sk != NULL) {
1242			TAILQ_FOREACH(si, &sk->sk_states, si_entry) {
1243				struct pf_state *sist = si->si_st;
1244				if (dir == PF_INOUT ||
1245				    (sk == (dir == PF_IN ? sist->key[PF_SK_WIRE] :
1246				    sist->key[PF_SK_STACK]))) {
1247					if (more == NULL)
1248						return (sist);
1249
1250					if (ret)
1251						(*more)++;
1252					else
1253						ret = si;
1254				}
1255			}
1256		}
1257		return (ret ? ret->si_st : NULL);
1258	}
1259
1260	void
1261	pf_state_peer_hton(const struct pf_state_peer *s, struct pfsync_state_peer *d)
1262	{
1263		d->seqlo = htonl(s->seqlo);
1264		d->seqhi = htonl(s->seqhi);
1265		d->seqdiff = htonl(s->seqdiff);
1266		d->max_win = htons(s->max_win);
1267		d->mss = htons(s->mss);
1268		d->state = s->state;
1269		d->wscale = s->wscale;
1270		if (s->scrub) {
1271			d->scrub.pfss_flags =
1272			    htons(s->scrub->pfss_flags & PFSS_TIMESTAMP);
1273			d->scrub.pfss_ttl = (s)->scrub->pfss_ttl;
1274			d->scrub.pfss_ts_mod = htonl((s)->scrub->pfss_ts_mod);
1275			d->scrub.scrub_flag = PFSYNC_SCRUB_FLAG_VALID;
1276		}
1277	}
1278
1279	void
1280	pf_state_peer_ntoh(const struct pfsync_state_peer *s, struct pf_state_peer *d)
1281	{
1282		d->seqlo = ntohl(s->seqlo);
1283		d->seqhi = ntohl(s->seqhi);
1284		d->seqdiff = ntohl(s->seqdiff);
1285		d->max_win = ntohs(s->max_win);
1286		d->mss = ntohs(s->mss);
1287		d->state = s->state;
1288		d->wscale = s->wscale;
1289		if (s->scrub.scrub_flag == PFSYNC_SCRUB_FLAG_VALID &&
1290		    d->scrub != NULL) {
1291			d->scrub->pfss_flags =
1292			    ntohs(s->scrub.pfss_flags) & PFSS_TIMESTAMP;
1293			d->scrub->pfss_ttl = s->scrub.pfss_ttl;
1294			d->scrub->pfss_ts_mod = ntohl(s->scrub.pfss_ts_mod);
1295		}
1296	}
1297
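The two peer helpers above are inverses: every field converted with htonl()/htons() on export is converted back with ntohl()/ntohs() on import. A small host-side program (assumed here, not part of pf.c) can demonstrate the round trip with the standard byte-order functions:

#include <stdio.h>
#include <arpa/inet.h>

int
main(void)
{
	unsigned int seqlo = 0x12345678;
	unsigned short mss = 1460;

	/* hton on export, ntoh on import, as in the peer helpers */
	unsigned int wire_seqlo = htonl(seqlo);
	unsigned short wire_mss = htons(mss);

	printf("seqlo roundtrip ok: %d\n", ntohl(wire_seqlo) == seqlo);
	printf("mss roundtrip ok: %d\n", ntohs(wire_mss) == mss);
	return (0);
}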
1298	void
1299	pf_state_export(struct pfsync_state *sp, struct pf_state *st)
1300	{
1301		int32_t expire;
1302
1303		memset(sp, 0, sizeof(struct pfsync_state));
1304
1305		/* copy from state key */
1306		sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
1307		sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
1308		sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
1309		sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
1310		sp->key[PF_SK_WIRE].rdomain = htons(st->key[PF_SK_WIRE]->rdomain);
1311		sp->key[PF_SK_WIRE].af = st->key[PF_SK_WIRE]->af;
1312		sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
1313		sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
1314		sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
1315		sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
1316		sp->key[PF_SK_STACK].rdomain = htons(st->key[PF_SK_STACK]->rdomain);
1317		sp->key[PF_SK_STACK].af = st->key[PF_SK_STACK]->af;
1318		sp->rtableid[PF_SK_WIRE] = htonl(st->rtableid[PF_SK_WIRE]);
1319		sp->rtableid[PF_SK_STACK] = htonl(st->rtableid[PF_SK_STACK]);
1320		sp->proto = st->key[PF_SK_WIRE]->proto;
1321		sp->af = st->key[PF_SK_WIRE]->af;
1322
1323		/* copy from state */
1324		strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
1325		sp->rt = st->rt;
1326		sp->rt_addr = st->rt_addr;
1327		sp->creation = htonl(getuptime() - st->creation);
1328		expire = pf_state_expires(st, st->timeout);
1329		if (expire <= getuptime())
1330			sp->expire = htonl(0);
1331		else
1332			sp->expire = htonl(expire - getuptime());
1333
1334		sp->direction = st->direction;
1335	#if NPFLOG > 0
1336		sp->log = st->log;
1337	#endif /* NPFLOG > 0 */
1338		sp->timeout = st->timeout;
1339		sp->state_flags = htons(st->state_flags);
1340		if (READ_ONCE(st->sync_defer) != NULL)
1341			sp->state_flags |= htons(PFSTATE_ACK);
1342		if (!SLIST_EMPTY(&st->src_nodes))
1343			sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
1344
1345		sp->id = st->id;
1346		sp->creatorid = st->creatorid;
1347		pf_state_peer_hton(&st->src, &sp->src);
1348		pf_state_peer_hton(&st->dst, &sp->dst);
1349
1350		if (st->rule.ptr == NULL)
1351			sp->rule = htonl(-1);
1352		else
1353			sp->rule = htonl(st->rule.ptr->nr);
1354		if (st->anchor.ptr == NULL)
1355			sp->anchor = htonl(-1);
1356		else
1357			sp->anchor = htonl(st->anchor.ptr->nr);
1358		sp->nat_rule = htonl(-1);	/* left for compat, nat_rule is gone */
1359
1360		pf_state_counter_hton(st->packets[0], sp->packets[0]);
1361		pf_state_counter_hton(st->packets[1], sp->packets[1]);
1362		pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
1363		pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
1364
1365		sp->max_mss = htons(st->max_mss);
1366		sp->min_ttl = st->min_ttl;
1367		sp->set_tos = st->set_tos;
1368		sp->set_prio[0] = st->set_prio[0];
1369		sp->set_prio[1] = st->set_prio[1];
1370	}
1371
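The expire handling at lines 1328-1332 converts an absolute uptime deadline into a relative remaining lifetime for the wire format. As a worked example: with getuptime() == 1000 and pf_state_expires() returning 1090, sp->expire carries htonl(90), i.e. 90 seconds left; if the deadline has already passed, 0 is sent rather than a negative (wrapped) value.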
1372	int
1373	pf_state_alloc_scrub_memory(const struct pfsync_state_peer *s,
1374	    struct pf_state_peer *d)
1375	{
1376		if (s->scrub.scrub_flag && d->scrub == NULL)
1377			return (pf_normalize_tcp_alloc(d));
1378
1379		return (0);
1380	}
1381
1382	#if NPFSYNC > 0
1383	int
1384	pf_state_import(const struct pfsync_state *sp, int flags)
1385	{
1386		struct pf_state *st = NULL;
1387		struct pf_state_key *skw = NULL, *sks = NULL;
1388		struct pf_rule *r = NULL;
1389		struct pfi_kif *kif;
1390		int pool_flags;
1391		int error = ENOMEM;
1392		int n = 0;
1393
1394		PF_ASSERT_LOCKED();
1395
1396		if (sp->creatorid == 0) {
1397			DPFPRINTF(LOG_NOTICE, "%s: invalid creator id: %08x", __func__,
1398			    ntohl(sp->creatorid));
1399			return (EINVAL);
1400		}
1401
1402		if ((kif = pfi_kif_get(sp->ifname, NULL)) == NULL) {
1403			DPFPRINTF(LOG_NOTICE, "%s: unknown interface: %s", __func__,
1404			    sp->ifname);
1405			if (flags & PFSYNC_SI_IOCTL)
1406				return (EINVAL);
1407			return (0);	/* skip this state */
1408		}
1409
1410		if (sp->af == 0)
1411			return (0);	/* skip this state */
1412
1413		/*
1414		 * If the ruleset checksums match or the state is coming from the ioctl,
1415		 * it's safe to associate the state with the rule of that number.
1416		 */
1417		if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
1418		    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) &&
1419		    ntohl(sp->rule) < pf_main_ruleset.rules.active.rcount) {
1420			TAILQ_FOREACH(r, pf_main_ruleset.rules.active.ptr, entries)
1421				if (ntohl(sp->rule) == n++)
1422					break;
1423		} else
1424			r = &pf_default_rule;
1425
1426		if ((r->max_states && r->states_cur >= r->max_states))
1427			goto cleanup;
1428
1429		if (flags & PFSYNC_SI_IOCTL)
1430			pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
1431		else
1432			pool_flags = PR_NOWAIT | PR_LIMITFAIL | PR_ZERO;
1433
1434		if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
1435			goto cleanup;
1436
1437		if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
1438			goto cleanup;
1439
1440		if ((sp->key[PF_SK_WIRE].af &&
1441		    (sp->key[PF_SK_WIRE].af != sp->key[PF_SK_STACK].af)) ||
1442		    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
1443		    &sp->key[PF_SK_STACK].addr[0], sp->af) ||
1444		    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
1445		    &sp->key[PF_SK_STACK].addr[1], sp->af) ||
1446		    sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
1447		    sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1] ||
1448		    sp->key[PF_SK_WIRE].rdomain != sp->key[PF_SK_STACK].rdomain) {
1449			if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
1450				goto cleanup;
1451		} else
1452			sks = pf_state_key_ref(skw);
1453
1454		/* allocate memory for scrub info */
1455		if (pf_state_alloc_scrub_memory(&sp->src, &st->src) ||
1456		    pf_state_alloc_scrub_memory(&sp->dst, &st->dst))
1457			goto cleanup;
1458
1459		/* copy to state key(s) */
1460		skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
1461		skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
1462		skw->port[0] = sp->key[PF_SK_WIRE].port[0];
1463		skw->port[1] = sp->key[PF_SK_WIRE].port[1];
1464		skw->rdomain = ntohs(sp->key[PF_SK_WIRE].rdomain);
1465		skw->proto = sp->proto;
1466		if (!(skw->af = sp->key[PF_SK_WIRE].af))
1467			skw->af = sp->af;
1468		skw->hash = pf_pkt_hash(skw->af, skw->proto,
1469		    &skw->addr[0], &skw->addr[1], skw->port[0], skw->port[1]);
1470
1471		if (sks != skw) {
1472			sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
1473			sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
1474			sks->port[0] = sp->key[PF_SK_STACK].port[0];
1475			sks->port[1] = sp->key[PF_SK_STACK].port[1];
1476			sks->rdomain = ntohs(sp->key[PF_SK_STACK].rdomain);
1477			if (!(sks->af = sp->key[PF_SK_STACK].af))
1478				sks->af = sp->af;
1479			if (sks->af != skw->af) {
1480				switch (sp->proto) {
1481				case IPPROTO_ICMP:
1482					sks->proto = IPPROTO_ICMPV6;
1483					break;
1484				case IPPROTO_ICMPV6:
1485					sks->proto = IPPROTO_ICMP;
1486					break;
1487				default:
1488					sks->proto = sp->proto;
1489				}
1490			} else
1491				sks->proto = sp->proto;
1492
1493			if (((sks->af != AF_INET) && (sks->af != AF_INET6)) ||
1494			    ((skw->af != AF_INET) && (skw->af != AF_INET6))) {
1495				error = EINVAL;
1496				goto cleanup;
1497			}
1498
1499			sks->hash = pf_pkt_hash(sks->af, sks->proto,
1500			    &sks->addr[0], &sks->addr[1], sks->port[0], sks->port[1]);
1501
1502		} else if ((sks->af != AF_INET) && (sks->af != AF_INET6)) {
1503			error = EINVAL;
1504			goto cleanup;
1505		}
1506		st->rtableid[PF_SK_WIRE] = ntohl(sp->rtableid[PF_SK_WIRE]);
1507		st->rtableid[PF_SK_STACK] = ntohl(sp->rtableid[PF_SK_STACK]);
1508
1509		/* copy to state */
1510		st->rt_addr = sp->rt_addr;
1511		st->rt = sp->rt;
1512		st->creation = getuptime() - ntohl(sp->creation);
1513		st->expire = getuptime();
1514		if (ntohl(sp->expire)) {
1515			u_int32_t timeout;
1516
1517			timeout = r->timeout[sp->timeout];
1518			if (!timeout)
1519				timeout = pf_default_rule.timeout[sp->timeout];
1520
1521			/* sp->expire may have been adaptively scaled by export. */
1522			st->expire -= timeout - ntohl(sp->expire);
1523		}
1524
1525		st->direction = sp->direction;
1526		st->log = sp->log;
1527		st->timeout = sp->timeout;
1528		st->state_flags = ntohs(sp->state_flags);
1529		st->max_mss = ntohs(sp->max_mss);
1530		st->min_ttl = sp->min_ttl;
1531		st->set_tos = sp->set_tos;
1532		st->set_prio[0] = sp->set_prio[0];
1533		st->set_prio[1] = sp->set_prio[1];
1534
1535		st->id = sp->id;
1536		st->creatorid = sp->creatorid;
1537		pf_state_peer_ntoh(&sp->src, &st->src);
1538		pf_state_peer_ntoh(&sp->dst, &st->dst);
1539
1540		st->rule.ptr = r;
1541		st->anchor.ptr = NULL;
1542
1543		PF_REF_INIT(st->refcnt);
1544		mtx_init(&st->mtx, IPL_NET);
1545
1546		/* XXX when we have anchors, use STATE_INC_COUNTERS */
1547		r->states_cur++;
1548		r->states_tot++;
1549
1550		st->sync_state = PFSYNC_S_NONE;
1551		st->pfsync_time = getuptime();
1552	#if NPFSYNC > 0
1553		pfsync_init_state(st, skw, sks, flags);
1554	#endif
1555
1556		if (pf_state_insert(kif, &skw, &sks, st) != 0) {
1557			/* XXX when we have anchors, use STATE_DEC_COUNTERS */
1558			r->states_cur--;
1559			error = EEXIST;
1560			goto cleanup_state;
1561		}
1562
1563		return (0);
1564
1565	 cleanup:
1566		if (skw != NULL)
1567			pf_state_key_unref(skw);
1568		if (sks != NULL)
1569			pf_state_key_unref(sks);
1570
1571	 cleanup_state:	/* pf_state_insert frees the state keys */
1572		if (st) {
1573			if (st->dst.scrub)
1574				pool_put(&pf_state_scrub_pl, st->dst.scrub);
1575			if (st->src.scrub)
1576				pool_put(&pf_state_scrub_pl, st->src.scrub);
1577			pool_put(&pf_state_pl, st);
1578		}
1579		return (error);
1580	}
1581	#endif /* NPFSYNC > 0 */
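The import side at lines 1514-1522 is the inverse of the export arithmetic: the peer sent a remaining lifetime, and the receiver reconstructs an absolute expiry base. Worked example: with a configured timeout of 90 seconds and ntohl(sp->expire) == 30, st->expire becomes getuptime() - 60, so pf_state_expires() (which adds the timeout back) reports a deadline 30 seconds from now, matching the sender's view even if the sender had adaptively scaled the value.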
1582
1583/* END state table stuff */
1584
1585	void pf_purge_states(void *);
1586	struct task pf_purge_states_task =
1587	    TASK_INITIALIZER(pf_purge_states, NULL);
1588
1589	void pf_purge_states_tick(void *);
1590	struct timeout pf_purge_states_to =
1591	    TIMEOUT_INITIALIZER(pf_purge_states_tick, NULL);
1592
1593	unsigned int pf_purge_expired_states(unsigned int, unsigned int);
1594
1595	/*
1596	 * how many states to scan this interval.
1597	 *
1598	 * this is set when the timeout fires, and reduced by the task. the
1599	 * task will reschedule itself until the limit is reduced to zero,
1600	 * and then it adds the timeout again.
1601	 */
1602	unsigned int pf_purge_states_limit;
1603
1604	/*
1605	 * limit how many states are processed with locks held per run of
1606	 * the state purge task.
1607	 */
1608	unsigned int pf_purge_states_collect = 64;
1609
1610	void
1611	pf_purge_states_tick(void *null)
1612	{
1613		unsigned int limit = pf_status.states;
1614		unsigned int interval = pf_default_rule.timeout[PFTM_INTERVAL];
1615
1616		if (limit == 0) {
1617			timeout_add_sec(&pf_purge_states_to, 1);
1618			return;
1619		}
1620
1621		/*
1622		 * process a fraction of the state table every second
1623		 */
1624
1625		if (interval > 1)
1626			limit /= interval;
1627
1628		pf_purge_states_limit = limit;
1629		task_add(systqmp, &pf_purge_states_task);
1630	}
1631
1632	void
1633	pf_purge_states(void *null)
1634	{
1635		unsigned int limit;
1636		unsigned int scanned;
1637
1638		limit = pf_purge_states_limit;
1639		if (limit < pf_purge_states_collect)
1640			limit = pf_purge_states_collect;
1641
1642		scanned = pf_purge_expired_states(limit, pf_purge_states_collect);
1643		if (scanned >= pf_purge_states_limit) {
1644			/* we've run out of states to scan this "interval" */
1645			timeout_add_sec(&pf_purge_states_to, 1);
1646			return;
1647		}
1648
1649		pf_purge_states_limit -= scanned;
1650		task_add(systqmp, &pf_purge_states_task);
1651	}
1652
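Taken together, the tick and the task spread one full sweep of the state table over PFTM_INTERVAL seconds. Worked example: with 100000 states and an interval of 10 seconds, the tick sets pf_purge_states_limit to 10000; the task then scans in bursts, each burst stopping after collecting pf_purge_states_collect (64) expired states or when the scheduler wants the CPU back, re-queueing itself until the 10000-state budget for this tick is spent, and only then re-arming the one-second timeout.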
1653	void pf_purge_tick(void *);
1654	struct timeout pf_purge_to =
1655	    TIMEOUT_INITIALIZER(pf_purge_tick, NULL);
1656
1657	void pf_purge(void *);
1658	struct task pf_purge_task =
1659	    TASK_INITIALIZER(pf_purge, NULL);
1660
1661	void
1662	pf_purge_tick(void *null)
1663	{
1664		task_add(systqmp, &pf_purge_task);
1665	}
1666
1667	void
1668	pf_purge(void *null)
1669	{
1670		unsigned int interval = max(1, pf_default_rule.timeout[PFTM_INTERVAL]);
1671
1672		PF_LOCK();
1673
1674		pf_purge_expired_src_nodes();
1675
1676		PF_UNLOCK();
1677
1678		/*
1679		 * Fragments don't require PF_LOCK(), they use their own lock.
1680		 */
1681		pf_purge_expired_fragments();
1682
1683		/* interpret the interval as idle time between runs */
1684		timeout_add_sec(&pf_purge_to, interval);
1685	}
1686
1687	int32_t
1688	pf_state_expires(const struct pf_state *st, uint8_t stimeout)
1689	{
1690		u_int32_t timeout;
1691		u_int32_t start;
1692		u_int32_t end;
1693		u_int32_t states;
1694
1695		/*
1696		 * pf_state_expires is used by the state purge task to
1697		 * decide if a state is a candidate for cleanup, and by the
1698		 * pfsync state export code to populate an expiry time.
1699		 *
1700		 * this function may be called by the state purge task while
1701		 * the state is being modified. avoid inconsistent reads of
1702		 * state->timeout by having the caller do the read (and any
1703		 * checks it needs to do on the same variable) and then pass
1704		 * their view of the timeout in here for this function to use.
1705		 * the only consequence of using a stale timeout value is
1706		 * that the state won't be a candidate for purging until the
1707		 * next pass of the purge task.
1708		 */
1709
1710		/* handle all PFTM_* >= PFTM_MAX here */
1711		if (stimeout >= PFTM_MAX)
1712			return (0);
1713
1714		KASSERT(stimeout < PFTM_MAX);
1715
1716		timeout = st->rule.ptr->timeout[stimeout];
1717		if (!timeout)
1718			timeout = pf_default_rule.timeout[stimeout];
1719
1720		start = st->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1721		if (start) {
1722			end = st->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1723			states = st->rule.ptr->states_cur;
1724		} else {
1725			start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1726			end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1727			states = pf_status.states;
1728		}
1729		if (end && states > start && start < end) {
1730			if (states >= end)
1731				return (0);
1732
1733			timeout = (u_int64_t)timeout * (end - states) / (end - start);
1734		}
1735
1736		return (st->expire + timeout);
1737	}
1738
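The scaling step at line 1733 shrinks the configured timeout linearly between the adaptive start and end thresholds, reaching zero at or beyond end. A self-contained sketch of just that formula (the driver code and defaults here are invented for illustration; only the arithmetic is taken from pf_state_expires):

#include <stdio.h>
#include <stdint.h>

static uint32_t
adaptive_timeout(uint32_t timeout, uint32_t start, uint32_t end,
    uint32_t states)
{
	if (end && states > start && start < end) {
		if (states >= end)
			return (0);
		timeout = (uint64_t)timeout * (end - states) / (end - start);
	}
	return (timeout);
}

int
main(void)
{
	/* thresholds in the spirit of adaptive.start/adaptive.end */
	uint32_t start = 6000, end = 12000, timeout = 86400;

	/* halfway between start and end -> timeout halves to 43200 */
	printf("%u\n", adaptive_timeout(timeout, start, end, 9000));
	/* at or past end -> 0, the state is immediately expirable */
	printf("%u\n", adaptive_timeout(timeout, start, end, 12000));
	return (0);
}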
1739	void
1740	pf_purge_expired_src_nodes(void)
1741	{
1742		struct pf_src_node *cur, *next;
1743
1744		PF_ASSERT_LOCKED();
1745
1746		RB_FOREACH_SAFE(cur, pf_src_tree, &tree_src_tracking, next) {
1747			if (cur->states == 0 && cur->expire <= getuptime()) {
1748				pf_remove_src_node(cur);
1749			}
1750		}
1751	}
1752
1753	void
1754	pf_src_tree_remove_state(struct pf_state *st)
1755	{
1756		u_int32_t timeout;
1757		struct pf_sn_item *sni;
1758
1759		while ((sni = SLIST_FIRST(&st->src_nodes)) != NULL) {
1760			SLIST_REMOVE_HEAD(&st->src_nodes, next);
1761			if (st->src.tcp_est)
1762				--sni->sn->conn;
1763			if (--sni->sn->states == 0) {
1764				timeout = st->rule.ptr->timeout[PFTM_SRC_NODE];
1765				if (!timeout)
1766					timeout =
1767					    pf_default_rule.timeout[PFTM_SRC_NODE];
1768				sni->sn->expire = getuptime() + timeout;
1769			}
1770			pool_put(&pf_sn_item_pl, sni);
1771		}
1772	}
1773
1774	void
1775	pf_remove_state(struct pf_state *st)
1776	{
1777		PF_ASSERT_LOCKED();
1778
1779		mtx_enter(&st->mtx);
1780		if (st->timeout == PFTM_UNLINKED) {
1781			mtx_leave(&st->mtx);
1782			return;
1783		}
1784		st->timeout = PFTM_UNLINKED;
1785		mtx_leave(&st->mtx);
1786
1787		/* handle load balancing related tasks */
1788		pf_postprocess_addr(st);
1789
1790		if (st->src.state == PF_TCPS_PROXY_DST) {
1791			pf_send_tcp(st->rule.ptr, st->key[PF_SK_WIRE]->af,
1792			    &st->key[PF_SK_WIRE]->addr[1],
1793			    &st->key[PF_SK_WIRE]->addr[0],
1794			    st->key[PF_SK_WIRE]->port[1],
1795			    st->key[PF_SK_WIRE]->port[0],
1796			    st->src.seqhi, st->src.seqlo + 1,
1797			    TH_RST|TH_ACK, 0, 0, 0, 1, st->tag,
1798			    st->key[PF_SK_WIRE]->rdomain);
1799		}
1800		if (st->key[PF_SK_STACK]->proto == IPPROTO_TCP)
1801			pf_set_protostate(st, PF_PEER_BOTH, TCPS_CLOSED);
1802
1803		RBT_REMOVE(pf_state_tree_id, &tree_id, st);
1804	#if NPFLOW > 0
1805		if (st->state_flags & PFSTATE_PFLOW)
1806			export_pflow(st);
1807	#endif /* NPFLOW > 0 */
1808	#if NPFSYNC > 0
1809		pfsync_delete_state(st);
1810	#endif /* NPFSYNC > 0 */
1811		pf_src_tree_remove_state(st);
1812		pf_detach_state(st);
1813	}
1814
1815	void
1816	pf_remove_divert_state(struct inpcb *inp)
1817	{
1818		struct pf_state_key *sk;
1819		struct pf_state_item *si;
1820
1821		PF_ASSERT_UNLOCKED();
1822
1823		if (READ_ONCE(inp->inp_pf_sk) == NULL)
1824			return;
1825
1826		mtx_enter(&pf_inp_mtx);
1827		sk = pf_state_key_ref(inp->inp_pf_sk);
1828		mtx_leave(&pf_inp_mtx);
1829		if (sk == NULL)
1830			return;
1831
1832		PF_LOCK();
1833		PF_STATE_ENTER_WRITE();
1834		TAILQ_FOREACH(si, &sk->sk_states, si_entry) {
1835			struct pf_state *sist = si->si_st;
1836			if (sk == sist->key[PF_SK_STACK] && sist->rule.ptr &&
1837			    (sist->rule.ptr->divert.type == PF_DIVERT_TO ||
1838			    sist->rule.ptr->divert.type == PF_DIVERT_REPLY)) {
1839				if (sist->key[PF_SK_STACK]->proto == IPPROTO_TCP &&
1840				    sist->key[PF_SK_WIRE] != sist->key[PF_SK_STACK]) {
1841					/*
1842					 * If the local address is translated, keep
1843					 * the state for "tcp.closed" seconds to
1844					 * prevent its source port from being reused.
1845					 */
1846					if (sist->src.state < TCPS_FIN_WAIT_2 ||
1847					    sist->dst.state < TCPS_FIN_WAIT_2) {
1848						pf_set_protostate(sist, PF_PEER_BOTH,
1849						    TCPS_TIME_WAIT);
1850						pf_update_state_timeout(sist,
1851						    PFTM_TCP_CLOSED);
1852						sist->expire = getuptime();
1853					}
1854					sist->state_flags |= PFSTATE_INP_UNLINKED;
1855				} else
1856					pf_remove_state(sist);
1857				break;
1858			}
1859		}
1860		PF_STATE_EXIT_WRITE();
1861		PF_UNLOCK();
1862
1863		pf_state_key_unref(sk);
1864	}
1865
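A concrete reading of the translated-divert branch above: suppose a divert rule rewrote the connection so PF_SK_WIRE and PF_SK_STACK differ, and the socket closes while the TCP state is still short of FIN_WAIT_2 on either side. The state is then forced to TIME_WAIT with the tcp.closed timeout (and its expire clock reset) and flagged PFSTATE_INP_UNLINKED, which makes pf_find_state() at lines 1217-1218 stop matching it, instead of being removed outright; the translated source port therefore stays reserved until that timeout runs out and the purge task collects the state.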
1866	void
1867	pf_free_state(struct pf_state *st)
1868	{
1869		struct pf_rule_item *ri;
1870
1871		PF_ASSERT_LOCKED();
1872
1873	#if NPFSYNC > 0
1874		if (pfsync_state_in_use(st))
1875			return;
1876	#endif /* NPFSYNC > 0 */
1877
1878		KASSERT(st->timeout == PFTM_UNLINKED);
1879		if (--st->rule.ptr->states_cur == 0 &&
1880		    st->rule.ptr->src_nodes == 0)
1881			pf_rm_rule(NULL, st->rule.ptr);
1882		if (st->anchor.ptr != NULL)
1883			if (--st->anchor.ptr->states_cur == 0)
1884				pf_rm_rule(NULL, st->anchor.ptr);
1885		while ((ri = SLIST_FIRST(&st->match_rules))) {
1886			SLIST_REMOVE_HEAD(&st->match_rules, entry);
1887			if (--ri->r->states_cur == 0 &&
1888			    ri->r->src_nodes == 0)
1889				pf_rm_rule(NULL, ri->r);
1890			pool_put(&pf_rule_item_pl, ri);
1891		}
1892		pf_normalize_tcp_cleanup(st);
1893		pfi_kif_unref(st->kif, PFI_KIF_REF_STATE);
1894		pf_state_list_remove(&pf_state_list, st);
1895		if (st->tag)
1896			pf_tag_unref(st->tag);
1897		pf_state_unref(st);
1898		pf_status.fcounters[FCNT_STATE_REMOVALS]++;
1899		pf_status.states--;
1900	}
1901
1902	unsigned int
1903	pf_purge_expired_states(const unsigned int limit, const unsigned int collect)
1904	{
1905		/*
1906		 * this task/thread/context/whatever is the only thing that
1907		 * removes states from the pf_state_list, so the cur reference
1908		 * it holds between calls is guaranteed to still be in the
1909		 * list.
1910		 */
1911		static struct pf_state *cur = NULL;
1912
1913		struct pf_state *head, *tail;
1914		struct pf_state *st;
1915		SLIST_HEAD(pf_state_gcl, pf_state) gcl = SLIST_HEAD_INITIALIZER(gcl);
1916		time_t now;
1917		unsigned int scanned;
1918		unsigned int collected = 0;
1919
1920		PF_ASSERT_UNLOCKED();
1921
1922		rw_enter_read(&pf_state_list.pfs_rwl);
1923
1924		mtx_enter(&pf_state_list.pfs_mtx);
1925		head = TAILQ_FIRST(&pf_state_list.pfs_list);
1926		tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue);
1927		mtx_leave(&pf_state_list.pfs_mtx);
1928
1929		if (head == NULL) {
1930			/* the list is empty */
1931			rw_exit_read(&pf_state_list.pfs_rwl);
1932			return (limit);
1933		}
1934
1935		/* (re)start at the front of the list */
1936		if (cur == NULL)
1937			cur = head;
1938
1939		now = getuptime();
1940
1941		for (scanned = 0; scanned < limit; scanned++) {
1942			uint8_t stimeout = cur->timeout;
1943			unsigned int limited = 0;
1944
1945			if ((stimeout == PFTM_UNLINKED) ||
1946			    (pf_state_expires(cur, stimeout) <= now)) {
1947				st = pf_state_ref(cur);
1948				SLIST_INSERT_HEAD(&gcl, st, gc_list);
1949
1950				if (++collected >= collect)
1951					limited = 1;
1952			}
1953
1954			/* don't iterate past the end of our view of the list */
1955			if (cur == tail) {
1956				cur = NULL;
1957				break;
1958			}
1959
1960			cur = TAILQ_NEXT(cur, entry_list);
1961
1962			/* don't spend too much time here. */
1963			if (ISSET(READ_ONCE(curcpu()->ci_schedstate.spc_schedflags),
1964			    SPCF_SHOULDYIELD) || limited)
1965				break;
1966		}
1967
1968		rw_exit_read(&pf_state_list.pfs_rwl);
1969
1970		if (SLIST_EMPTY(&gcl))
1971			return (scanned);
1972
1973		rw_enter_write(&pf_state_list.pfs_rwl);
1974		PF_LOCK();
1975		PF_STATE_ENTER_WRITE();
1976		SLIST_FOREACH(st, &gcl, gc_list) {
1977			if (st->timeout != PFTM_UNLINKED)
1978				pf_remove_state(st);
1979
1980			pf_free_state(st);
1981		}
1982		PF_STATE_EXIT_WRITE();
1983		PF_UNLOCK();
1984		rw_exit_write(&pf_state_list.pfs_rwl);
1985
1986		while ((st = SLIST_FIRST(&gcl)) != NULL) {
1987			SLIST_REMOVE_HEAD(&gcl, gc_list);
1988			pf_state_unref(st);
1989		}
1990
1991		return (scanned);
1992	}
1993
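The function above is deliberately two-phase: expired states are only collected (referenced onto gcl) under the read lock, and the expensive pf_remove_state()/pf_free_state() work happens in a second pass under the write locks. With collect == 64, a run over a large table may scan thousands of entries but never holds pf_state_lock for write across more than 64 removals, and the final unref loop runs with no locks held at all. The st->timeout != PFTM_UNLINKED re-check in the second pass matters because another context may have removed a collected state between the two phases.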
1994	int
1995	pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw, int wait)
1996	{
1997		if (aw->type != PF_ADDR_TABLE)
1998			return (0);
1999		if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname, wait)) == NULL)
2000			return (1);
2001		return (0);
2002	}
2003
2004	void
2005	pf_tbladdr_remove(struct pf_addr_wrap *aw)
2006	{
2007		if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
2008			return;
2009		pfr_detach_table(aw->p.tbl);
2010		aw->p.tbl = NULL;
2011	}
2012
2013	void
2014	pf_tbladdr_copyout(struct pf_addr_wrap *aw)
2015	{
2016		struct pfr_ktable *kt = aw->p.tbl;
2017
2018		if (aw->type != PF_ADDR_TABLE || kt == NULL)
2019			return;
2020		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2021			kt = kt->pfrkt_root;
2022		aw->p.tbl = NULL;
2023		aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
2024		    kt->pfrkt_cnt : -1;
2025	}
2026
2027	void
2028	pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
2029	{
2030		switch (af) {
2031		case AF_INET: {
2032			u_int32_t a = ntohl(addr->addr32[0]);
2033			addlog("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
2034			    (a>>8)&255, a&255);
2035			if (p) {
2036				p = ntohs(p);
2037				addlog(":%u", p);
2038			}
2039			break;
2040		}
2041	#ifdef INET6
2042		case AF_INET6: {
2043			u_int16_t b;
2044			u_int8_t i, curstart, curend, maxstart, maxend;
2045			curstart = curend = maxstart = maxend = 255;
2046			for (i = 0; i < 8; i++) {
2047				if (!addr->addr16[i]) {
2048					if (curstart == 255)
2049						curstart = i;
2050					curend = i;
2051				} else {
2052					if ((curend - curstart) >
2053					    (maxend - maxstart)) {
2054						maxstart = curstart;
2055						maxend = curend;
2056					}
2057					curstart = curend = 255;
2058				}
2059			}
2060			if ((curend - curstart) >
2061			    (maxend - maxstart)) {
2062				maxstart = curstart;
2063				maxend = curend;
2064			}
2065			for (i = 0; i < 8; i++) {
2066				if (i >= maxstart && i <= maxend) {
2067					if (i == 0)
2068						addlog(":");
2069					if (i == maxend)
2070						addlog(":");
2071				} else {
2072					b = ntohs(addr->addr16[i]);
2073					addlog("%x", b);
2074					if (i < 7)
2075						addlog(":");
2076				}
2077			}
2078			if (p) {
2079				p = ntohs(p);
2080				addlog("[%u]", p);
2081			}
2082			break;
2083		}
2084	#endif /* INET6 */
2085		}
2086	}
2087
void
pf_print_state(struct pf_state *st)
{
	pf_print_state_parts(st, NULL, NULL);
}

void
pf_print_state_parts(struct pf_state *st,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (st ? st->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (st ? st->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = st ? st->direction : 0;

	switch (proto) {
	case IPPROTO_IPV4:
		addlog("IPv4");
		break;
	case IPPROTO_IPV6:
		addlog("IPv6");
		break;
	case IPPROTO_TCP:
		addlog("TCP");
		break;
	case IPPROTO_UDP:
		addlog("UDP");
		break;
	case IPPROTO_ICMP:
		addlog("ICMP");
		break;
	case IPPROTO_ICMPV6:
		addlog("ICMPv6");
		break;
	default:
		addlog("%u", proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		addlog(" in");
		break;
	case PF_OUT:
		addlog(" out");
		break;
	}
	if (skw) {
		addlog(" wire: (%d) ", skw->rdomain);
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		addlog(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		addlog(" stack: (%d) ", sks->rdomain);
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			addlog(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			addlog("-");
	}
	if (st) {
		if (proto == IPPROTO_TCP) {
			addlog(" [lo=%u high=%u win=%u modulator=%u",
			    st->src.seqlo, st->src.seqhi,
			    st->src.max_win, st->src.seqdiff);
			if (st->src.wscale && st->dst.wscale)
				addlog(" wscale=%u",
				    st->src.wscale & PF_WSCALE_MASK);
			addlog("]");
			addlog(" [lo=%u high=%u win=%u modulator=%u",
			    st->dst.seqlo, st->dst.seqhi,
			    st->dst.max_win, st->dst.seqdiff);
			if (st->src.wscale && st->dst.wscale)
				addlog(" wscale=%u",
				    st->dst.wscale & PF_WSCALE_MASK);
			addlog("]");
		}
		addlog(" %u:%u", st->src.state, st->dst.state);
		if (st->rule.ptr)
			addlog(" @%d", st->rule.ptr->nr);
	}
}

void
pf_print_flags(u_int8_t f)
{
	if (f)
		addlog(" ");
	if (f & TH_FIN)
		addlog("F");
	if (f & TH_SYN)
		addlog("S");
	if (f & TH_RST)
		addlog("R");
	if (f & TH_PUSH)
		addlog("P");
	if (f & TH_ACK)
		addlog("A");
	if (f & TH_URG)
		addlog("U");
	if (f & TH_ECE)
		addlog("E");
	if (f & TH_CWR)
		addlog("W");
}

#define PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->onrdomain != prev->onrdomain ||
		    cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_RDOM);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NONE:
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	case PF_ADDR_RTLABEL:
		return (aw1->v.rtlabel != aw2->v.rtlabel);
	default:
		addlog("invalid address type: %d\n", aw1->type);
		return (1);
	}
}

/* This algorithm computes 'a + b - c' in ones-complement using a trick to
 * emulate at most one ones-complement subtraction. This thereby limits net
 * carries/borrows to at most one, eliminating a reduction step and saving one
 * each of +, >>, & and ~.
 *
 * def. x mod y = x - (x//y)*y    for integer x,y
 * def. sum = x mod 2^16
 * def. accumulator = (x >> 16) mod 2^16
 *
 * The trick works as follows: subtracting exactly one u_int16_t from the
 * u_int32_t x incurs at most one underflow, wrapping its upper 16-bits, the
 * accumulator, to 2^16 - 1. Adding this to the 16-bit sum preserves the
 * ones-complement borrow:
 *
 *   (sum + accumulator) mod 2^16
 * = { assume underflow: accumulator := 2^16 - 1 }
 *   (sum + 2^16 - 1) mod 2^16
 * = { mod }
 *   (sum - 1) mod 2^16
 *
 * Although this breaks for sum = 0, giving 0xffff, which is ones-complement's
 * other zero, not -1, that cannot occur: the 16-bit sum cannot be underflown
 * to zero as that requires subtraction of at least 2^16, which exceeds a
 * single u_int16_t's range.
 *
 * We use the following theorem to derive the implementation:
 *
 * th. (x + (y mod z)) mod z = (x + y) mod z    (0)
 * proof.
 *     (x + (y mod z)) mod z
 *   = { def mod }
 *     (x + y - (y//z)*z) mod z
 *   = { (a + b*c) mod c = a mod c }
 *     (x + y) mod z			[end of proof]
 *
 * ... and thereby obtain:
 *
 *   (sum + accumulator) mod 2^16
 * = { def. accumulator, def. sum }
 *   (x mod 2^16 + (x >> 16) mod 2^16) mod 2^16
 * = { (0), twice }
 *   (x + (x >> 16)) mod 2^16
 * = { x mod 2^n = x & (2^n - 1) }
 *   (x + (x >> 16)) & 0xffff
 *
 * Note: this serves also as a reduction step for at most one add (as the
 * trailing mod 2^16 prevents further reductions by destroying carries).
 */
__inline void
pf_cksum_fixup(u_int16_t *cksum, u_int16_t was, u_int16_t now,
    u_int8_t proto)
{
	u_int32_t x;
	const int udp = proto == IPPROTO_UDP;

	x = *cksum + was - now;
	x = (x + (x >> 16)) & 0xffff;

	/* optimise: eliminate a branch when not udp */
	if (udp && *cksum == 0x0000)
		return;
	if (udp && x == 0x0000)
		x = 0xffff;

	*cksum = (u_int16_t)(x);
}

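The derivation above is easy to check numerically. What follows is a
hypothetical userland sketch (not part of pf.c; cksum() and the sample
words are invented for illustration): it applies the same one-add,
one-fold update that pf_cksum_fixup() performs and compares the result
against a full RFC 1071 recomputation.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* plain RFC 1071 checksum over n 16-bit words */
static uint16_t
cksum(const uint16_t *w, int n)
{
	uint32_t x = 0;

	while (n--)
		x += *w++;
	x = (x & 0xffff) + (x >> 16);
	x = (x & 0xffff) + (x >> 16);
	return (~x & 0xffff);
}

int
main(void)
{
	uint16_t words[] = { 0x4500, 0x0054, 0x1c46 };
	uint16_t sum = cksum(words, 3);
	uint16_t was = words[1], now = 0xa1b2;
	uint32_t x;

	/* the pf_cksum_fixup() trick: one add, one fold */
	x = sum + was - now;
	x = (x + (x >> 16)) & 0xffff;

	words[1] = now;
	assert((uint16_t)x == cksum(words, 3));
	printf("incremental 0x%04x == full 0x%04x\n",
	    (uint16_t)x, cksum(words, 3));
	return (0);
}
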
#ifdef INET6
/* pre: coverage(cksum) is superset of coverage(covered_cksum) */
static __inline void
pf_cksum_uncover(u_int16_t *cksum, u_int16_t covered_cksum, u_int8_t proto)
{
	pf_cksum_fixup(cksum, ~covered_cksum, 0x0, proto);
}

/* pre: disjoint(coverage(cksum), coverage(uncovered_cksum)) */
static __inline void
pf_cksum_cover(u_int16_t *cksum, u_int16_t uncovered_cksum, u_int8_t proto)
{
	pf_cksum_fixup(cksum, 0x0, ~uncovered_cksum, proto);
}
#endif /* INET6 */

/* pre: *a is 16-bit aligned within its packet
 *
 * This algorithm emulates 16-bit ones-complement sums on a twos-complement
 * machine by conserving ones-complement's otherwise discarded carries in the
 * upper bits of x. These accumulated carries when added to the lower 16-bits
 * over at least zero 'reduction' steps then complete the ones-complement sum.
 *
 * def. sum = x mod 2^16
 * def. accumulator = (x >> 16)
 *
 * At most two reduction steps
 *
 *   x := sum + accumulator
 * = { def sum, def accumulator }
 *   x := x mod 2^16 + (x >> 16)
 * = { x mod 2^n = x & (2^n - 1) }
 *   x := (x & 0xffff) + (x >> 16)
 *
 * are necessary to incorporate the accumulated carries (at most one per add)
 * i.e. to reduce x < 2^16 from at most 16 carries in the upper 16 bits.
 *
 * The function is also invariant over the endian of the host. Why?
 *
 * Define the unary transpose operator ~ on a bitstring in python slice
 * notation as lambda m: m[P:] + m[:P] , for some constant pivot P.
 *
 * th. ~ distributes over ones-complement addition, denoted by +_1, i.e.
 *
 *     ~m +_1 ~n = ~(m +_1 n)    (for all bitstrings m,n of equal length)
 *
 * proof. Regard the bitstrings in m +_1 n as split at P, forming at most two
 * 'half-adds'. Under ones-complement addition, each half-add carries to the
 * other, so the sum of each half-add is unaffected by their relative
 * order. Therefore:
 *
 *     ~m +_1 ~n
 *   = { half-adds invariant under transposition }
 *     ~s
 *   = { substitute }
 *     ~(m +_1 n)			[end of proof]
 *
 * th. Summing two in-memory ones-complement 16-bit variables m,n on a machine
 * with the converse endian does not alter the result.
 *
 * proof.
 *     { converse machine endian: load/store transposes, P := 8 }
 *     ~(~m +_1 ~n)
 *   = { ~ over +_1 }
 *     ~~m +_1 ~~n
 *   = { ~ is an involution }
 *     m +_1 n				[end of proof]
 *
 */
#define NEG(x)	((u_int16_t)~(x))
void
pf_cksum_fixup_a(u_int16_t *cksum, const struct pf_addr *a,
    const struct pf_addr *an, sa_family_t af, u_int8_t proto)
{
	u_int32_t x;
	const u_int16_t *n = an->addr16;
	const u_int16_t *o = a->addr16;
	const int udp = proto == IPPROTO_UDP;

	switch (af) {
	case AF_INET:
		x = *cksum + o[0] + NEG(n[0]) + o[1] + NEG(n[1]);
		break;
#ifdef INET6
	case AF_INET6:
		x = *cksum + o[0] + NEG(n[0]) + o[1] + NEG(n[1]) +\
			     o[2] + NEG(n[2]) + o[3] + NEG(n[3]) +\
			     o[4] + NEG(n[4]) + o[5] + NEG(n[5]) +\
			     o[6] + NEG(n[6]) + o[7] + NEG(n[7]);
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}

	x = (x & 0xffff) + (x >> 16);
	x = (x & 0xffff) + (x >> 16);

	/* optimise: eliminate a branch when not udp */
	if (udp && *cksum == 0x0000)
		return;
	if (udp && x == 0x0000)
		x = 0xffff;

	*cksum = (u_int16_t)(x);
}

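The "at most two reduction steps" bound above bites exactly on boundary
values. A minimal sketch, assuming a userland build (not part of pf.c):
with x = 0x1ffff, one fold still carries out of the low 16 bits, and
only the second fold, as performed by pf_cksum_fixup_a(), finishes the
reduction.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t x = 0x1ffff;		/* a sum with a pending carry */

	x = (x & 0xffff) + (x >> 16);	/* first fold: 0xffff + 1 = 0x10000 */
	printf("after one fold:  0x%x\n", x);
	x = (x & 0xffff) + (x >> 16);	/* second fold: 0x0 + 1 = 0x1 */
	printf("after two folds: 0x%x\n", x);
	return (0);
}
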
int
pf_patch_8(struct pf_pdesc *pd, u_int8_t *f, u_int8_t v, bool hi)
{
	int	rewrite = 0;

	if (*f != v) {
		u_int16_t old = htons(hi ? (*f << 8) : *f);
		u_int16_t new = htons(hi ? ( v << 8) :  v);

		pf_cksum_fixup(pd->pcksum, old, new, pd->proto);
		*f = v;
		rewrite = 1;
	}

	return (rewrite);
}

/* pre: *f is 16-bit aligned within its packet */
int
pf_patch_16(struct pf_pdesc *pd, u_int16_t *f, u_int16_t v)
{
	int	rewrite = 0;

	if (*f != v) {
		pf_cksum_fixup(pd->pcksum, *f, v, pd->proto);
		*f = v;
		rewrite = 1;
	}

	return (rewrite);
}

int
pf_patch_16_unaligned(struct pf_pdesc *pd, void *f, u_int16_t v, bool hi)
{
	int	rewrite = 0;
	u_int8_t *fb = (u_int8_t*)f;
	u_int8_t *vb = (u_int8_t*)&v;

	if (hi && ALIGNED_POINTER(f, u_int16_t)) {
		return (pf_patch_16(pd, f, v)); /* optimise */
	}

	rewrite += pf_patch_8(pd, fb++, *vb++, hi);
	rewrite += pf_patch_8(pd, fb++, *vb++,!hi);

	return (rewrite);
}

/* pre: *f is 16-bit aligned within its packet */
/* pre: pd->proto != IPPROTO_UDP */
int
pf_patch_32(struct pf_pdesc *pd, u_int32_t *f, u_int32_t v)
{
	int	rewrite = 0;
	u_int16_t *pc = pd->pcksum;
	u_int8_t proto = pd->proto;

	/* optimise: inline udp fixup code is unused; let compiler scrub it */
	if (proto == IPPROTO_UDP)
		panic("%s: udp", __func__);

	/* optimise: skip *f != v guard; true for all use-cases */
	pf_cksum_fixup(pc, *f / (1 << 16), v / (1 << 16), proto);
	pf_cksum_fixup(pc, *f % (1 << 16), v % (1 << 16), proto);

	*f = v;
	rewrite = 1;

	return (rewrite);
}

int
pf_patch_32_unaligned(struct pf_pdesc *pd, void *f, u_int32_t v, bool hi)
{
	int	rewrite = 0;
	u_int8_t *fb = (u_int8_t*)f;
	u_int8_t *vb = (u_int8_t*)&v;

	if (hi && ALIGNED_POINTER(f, u_int32_t)) {
		return (pf_patch_32(pd, f, v)); /* optimise */
	}

	rewrite += pf_patch_8(pd, fb++, *vb++, hi);
	rewrite += pf_patch_8(pd, fb++, *vb++,!hi);
	rewrite += pf_patch_8(pd, fb++, *vb++, hi);
	rewrite += pf_patch_8(pd, fb++, *vb++,!hi);

	return (rewrite);
}

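pf_patch_32() leans on the fact that the internet checksum treats a
32-bit field as two adjacent 16-bit words, so the halves v / (1 << 16)
and v % (1 << 16) can be fixed up independently. A hypothetical
userland sketch of that equivalence (fixup() stands in for
pf_cksum_fixup() without the UDP special case; the field and third
word are invented values):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void
fixup(uint16_t *cksum, uint16_t was, uint16_t now)
{
	uint32_t x = *cksum + was - now;

	*cksum = (x + (x >> 16)) & 0xffff;
}

int
main(void)
{
	uint32_t f = 0x12345678, v = 0xdeadbeef;
	/* RFC 1071 checksum over { high half, low half, 0x0abc } */
	uint32_t s = (f >> 16) + (f & 0xffff) + 0x0abc;
	uint16_t ck;

	s = (s & 0xffff) + (s >> 16);
	s = (s & 0xffff) + (s >> 16);
	ck = ~s & 0xffff;

	fixup(&ck, f / (1 << 16), v / (1 << 16));	/* high halves */
	fixup(&ck, f % (1 << 16), v % (1 << 16));	/* low halves */

	/* recompute from scratch with the new value */
	s = (v >> 16) + (v & 0xffff) + 0x0abc;
	s = (s & 0xffff) + (s >> 16);
	s = (s & 0xffff) + (s >> 16);
	assert(ck == (uint16_t)(~s & 0xffff));
	printf("patched checksum 0x%04x\n", ck);
	return (0);
}
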
int
pf_icmp_mapping(struct pf_pdesc *pd, u_int8_t type, int *icmp_dir,
    u_int16_t *virtual_id, u_int16_t *virtual_type)
{
	/*
	 * ICMP types marked with PF_OUT are typically responses to
	 * PF_IN, and will match states in the opposite direction.
	 * PF_IN ICMP types need to match a state with that type.
	 */
	*icmp_dir = PF_OUT;

	/* Queries (and responses) */
	switch (pd->af) {
	case AF_INET:
		switch (type) {
		case ICMP_ECHO:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_ECHOREPLY:
			*virtual_type = ICMP_ECHO;
			*virtual_id = pd->hdr.icmp.icmp_id;
			break;

		case ICMP_TSTAMP:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_TSTAMPREPLY:
			*virtual_type = ICMP_TSTAMP;
			*virtual_id = pd->hdr.icmp.icmp_id;
			break;

		case ICMP_IREQ:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_IREQREPLY:
			*virtual_type = ICMP_IREQ;
			*virtual_id = pd->hdr.icmp.icmp_id;
			break;

		case ICMP_MASKREQ:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_MASKREPLY:
			*virtual_type = ICMP_MASKREQ;
			*virtual_id = pd->hdr.icmp.icmp_id;
			break;

		case ICMP_IPV6_WHEREAREYOU:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_IPV6_IAMHERE:
			*virtual_type = ICMP_IPV6_WHEREAREYOU;
			*virtual_id = 0; /* Nothing sane to match on! */
			break;

		case ICMP_MOBILE_REGREQUEST:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_MOBILE_REGREPLY:
			*virtual_type = ICMP_MOBILE_REGREQUEST;
			*virtual_id = 0; /* Nothing sane to match on! */
			break;

		case ICMP_ROUTERSOLICIT:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP_ROUTERADVERT:
			*virtual_type = ICMP_ROUTERSOLICIT;
			*virtual_id = 0; /* Nothing sane to match on! */
			break;

		/* These ICMP types map to other connections */
		case ICMP_UNREACH:
		case ICMP_SOURCEQUENCH:
		case ICMP_REDIRECT:
		case ICMP_TIMXCEED:
		case ICMP_PARAMPROB:
			/* These will not be used, but set them anyway */
			*icmp_dir = PF_IN;
			*virtual_type = htons(type);
			*virtual_id = 0;
			return (1); /* These types match to another state */

		/*
		 * All remaining ICMP types get their own states,
		 * and will only match in one direction.
		 */
		default:
			*icmp_dir = PF_IN;
			*virtual_type = type;
			*virtual_id = 0;
			break;
		}
		break;
#ifdef INET6
	case AF_INET6:
		switch (type) {
		case ICMP6_ECHO_REQUEST:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP6_ECHO_REPLY:
			*virtual_type = ICMP6_ECHO_REQUEST;
			*virtual_id = pd->hdr.icmp6.icmp6_id;
			break;

		case MLD_LISTENER_QUERY:
		case MLD_LISTENER_REPORT: {
			struct mld_hdr *mld = &pd->hdr.mld;
			u_int32_t h;

			/*
			 * A Listener Report can be sent by clients
			 * without an associated Listener Query.
			 * In addition to that, when a Report is sent as a
			 * reply to a Query its source and destination
			 * address are different.
			 */
			*icmp_dir = PF_IN;
			*virtual_type = MLD_LISTENER_QUERY;
			/* generate fake id for these messages */
			h = mld->mld_addr.s6_addr32[0] ^
			    mld->mld_addr.s6_addr32[1] ^
			    mld->mld_addr.s6_addr32[2] ^
			    mld->mld_addr.s6_addr32[3];
			*virtual_id = (h >> 16) ^ (h & 0xffff);
			break;
		}

		/*
		 * ICMP6_FQDN and ICMP6_NI query/reply are the same type as
		 * ICMP6_WRU
		 */
		case ICMP6_WRUREQUEST:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ICMP6_WRUREPLY:
			*virtual_type = ICMP6_WRUREQUEST;
			*virtual_id = 0; /* Nothing sane to match on! */
			break;

		case MLD_MTRACE:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case MLD_MTRACE_RESP:
			*virtual_type = MLD_MTRACE;
			*virtual_id = 0; /* Nothing sane to match on! */
			break;

		case ND_NEIGHBOR_SOLICIT:
			*icmp_dir = PF_IN;
			/* FALLTHROUGH */
		case ND_NEIGHBOR_ADVERT: {
			struct nd_neighbor_solicit *nd = &pd->hdr.nd_ns;
			u_int32_t h;

			*virtual_type = ND_NEIGHBOR_SOLICIT;
			/* generate fake id for these messages */
			h = nd->nd_ns_target.s6_addr32[0] ^
			    nd->nd_ns_target.s6_addr32[1] ^
			    nd->nd_ns_target.s6_addr32[2] ^
			    nd->nd_ns_target.s6_addr32[3];
			*virtual_id = (h >> 16) ^ (h & 0xffff);
			/*
			 * The extra work here deals with the 'keep state'
			 * option on a pass rule for an unsolicited
			 * advertisement. By returning 1 (state_icmp = 1) we
			 * override 'keep state' to 'no state' so we don't
			 * create state for unsolicited advertisements. No
			 * one expects an answer to an unsolicited
			 * advertisement, so we should be good.
			 */
			if (type == ND_NEIGHBOR_ADVERT) {
				*virtual_type = htons(*virtual_type);
				return (1);
			}
			break;
		}

		/*
		 * These ICMP types map to other connections.
		 * ND_REDIRECT can't be in this list because the triggering
		 * packet header is optional.
		 */
		case ICMP6_DST_UNREACH:
		case ICMP6_PACKET_TOO_BIG:
		case ICMP6_TIME_EXCEEDED:
		case ICMP6_PARAM_PROB:
			/* These will not be used, but set them anyway */
			*icmp_dir = PF_IN;
			*virtual_type = htons(type);
			*virtual_id = 0;
			return (1); /* These types match to another state */
		/*
		 * All remaining ICMP6 types get their own states,
		 * and will only match in one direction.
		 */
		default:
			*icmp_dir = PF_IN;
			*virtual_type = type;
			*virtual_id = 0;
			break;
		}
		break;
#endif /* INET6 */
	}
	*virtual_type = htons(*virtual_type);
	return (0); /* These types match to their own state */
}

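A sketch of the pairing idea behind pf_icmp_mapping(), reduced to the
echo case (hypothetical userland code; key() and the local constants
are invented here, and the real function additionally byte-swaps
*virtual_type and covers many more types): a query and its reply
collapse to one state key and differ only in direction, which is what
lets a reply find the state its query created.

#include <stdio.h>

#define ICMP_ECHOREPLY	0
#define ICMP_ECHO	8
#define PF_IN		1
#define PF_OUT		2

static void
key(int type, int id, int *vtype, int *vid, int *dir)
{
	*dir = (type == ICMP_ECHO) ? PF_IN : PF_OUT;
	*vtype = ICMP_ECHO;	/* reply keys on the query's type */
	*vid = id;
}

int
main(void)
{
	int vt1, vi1, d1, vt2, vi2, d2;

	key(ICMP_ECHO, 0x1234, &vt1, &vi1, &d1);	/* query */
	key(ICMP_ECHOREPLY, 0x1234, &vt2, &vi2, &d2);	/* reply */
	printf("same key: %d, dirs: %d vs %d\n",
	    vt1 == vt2 && vi1 == vi2, d1, d2);
	return (0);
}
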
void
pf_translate_icmp(struct pf_pdesc *pd, struct pf_addr *qa, u_int16_t *qp,
    struct pf_addr *oa, struct pf_addr *na, u_int16_t np)
{
	/* note: doesn't trouble to fixup quoted checksums, if any */

	/* change quoted protocol port */
	if (qp != NULL)
		pf_patch_16(pd, qp, np);

	/* change quoted ip address */
	pf_cksum_fixup_a(pd->pcksum, qa, na, pd->af, pd->proto);
	pf_addrcpy(qa, na, pd->af);

	/* change network-header's ip address */
	if (oa)
		pf_translate_a(pd, oa, na);
}

/* pre: *a is 16-bit aligned within its packet */
/*      *a is a network header src/dst address */
int
pf_translate_a(struct pf_pdesc *pd, struct pf_addr *a, struct pf_addr *an)
{
	int	rewrite = 0;

	/* warning: !PF_ANEQ != PF_AEQ */
	if (!PF_ANEQ(a, an, pd->af))
		return (0);

	/* fixup transport pseudo-header, if any */
	switch (pd->proto) {
	case IPPROTO_TCP:	/* FALLTHROUGH */
	case IPPROTO_UDP:	/* FALLTHROUGH */
	case IPPROTO_ICMPV6:
		pf_cksum_fixup_a(pd->pcksum, a, an, pd->af, pd->proto);
		break;
	default:
		break;	/* assume no pseudo-header */
	}

	pf_addrcpy(a, an, pd->af);
	rewrite = 1;

	return (rewrite);
}

#ifdef INET6
/* pf_translate_af() may change pd->m, adjust local copies after calling */
int
pf_translate_af(struct pf_pdesc *pd)
{
	static const struct pf_addr	zero;
	struct ip		*ip4;
	struct ip6_hdr		*ip6;
	int			 copyback = 0;
	u_int			 hlen, ohlen, dlen;
	u_int16_t		*pc;
	u_int8_t		 af_proto, naf_proto;

	hlen = (pd->naf == AF_INET) ? sizeof(*ip4) : sizeof(*ip6);
	ohlen = pd->off;
	dlen = pd->tot_len - pd->off;
	pc = pd->pcksum;

	af_proto = naf_proto = pd->proto;
	if (naf_proto == IPPROTO_ICMP)
		af_proto = IPPROTO_ICMPV6;
	if (naf_proto == IPPROTO_ICMPV6)
		af_proto = IPPROTO_ICMP;

	/* uncover stale pseudo-header */
	switch (af_proto) {
	case IPPROTO_ICMPV6:
		/* optimise: unchanged for TCP/UDP */
		pf_cksum_fixup(pc, htons(af_proto), 0x0, af_proto);
		pf_cksum_fixup(pc, htons(dlen), 0x0, af_proto);
		/* FALLTHROUGH */
	case IPPROTO_UDP:	/* FALLTHROUGH */
	case IPPROTO_TCP:
		pf_cksum_fixup_a(pc, pd->src, &zero, pd->af, af_proto);
		pf_cksum_fixup_a(pc, pd->dst, &zero, pd->af, af_proto);
		copyback = 1;
		break;
	default:
		break;	/* assume no pseudo-header */
	}

	/* replace the network header */
	m_adj(pd->m, pd->off);
	pd->src = NULL;
	pd->dst = NULL;

	if ((M_PREPEND(pd->m, hlen, M_DONTWAIT)) == NULL) {
		pd->m = NULL;
		return (-1);
	}

	pd->off = hlen;
	pd->tot_len += hlen - ohlen;

	switch (pd->naf) {
	case AF_INET:
		ip4 = mtod(pd->m, struct ip *);
		memset(ip4, 0, hlen);
		ip4->ip_v = IPVERSION;
		ip4->ip_hl = hlen >> 2;
		ip4->ip_tos = pd->tos;
		ip4->ip_len = htons(hlen + dlen);
		ip4->ip_id = htons(ip_randomid());
		ip4->ip_off = htons(IP_DF);
		ip4->ip_ttl = pd->ttl;
		ip4->ip_p = pd->proto;
		ip4->ip_src = pd->nsaddr.v4;
		ip4->ip_dst = pd->ndaddr.v4;
		break;
	case AF_INET6:
		ip6 = mtod(pd->m, struct ip6_hdr *);
		memset(ip6, 0, hlen);
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_flow |= htonl((u_int32_t)pd->tos << 20);
		ip6->ip6_plen = htons(dlen);
		ip6->ip6_nxt = pd->proto;
		if (!pd->ttl || pd->ttl > IPV6_DEFHLIM)
			ip6->ip6_hlim = IPV6_DEFHLIM;
		else
			ip6->ip6_hlim = pd->ttl;
		ip6->ip6_src = pd->nsaddr.v6;
		ip6->ip6_dst = pd->ndaddr.v6;
		break;
	default:
		unhandled_af(pd->naf);
	}

	/* UDP over IPv6 must be checksummed per rfc2460 p27 */
	if (naf_proto == IPPROTO_UDP && *pc == 0x0000 &&
	    pd->naf == AF_INET6) {
		pd->m->m_pkthdr.csum_flags |= M_UDP_CSUM_OUT;
	}

	/* cover fresh pseudo-header */
	switch (naf_proto) {
	case IPPROTO_ICMPV6:
		/* optimise: unchanged for TCP/UDP */
		pf_cksum_fixup(pc, 0x0, htons(naf_proto), naf_proto);
		pf_cksum_fixup(pc, 0x0, htons(dlen), naf_proto);
		/* FALLTHROUGH */
	case IPPROTO_UDP:	/* FALLTHROUGH */
	case IPPROTO_TCP:
		pf_cksum_fixup_a(pc, &zero, &pd->nsaddr, pd->naf, naf_proto);
		pf_cksum_fixup_a(pc, &zero, &pd->ndaddr, pd->naf, naf_proto);
		copyback = 1;
		break;
	default:
		break;	/* assume no pseudo-header */
	}

	/* flush pd->pcksum */
	if (copyback)
		m_copyback(pd->m, pd->off, pd->hdrlen, &pd->hdr, M_NOWAIT);

	return (0);
}

int
pf_change_icmp_af(struct mbuf *m, int ipoff2, struct pf_pdesc *pd,
    struct pf_pdesc *pd2, struct pf_addr *src, struct pf_addr *dst,
    sa_family_t af, sa_family_t naf)
{
	struct mbuf		*n = NULL;
	struct ip		*ip4;
	struct ip6_hdr		*ip6;
	u_int			 hlen, ohlen, dlen;
	int			 d;

	if (af == naf || (af != AF_INET && af != AF_INET6) ||
	    (naf != AF_INET && naf != AF_INET6))
		return (-1);

	/* split the mbuf chain on the quoted ip/ip6 header boundary */
	if ((n = m_split(m, ipoff2, M_DONTWAIT)) == NULL)
		return (-1);

	/* new quoted header */
	hlen = naf == AF_INET ? sizeof(*ip4) : sizeof(*ip6);
	/* old quoted header */
	ohlen = pd2->off - ipoff2;

	/* trim old quoted header */
	pf_cksum_uncover(pd->pcksum, in_cksum(n, ohlen), pd->proto);
	m_adj(n, ohlen);

	/* prepend a new, translated, quoted header */
	if ((M_PREPEND(n, hlen, M_DONTWAIT)) == NULL)
		return (-1);

	switch (naf) {
	case AF_INET:
		ip4 = mtod(n, struct ip *);
		memset(ip4, 0, sizeof(*ip4));
		ip4->ip_v = IPVERSION;
		ip4->ip_hl = sizeof(*ip4) >> 2;
		ip4->ip_len = htons(sizeof(*ip4) + pd2->tot_len - ohlen);
		ip4->ip_id = htons(ip_randomid());
		ip4->ip_off = htons(IP_DF);
		ip4->ip_ttl = pd2->ttl;
		if (pd2->proto == IPPROTO_ICMPV6)
			ip4->ip_p = IPPROTO_ICMP;
		else
			ip4->ip_p = pd2->proto;
		ip4->ip_src = src->v4;
		ip4->ip_dst = dst->v4;
		in_hdr_cksum_out(n, NULL);
		break;
	case AF_INET6:
		ip6 = mtod(n, struct ip6_hdr *);
		memset(ip6, 0, sizeof(*ip6));
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_plen = htons(pd2->tot_len - ohlen);
		if (pd2->proto == IPPROTO_ICMP)
			ip6->ip6_nxt = IPPROTO_ICMPV6;
		else
			ip6->ip6_nxt = pd2->proto;
		if (!pd2->ttl || pd2->ttl > IPV6_DEFHLIM)
			ip6->ip6_hlim = IPV6_DEFHLIM;
		else
			ip6->ip6_hlim = pd2->ttl;
		ip6->ip6_src = src->v6;
		ip6->ip6_dst = dst->v6;
		break;
	}

	/* cover new quoted header */
	/* optimise: any new AF_INET header of ours sums to zero */
	if (naf != AF_INET) {
		pf_cksum_cover(pd->pcksum, in_cksum(n, hlen), pd->proto);
	}

	/* reattach modified quoted packet to outer header */
	{
		int nlen = n->m_pkthdr.len;
		m_cat(m, n);
		m->m_pkthdr.len += nlen;
	}

	/* account for altered length */
	d = hlen - ohlen;

	if (pd->proto == IPPROTO_ICMPV6) {
		/* fixup pseudo-header */
		dlen = pd->tot_len - pd->off;
		pf_cksum_fixup(pd->pcksum,
		    htons(dlen), htons(dlen + d), pd->proto);
	}

	pd->tot_len += d;
	pd2->tot_len += d;
	pd2->off += d;

	/* note: not bothering to update network headers as
	   these are due for rewrite by pf_translate_af() */

	return (0);
}


#define PTR_IP(field)	(offsetof(struct ip, field))
#define PTR_IP6(field)	(offsetof(struct ip6_hdr, field))

int
pf_translate_icmp_af(struct pf_pdesc *pd, int af, void *arg)
{
	struct icmp		*icmp4;
	struct icmp6_hdr	*icmp6;
	u_int32_t		 mtu;
	int32_t			 ptr = -1;
	u_int8_t		 type;
	u_int8_t		 code;

	switch (af) {
	case AF_INET:
		icmp6 = arg;
		type = icmp6->icmp6_type;
		code = icmp6->icmp6_code;
		mtu = ntohl(icmp6->icmp6_mtu);

		switch (type) {
		case ICMP6_ECHO_REQUEST:
			type = ICMP_ECHO;
			break;
		case ICMP6_ECHO_REPLY:
			type = ICMP_ECHOREPLY;
			break;
		case ICMP6_DST_UNREACH:
			type = ICMP_UNREACH;
			switch (code) {
			case ICMP6_DST_UNREACH_NOROUTE:
			case ICMP6_DST_UNREACH_BEYONDSCOPE:
			case ICMP6_DST_UNREACH_ADDR:
				code = ICMP_UNREACH_HOST;
				break;
			case ICMP6_DST_UNREACH_ADMIN:
				code = ICMP_UNREACH_HOST_PROHIB;
				break;
			case ICMP6_DST_UNREACH_NOPORT:
				code = ICMP_UNREACH_PORT;
				break;
			default:
				return (-1);
			}
			break;
		case ICMP6_PACKET_TOO_BIG:
			type = ICMP_UNREACH;
			code = ICMP_UNREACH_NEEDFRAG;
			mtu -= 20;
			break;
		case ICMP6_TIME_EXCEEDED:
			type = ICMP_TIMXCEED;
			break;
		case ICMP6_PARAM_PROB:
			switch (code) {
			case ICMP6_PARAMPROB_HEADER:
				type = ICMP_PARAMPROB;
				code = ICMP_PARAMPROB_ERRATPTR;
				ptr = ntohl(icmp6->icmp6_pptr);

				if (ptr == PTR_IP6(ip6_vfc))
					; /* preserve */
				else if (ptr == PTR_IP6(ip6_vfc) + 1)
					ptr = PTR_IP(ip_tos);
				else if (ptr == PTR_IP6(ip6_plen) ||
				    ptr == PTR_IP6(ip6_plen) + 1)
					ptr = PTR_IP(ip_len);
				else if (ptr == PTR_IP6(ip6_nxt))
					ptr = PTR_IP(ip_p);
				else if (ptr == PTR_IP6(ip6_hlim))
					ptr = PTR_IP(ip_ttl);
				else if (ptr >= PTR_IP6(ip6_src) &&
				    ptr < PTR_IP6(ip6_dst))
					ptr = PTR_IP(ip_src);
				else if (ptr >= PTR_IP6(ip6_dst) &&
				    ptr < sizeof(struct ip6_hdr))
					ptr = PTR_IP(ip_dst);
				else {
					return (-1);
				}
				break;
			case ICMP6_PARAMPROB_NEXTHEADER:
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_PROTOCOL;
				break;
			default:
				return (-1);
			}
			break;
		default:
			return (-1);
		}

		pf_patch_8(pd, &icmp6->icmp6_type, type, PF_HI);
		pf_patch_8(pd, &icmp6->icmp6_code, code, PF_LO);

		/* aligns well with an icmpv4 nextmtu */
		pf_patch_32(pd, &icmp6->icmp6_mtu, htonl(mtu));

		/* icmpv4 pptr is only the single most significant byte */
		if (ptr >= 0)
			pf_patch_32(pd, &icmp6->icmp6_pptr, htonl(ptr << 24));
		break;
	case AF_INET6:
		icmp4 = arg;
		type = icmp4->icmp_type;
		code = icmp4->icmp_code;
		mtu = ntohs(icmp4->icmp_nextmtu);

		switch (type) {
		case ICMP_ECHO:
			type = ICMP6_ECHO_REQUEST;
			break;
		case ICMP_ECHOREPLY:
			type = ICMP6_ECHO_REPLY;
			break;
		case ICMP_UNREACH:
			type = ICMP6_DST_UNREACH;
			switch (code) {
			case ICMP_UNREACH_NET:
			case ICMP_UNREACH_HOST:
			case ICMP_UNREACH_NET_UNKNOWN:
			case ICMP_UNREACH_HOST_UNKNOWN:
			case ICMP_UNREACH_ISOLATED:
			case ICMP_UNREACH_TOSNET:
			case ICMP_UNREACH_TOSHOST:
				code = ICMP6_DST_UNREACH_NOROUTE;
				break;
			case ICMP_UNREACH_PORT:
				code = ICMP6_DST_UNREACH_NOPORT;
				break;
			case ICMP_UNREACH_NET_PROHIB:
			case ICMP_UNREACH_HOST_PROHIB:
			case ICMP_UNREACH_FILTER_PROHIB:
			case ICMP_UNREACH_PRECEDENCE_CUTOFF:
				code = ICMP6_DST_UNREACH_ADMIN;
				break;
			case ICMP_UNREACH_PROTOCOL:
				type = ICMP6_PARAM_PROB;
				code = ICMP6_PARAMPROB_NEXTHEADER;
				ptr = offsetof(struct ip6_hdr, ip6_nxt);
				break;
			case ICMP_UNREACH_NEEDFRAG:
				type = ICMP6_PACKET_TOO_BIG;
				code = 0;
				mtu += 20;
				break;
			default:
				return (-1);
			}
			break;
		case ICMP_TIMXCEED:
			type = ICMP6_TIME_EXCEEDED;
			break;
		case ICMP_PARAMPROB:
			type = ICMP6_PARAM_PROB;
			switch (code) {
			case ICMP_PARAMPROB_ERRATPTR:
				code = ICMP6_PARAMPROB_HEADER;
				break;
			case ICMP_PARAMPROB_LENGTH:
				code = ICMP6_PARAMPROB_HEADER;
				break;
			default:
				return (-1);
			}

			ptr = icmp4->icmp_pptr;
			if (ptr == 0 || ptr == PTR_IP(ip_tos))
				; /* preserve */
			else if (ptr == PTR_IP(ip_len) ||
			    ptr == PTR_IP(ip_len) + 1)
				ptr = PTR_IP6(ip6_plen);
			else if (ptr == PTR_IP(ip_ttl))
				ptr = PTR_IP6(ip6_hlim);
			else if (ptr == PTR_IP(ip_p))
				ptr = PTR_IP6(ip6_nxt);
			else if (ptr >= PTR_IP(ip_src) &&
			    ptr < PTR_IP(ip_dst))
				ptr = PTR_IP6(ip6_src);
			else if (ptr >= PTR_IP(ip_dst) &&
			    ptr < sizeof(struct ip))
				ptr = PTR_IP6(ip6_dst);
			else {
				return (-1);
			}
			break;
		default:
			return (-1);
		}

		pf_patch_8(pd, &icmp4->icmp_type, type, PF_HI);
		pf_patch_8(pd, &icmp4->icmp_code, code, PF_LO);
		pf_patch_16(pd, &icmp4->icmp_nextmtu, htons(mtu));
		if (ptr >= 0)
			pf_patch_32(pd, &icmp4->icmp_void, htonl(ptr));
		break;
	}

	return (0);
}
#endif /* INET6 */

/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
int
pf_modulate_sack(struct pf_pdesc *pd, struct pf_state_peer *dst)
{
	struct sackblk	 sack;
	int		 copyback = 0, i;
	int		 olen, optsoff;
	u_int8_t	 opts[MAX_TCPOPTLEN], *opt, *eoh;

	olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
	optsoff = pd->off + sizeof(struct tcphdr);
#define	TCPOLEN_MINSACK	(TCPOLEN_SACK + 2)
	if (olen < TCPOLEN_MINSACK ||
	    !pf_pull_hdr(pd->m, optsoff, opts, olen, NULL, pd->af))
		return (0);

	eoh = opts + olen;
	opt = opts;
	while ((opt = pf_find_tcpopt(opt, opts, olen,
	    TCPOPT_SACK, TCPOLEN_MINSACK)) != NULL)
	{
		size_t safelen = MIN(opt[1], (eoh - opt));
		for (i = 2; i + TCPOLEN_SACK <= safelen; i += TCPOLEN_SACK) {
			size_t startoff = (opt + i) - opts;
			memcpy(&sack, &opt[i], sizeof(sack));
			pf_patch_32_unaligned(pd, &sack.start,
			    htonl(ntohl(sack.start) - dst->seqdiff),
			    PF_ALGNMNT(startoff));
			pf_patch_32_unaligned(pd, &sack.end,
			    htonl(ntohl(sack.end) - dst->seqdiff),
			    PF_ALGNMNT(startoff + sizeof(sack.start)));
			memcpy(&opt[i], &sack, sizeof(sack));
		}
		copyback = 1;
		opt += opt[1];
	}

	if (copyback)
		m_copyback(pd->m, optsoff, olen, opts, M_NOWAIT);
	return (copyback);
}

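Each SACK edge above receives the same mod-2^32 shift that pf applied
to th_seq when it set up dst->seqdiff. A minimal sketch, assuming a
userland build with <arpa/inet.h> (the seqdiff and edge values are
invented):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t seqdiff = 0x1000;
	uint32_t wire = htonl(0x00000200);	/* edge as seen on the wire */
	uint32_t fixed;

	/* the pf_modulate_sack() arithmetic; wraps mod 2^32 for free */
	fixed = htonl(ntohl(wire) - seqdiff);
	printf("0x%08x -> 0x%08x\n", ntohl(wire), ntohl(fixed));
	return (0);
}
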
struct mbuf *
pf_build_tcp(const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, u_int sack, u_int rdom)
{
	struct mbuf	*m;
	int		 len, tlen;
	struct ip	*h;
#ifdef INET6
	struct ip6_hdr	*h6;
#endif /* INET6 */
	struct tcphdr	*th;
	char		*opt;

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
	if (mss)
		tlen += 4;
	if (sack)
		tlen += 2;

	switch (af) {
	case AF_INET:
		len = sizeof(struct ip) + tlen;
		break;
#ifdef INET6
	case AF_INET6:
		len = sizeof(struct ip6_hdr) + tlen;
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}

	/* create outgoing mbuf */
	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (NULL);
	if (tag)
		m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
	m->m_pkthdr.pf.tag = rtag;
	m->m_pkthdr.ph_rtableid = rdom;
	if (r && (r->scrub_flags & PFSTATE_SETPRIO))
		m->m_pkthdr.pf.prio = r->set_prio[0];
	if (r && r->qid)
		m->m_pkthdr.pf.qid = r->qid;
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.ph_ifidx = 0;
	m->m_pkthdr.csum_flags |= M_TCP_CSUM_OUT;
	memset(m->m_data, 0, len);
	switch (af) {
	case AF_INET:
		h = mtod(m, struct ip *);
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(tlen);
		h->ip_v = 4;
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		h->ip_len = htons(len);
		h->ip_off = htons(ip_mtudisc ? IP_DF : 0);
		h->ip_ttl = ttl ? ttl : ip_defttl;
		h->ip_sum = 0;
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
		break;
#ifdef INET6
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;
		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	opt = (char *)(th + 1);
	if (mss) {
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
		mss = htons(mss);
		memcpy((opt + 2), &mss, 2);
		opt += 4;
	}
	if (sack) {
		opt[0] = TCPOPT_SACK_PERMITTED;
		opt[1] = 2;
		opt += 2;
	}

	return (m);
}

void
pf_send_tcp(const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, u_int rdom)
{
	struct mbuf	*m;

	if ((m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack,
	    flags, win, mss, ttl, tag, rtag, 0, rdom)) == NULL)
		return;

	switch (af) {
	case AF_INET:
		ip_send(m);
		break;
#ifdef INET6
	case AF_INET6:
		ip6_send(m);
		break;
#endif /* INET6 */
	}
}

static void
pf_send_challenge_ack(struct pf_pdesc *pd, struct pf_state *st,
    struct pf_state_peer *src, struct pf_state_peer *dst)
{
	/*
	 * We are sending a challenge ACK as a response to a SYN packet
	 * that matches an existing state (modulo the TCP window check).
	 * The packet must therefore be sent on behalf of the destination.
	 *
	 * We expect the sender to remain either silent or send an RST
	 * packet, so that both the firewall and the remote peer can purge
	 * the dead state from memory.
	 */
	pf_send_tcp(st->rule.ptr, pd->af, pd->dst, pd->src,
	    pd->hdr.tcp.th_dport, pd->hdr.tcp.th_sport, dst->seqlo,
	    src->seqlo, TH_ACK, 0, 0, st->rule.ptr->return_ttl, 1, 0,
	    pd->rdomain);
}

void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, int param,
    sa_family_t af, struct pf_rule *r, u_int rdomain)
{
	struct mbuf	*m0;

	if ((m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT)) == NULL)
		return;

	m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
	m0->m_pkthdr.ph_rtableid = rdomain;
	if (r && (r->scrub_flags & PFSTATE_SETPRIO))
		m0->m_pkthdr.pf.prio = r->set_prio[0];
	if (r && r->qid)
		m0->m_pkthdr.pf.qid = r->qid;

	switch (af) {
	case AF_INET:
		icmp_error(m0, type, code, 0, param);
		break;
#ifdef INET6
	case AF_INET6:
		icmp6_error(m0, type, code, param);
		break;
#endif /* INET6 */
	}
}

3452/*
3453 * Return ((n = 0) == (a = b [with mask m]))
3454 * Note: n != 0 => returns (a != b [with mask m])
3455 */
3456int
3457pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
3458 struct pf_addr *b, sa_family_t af)
3459{
3460 switch (af) {
3461 case AF_INET:
3462 if ((a->addr32[0] & m->addr32[0]) ==
3463 (b->addr32[0] & m->addr32[0]))
3464 return (n == 0);
3465 break;
3466#ifdef INET6
3467 case AF_INET6:
3468 if (((a->addr32[0] & m->addr32[0]) ==
3469 (b->addr32[0] & m->addr32[0])) &&
3470 ((a->addr32[1] & m->addr32[1]) ==
3471 (b->addr32[1] & m->addr32[1])) &&
3472 ((a->addr32[2] & m->addr32[2]) ==
3473 (b->addr32[2] & m->addr32[2])) &&
3474 ((a->addr32[3] & m->addr32[3]) ==
3475 (b->addr32[3] & m->addr32[3])))
3476 return (n == 0);
3477 break;
3478#endif /* INET6 */
3479 }
3480
3481 return (n != 0);
3482}
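
In plain terms: with n == 0 the function returns 1 exactly when a equals b under mask m, and with n != 0 the sense is inverted. Summarized (annotation, not kernel text):

/*
 *	pf_match_addr(0, &a, &m, &b, af) == 1   iff (a & m) == (b & m)
 *	pf_match_addr(1, &a, &m, &b, af) == 1   iff (a & m) != (b & m)
 */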
3483
3484/*
3485 * Return 1 if b <= a <= e, otherwise return 0.
3486 */
3487int
3488pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
3489 struct pf_addr *a, sa_family_t af)
3490{
3491 switch (af) {
3492 case AF_INET:
3493 if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
3494 (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
3495 return (0);
3496 break;
3497#ifdef INET6
3498 case AF_INET6: {
3499 int i;
3500
3501 /* check a >= b */
3502 for (i = 0; i < 4; ++i)
3503 if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
3504 break;
3505 else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
3506 return (0);
3507 /* check a <= e */
3508 for (i = 0; i < 4; ++i)
3509 if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
3510 break;
3511 else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
3512 return (0);
3513 break;
3514 }
3515#endif /* INET6 */
3516 }
3517 return (1);
3518}
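
The IPv6 branch above is a word-by-word lexicographic comparison: each 32-bit word is converted to host order and compared most-significant first, stopping at the first word that differs. A stand-alone sketch of the same idea (user-space, hypothetical helper names, not part of pf.c):

#include <arpa/inet.h>	/* ntohl() */
#include <stdint.h>

/* Lexicographic compare of two 128-bit addresses stored as four
 * network-order words; returns <0, 0 or >0, like memcmp(). */
static int
addr128_cmp(const uint32_t a[4], const uint32_t b[4])
{
	int i;

	for (i = 0; i < 4; i++) {
		if (ntohl(a[i]) != ntohl(b[i]))
			return (ntohl(a[i]) < ntohl(b[i]) ? -1 : 1);
	}
	return (0);
}

/* b <= a <= e, mirroring the AF_INET6 case of pf_match_addr_range() */
static int
addr128_in_range(const uint32_t b[4], const uint32_t e[4],
    const uint32_t a[4])
{
	return (addr128_cmp(a, b) >= 0 && addr128_cmp(a, e) <= 0);
}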
3519
3520int
3521pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
3522{
3523 switch (op) {
3524 case PF_OP_IRG:
3525 return ((p > a1) && (p < a2));
3526 case PF_OP_XRG:
3527 return ((p < a1) || (p > a2));
3528 case PF_OP_RRG:
3529 return ((p >= a1) && (p <= a2));
3530 case PF_OP_EQ:
3531 return (p == a1);
3532 case PF_OP_NE:
3533 return (p != a1);
3534 case PF_OP_LT:
3535 return (p < a1);
3536 case PF_OP_LE:
3537 return (p <= a1);
3538 case PF_OP_GT:
3539 return (p > a1);
3540 case PF_OP_GE:
3541 return (p >= a1);
3542 }
3543 return (0); /* never reached */
3544}
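
For reference, the operator semantics: PF_OP_IRG is an exclusive range, PF_OP_XRG its complement, PF_OP_RRG an inclusive range; the rest are the usual comparisons against a1. A minimal user-space check (the OP_* values here are made up for illustration, only the semantics match):

#include <assert.h>

enum { OP_IRG, OP_XRG, OP_RRG };	/* hypothetical stand-ins */

static int
match(int op, unsigned int a1, unsigned int a2, unsigned int p)
{
	switch (op) {
	case OP_IRG: return (p > a1 && p < a2);	  /* ends excluded */
	case OP_XRG: return (p < a1 || p > a2);	  /* outside range */
	case OP_RRG: return (p >= a1 && p <= a2); /* ends included */
	}
	return (0);
}

int
main(void)
{
	assert(match(OP_RRG, 80, 90, 80) == 1);	/* bound included */
	assert(match(OP_IRG, 80, 90, 80) == 0);	/* bound excluded */
	assert(match(OP_XRG, 80, 90, 95) == 1);
	return (0);
}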
3545
3546int
3547pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
3548{
3549 return (pf_match(op, ntohs(a1), ntohs(a2), ntohs(p)));
3550}
3551
3552int
3553pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
3554{
3555 if (u == -1 && op != PF_OP_EQ && op != PF_OP_NE)
3556 return (0);
3557 return (pf_match(op, a1, a2, u));
3558}
3559
3560int
3561pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
3562{
3563 if (g == -1 && op != PF_OP_EQ && op != PF_OP_NE)
3564 return (0);
3565 return (pf_match(op, a1, a2, g));
3566}
3567
3568int
3569pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag)
3570{
3571 if (*tag == -1)
3572 *tag = m->m_pkthdr.pf.tag;
3573
3574 return ((!r->match_tag_not && r->match_tag == *tag) ||
3575 (r->match_tag_not && r->match_tag != *tag));
3576}
3577
3578int
3579pf_match_rcvif(struct mbuf *m, struct pf_rule *r)
3580{
3581 struct ifnet *ifp;
3582#if NCARP > 0
3583 struct ifnet *ifp0;
3584#endif
3585 struct pfi_kif *kif;
3586
3587 ifp = if_get(m->m_pkthdr.ph_ifidx);
3588 if (ifp == NULL)
3589 return (0);
3590
3591#if NCARP > 0
3592 if (ifp->if_type == IFT_CARP &&
3593 (ifp0 = if_get(ifp->if_carpdevidx)) != NULL) {
3594 kif = (struct pfi_kif *)ifp0->if_pf_kif;
3595 if_put(ifp0);
3596 } else
3597#endif /* NCARP */
3598 kif = (struct pfi_kif *)ifp->if_pf_kif;
3599
3600 if_put(ifp);
3601
3602 if (kif == NULL) {
3603 DPFPRINTF(LOG_ERR,
3604 "%s: kif == NULL, @%d via %s", __func__,
3605 r->nr, r->rcv_ifname);
3606 return (0);
3607 }
3608
3609 return (pfi_kif_match(r->rcv_kif, kif));
3610}
3611
3612void
3613pf_tag_packet(struct mbuf *m, int tag, int rtableid)
3614{
3615 if (tag > 0)
3616 m->m_pkthdr.pf.tag = tag;
3617 if (rtableid >= 0)
3618 m->m_pkthdr.ph_rtableid = (u_int)rtableid;
3619}
3620
3621void
3622pf_anchor_stack_init(void)
3623{
3624 struct pf_anchor_stackframe *stack;
3625
3626 stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack);
3627 stack[PF_ANCHOR_STACK_MAX].sf_stack_top = &stack[0];
3628 cpumem_leave(pf_anchor_stack, stack);
3629}
3630
3631int
3632pf_anchor_stack_is_full(struct pf_anchor_stackframe *sf)
3633{
3634 struct pf_anchor_stackframe *stack;
3635 int rv;
3636
3637 stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack);
3638 rv = (sf == &stack[PF_ANCHOR_STACK_MAX]);
3639 cpumem_leave(pf_anchor_stack, stack);
3640
3641 return (rv);
3642}
3643
3644int
3645pf_anchor_stack_is_empty(struct pf_anchor_stackframe *sf)
3646{
3647 struct pf_anchor_stackframe *stack;
3648 int rv;
3649
3650 stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack);
3651 rv = (sf == &stack[0]);
3652 cpumem_leave(pf_anchor_stack, stack);
3653
3654 return (rv);
3655}
3656
3657struct pf_anchor_stackframe *
3658pf_anchor_stack_top(void)
3659{
3660 struct pf_anchor_stackframe *stack;
3661 struct pf_anchor_stackframe *top_sf;
3662
3663 stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack);
3664 top_sf = stack[PF_ANCHOR_STACK_MAX].sf_stack_top;
3665 cpumem_leave(pf_anchor_stack, stack);
3666
3667 return (top_sf);
3668}
3669
3670int
3671pf_anchor_stack_push(struct pf_ruleset *rs, struct pf_rule *r,
3672 struct pf_anchor *child, int jump_target)
3673{
3674 struct pf_anchor_stackframe *stack;
3675 struct pf_anchor_stackframe *top_sf = pf_anchor_stack_top();
3676
3677 top_sf++;
3678 if (pf_anchor_stack_is_full(top_sf))
3679 return (-1);
3680
3681 top_sf->sf_rs = rs;
3682 top_sf->sf_r = r;
3683 top_sf->sf_child = child;
3684 top_sf->sf_jump_target = jump_target;
3685
3686 stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack);
3687
3688 if ((top_sf <= &stack[0]) || (top_sf >= &stack[PF_ANCHOR_STACK_MAX]))
3689 panic("%s: top frame outside of anchor stack range", __func__);
3690
3691 stack[PF_ANCHOR_STACK_MAX].sf_stack_top = top_sf;
3692 cpumem_leave(pf_anchor_stack, stack);
3693
3694 return (0);
3695}
3696
3697int
3698pf_anchor_stack_pop(struct pf_ruleset **rs, struct pf_rule **r,
3699 struct pf_anchor **child, int *jump_target)
3700{
3701 struct pf_anchor_stackframe *top_sf = pf_anchor_stack_top();
3702 struct pf_anchor_stackframe *stack;
3703 int on_top;
3704
3705 stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack);
3706 if (pf_anchor_stack_is_empty(top_sf)) {
3707 on_top = -1;
3708 } else {
3709 if ((top_sf <= &stack[0]) ||
3710 (top_sf >= &stack[PF_ANCHOR_STACK_MAX]))
3711 panic("%s: top frame outside of anchor stack range",
3712 __func__);
3713
3714 *rs = top_sf->sf_rs;
3715 *r = top_sf->sf_r;
3716 *child = top_sf->sf_child;
3717 *jump_target = top_sf->sf_jump_target;
3718 top_sf--;
3719 stack[PF_ANCHOR_STACK_MAX].sf_stack_top = top_sf;
3720 on_top = 0;
3721 }
3722 cpumem_leave(pf_anchor_stack, stack);
3723
3724 return (on_top);
3725}
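
Push and pop together implement a bounded stack whose sentinel frame at index PF_ANCHOR_STACK_MAX (64) holds the current top pointer: pf_anchor_stack_push() returns -1 once all regular frames are in use, and pf_anchor_stack_pop() returns -1 when the top is back at frame 0. A rough caller-side sketch (not a verbatim pf.c excerpt; the PF_TEST_FAIL reaction is an assumption):

/*
 *	if (pf_anchor_stack_push(ruleset, r, child, target) != 0)
 *		return (PF_TEST_FAIL);
 *	... evaluate the child anchor's ruleset ...
 *	while (pf_anchor_stack_pop(&rs, &r, &child, &target) == 0)
 *		... resume in the saved parent frame ...
 */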
3726
3727void
3728pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
3729 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
3730{
3731 switch (af) {
3732 case AF_INET:
3733 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3734 ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
3735 break;
3736#ifdef INET6
3737 case AF_INET6:
3738 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3739 ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
3740 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
3741 ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
3742 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
3743 ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
3744 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
3745 ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
3746 break;
3747#endif /* INET6 */
3748 default:
3749 unhandled_af(af);
3750 }
3751}
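
Per 32-bit word this computes naddr = (raddr & mask) | (~mask & saddr): network bits come from raddr, host bits from saddr. A one-word worked instance (assumed values):

/*
 * Assume raddr = 10.0.0.0, rmask = 255.255.255.0, saddr = 192.168.7.42:
 *
 *	naddr = (10.0.0.0  & 255.255.255.0)	-- network part: 10.0.0.0
 *	      | (0.0.0.255 & 192.168.7.42)	-- host part:    0.0.0.42
 *	      = 10.0.0.42
 */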
3752
3753void
3754pf_addr_inc(struct pf_addr *addr, sa_family_t af)
3755{
3756 switch (af) {
3757 case AF_INET:
3758 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
3759 break;
3760#ifdef INET6
3761 case AF_INET6:
3762 if (addr->addr32[3] == 0xffffffff) {
3763 addr->addr32[3] = 0;
3764 if (addr->addr32[2] == 0xffffffff) {
3765 addr->addr32[2] = 0;
3766 if (addr->addr32[1] == 0xffffffff) {
3767 addr->addr32[1] = 0;
3768 addr->addr32[0] =
3769 htonl(ntohl(addr->addr32[0]) + 1);
3770 } else
3771 addr->addr32[1] =
3772 htonl(ntohl(addr->addr32[1]) + 1);
3773 } else
3774 addr->addr32[2] =
3775 htonl(ntohl(addr->addr32[2]) + 1);
3776 } else
3777 addr->addr32[3] =
3778 htonl(ntohl(addr->addr32[3]) + 1);
3779 break;
3780#endif /* INET6 */
3781 default:
3782 unhandled_af(af);
3783 }
3784}
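
pf_addr_inc() is a ripple-carry increment over the four network-order words: the least significant word is bumped, and a word that was 0xffffffff wraps to 0 and carries into the next more significant one. The nested ifs above unroll what a loop would express as (user-space sketch, assuming the same four-word layout):

#include <arpa/inet.h>	/* htonl(), ntohl() */
#include <stdint.h>

static void
addr128_inc(uint32_t w[4])	/* w[0] is the most significant word */
{
	int i;

	for (i = 3; i >= 0; i--) {
		w[i] = htonl(ntohl(w[i]) + 1);
		if (w[i] != 0)	/* no wrap, carry stops here */
			break;
	}
}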
3785
3786int
3787pf_socket_lookup(struct pf_pdesc *pd)
3788{
3789 struct pf_addr *saddr, *daddr;
3790 u_int16_t sport, dport;
3791 struct inpcbtable *tb;
3792 struct inpcb *inp;
3793
3794 pd->lookup.uid = -1;
3795 pd->lookup.gid = -1;
3796 pd->lookup.pid = NO_PID;
3797 switch (pd->virtual_proto) {
3798 case IPPROTO_TCP:
3799 sport = pd->hdr.tcp.th_sport;
3800 dport = pd->hdr.tcp.th_dport;
3801 PF_ASSERT_LOCKED();
3802 NET_ASSERT_LOCKED();
3803 tb = &tcbtable;
3804 break;
3805 case IPPROTO_UDP:
3806 sport = pd->hdr.udp.uh_sport;
3807 dport = pd->hdr.udp.uh_dport;
3808 PF_ASSERT_LOCKED();
3809 NET_ASSERT_LOCKED();
3810 tb = &udbtable;
3811 break;
3812 default:
3813 return (-1);
3814 }
3815 if (pd->dir == PF_IN) {
3816 saddr = pd->src;
3817 daddr = pd->dst;
3818 } else {
3819 u_int16_t p;
3820
3821 p = sport;
3822 sport = dport;
3823 dport = p;
3824 saddr = pd->dst;
3825 daddr = pd->src;
3826 }
3827 switch (pd->af) {
3828 case AF_INET:
3829 /*
3830 * Fails when the rtable is changed while evaluating the ruleset;
3831 * the socket looked up will not match the one hit in the end.
3832 */
3833 inp = in_pcblookup(tb, saddr->v4, sport, daddr->v4, dport,
3834 pd->rdomain);
3835 if (inp == NULL) {
3836 inp = in_pcblookup_listen(tb, daddr->v4, dport,
3837 NULL, pd->rdomain);
3838 if (inp == NULL)
3839 return (-1);
3840 }
3841 break;
3842#ifdef INET6
3843 case AF_INET6:
3844 if (pd->virtual_proto == IPPROTO_UDP)
3845 tb = &udb6table;
3846 inp = in6_pcblookup(tb, &saddr->v6, sport, &daddr->v6,
3847 dport, pd->rdomain);
3848 if (inp == NULL) {
3849 inp = in6_pcblookup_listen(tb, &daddr->v6, dport,
3850 NULL, pd->rdomain);
3851 if (inp == NULL)
3852 return (-1);
3853 }
3854 break;
3855#endif /* INET6 */
3856 default:
3857 unhandled_af(pd->af);
3858 }
3859 pd->lookup.uid = inp->inp_socket->so_euid;
3860 pd->lookup.gid = inp->inp_socket->so_egid;
3861 pd->lookup.pid = inp->inp_socket->so_cpid;
3862 in_pcbunref(inp);
3863 return (1);
3864}
3865
3866/* post: r => (r[0] == type /\ r[1] >= min_typelen >= 2 "validity"
3867 * /\ (eoh - r) >= min_typelen >= 2 "safety" )
3868 *
3869 * warning: r + r[1] may exceed opts bounds for r[1] > min_typelen
3870 */
3871u_int8_t*
3872pf_find_tcpopt(u_int8_t *opt, u_int8_t *opts, size_t hlen, u_int8_t type,
3873 u_int8_t min_typelen)
3874{
3875 u_int8_t *eoh = opts + hlen;
3876
3877 if (min_typelen < 2)
3878 return (NULL);
3879
3880 while ((eoh - opt) >= min_typelen) {
3881 switch (*opt) {
3882 case TCPOPT_EOL:
3883 /* FALLTHROUGH - Workaround the failure of some
3884 systems to NOP-pad their bzero'd option buffers,
3885 producing spurious EOLs */
3886 case TCPOPT_NOP:
3887 opt++;
3888 continue;
3889 default:
3890 if (opt[0] == type &&
3891 opt[1] >= min_typelen)
3892 return (opt);
3893 }
3894
3895 opt += MAX(opt[1], 2); /* evade infinite loops */
3896 }
3897
3898 return (NULL);
3899}
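
The walk treats EOL like NOP to cope with zero-padded option buffers and always advances by at least two bytes, so a forged zero length octet cannot stall the loop. A self-contained user-space version of the same walk (hypothetical helper; the usual kind/len/data option layout is assumed):

#include <stddef.h>
#include <stdint.h>

static uint8_t *
find_tcpopt(uint8_t *opts, size_t hlen, uint8_t type, uint8_t min_len)
{
	uint8_t *opt = opts, *eoh = opts + hlen;

	if (min_len < 2)
		return (NULL);
	while (eoh - opt >= min_len) {	/* signed: exits if opt ran past */
		switch (*opt) {
		case 0:	/* EOL, treated like NOP (zero padding) */
		case 1:	/* NOP */
			opt++;
			continue;
		default:
			if (opt[0] == type && opt[1] >= min_len)
				return (opt);
		}
		opt += opt[1] > 2 ? opt[1] : 2;	/* never advance by 0 or 1 */
	}
	return (NULL);
}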
3900
3901u_int8_t
3902pf_get_wscale(struct pf_pdesc *pd)
3903{
3904 int olen;
3905 u_int8_t opts[MAX_TCPOPTLEN], *opt;
3906 u_int8_t wscale = 0;
3907
3908 olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
3909 if (olen < TCPOLEN_WINDOW || !pf_pull_hdr(pd->m,
3910 pd->off + sizeof(struct tcphdr), opts, olen, NULL, pd->af))
3911 return (0);
3912
3913 opt = opts;
3914 while ((opt = pf_find_tcpopt(opt, opts, olen,
3915 TCPOPT_WINDOW, TCPOLEN_WINDOW)) != NULL) {
3916 wscale = opt[2];
3917 wscale = MIN(wscale, TCP_MAX_WINSHIFT);
3918 wscale |= PF_WSCALE_FLAG;
3919
3920 opt += opt[1];
3921 }
3922
3923 return (wscale);
3924}
3925
3926u_int16_t
3927pf_get_mss(struct pf_pdesc *pd)
3928{
3929 int olen;
3930 u_int8_t opts[MAX_TCPOPTLEN], *opt;
3931 u_int16_t mss = tcp_mssdflt;
3932
3933 olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
3934 if (olen < TCPOLEN_MAXSEG || !pf_pull_hdr(pd->m,
3935 pd->off + sizeof(struct tcphdr), opts, olen, NULL, pd->af))
3936 return (0);
3937
3938 opt = opts;
3939 while ((opt = pf_find_tcpopt(opt, opts, olen,
3940 TCPOPT_MAXSEG, TCPOLEN_MAXSEG)) != NULL) {
3941 memcpy(&mss, (opt + 2), 2);
3942 mss = ntohs(mss);
3943
3944 opt += opt[1];
3945 }
3946 return (mss);
3947}
3948
3949u_int16_t
3950pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
3951{
3952 struct ifnet *ifp;
3953 struct sockaddr_in *dst;
3954#ifdef INET6
3955 struct sockaddr_in6 *dst6;
3956#endif /* INET6 */
3957 struct rtentry *rt = NULL;
3958 struct sockaddr_storage ss;
3959 int hlen;
3960 u_int16_t mss = tcp_mssdflt;
3961
3962 memset(&ss, 0, sizeof(ss));
3963
3964 switch (af) {
3965 case AF_INET:
3966 hlen = sizeof(struct ip);
3967 dst = (struct sockaddr_in *)&ss;
3968 dst->sin_family = AF_INET;
3969 dst->sin_len = sizeof(*dst);
3970 dst->sin_addr = addr->v4;
3971 rt = rtalloc(sintosa(dst), 0, rtableid);
3972 break;
3973#ifdef INET6
3974 case AF_INET6:
3975 hlen = sizeof(struct ip6_hdr);
3976 dst6 = (struct sockaddr_in6 *)&ss;
3977 dst6->sin6_family = AF_INET6;
3978 dst6->sin6_len = sizeof(*dst6);
3979 dst6->sin6_addr = addr->v6;
3980 rt = rtalloc(sin6tosa(dst6), 0, rtableid);
3981 break;
3982#endif /* INET6 */
3983 }
3984
3985 if (rt != NULL && (ifp = if_get(rt->rt_ifidx)) != NULL) {
3986 mss = ifp->if_mtu - hlen - sizeof(struct tcphdr);
3987 mss = max(tcp_mssdflt, mss);
3988 if_put(ifp);
3989 }
3990 rtfree(rt);
3991 mss = min(mss, offer);
3992 mss = max(mss, 64); /* sanity - at least max opt space */
3993 return (mss);
3994}
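
The clamping chain above yields the outgoing interface MTU minus IP and TCP headers, raised to at least tcp_mssdflt, capped by the peer's offer, and never below 64 bytes. A worked instance (assumed values; tcp_mssdflt is 512 by default):

/*
 * Assume AF_INET, if_mtu = 1500, offer = 1460:
 *
 *	mss = 1500 - sizeof(struct ip) - sizeof(struct tcphdr)
 *	    = 1500 - 20 - 20         = 1460
 *	mss = max(tcp_mssdflt, 1460) = 1460
 *	mss = min(1460, offer)       = 1460
 *	mss = max(1460, 64)          = 1460
 */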
3995
3996static __inline int
3997pf_set_rt_ifp(struct pf_state *st, struct pf_addr *saddr, sa_family_t af,
3998 struct pf_src_node **sns)
3999{
4000 struct pf_rule *r = st->rule.ptr;
4001 int rv;
4002
4003 if (!r->rt)
4004 return (0);
4005
4006 rv = pf_map_addr(af, r, saddr, &st->rt_addr, NULL, sns,
4007 &r->route, PF_SN_ROUTE);
4008 if (rv == 0)
4009 st->rt = r->rt;
4010
4011 return (rv);
4012}
4013
4014u_int32_t
4015pf_tcp_iss(struct pf_pdesc *pd)
4016{
4017 SHA2_CTX ctx;
4018 union {
4019 uint8_t bytes[SHA512_DIGEST_LENGTH];
4020 uint32_t words[1];
4021 } digest;
4022
4023 if (pf_tcp_secret_init == 0) {
4024 arc4random_buf(pf_tcp_secret, sizeof(pf_tcp_secret));
4025 SHA512Init(&pf_tcp_secret_ctx);
4026 SHA512Update(&pf_tcp_secret_ctx, pf_tcp_secret,
4027 sizeof(pf_tcp_secret));
4028 pf_tcp_secret_init = 1;
4029 }
4030 ctx = pf_tcp_secret_ctx;
4031
4032 SHA512Update(&ctx, &pd->rdomain, sizeof(pd->rdomain));
4033 SHA512Update(&ctx, &pd->hdr.tcp.th_sport, sizeof(u_short));
4034 SHA512Update(&ctx, &pd->hdr.tcp.th_dport, sizeof(u_short));
4035 switch (pd->af) {
4036 case AF_INET:
4037 SHA512Update(&ctx, &pd->src->v4, sizeof(struct in_addr));
4038 SHA512Update(&ctx, &pd->dst->v4, sizeof(struct in_addr));
4039 break;
4040#ifdef INET6
4041 case AF_INET6:
4042 SHA512Update(&ctx, &pd->src->v6, sizeof(struct in6_addr));
4043 SHA512Update(&ctx, &pd->dst->v6, sizeof(struct in6_addr));
4044 break;
4045#endif /* INET6 */
4046 }
4047 SHA512Final(digest.bytes, &ctx);
4048 pf_tcp_iss_off += 4096;
4049 return (digest.words[0] + READ_ONCE(tcp_iss) + pf_tcp_iss_off);
4050}
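
The construction is in the spirit of RFC 6528: a keyed hash over the connection identifiers gives a stable per-connection base, and pf_tcp_iss_off, bumped by 4096 on every call, keeps successive ISNs moving forward. Schematically (annotation, not kernel text):

/*
 *	ISS = SHA512(secret || rdomain || sport || dport || src || dst)[0..3]
 *	    + tcp_iss + pf_tcp_iss_off     (pf_tcp_iss_off += 4096 per call)
 */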
4051
4052void
4053pf_rule_to_actions(struct pf_rule *r, struct pf_rule_actions *a)
4054{
4055 if (r->qid)
4056 a->qid = r->qid;
4057 if (r->pqid)
4058 a->pqid = r->pqid;
4059 if (r->rtableid >= 0)
4060 a->rtableid = r->rtableid;
4061#if NPFLOG > 0
4062 a->log |= r->log;
4063#endif /* NPFLOG > 0 */
4064 if (r->scrub_flags & PFSTATE_SETTOS)
4065 a->set_tos = r->set_tos;
4066 if (r->min_ttl)
4067 a->min_ttl = r->min_ttl;
4068 if (r->max_mss)
4069 a->max_mss = r->max_mss;
4070 a->flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID|
4071 PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|PFSTATE_SETPRIO));
4072 if (r->scrub_flags & PFSTATE_SETPRIO) {
4073 a->set_prio[0] = r->set_prio[0];
4074 a->set_prio[1] = r->set_prio[1];
4075 }
4076 if (r->rule_flag & PFRULE_SETDELAY)
4077 a->delay = r->delay;
4078}
4079
4080#define PF_TEST_ATTRIB(t, a) \
4081 if (t) { \
4082 r = a; \
4083 continue; \
4084 } else do { \
4085 } while (0)
4086
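The trailing else do { } while (0) makes the expansion swallow the semicolon that follows each PF_TEST_ATTRIB() use, so the macro behaves like a single statement even in un-braced if/else contexts. A minimal illustration of the pattern outside pf (hypothetical rule type):

#include <stddef.h>

struct rule { int disabled; struct rule *next; };

#define SKIP_IF(t, next_rule)		\
	if (t) {			\
		r = next_rule;		\
		continue;		\
	} else do {			\
	} while (0)

static struct rule *
first_enabled(struct rule *r)
{
	while (r != NULL) {
		SKIP_IF(r->disabled, r->next);	/* reads like a statement */
		return (r);			/* r passed the test */
	}
	return (NULL);
}
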
4087enum pf_test_status
4088pf_match_rule(struct pf_test_ctx *ctx, struct pf_ruleset *ruleset)
4089{
4090 struct pf_rule *r;
4091 struct pf_anchor *child = NULL;
4092 int target;
4093
4094 pf_anchor_stack_init();
4095enter_ruleset:
4096 r = TAILQ_FIRST(ruleset->rules.active.ptr)((ruleset->rules.active.ptr)->tqh_first);
4097 while (r != NULL((void *)0)) {
4098 PF_TEST_ATTRIB(r->rule_flag & PFRULE_EXPIRED,if (r->rule_flag & 0x00400000) { r = ((r)->entries.
tqe_next); continue; } else do { } while (0)
4099 TAILQ_NEXT(r, entries))if (r->rule_flag & 0x00400000) { r = ((r)->entries.
tqe_next); continue; } else do { } while (0)
;
4100 r->evaluations++;
4101 PF_TEST_ATTRIB(if ((pfi_kif_match(r->kif, ctx->pd->kif) == r->ifnot
)) { r = r->skip[0].ptr; continue; } else do { } while (0)
4102 (pfi_kif_match(r->kif, ctx->pd->kif) == r->ifnot),if ((pfi_kif_match(r->kif, ctx->pd->kif) == r->ifnot
)) { r = r->skip[0].ptr; continue; } else do { } while (0)
4103 r->skip[PF_SKIP_IFP].ptr)if ((pfi_kif_match(r->kif, ctx->pd->kif) == r->ifnot
)) { r = r->skip[0].ptr; continue; } else do { } while (0)
;
4104 PF_TEST_ATTRIB((r->direction && r->direction != ctx->pd->dir),if ((r->direction && r->direction != ctx->pd
->dir)) { r = r->skip[1].ptr; continue; } else do { } while
(0)
4105 r->skip[PF_SKIP_DIR].ptr)if ((r->direction && r->direction != ctx->pd
->dir)) { r = r->skip[1].ptr; continue; } else do { } while
(0)
;
4106 PF_TEST_ATTRIB((r->onrdomain >= 0 &&if ((r->onrdomain >= 0 && (r->onrdomain == ctx
->pd->rdomain) == r->ifnot)) { r = r->skip[2].ptr
; continue; } else do { } while (0)
4107 (r->onrdomain == ctx->pd->rdomain) == r->ifnot),if ((r->onrdomain >= 0 && (r->onrdomain == ctx
->pd->rdomain) == r->ifnot)) { r = r->skip[2].ptr
; continue; } else do { } while (0)
4108 r->skip[PF_SKIP_RDOM].ptr)if ((r->onrdomain >= 0 && (r->onrdomain == ctx
->pd->rdomain) == r->ifnot)) { r = r->skip[2].ptr
; continue; } else do { } while (0)
;
4109 PF_TEST_ATTRIB((r->af && r->af != ctx->pd->af),if ((r->af && r->af != ctx->pd->af)) { r =
r->skip[3].ptr; continue; } else do { } while (0)
4110 r->skip[PF_SKIP_AF].ptr)if ((r->af && r->af != ctx->pd->af)) { r =
r->skip[3].ptr; continue; } else do { } while (0)
;
4111 PF_TEST_ATTRIB((r->proto && r->proto != ctx->pd->proto),if ((r->proto && r->proto != ctx->pd->proto
)) { r = r->skip[4].ptr; continue; } else do { } while (0)
4112 r->skip[PF_SKIP_PROTO].ptr)if ((r->proto && r->proto != ctx->pd->proto
)) { r = r->skip[4].ptr; continue; } else do { } while (0)
;
4113 PF_TEST_ATTRIB((PF_MISMATCHAW(&r->src.addr, &ctx->pd->nsaddr,if ((( (((&r->src.addr)->type == PF_ADDR_NOROUTE &&
pf_routable((&ctx->pd->nsaddr), (ctx->pd->naf
), ((void *)0), (ctx->act.rtableid))) || (((&r->src
.addr)->type == PF_ADDR_URPFFAILED && (ctx->pd->
kif) != ((void *)0) && pf_routable((&ctx->pd->
nsaddr), (ctx->pd->naf), (ctx->pd->kif), (ctx->
act.rtableid))) || ((&r->src.addr)->type == PF_ADDR_RTLABEL
&& !pf_rtlabel_match((&ctx->pd->nsaddr), (
ctx->pd->naf), (&r->src.addr), (ctx->act.rtableid
))) || ((&r->src.addr)->type == PF_ADDR_TABLE &&
!pfr_match_addr((&r->src.addr)->p.tbl, (&ctx->
pd->nsaddr), (ctx->pd->naf))) || ((&r->src.addr
)->type == PF_ADDR_DYNIFTL && !pfi_match_addr((&
r->src.addr)->p.dyn, (&ctx->pd->nsaddr), (ctx
->pd->naf))) || ((&r->src.addr)->type == PF_ADDR_RANGE
&& !pf_match_addr_range(&(&r->src.addr)->
v.a.addr, &(&r->src.addr)->v.a.mask, (&ctx->
pd->nsaddr), (ctx->pd->naf))) || ((&r->src.addr
)->type == PF_ADDR_ADDRMASK && !(((ctx->pd->
naf) == 2 && !(&(&r->src.addr)->v.a.mask
)->pfa.addr32[0]) || ((ctx->pd->naf) == 24 &&
!(&(&r->src.addr)->v.a.mask)->pfa.addr32[0]
&& !(&(&r->src.addr)->v.a.mask)->pfa
.addr32[1] && !(&(&r->src.addr)->v.a.mask
)->pfa.addr32[2] && !(&(&r->src.addr)->
v.a.mask)->pfa.addr32[3] )) && !pf_match_addr(0, &
(&r->src.addr)->v.a.addr, &(&r->src.addr
)->v.a.mask, (&ctx->pd->nsaddr), (ctx->pd->
naf))))) != (r->src.neg) ))) { r = r->skip[5].ptr; continue
; } else do { } while (0)
4114 ctx->pd->naf, r->src.neg, ctx->pd->kif,if ((( (((&r->src.addr)->type == PF_ADDR_NOROUTE &&
pf_routable((&ctx->pd->nsaddr), (ctx->pd->naf
), ((void *)0), (ctx->act.rtableid))) || (((&r->src
.addr)->type == PF_ADDR_URPFFAILED && (ctx->pd->
kif) != ((void *)0) && pf_routable((&ctx->pd->
nsaddr), (ctx->pd->naf), (ctx->pd->kif), (ctx->
act.rtableid))) || ((&r->src.addr)->type == PF_ADDR_RTLABEL
&& !pf_rtlabel_match((&ctx->pd->nsaddr), (
ctx->pd->naf), (&r->src.addr), (ctx->act.rtableid
))) || ((&r->src.addr)->type == PF_ADDR_TABLE &&
!pfr_match_addr((&r->src.addr)->p.tbl, (&ctx->
pd->nsaddr), (ctx->pd->naf))) || ((&r->src.addr
)->type == PF_ADDR_DYNIFTL && !pfi_match_addr((&
r->src.addr)->p.dyn, (&ctx->pd->nsaddr), (ctx
->pd->naf))) || ((&r->src.addr)->type == PF_ADDR_RANGE
&& !pf_match_addr_range(&(&r->src.addr)->
v.a.addr, &(&r->src.addr)->v.a.mask, (&ctx->
pd->nsaddr), (ctx->pd->naf))) || ((&r->src.addr
)->type == PF_ADDR_ADDRMASK && !(((ctx->pd->
naf) == 2 && !(&(&r->src.addr)->v.a.mask
)->pfa.addr32[0]) || ((ctx->pd->naf) == 24 &&
!(&(&r->src.addr)->v.a.mask)->pfa.addr32[0]
&& !(&(&r->src.addr)->v.a.mask)->pfa
.addr32[1] && !(&(&r->src.addr)->v.a.mask
)->pfa.addr32[2] && !(&(&r->src.addr)->
v.a.mask)->pfa.addr32[3] )) && !pf_match_addr(0, &
(&r->src.addr)->v.a.addr, &(&r->src.addr
)->v.a.mask, (&ctx->pd->nsaddr), (ctx->pd->
naf))))) != (r->src.neg) ))) { r = r->skip[5].ptr; continue
; } else do { } while (0)
4115 ctx->act.rtableid)),if ((( (((&r->src.addr)->type == PF_ADDR_NOROUTE &&
pf_routable((&ctx->pd->nsaddr), (ctx->pd->naf
), ((void *)0), (ctx->act.rtableid))) || (((&r->src
.addr)->type == PF_ADDR_URPFFAILED && (ctx->pd->
kif) != ((void *)0) && pf_routable((&ctx->pd->
nsaddr), (ctx->pd->naf), (ctx->pd->kif), (ctx->
act.rtableid))) || ((&r->src.addr)->type == PF_ADDR_RTLABEL
&& !pf_rtlabel_match((&ctx->pd->nsaddr), (
ctx->pd->naf), (&r->src.addr), (ctx->act.rtableid
))) || ((&r->src.addr)->type == PF_ADDR_TABLE &&
!pfr_match_addr((&r->src.addr)->p.tbl, (&ctx->
pd->nsaddr), (ctx->pd->naf))) || ((&r->src.addr
)->type == PF_ADDR_DYNIFTL && !pfi_match_addr((&
r->src.addr)->p.dyn, (&ctx->pd->nsaddr), (ctx
->pd->naf))) || ((&r->src.addr)->type == PF_ADDR_RANGE
&& !pf_match_addr_range(&(&r->src.addr)->
v.a.addr, &(&r->src.addr)->v.a.mask, (&ctx->
pd->nsaddr), (ctx->pd->naf))) || ((&r->src.addr
)->type == PF_ADDR_ADDRMASK && !(((ctx->pd->
naf) == 2 && !(&(&r->src.addr)->v.a.mask
)->pfa.addr32[0]) || ((ctx->pd->naf) == 24 &&
!(&(&r->src.addr)->v.a.mask)->pfa.addr32[0]
&& !(&(&r->src.addr)->v.a.mask)->pfa
.addr32[1] && !(&(&r->src.addr)->v.a.mask
)->pfa.addr32[2] && !(&(&r->src.addr)->
v.a.mask)->pfa.addr32[3] )) && !pf_match_addr(0, &
(&r->src.addr)->v.a.addr, &(&r->src.addr
)->v.a.mask, (&ctx->pd->nsaddr), (ctx->pd->
naf))))) != (r->src.neg) ))) { r = r->skip[5].ptr; continue
; } else do { } while (0)
4116 r->skip[PF_SKIP_SRC_ADDR].ptr)if ((( (((&r->src.addr)->type == PF_ADDR_NOROUTE &&
pf_routable((&ctx->pd->nsaddr), (ctx->pd->naf
), ((void *)0), (ctx->act.rtableid))) || (((&r->src
.addr)->type == PF_ADDR_URPFFAILED && (ctx->pd->
kif) != ((void *)0) && pf_routable((&ctx->pd->
nsaddr), (ctx->pd->naf), (ctx->pd->kif), (ctx->
act.rtableid))) || ((&r->src.addr)->type == PF_ADDR_RTLABEL
&& !pf_rtlabel_match((&ctx->pd->nsaddr), (
ctx->pd->naf), (&r->src.addr), (ctx->act.rtableid
))) || ((&r->src.addr)->type == PF_ADDR_TABLE &&
!pfr_match_addr((&r->src.addr)->p.tbl, (&ctx->
pd->nsaddr), (ctx->pd->naf))) || ((&r->src.addr
)->type == PF_ADDR_DYNIFTL && !pfi_match_addr((&
r->src.addr)->p.dyn, (&ctx->pd->nsaddr), (ctx
->pd->naf))) || ((&r->src.addr)->type == PF_ADDR_RANGE
&& !pf_match_addr_range(&(&r->src.addr)->
v.a.addr, &(&r->src.addr)->v.a.mask, (&ctx->
pd->nsaddr), (ctx->pd->naf))) || ((&r->src.addr
)->type == PF_ADDR_ADDRMASK && !(((ctx->pd->
naf) == 2 && !(&(&r->src.addr)->v.a.mask
)->pfa.addr32[0]) || ((ctx->pd->naf) == 24 &&
!(&(&r->src.addr)->v.a.mask)->pfa.addr32[0]
&& !(&(&r->src.addr)->v.a.mask)->pfa
.addr32[1] && !(&(&r->src.addr)->v.a.mask
)->pfa.addr32[2] && !(&(&r->src.addr)->
v.a.mask)->pfa.addr32[3] )) && !pf_match_addr(0, &
(&r->src.addr)->v.a.addr, &(&r->src.addr
)->v.a.mask, (&ctx->pd->nsaddr), (ctx->pd->
naf))))) != (r->src.neg) ))) { r = r->skip[5].ptr; continue
; } else do { } while (0)
;
4117 PF_TEST_ATTRIB((PF_MISMATCHAW(&r->dst.addr, &ctx->pd->ndaddr,if ((( (((&r->dst.addr)->type == PF_ADDR_NOROUTE &&
pf_routable((&ctx->pd->ndaddr), (ctx->pd->af
), ((void *)0), (ctx->act.rtableid))) || (((&r->dst
.addr)->type == PF_ADDR_URPFFAILED && (((void *)0)
) != ((void *)0) && pf_routable((&ctx->pd->
ndaddr), (ctx->pd->af), (((void *)0)), (ctx->act.rtableid
))) || ((&r->dst.addr)->type == PF_ADDR_RTLABEL &&
!pf_rtlabel_match((&ctx->pd->ndaddr), (ctx->pd->
af), (&r->dst.addr), (ctx->act.rtableid))) || ((&
r->dst.addr)->type == PF_ADDR_TABLE && !pfr_match_addr
((&r->dst.addr)->p.tbl, (&ctx->pd->ndaddr
), (ctx->pd->af))) || ((&r->dst.addr)->type ==
PF_ADDR_DYNIFTL && !pfi_match_addr((&r->dst.addr
)->p.dyn, (&ctx->pd->ndaddr), (ctx->pd->af
))) || ((&r->dst.addr)->type == PF_ADDR_RANGE &&
!pf_match_addr_range(&(&r->dst.addr)->v.a.addr
, &(&r->dst.addr)->v.a.mask, (&ctx->pd->
ndaddr), (ctx->pd->af))) || ((&r->dst.addr)->
type == PF_ADDR_ADDRMASK && !(((ctx->pd->af) ==
2 && !(&(&r->dst.addr)->v.a.mask)->
pfa.addr32[0]) || ((ctx->pd->af) == 24 && !(&
(&r->dst.addr)->v.a.mask)->pfa.addr32[0] &&
!(&(&r->dst.addr)->v.a.mask)->pfa.addr32[1]
&& !(&(&r->dst.addr)->v.a.mask)->pfa
.addr32[2] && !(&(&r->dst.addr)->v.a.mask
)->pfa.addr32[3] )) && !pf_match_addr(0, &(&
r->dst.addr)->v.a.addr, &(&r->dst.addr)->
v.a.mask, (&ctx->pd->ndaddr), (ctx->pd->af)))
)) != (r->dst.neg) ))) { r = r->skip[6].ptr; continue; }
else do { } while (0)
4118 ctx->pd->af, r->dst.neg, NULL, ctx->act.rtableid)),if ((( (((&r->dst.addr)->type == PF_ADDR_NOROUTE &&
pf_routable((&ctx->pd->ndaddr), (ctx->pd->af
), ((void *)0), (ctx->act.rtableid))) || (((&r->dst
.addr)->type == PF_ADDR_URPFFAILED && (((void *)0)
) != ((void *)0) && pf_routable((&ctx->pd->
ndaddr), (ctx->pd->af), (((void *)0)), (ctx->act.rtableid
))) || ((&r->dst.addr)->type == PF_ADDR_RTLABEL &&
!pf_rtlabel_match((&ctx->pd->ndaddr), (ctx->pd->
af), (&r->dst.addr), (ctx->act.rtableid))) || ((&
r->dst.addr)->type == PF_ADDR_TABLE && !pfr_match_addr
((&r->dst.addr)->p.tbl, (&ctx->pd->ndaddr
), (ctx->pd->af))) || ((&r->dst.addr)->type ==
PF_ADDR_DYNIFTL && !pfi_match_addr((&r->dst.addr
)->p.dyn, (&ctx->pd->ndaddr), (ctx->pd->af
))) || ((&r->dst.addr)->type == PF_ADDR_RANGE &&
!pf_match_addr_range(&(&r->dst.addr)->v.a.addr
, &(&r->dst.addr)->v.a.mask, (&ctx->pd->
ndaddr), (ctx->pd->af))) || ((&r->dst.addr)->
type == PF_ADDR_ADDRMASK && !(((ctx->pd->af) ==
2 && !(&(&r->dst.addr)->v.a.mask)->
pfa.addr32[0]) || ((ctx->pd->af) == 24 && !(&
(&r->dst.addr)->v.a.mask)->pfa.addr32[0] &&
!(&(&r->dst.addr)->v.a.mask)->pfa.addr32[1]
&& !(&(&r->dst.addr)->v.a.mask)->pfa
.addr32[2] && !(&(&r->dst.addr)->v.a.mask
)->pfa.addr32[3] )) && !pf_match_addr(0, &(&
r->dst.addr)->v.a.addr, &(&r->dst.addr)->
v.a.mask, (&ctx->pd->ndaddr), (ctx->pd->af)))
)) != (r->dst.neg) ))) { r = r->skip[6].ptr; continue; }
else do { } while (0)
4119 r->skip[PF_SKIP_DST_ADDR].ptr)if ((( (((&r->dst.addr)->type == PF_ADDR_NOROUTE &&
pf_routable((&ctx->pd->ndaddr), (ctx->pd->af
), ((void *)0), (ctx->act.rtableid))) || (((&r->dst
.addr)->type == PF_ADDR_URPFFAILED && (((void *)0)
) != ((void *)0) && pf_routable((&ctx->pd->
ndaddr), (ctx->pd->af), (((void *)0)), (ctx->act.rtableid
))) || ((&r->dst.addr)->type == PF_ADDR_RTLABEL &&
!pf_rtlabel_match((&ctx->pd->ndaddr), (ctx->pd->
af), (&r->dst.addr), (ctx->act.rtableid))) || ((&
r->dst.addr)->type == PF_ADDR_TABLE && !pfr_match_addr
((&r->dst.addr)->p.tbl, (&ctx->pd->ndaddr
), (ctx->pd->af))) || ((&r->dst.addr)->type ==
PF_ADDR_DYNIFTL && !pfi_match_addr((&r->dst.addr
)->p.dyn, (&ctx->pd->ndaddr), (ctx->pd->af
))) || ((&r->dst.addr)->type == PF_ADDR_RANGE &&
!pf_match_addr_range(&(&r->dst.addr)->v.a.addr
, &(&r->dst.addr)->v.a.mask, (&ctx->pd->
ndaddr), (ctx->pd->af))) || ((&r->dst.addr)->
type == PF_ADDR_ADDRMASK && !(((ctx->pd->af) ==
2 && !(&(&r->dst.addr)->v.a.mask)->
pfa.addr32[0]) || ((ctx->pd->af) == 24 && !(&
(&r->dst.addr)->v.a.mask)->pfa.addr32[0] &&
!(&(&r->dst.addr)->v.a.mask)->pfa.addr32[1]
&& !(&(&r->dst.addr)->v.a.mask)->pfa
.addr32[2] && !(&(&r->dst.addr)->v.a.mask
)->pfa.addr32[3] )) && !pf_match_addr(0, &(&
r->dst.addr)->v.a.addr, &(&r->dst.addr)->
v.a.mask, (&ctx->pd->ndaddr), (ctx->pd->af)))
)) != (r->dst.neg) ))) { r = r->skip[6].ptr; continue; }
else do { } while (0)
;
4120
4121 switch (ctx->pd->virtual_proto) {
4122 case PF_VPROTO_FRAGMENT256:
4123 /* tcp/udp only. port_op always 0 in other cases */
4124 PF_TEST_ATTRIB((r->src.port_op || r->dst.port_op),if ((r->src.port_op || r->dst.port_op)) { r = ((r)->
entries.tqe_next); continue; } else do { } while (0)
4125 TAILQ_NEXT(r, entries))if ((r->src.port_op || r->dst.port_op)) { r = ((r)->
entries.tqe_next); continue; } else do { } while (0)
;
4126 PF_TEST_ATTRIB((ctx->pd->proto == IPPROTO_TCP &&if ((ctx->pd->proto == 6 && r->flagset)) { r
= ((r)->entries.tqe_next); continue; } else do { } while (
0)
4127 r->flagset),if ((ctx->pd->proto == 6 && r->flagset)) { r
= ((r)->entries.tqe_next); continue; } else do { } while (
0)
4128 TAILQ_NEXT(r, entries))if ((ctx->pd->proto == 6 && r->flagset)) { r
= ((r)->entries.tqe_next); continue; } else do { } while (
0)
;
4129 /* icmp only. type/code always 0 in other cases */
4130 PF_TEST_ATTRIB((r->type || r->code),if ((r->type || r->code)) { r = ((r)->entries.tqe_next
); continue; } else do { } while (0)
4131 TAILQ_NEXT(r, entries))if ((r->type || r->code)) { r = ((r)->entries.tqe_next
); continue; } else do { } while (0)
;
4132 /* tcp/udp only. {uid|gid}.op always 0 in other cases */
4133 PF_TEST_ATTRIB((r->gid.op || r->uid.op),if ((r->gid.op || r->uid.op)) { r = ((r)->entries.tqe_next
); continue; } else do { } while (0)
4134 TAILQ_NEXT(r, entries))if ((r->gid.op || r->uid.op)) { r = ((r)->entries.tqe_next
); continue; } else do { } while (0)
;
4135 break;
4136
4137 case IPPROTO_TCP6:
4138 PF_TEST_ATTRIB(((r->flagset & ctx->th->th_flags) !=if (((r->flagset & ctx->th->th_flags) != r->flags
)) { r = ((r)->entries.tqe_next); continue; } else do { } while
(0)
4139 r->flags),if (((r->flagset & ctx->th->th_flags) != r->flags
)) { r = ((r)->entries.tqe_next); continue; } else do { } while
(0)
4140 TAILQ_NEXT(r, entries))if (((r->flagset & ctx->th->th_flags) != r->flags
)) { r = ((r)->entries.tqe_next); continue; } else do { } while
(0)
;
4141 PF_TEST_ATTRIB((r->os_fingerprint != PF_OSFP_ANY &&if ((r->os_fingerprint != ((pf_osfp_t)0) && !pf_osfp_match
(pf_osfp_fingerprint(ctx->pd), r->os_fingerprint))) { r
= ((r)->entries.tqe_next); continue; } else do { } while (
0)
4142 !pf_osfp_match(pf_osfp_fingerprint(ctx->pd),if ((r->os_fingerprint != ((pf_osfp_t)0) && !pf_osfp_match
(pf_osfp_fingerprint(ctx->pd), r->os_fingerprint))) { r
= ((r)->entries.tqe_next); continue; } else do { } while (
0)
4143 r->os_fingerprint)),if ((r->os_fingerprint != ((pf_osfp_t)0) && !pf_osfp_match
(pf_osfp_fingerprint(ctx->pd), r->os_fingerprint))) { r
= ((r)->entries.tqe_next); continue; } else do { } while (
0)
4144 TAILQ_NEXT(r, entries))if ((r->os_fingerprint != ((pf_osfp_t)0) && !pf_osfp_match
(pf_osfp_fingerprint(ctx->pd), r->os_fingerprint))) { r
= ((r)->entries.tqe_next); continue; } else do { } while (
0)
;
4145 /* FALLTHROUGH */
4146
4147 case IPPROTO_UDP17:
4148 /* tcp/udp only. port_op always 0 in other cases */
4149 PF_TEST_ATTRIB((r->src.port_op &&if ((r->src.port_op && !pf_match_port(r->src.port_op
, r->src.port[0], r->src.port[1], ctx->pd->nsport
))) { r = r->skip[7].ptr; continue; } else do { } while (0
)
4150 !pf_match_port(r->src.port_op, r->src.port[0],if ((r->src.port_op && !pf_match_port(r->src.port_op
, r->src.port[0], r->src.port[1], ctx->pd->nsport
))) { r = r->skip[7].ptr; continue; } else do { } while (0
)
4151 r->src.port[1], ctx->pd->nsport)),if ((r->src.port_op && !pf_match_port(r->src.port_op
, r->src.port[0], r->src.port[1], ctx->pd->nsport
))) { r = r->skip[7].ptr; continue; } else do { } while (0
)
4152 r->skip[PF_SKIP_SRC_PORT].ptr)if ((r->src.port_op && !pf_match_port(r->src.port_op
, r->src.port[0], r->src.port[1], ctx->pd->nsport
))) { r = r->skip[7].ptr; continue; } else do { } while (0
)
;
4153 PF_TEST_ATTRIB((r->dst.port_op &&if ((r->dst.port_op && !pf_match_port(r->dst.port_op
, r->dst.port[0], r->dst.port[1], ctx->pd->ndport
))) { r = r->skip[8].ptr; continue; } else do { } while (0
)
4154 !pf_match_port(r->dst.port_op, r->dst.port[0],if ((r->dst.port_op && !pf_match_port(r->dst.port_op
, r->dst.port[0], r->dst.port[1], ctx->pd->ndport
))) { r = r->skip[8].ptr; continue; } else do { } while (0
)
4155 r->dst.port[1], ctx->pd->ndport)),if ((r->dst.port_op && !pf_match_port(r->dst.port_op
, r->dst.port[0], r->dst.port[1], ctx->pd->ndport
))) { r = r->skip[8].ptr; continue; } else do { } while (0
)
4156 r->skip[PF_SKIP_DST_PORT].ptr)if ((r->dst.port_op && !pf_match_port(r->dst.port_op
, r->dst.port[0], r->dst.port[1], ctx->pd->ndport
))) { r = r->skip[8].ptr; continue; } else do { } while (0
)
;
4157 /* tcp/udp only. uid.op always 0 in other cases */
4158 PF_TEST_ATTRIB((r->uid.op && (ctx->pd->lookup.done ||if ((r->uid.op && (ctx->pd->lookup.done || (
ctx->pd->lookup.done = pf_socket_lookup(ctx->pd), 1)
) && !pf_match_uid(r->uid.op, r->uid.uid[0], r->
uid.uid[1], ctx->pd->lookup.uid))) { r = ((r)->entries
.tqe_next); continue; } else do { } while (0)
4159 (ctx->pd->lookup.done =if ((r->uid.op && (ctx->pd->lookup.done || (
ctx->pd->lookup.done = pf_socket_lookup(ctx->pd), 1)
) && !pf_match_uid(r->uid.op, r->uid.uid[0], r->
uid.uid[1], ctx->pd->lookup.uid))) { r = ((r)->entries
.tqe_next); continue; } else do { } while (0)
4160 pf_socket_lookup(ctx->pd), 1)) &&if ((r->uid.op && (ctx->pd->lookup.done || (
ctx->pd->lookup.done = pf_socket_lookup(ctx->pd), 1)
) && !pf_match_uid(r->uid.op, r->uid.uid[0], r->
uid.uid[1], ctx->pd->lookup.uid))) { r = ((r)->entries
.tqe_next); continue; } else do { } while (0)
4161 !pf_match_uid(r->uid.op, r->uid.uid[0],if ((r->uid.op && (ctx->pd->lookup.done || (
ctx->pd->lookup.done = pf_socket_lookup(ctx->pd), 1)
) && !pf_match_uid(r->uid.op, r->uid.uid[0], r->
uid.uid[1], ctx->pd->lookup.uid))) { r = ((r)->entries
.tqe_next); continue; } else do { } while (0)
4162 r->uid.uid[1], ctx->pd->lookup.uid)),if ((r->uid.op && (ctx->pd->lookup.done || (
ctx->pd->lookup.done = pf_socket_lookup(ctx->pd), 1)
) && !pf_match_uid(r->uid.op, r->uid.uid[0], r->
uid.uid[1], ctx->pd->lookup.uid))) { r = ((r)->entries
.tqe_next); continue; } else do { } while (0)
4163 TAILQ_NEXT(r, entries))if ((r->uid.op && (ctx->pd->lookup.done || (
ctx->pd->lookup.done = pf_socket_lookup(ctx->pd), 1)
) && !pf_match_uid(r->uid.op, r->uid.uid[0], r->
uid.uid[1], ctx->pd->lookup.uid))) { r = ((r)->entries
.tqe_next); continue; } else do { } while (0)
;
4164 /* tcp/udp only. gid.op always 0 in other cases */
4165 PF_TEST_ATTRIB((r->gid.op && (ctx->pd->lookup.done ||if ((r->gid.op && (ctx->pd->lookup.done || (
ctx->pd->lookup.done = pf_socket_lookup(ctx->pd), 1)
) && !pf_match_gid(r->gid.op, r->gid.gid[0], r->
gid.gid[1], ctx->pd->lookup.gid))) { r = ((r)->entries
.tqe_next); continue; } else do { } while (0)
4166 (ctx->pd->lookup.done =if ((r->gid.op && (ctx->pd->lookup.done || (
ctx->pd->lookup.done = pf_socket_lookup(ctx->pd), 1)
) && !pf_match_gid(r->gid.op, r->gid.gid[0], r->
gid.gid[1], ctx->pd->lookup.gid))) { r = ((r)->entries
.tqe_next); continue; } else do { } while (0)
4167 pf_socket_lookup(ctx->pd), 1)) &&if ((r->gid.op && (ctx->pd->lookup.done || (
ctx->pd->lookup.done = pf_socket_lookup(ctx->pd), 1)
) && !pf_match_gid(r->gid.op, r->gid.gid[0], r->
gid.gid[1], ctx->pd->lookup.gid))) { r = ((r)->entries
.tqe_next); continue; } else do { } while (0)
4168 !pf_match_gid(r->gid.op, r->gid.gid[0],if ((r->gid.op && (ctx->pd->lookup.done || (
ctx->pd->lookup.done = pf_socket_lookup(ctx->pd), 1)
) && !pf_match_gid(r->gid.op, r->gid.gid[0], r->
gid.gid[1], ctx->pd->lookup.gid))) { r = ((r)->entries
.tqe_next); continue; } else do { } while (0)
4169 r->gid.gid[1], ctx->pd->lookup.gid)),if ((r->gid.op && (ctx->pd->lookup.done || (
ctx->pd->lookup.done = pf_socket_lookup(ctx->pd), 1)
) && !pf_match_gid(r->gid.op, r->gid.gid[0], r->
gid.gid[1], ctx->pd->lookup.gid))) { r = ((r)->entries
.tqe_next); continue; } else do { } while (0)
4170 TAILQ_NEXT(r, entries))if ((r->gid.op && (ctx->pd->lookup.done || (
ctx->pd->lookup.done = pf_socket_lookup(ctx->pd), 1)
) && !pf_match_gid(r->gid.op, r->gid.gid[0], r->
gid.gid[1], ctx->pd->lookup.gid))) { r = ((r)->entries
.tqe_next); continue; } else do { } while (0)
;
4171 break;
4172
4173 case IPPROTO_ICMP:
4174 /* icmp only. type always 0 in other cases */
4175 PF_TEST_ATTRIB((r->type &&
4176 r->type != ctx->icmptype + 1),
4177 TAILQ_NEXT(r, entries));
4178 /* icmp only. code always 0 in other cases */
4179 PF_TEST_ATTRIB((r->code &&
4180 r->code != ctx->icmpcode + 1),
4181 TAILQ_NEXT(r, entries));
4182 /* icmp only. don't create states on replies */
4183 PF_TEST_ATTRIB((r->keep_state && !ctx->state_icmp &&
4184 (r->rule_flag & PFRULE_STATESLOPPY) == 0 &&
4185 ctx->icmp_dir != PF_IN),
4186 TAILQ_NEXT(r, entries));
4187 break;
4188
4189 case IPPROTO_ICMPV6:
4190 /* icmp only. type always 0 in other cases */
4191 PF_TEST_ATTRIB((r->type &&
4192 r->type != ctx->icmptype + 1),
4193 TAILQ_NEXT(r, entries));
4194 /* icmp only. code always 0 in other cases */
4195 PF_TEST_ATTRIB((r->code &&
4196 r->code != ctx->icmpcode + 1),
4197 TAILQ_NEXT(r, entries));
4198 /* icmp only. don't create states on replies */
4199 PF_TEST_ATTRIB((r->keep_state && !ctx->state_icmp &&
4200 (r->rule_flag & PFRULE_STATESLOPPY) == 0 &&
4201 ctx->icmp_dir != PF_IN &&
4202 ctx->icmptype != ND_NEIGHBOR_ADVERT),
4203 TAILQ_NEXT(r, entries));
4204 break;
4205
4206 default:
4207 break;
4208 }
4209
4210 PF_TEST_ATTRIB((r->rule_flag & PFRULE_FRAGMENT &&
4211 ctx->pd->virtual_proto != PF_VPROTO_FRAGMENT),
4212 TAILQ_NEXT(r, entries));
4213 PF_TEST_ATTRIB((r->tos && !(r->tos == ctx->pd->tos)),
4214 TAILQ_NEXT(r, entries));
4215 PF_TEST_ATTRIB((r->prob &&
4216 r->prob <= arc4random_uniform(UINT_MAX - 1) + 1),
4217 TAILQ_NEXT(r, entries));
4218 PF_TEST_ATTRIB((r->match_tag &&
4219 !pf_match_tag(ctx->pd->m, r, &ctx->tag)),
4220 TAILQ_NEXT(r, entries));
4221 PF_TEST_ATTRIB((r->rcv_kif && pf_match_rcvif(ctx->pd->m, r) ==
4222 r->rcvifnot),
4223 TAILQ_NEXT(r, entries));
4224 PF_TEST_ATTRIB((r->prio &&
4225 (r->prio == PF_PRIO_ZERO ? 0 : r->prio) !=
4226 ctx->pd->m->m_pkthdr.pf.prio),
4227 TAILQ_NEXT(r, entries));
4228
4229 /* must be last! */
4230 if (r->pktrate.limit) {
4231 pf_add_threshold(&r->pktrate);
4232 PF_TEST_ATTRIB((pf_check_threshold(&r->pktrate)),
4233 TAILQ_NEXT(r, entries));
4234 }
4235
4236 /* FALLTHROUGH */
4237 if (r->tag)
4238 ctx->tag = r->tag;
4239 if (r->anchor == NULL) {
4240
4241 if (r->rule_flag & PFRULE_ONCE) {
4242 u_int32_t rule_flag;
4243
4244 rule_flag = r->rule_flag;
4245 if (((rule_flag & PFRULE_EXPIRED) == 0) &&
4246 atomic_cas_uint(&r->rule_flag, rule_flag,
4247 rule_flag | PFRULE_EXPIRED) == rule_flag) {
4248 r->exptime = gettime();
4249 } else {
4250 r = TAILQ_NEXT(r, entries);
4251 continue;
4252 }
4253 }
4254
4255 if (r->action == PF_MATCH) {
4256 if ((ctx->ri = pool_get(&pf_rule_item_pl,
4257 PR_NOWAIT)) == NULL) {
4258 REASON_SET(&ctx->reason, PFRES_MEMORY);
4259 return (PF_TEST_FAIL);
4260 }
4261 ctx->ri->r = r;
4262 /* order is irrelevant */
4263 SLIST_INSERT_HEAD(&ctx->rules, ctx->ri, entry);
4264 ctx->ri = NULL;
4265 pf_rule_to_actions(r, &ctx->act);
4266 if (r->rule_flag & PFRULE_AFTO)
4267 ctx->pd->naf = r->naf;
4268 if (pf_get_transaddr(r, ctx->pd, ctx->sns,
4269 &ctx->nr) == -1) {
4270 REASON_SET(&ctx->reason,
4271 PFRES_TRANSLATE);
4272 return (PF_TEST_FAIL);
4273 }
4274#if NPFLOG > 0
4275 if (r->log) {
4276 REASON_SET(&ctx->reason, PFRES_MATCH);
4277 pflog_packet(ctx->pd, ctx->reason, r,
4278 ctx->a, ruleset, NULL);
4279 }
4280#endif /* NPFLOG > 0 */
4281 } else {
4282 /*
4283 * found matching r
4284 */
4285 *ctx->rm = r;
4286 /*
4287 * anchor, with ruleset, where r belongs to
4288 */
4289 *ctx->am = ctx->a;
4290 /*
4291 * ruleset where r belongs to
4292 */
4293 *ctx->rsm = ruleset;
4294 /*
4295 * ruleset, where anchor belongs to.
4296 */
4297 ctx->arsm = ctx->aruleset;
4298 }
4299
4300#if NPFLOG > 0
4301 if (ctx->act.log & PF_LOG_MATCHES)
4302 pf_log_matches(ctx->pd, r, ctx->a, ruleset,
4303 &ctx->rules);
4304#endif /* NPFLOG > 0 */
4305
4306 if (r->quick)
4307 return (PF_TEST_QUICK);
4308 } else {
4309 ctx->a = r;
4310 ctx->aruleset = &r->anchor->ruleset;
4311 if (r->anchor_wildcard) {
4312 RB_FOREACH(child, pf_anchor_node,
4313 &r->anchor->children) {
4314 if (pf_anchor_stack_push(ruleset, r,
4315 child, PF_NEXT_CHILD) != 0)
4316 return (PF_TEST_FAIL);
4317
4318 ruleset = &child->ruleset;
4319 goto enter_ruleset;
4320next_child:
4321 continue; /* with RB_FOREACH() */
4322 }
4323 } else {
4324 if (pf_anchor_stack_push(ruleset, r, child,
4325 PF_NEXT_RULE) != 0)
4326 return (PF_TEST_FAIL);
4327
4328 ruleset = &r->anchor->ruleset;
4329 child = NULL;
4330 goto enter_ruleset;
4331next_rule:
4332 ;
4333 }
4334 }
4335 r = TAILQ_NEXT(r, entries);
4336 }
4337
4338 if (pf_anchor_stack_pop(&ruleset, &r, &child, &target) == 0) {
4339 /* stop if any rule matched within quick anchors. */
4340 if (r->quick == PF_TEST_QUICK && *ctx->am == r)
4341 return (PF_TEST_QUICK);
4342
4343 switch (target) {
4344 case PF_NEXT_CHILD:
4345 goto next_child;
4346 case PF_NEXT_RULE:
4347 goto next_rule;
4348 default:
4349 panic("%s: unknown jump target", __func__);
4350 }
4351 }
4352
4353 return (PF_TEST_OK);
4354}
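
The PF_TEST_ATTRIB() calls traced throughout pf_match_rule() above are easier to read with the macro unfolded. Judging purely from the inline expansions shown in this report (the macro's real definition lives earlier in pf.c), it is a skip-to-next-rule guard; a minimal sketch of that shape, with illustrative parameter names:

    /*
     * Sketch reconstructed from the expansions above, not copied from
     * the source: if the attribute test `t' is true the current rule
     * cannot match, so advance `r' to the next entry on the TAILQ and
     * restart the rule loop; otherwise fall through to the next test.
     * The trailing `else do { } while (0)' swallows the caller's
     * semicolon and keeps the macro safe inside unbraced if/else.
     */
    #define PF_TEST_ATTRIB(t, skip_to)		\
    	if (t) {				\
    		r = (skip_to);			\
    		continue;			\
    	} else					\
    		do { } while (0)
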
4355
4356int
4357pf_test_rule(struct pf_pdesc *pd, struct pf_rule **rm, struct pf_state **sm,
4358 struct pf_rule **am, struct pf_ruleset **rsm, u_short *reason)
4359{
4360 struct pf_rule *r = NULL;
4361 struct pf_rule *a = NULL;
4362 struct pf_ruleset *ruleset = NULL;
4363 struct pf_state_key *skw = NULL, *sks = NULL;
4364 int rewrite = 0;
4365 u_int16_t virtual_type, virtual_id;
4366 int action = PF_DROP;
4367 struct pf_test_ctx ctx;
4368 int rv;
4369
4370 PF_ASSERT_LOCKED();
4371
4372 memset(&ctx, 0, sizeof(ctx));
4373 ctx.pd = pd;
4374 ctx.rm = rm;
4375 ctx.am = am;
4376 ctx.rsm = rsm;
4377 ctx.th = &pd->hdr.tcp;
4378 ctx.act.rtableid = pd->rdomain;
4379 ctx.tag = -1;
4380 SLIST_INIT(&ctx.rules);
4381
4382 if (pd->dir == PF_IN && if_congested()) {
4383 REASON_SET(&ctx.reason, PFRES_CONGEST);
4384 return (PF_DROP);
4385 }
4386
4387 switch (pd->virtual_proto) {
4388 case IPPROTO_ICMP:
4389 ctx.icmptype = pd->hdr.icmp.icmp_type;
4390 ctx.icmpcode = pd->hdr.icmp.icmp_code;
4391 ctx.state_icmp = pf_icmp_mapping(pd, ctx.icmptype,
4392 &ctx.icmp_dir, &virtual_id, &virtual_type);
4393 if (ctx.icmp_dir == PF_IN) {
4394 pd->osport = pd->nsport = virtual_id;
4395 pd->odport = pd->ndport = virtual_type;
4396 } else {
4397 pd->osport = pd->nsport = virtual_type;
4398 pd->odport = pd->ndport = virtual_id;
4399 }
4400 break;
4401#ifdef INET6
4402 case IPPROTO_ICMPV6:
4403 ctx.icmptype = pd->hdr.icmp6.icmp6_type;
4404 ctx.icmpcode = pd->hdr.icmp6.icmp6_code;
4405 ctx.state_icmp = pf_icmp_mapping(pd, ctx.icmptype,
4406 &ctx.icmp_dir, &virtual_id, &virtual_type);
4407 if (ctx.icmp_dir == PF_IN) {
4408 pd->osport = pd->nsport = virtual_id;
4409 pd->odport = pd->ndport = virtual_type;
4410 } else {
4411 pd->osport = pd->nsport = virtual_type;
4412 pd->odport = pd->ndport = virtual_id;
4413 }
4414 break;
4415#endif /* INET6 */
4416 }
4417
4418 ruleset = &pf_main_ruleset;
4419 rv = pf_match_rule(&ctx, ruleset);
4420 if (rv == PF_TEST_FAIL) {
4421 /*
4422 * Reason has been set in pf_match_rule() already.
4423 */
4424 goto cleanup;
4425 }
4426
4427 r = *ctx.rm; /* matching rule */
4428 a = *ctx.am; /* rule that defines an anchor containing 'r' */
4429 ruleset = *ctx.rsm;/* ruleset of the anchor defined by the rule 'a' */
4430 ctx.aruleset = ctx.arsm;/* ruleset of the 'a' rule itself */
4431
4432 /* apply actions for last matching pass/block rule */
4433 pf_rule_to_actions(r, &ctx.act);
4434 if (r->rule_flag & PFRULE_AFTO)
4435 pd->naf = r->naf;
4436 if (pf_get_transaddr(r, pd, ctx.sns, &ctx.nr) == -1) {
4437 REASON_SET(&ctx.reason, PFRES_TRANSLATE);
4438 goto cleanup;
4439 }
4440 REASON_SET(&ctx.reason, PFRES_MATCH);
4441
4442#if NPFLOG > 0
4443 if (r->log)
4444 pflog_packet(pd, ctx.reason, r, a, ruleset, NULL);
4445 if (ctx.act.log & PF_LOG_MATCHES)
4446 pf_log_matches(pd, r, a, ruleset, &ctx.rules);
4447#endif /* NPFLOG > 0 */
4448
4449 if (pd->virtual_proto != PF_VPROTO_FRAGMENT &&
4450 (r->action == PF_DROP) &&
4451 ((r->rule_flag & PFRULE_RETURNRST) ||
4452 (r->rule_flag & PFRULE_RETURNICMP) ||
4453 (r->rule_flag & PFRULE_RETURN))) {
4454 if (pd->proto == IPPROTO_TCP &&
4455 ((r->rule_flag & PFRULE_RETURNRST) ||
4456 (r->rule_flag & PFRULE_RETURN)) &&
4457 !(ctx.th->th_flags & TH_RST)) {
4458 u_int32_t ack =
4459 ntohl(ctx.th->th_seq) + pd->p_len;
4460
4461 if (pf_check_tcp_cksum(pd->m, pd->off,
4462 pd->tot_len - pd->off, pd->af))
4463 REASON_SET(&ctx.reason, PFRES_PROTCKSUM);
4464 else {
4465 if (ctx.th->th_flags & TH_SYN)
4466 ack++;
4467 if (ctx.th->th_flags & TH_FIN)
4468 ack++;
4469 pf_send_tcp(r, pd->af, pd->dst,
4470 pd->src, ctx.th->th_dport,
4471 ctx.th->th_sport, ntohl(ctx.th->th_ack),
4472 ack, TH_RST|TH_ACK, 0, 0, r->return_ttl,
4473 1, 0, pd->rdomain);
4474 }
4475 } else if ((pd->proto != IPPROTO_ICMP ||
4476 ICMP_INFOTYPE(ctx.icmptype)) && pd->af == AF_INET &&
4477 r->return_icmp)
4478 pf_send_icmp(pd->m, r->return_icmp >> 8,
4479 r->return_icmp & 255, 0, pd->af, r, pd->rdomain);
4480 else if ((pd->proto != IPPROTO_ICMPV6 ||
4481 (ctx.icmptype >= ICMP6_ECHO_REQUEST &&
4482 ctx.icmptype != ND_REDIRECT)) && pd->af == AF_INET6 &&
4483 r->return_icmp6)
4484 pf_send_icmp(pd->m, r->return_icmp6 >> 8,
4485 r->return_icmp6 & 255, 0, pd->af, r, pd->rdomain);
4486 }
4487
4488 if (r->action == PF_DROP)
4489 goto cleanup;
4490
4491 pf_tag_packet(pd->m, ctx.tag, ctx.act.rtableid);
4492 if (ctx.act.rtableid >= 0 &&
4493 rtable_l2(ctx.act.rtableid) != pd->rdomain)
4494 pd->destchg = 1;
4495
4496 if (r->action == PF_PASS && pd->badopts != 0 && ! r->allow_opts) {
4497 REASON_SET(&ctx.reason, PFRES_IPOPTIONS);
4498#if NPFLOG > 0
4499 pd->pflog |= PF_LOG_FORCE;
4500#endif /* NPFLOG > 0 */
4501 DPFPRINTF(LOG_NOTICE, "dropping packet with "
4502 "ip/ipv6 options in pf_test_rule()");
4503 goto cleanup;
4504 }
4505
4506 if (pd->virtual_proto != PF_VPROTO_FRAGMENT
4507 && !ctx.state_icmp && r->keep_state) {
4508
4509 if (r->rule_flag & PFRULE_SRCTRACK &&
4510 pf_insert_src_node(&ctx.sns[PF_SN_NONE], r, PF_SN_NONE,
4511 pd->af, pd->src, NULL, NULL) != 0) {
4512 REASON_SET(&ctx.reason, PFRES_SRCLIMIT);
4513 goto cleanup;
4514 }
4515
4516 if (r->max_states && (r->states_cur >= r->max_states)) {
4517 pf_status.lcounters[LCNT_STATES]++;
4518 REASON_SET(&ctx.reason, PFRES_MAXSTATES);
4519 goto cleanup;
4520 }
4521
4522 action = pf_create_state(pd, r, a, ctx.nr, &skw, &sks,
4523 &rewrite, sm, ctx.tag, &ctx.rules, &ctx.act, ctx.sns);
4524
4525 if (action != PF_PASS)
4526 goto cleanup;
4527 if (sks != skw) {
4528 struct pf_state_key *sk;
4529
4530 if (pd->dir == PF_IN)
4531 sk = sks;
4532 else
4533 sk = skw;
4534 rewrite += pf_translate(pd,
4535 &sk->addr[pd->af == pd->naf ? pd->sidx : pd->didx],
4536 sk->port[pd->af == pd->naf ? pd->sidx : pd->didx],
4537 &sk->addr[pd->af == pd->naf ? pd->didx : pd->sidx],
4538 sk->port[pd->af == pd->naf ? pd->didx : pd->sidx],
4539 virtual_type, ctx.icmp_dir);
4540 }
4541
4542#ifdef INET6
4543 if (rewrite && skw->af != sks->af)
4544 action = PF_AFRT;
4545#endif /* INET6 */
4546
4547 } else {
4548 action = PF_PASS;
4549
4550 while ((ctx.ri = SLIST_FIRST(&ctx.rules))) {
4551 SLIST_REMOVE_HEAD(&ctx.rules, entry);
4552 pool_put(&pf_rule_item_pl, ctx.ri);
4553 }
4554 }
4555
4556 /* copy back packet headers if needed */
4557 if (rewrite && pd->hdrlen) {
4558 m_copyback(pd->m, pd->off, pd->hdrlen, &pd->hdr, M_NOWAIT);
4559 }
4560
4561#if NPFSYNC > 0
4562 if (*sm != NULL && !ISSET((*sm)->state_flags, PFSTATE_NOSYNC) &&
4563 pd->dir == PF_OUT && pfsync_is_up()) {
4564 /*
4565 * We want the state created, but we don't
4566 * want to send this in case a partner
4567 * firewall has to know about it to allow
4568 * replies through it.
4569 */
4570 if (pfsync_defer(*sm, pd->m))
4571 return (PF_DEFER);
4572 }
4573#endif /* NPFSYNC > 0 */
4574
4575 return (action);
4576
4577cleanup:
4578 while ((ctx.ri = SLIST_FIRST(&ctx.rules))) {
4579 SLIST_REMOVE_HEAD(&ctx.rules, entry);
4580 pool_put(&pf_rule_item_pl, ctx.ri);
4581 }
4582
4583 return (action);
4584}
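
REASON_SET() recurs throughout pf_test_rule(). From its inline expansions in this report, it records the drop/match reason and bumps the corresponding pf_status counter; a minimal sketch consistent with those expansions (the bound 17 is what PFRES_MAX expands to in this build, and the parameter names are illustrative):

    /*
     * Sketch reconstructed from the expansions above: store the
     * reason code through `a' when the pointer is non-NULL, and
     * count it in pf_status.counters[] when it is a known reason
     * (x < PFRES_MAX, i.e. 17 here).
     */
    #define REASON_SET(a, x)					\
    	do {							\
    		if ((void *)(a) != NULL) {			\
    			*(a) = (x);				\
    			if ((x) < PFRES_MAX)			\
    				pf_status.counters[(x)]++;	\
    		}						\
    	} while (0)
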
4585
4586static __inline int
4587pf_create_state(struct pf_pdesc *pd, struct pf_rule *r, struct pf_rule *a,
4588 struct pf_rule *nr, struct pf_state_key **skw, struct pf_state_key **sks,
4589 int *rewrite, struct pf_state **sm, int tag, struct pf_rule_slist *rules,
4590 struct pf_rule_actions *act, struct pf_src_node *sns[PF_SN_MAX])
4591{
4592 struct pf_state *st = NULL;
4593 struct tcphdr *th = &pd->hdr.tcp;
4594 u_int16_t mss = tcp_mssdflt;
4595 u_short reason;
4596 u_int i;
4597
4598 st = pool_get(&pf_state_pl, PR_NOWAIT | PR_ZERO);
4599 if (st == NULL) {
4600 REASON_SET(&reason, PFRES_MEMORY);
4601 goto csfailed;
4602 }
4603 st->rule.ptr = r;
4604 st->anchor.ptr = a;
4605 st->natrule.ptr = nr;
4606 if (r->allow_opts)
4607 st->state_flags |= PFSTATE_ALLOWOPTS;
4608 if (r->rule_flag & PFRULE_STATESLOPPY)
4609 st->state_flags |= PFSTATE_SLOPPY;
4610 if (r->rule_flag & PFRULE_PFLOW)
4611 st->state_flags |= PFSTATE_PFLOW;
4612 if (r->rule_flag & PFRULE_NOSYNC)
4613 st->state_flags |= PFSTATE_NOSYNC;
4614#if NPFLOG > 0
4615 st->log = act->log & PF_LOG_ALL;
4616#endif /* NPFLOG > 0 */
4617 st->qid = act->qid;
4618 st->pqid = act->pqid;
4619 st->rtableid[pd->didx] = act->rtableid;
4620 st->rtableid[pd->sidx] = -1; /* return traffic is routed normally */
4621 st->min_ttl = act->min_ttl;
4622 st->set_tos = act->set_tos;
4623 st->max_mss = act->max_mss;
4624 st->state_flags |= act->flags;
4625#if NPFSYNC > 0
4626 st->sync_state = PFSYNC_S_NONE;
4627#endif /* NPFSYNC > 0 */
4628 st->set_prio[0] = act->set_prio[0];
4629 st->set_prio[1] = act->set_prio[1];
4630 st->delay = act->delay;
4631 SLIST_INIT(&st->src_nodes);
4632
4633 /*
4634 * must initialize refcnt, before pf_state_insert() gets called.
4635 * pf_state_insert() grabs reference for pfsync!
4636 */
4637 PF_REF_INIT(st->refcnt);
4638 mtx_init(&st->mtx, IPL_NET);
4639
4640 switch (pd->proto) {
4641 case IPPROTO_TCP:
4642 st->src.seqlo = ntohl(th->th_seq);
4643 st->src.seqhi = st->src.seqlo + pd->p_len + 1;
4644 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
4645 r->keep_state == PF_STATE_MODULATE) {
4646 /* Generate sequence number modulator */
4647 st->src.seqdiff = pf_tcp_iss(pd) - st->src.seqlo;
4648 if (st->src.seqdiff == 0)
4649 st->src.seqdiff = 1;
4650 pf_patch_32(pd, &th->th_seq,
4651 htonl(st->src.seqlo + st->src.seqdiff));
4652 *rewrite = 1;
4653 } else
4654 st->src.seqdiff = 0;
4655 if (th->th_flags & TH_SYN) {
4656 st->src.seqhi++;
4657 st->src.wscale = pf_get_wscale(pd);
4658 }
4659 st->src.max_win = MAX(ntohs(th->th_win), 1);
4660 if (st->src.wscale & PF_WSCALE_MASK) {
4661 /* Remove scale factor from initial window */
4662 int win = st->src.max_win;
4663 win += 1 << (st->src.wscale & PF_WSCALE_MASK);
4664 st->src.max_win = (win - 1) >>
4665 (st->src.wscale & PF_WSCALE_MASK);
4666 }
4667 if (th->th_flags & TH_FIN)
4668 st->src.seqhi++;
4669 st->dst.seqhi = 1;
4670 st->dst.max_win = 1;
4671 pf_set_protostate(st, PF_PEER_SRC, TCPS_SYN_SENT2);
4672 pf_set_protostate(st, PF_PEER_DST, TCPS_CLOSED0);
4673 st->timeout = PFTM_TCP_FIRST_PACKET;
4674 pf_status.states_halfopen++;
4675 break;
4676 case IPPROTO_UDP:
4677 pf_set_protostate(st, PF_PEER_SRC, PFUDPS_SINGLE);
4678 pf_set_protostate(st, PF_PEER_DST, PFUDPS_NO_TRAFFIC);
4679 st->timeout = PFTM_UDP_FIRST_PACKET;
4680 break;
4681 case IPPROTO_ICMP:
4682#ifdef INET6
4683 case IPPROTO_ICMPV6:
4684#endif /* INET6 */
4685 st->timeout = PFTM_ICMP_FIRST_PACKET;
4686 break;
4687 default:
4688 pf_set_protostate(st, PF_PEER_SRC, PFOTHERS_SINGLE);
4689 pf_set_protostate(st, PF_PEER_DST, PFOTHERS_NO_TRAFFIC);
4690 st->timeout = PFTM_OTHER_FIRST_PACKET;
4691 }
4692
4693 st->creation = getuptime();
4694 st->expire = getuptime();
4695
4696 if (pd->proto == IPPROTO_TCP) {
4697 if (st->state_flags & PFSTATE_SCRUB_TCP &&
4698 pf_normalize_tcp_init(pd, &st->src)) {
4699 REASON_SET(&reason, PFRES_MEMORY);
4700 goto csfailed;
4701 }
4702 if (st->state_flags & PFSTATE_SCRUB_TCP && st->src.scrub &&
4703 pf_normalize_tcp_stateful(pd, &reason, st,
4704 &st->src, &st->dst, rewrite)) {
4705 /* This really shouldn't happen!!! */
4706 DPFPRINTF(LOG_ERR,
4707 "%s: tcp normalize failed on first pkt", __func__);
4708 goto csfailed;
4709 }
4710 }
4711 st->direction = pd->dir;
4712
4713 if (pf_state_key_setup(pd, skw, sks, act->rtableid)) {
4714 REASON_SET(&reason, PFRES_MEMORY);
4715 goto csfailed;
4716 }
4717
4718 if (pf_set_rt_ifp(st, pd->src, (*skw)->af, sns) != 0) {
4719 REASON_SET(&reason, PFRES_NOROUTE);
4720 goto csfailed;
4721 }
4722
4723 for (i = 0; i < PF_SN_MAX; i++)
4724 if (sns[i] != NULL) {
4725 struct pf_sn_item *sni;
4726
4727 sni = pool_get(&pf_sn_item_pl, PR_NOWAIT);
4728 if (sni == NULL) {
4729 REASON_SET(&reason, PFRES_MEMORY);
4730 goto csfailed;
4731 }
4732 sni->sn = sns[i];
4733 SLIST_INSERT_HEAD(&st->src_nodes, sni, next);
4734 sni->sn->states++;
4735 }
4736
4737#if NPFSYNC > 0
4738 pfsync_init_state(st, *skw, *sks, 0);
4739#endif
4740
4741 if (pf_state_insert(BOUND_IFACE(r, pd->kif), skw, sks, st)) {
4742 *sks = *skw = NULL;
4743 REASON_SET(&reason, PFRES_STATEINS);
4744 goto csfailed;
4745 } else
4746 *sm = st;
4747
4748 /*
4749 * Make state responsible for rules it binds here.
4750 */
4751 memcpy(&st->match_rules, rules, sizeof(st->match_rules));
4752 memset(rules, 0, sizeof(*rules));
4753 STATE_INC_COUNTERS(st);
4754
4755 if (tag > 0) {
4756 pf_tag_ref(tag);
4757 st->tag = tag;
4758 }
4759 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
4760 TH_SYN && r->keep_state == PF_STATE_SYNPROXY && pd->dir == PF_IN) {
4761 int rtid = pd->rdomain;
4762 if (act->rtableid >= 0)
4763 rtid = act->rtableid;
4764 pf_set_protostate(st, PF_PEER_SRC, PF_TCPS_PROXY_SRC);
4765 st->src.seqhi = arc4random();
4766 /* Find mss option */
4767 mss = pf_get_mss(pd);
4768 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
4769 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
4770 st->src.mss = mss;
4771 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
4772 th->th_sport, st->src.seqhi, ntohl(th->th_seq) + 1,
4773 TH_SYN|TH_ACK, 0, st->src.mss, 0, 1, 0, pd->rdomain);
4774 REASON_SET(&reason, PFRES_SYNPROXY);
4775 return (PF_SYNPROXY_DROP);
4776 }
4777
4778 return (PF_PASS);
4779
4780csfailed:
4781 if (st) {
4782 pf_normalize_tcp_cleanup(st); /* safe even w/o init */
4783 pf_src_tree_remove_state(st);
4784 pool_put(&pf_state_pl, st);
4785 }
4786
4787 for (i = 0; i < PF_SN_MAX; i++)
4788 if (sns[i] != NULL((void *)0))
4789 pf_remove_src_node(sns[i]);
4790
4791 return (PF_DROP);
4792}
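
The initial-window fixup in pf_create_state() (source lines 4660-4665 above) removes the peer's advertised scale factor with a round-up shift, so a scaled window is never underestimated. A standalone illustration of that arithmetic, with made-up input values:

    #include <stdio.h>

    int
    main(void)
    {
    	unsigned int max_win = 1001;	/* example ntohs(th->th_win) */
    	unsigned int ws = 3;		/* example wscale & PF_WSCALE_MASK */

    	/* same steps as the code above: win += 1 << ws; (win - 1) >> ws */
    	unsigned int win = max_win + (1 << ws);
    	max_win = (win - 1) >> ws;

    	/* round-up division: ceil(1001 / 8) == 126 */
    	printf("descaled max_win = %u\n", max_win);
    	return 0;
    }
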
4793
4794int
4795pf_translate(struct pf_pdesc *pd, struct pf_addr *saddr, u_int16_t sport,
4796 struct pf_addr *daddr, u_int16_t dport, u_int16_t virtual_type,
4797 int icmp_dir)
4798{
4799 int rewrite = 0;
4800 int afto = pd->af != pd->naf;
4801
4802 if (afto || PF_ANEQ(daddr, pd->dst, pd->af))
4803 pd->destchg = 1;
4804
4805 switch (pd->proto) {
4806 case IPPROTO_TCP: /* FALLTHROUGH */
4807 case IPPROTO_UDP:
4808 rewrite += pf_patch_16(pd, pd->sport, sport);
4809 rewrite += pf_patch_16(pd, pd->dport, dport);
4810 break;
4811
4812 case IPPROTO_ICMP:
4813 if (pd->af != AF_INET)
4814 return (0);
4815
4816#ifdef INET6
4817 if (afto) {
4818 if (pf_translate_icmp_af(pd, AF_INET6, &pd->hdr.icmp))
4819 return (0);
4820 pd->proto = IPPROTO_ICMPV6;
4821 rewrite = 1;
4822 }
4823#endif /* INET6 */
4824 if (virtual_type == htons(ICMP_ECHO)) {
4825 u_int16_t icmpid = (icmp_dir == PF_IN) ? sport : dport;
4826 rewrite += pf_patch_16(pd,
4827 &pd->hdr.icmp.icmp_id, icmpid);
4828 }
4829 break;
4830
4831#ifdef INET6
4832 case IPPROTO_ICMPV6:
4833 if (pd->af != AF_INET6)
4834 return (0);
4835
4836 if (afto) {
4837 if (pf_translate_icmp_af(pd, AF_INET, &pd->hdr.icmp6))
4838 return (0);
4839 pd->proto = IPPROTO_ICMP;
4840 rewrite = 1;
4841 }
4842 if (virtual_type == htons(ICMP6_ECHO_REQUEST)) {
4843 u_int16_t icmpid = (icmp_dir == PF_IN) ? sport : dport;
4844 rewrite += pf_patch_16(pd,
4845 &pd->hdr.icmp6.icmp6_id, icmpid);
4846 }
4847 break;
4848#endif /* INET6 */
4849 }
4850
4851 if (!afto) {
4852 rewrite += pf_translate_a(pd, pd->src, saddr);
4853 rewrite += pf_translate_a(pd, pd->dst, daddr);
4854 }
4855
4856 return (rewrite);
4857}
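
pf_tcp_track_full() below leans on wraparound-safe sequence-space comparisons. The SEQ_*() expansions visible in this report reduce to a signed difference, so ordering stays correct across a 2^32 wrap; a minimal sketch consistent with those expansions:

    /*
     * As expanded above: `a' is "after" `b' when the signed 32-bit
     * difference is positive, even if `a' has numerically wrapped
     * past zero.
     */
    #define SEQ_GT(a, b)	((int)((a) - (b)) > 0)
    #define SEQ_GEQ(a, b)	((int)((a) - (b)) >= 0)

    /*
     * Example: SEQ_GT(0x00000005, 0xfffffffb) is true; the sequence
     * space wrapped, so 5 is ten octets past 0xfffffffb.
     */
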
4858
4859int
4860pf_tcp_track_full(struct pf_pdesc *pd, struct pf_state **stp, u_short *reason,
4861 int *copyback, int reverse)
4862{
4863 struct tcphdr *th = &pd->hdr.tcp;
4864 struct pf_state_peer *src, *dst;
4865 u_int16_t win = ntohs(th->th_win);
4866 u_int32_t ack, end, data_end, seq, orig_seq;
4867 u_int8_t sws, dws, psrc, pdst;
4868 int ackskew;
4869
4870 if ((pd->dir == (*stp)->direction && !reverse) ||
4871 (pd->dir != (*stp)->direction && reverse)) {
4872 src = &(*stp)->src;
4873 dst = &(*stp)->dst;
4874 psrc = PF_PEER_SRC;
4875 pdst = PF_PEER_DST;
4876 } else {
4877 src = &(*stp)->dst;
4878 dst = &(*stp)->src;
4879 psrc = PF_PEER_DST;
4880 pdst = PF_PEER_SRC;
4881 }
4882
4883 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
4884 sws = src->wscale & PF_WSCALE_MASK;
4885 dws = dst->wscale & PF_WSCALE_MASK;
4886 } else
4887 sws = dws = 0;
4888
4889 /*
4890 * Sequence tracking algorithm from Guido van Rooij's paper:
4891 * http://www.madison-gurkha.com/publications/tcp_filtering/
4892 * tcp_filtering.ps
4893 */
4894
4895 orig_seq = seq = ntohl(th->th_seq);
4896 if (src->seqlo == 0) {
4897 /* First packet from this end. Set its state */
4898
4899 if (((*stp)->state_flags & PFSTATE_SCRUB_TCP || dst->scrub) &&
4900 src->scrub == NULL) {
4901 if (pf_normalize_tcp_init(pd, src)) {
4902 REASON_SET(reason, PFRES_MEMORY);
4903 return (PF_DROP);
4904 }
4905 }
4906
4907 /* Deferred generation of sequence number modulator */
4908 if (dst->seqdiff && !src->seqdiff) {
4909 /* use random iss for the TCP server */
4910 while ((src->seqdiff = arc4random() - seq) == 0)
4911 continue;
4912 ack = ntohl(th->th_ack) - dst->seqdiff;
4913 pf_patch_32(pd, &th->th_seq, htonl(seq + src->seqdiff));
4914 pf_patch_32(pd, &th->th_ack, htonl(ack));
4915 *copyback = 1;
4916 } else {
4917 ack = ntohl(th->th_ack);
4918 }
4919
4920 end = seq + pd->p_len;
4921 if (th->th_flags & TH_SYN) {
4922 end++;
4923 if (dst->wscale & PF_WSCALE_FLAG) {
4924 src->wscale = pf_get_wscale(pd);
4925 if (src->wscale & PF_WSCALE_FLAG) {
4926 /* Remove scale factor from initial
4927 * window */
4928 sws = src->wscale & PF_WSCALE_MASK;
4929 win = ((u_int32_t)win + (1 << sws) - 1)
4930 >> sws;
4931 dws = dst->wscale & PF_WSCALE_MASK;
4932 } else {
4933 /* fixup other window */
4934 dst->max_win = MIN(TCP_MAXWIN,
4935 (u_int32_t)dst->max_win <<
4936 (dst->wscale & PF_WSCALE_MASK));
4937 /* in case of a retrans SYN|ACK */
4938 dst->wscale = 0;
4939 }
4940 }
4941 }
4942 data_end = end;
4943 if (th->th_flags & TH_FIN)
4944 end++;
4945
4946 src->seqlo = seq;
4947 if (src->state < TCPS_SYN_SENT)
4948 pf_set_protostate(*stp, psrc, TCPS_SYN_SENT);
4949
4950 /*
4951 * May need to slide the window (seqhi may have been set by
4952 * the crappy stack check or if we picked up the connection
4953 * after establishment)
4954 */
4955 if (src->seqhi == 1 ||
4956 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4957 src->seqhi = end + MAX(1, dst->max_win << dws);
4958 if (win > src->max_win)
4959 src->max_win = win;
4960
4961 } else {
4962 ack = ntohl(th->th_ack) - dst->seqdiff;
4963 if (src->seqdiff) {
4964 /* Modulate sequence numbers */
4965 pf_patch_32(pd, &th->th_seq, htonl(seq + src->seqdiff));
4966 pf_patch_32(pd, &th->th_ack, htonl(ack));
4967 *copyback = 1;
4968 }
4969 end = seq + pd->p_len;
4970 if (th->th_flags & TH_SYN)
4971 end++;
4972 data_end = end;
4973 if (th->th_flags & TH_FIN)
4974 end++;
4975 }
4976
4977 if ((th->th_flags & TH_ACK) == 0) {
4978 /* Let it pass through the ack skew check */
4979 ack = dst->seqlo;
4980 } else if ((ack == 0 &&
4981 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4982 /* broken tcp stacks do not set ack */
4983 (dst->state < TCPS_SYN_SENT)) {
4984 /*
4985 * Many stacks (ours included) will set the ACK number in an
4986 * FIN|ACK if the SYN times out -- no sequence to ACK.
4987 */
4988 ack = dst->seqlo;
4989 }
4990
4991 if (seq == end) {
4992 /* Ease sequencing restrictions on no data packets */
4993 seq = src->seqlo;
4994 data_end = end = seq;
4995 }
4996
4997 ackskew = dst->seqlo - ack;
4998
4999
5000 /*
5001 * Need to demodulate the sequence numbers in any TCP SACK options
5002 * (Selective ACK). We could optionally validate the SACK values
5003 * against the current ACK window, either forwards or backwards, but
5004 * I'm not confident that SACK has been implemented properly
5005 * everywhere. It wouldn't surprise me if several stacks accidentally
5006 * SACK too far backwards of previously ACKed data. There really aren't
5007 * any security implications of bad SACKing unless the target stack
5008 * doesn't validate the option length correctly. Someone trying to
5009 * spoof into a TCP connection won't bother blindly sending SACK
5010 * options anyway.
5011 */
5012 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
5013 if (pf_modulate_sack(pd, dst))
5014 *copyback = 1;
5015 }
5016
5017
5018#define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
5019 if (SEQ_GEQ(src->seqhi, data_end) &&
5020 /* Last octet inside other's window space */
5021 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
5022 /* Retrans: not more than one window back */
5023 (ackskew >= -MAXACKWINDOW) &&
5024 /* Acking not more than one reassembled fragment backwards */
5025 (ackskew <= (MAXACKWINDOW << sws)) &&
5026 /* Acking not more than one window forward */
5027 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
5028 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo))) {
5029 /* Require an exact/+1 sequence match on resets when possible */
5030
5031 if (dst->scrub || src->scrub) {
5032 if (pf_normalize_tcp_stateful(pd, reason, *stp, src,
5033 dst, copyback))
5034 return (PF_DROP);
5035 }
5036
5037 /* update max window */
5038 if (src->max_win < win)
5039 src->max_win = win;
5040 /* synchronize sequencing */
5041 if (SEQ_GT(end, src->seqlo))
5042 src->seqlo = end;
5043 /* slide the window of what the other end can send */
5044 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
5045 dst->seqhi = ack + MAX((win << sws), 1);
5046
5047 /* update states */
5048 if (th->th_flags & TH_SYN)
5049 if (src->state < TCPS_SYN_SENT)
5050 pf_set_protostate(*stp, psrc, TCPS_SYN_SENT);
5051 if (th->th_flags & TH_FIN)
5052 if (src->state < TCPS_CLOSING)
5053 pf_set_protostate(*stp, psrc, TCPS_CLOSING);
5054 if (th->th_flags & TH_ACK) {
5055 if (dst->state == TCPS_SYN_SENT) {
5056 pf_set_protostate(*stp, pdst,
5057 TCPS_ESTABLISHED);
5058 if (src->state == TCPS_ESTABLISHED &&
5059 !SLIST_EMPTY(&(*stp)->src_nodes) &&
5060 pf_src_connlimit(stp)) {
5061 REASON_SET(reason, PFRES_SRCLIMIT);
5062 return (PF_DROP);
5063 }
5064 } else if (dst->state == TCPS_CLOSING)
5065 pf_set_protostate(*stp, pdst,
5066 TCPS_FIN_WAIT_2);
5067 }
5068 if (th->th_flags & TH_RST)
5069 pf_set_protostate(*stp, PF_PEER_BOTH, TCPS_TIME_WAIT);
5070
5071 /* update expire time */
5072 (*stp)->expire = getuptime();
5073 if (src->state >= TCPS_FIN_WAIT_2 &&
5074 dst->state >= TCPS_FIN_WAIT_2)
5075 pf_update_state_timeout(*stp, PFTM_TCP_CLOSED);
5076 else if (src->state >= TCPS_CLOSING &&
5077 dst->state >= TCPS_CLOSING)
5078 pf_update_state_timeout(*stp, PFTM_TCP_FIN_WAIT);
5079 else if (src->state < TCPS_ESTABLISHED ||
5080 dst->state < TCPS_ESTABLISHED)
5081 pf_update_state_timeout(*stp, PFTM_TCP_OPENING);
5082 else if (src->state >= TCPS_CLOSING ||
5083 dst->state >= TCPS_CLOSING)
5084 pf_update_state_timeout(*stp, PFTM_TCP_CLOSING);
5085 else
5086 pf_update_state_timeout(*stp, PFTM_TCP_ESTABLISHED);
5087
5088 /* Fall through to PASS packet */
5089 } else if ((dst->state < TCPS_SYN_SENT ||
5090 dst->state >= TCPS_FIN_WAIT_2 ||
5091 src->state >= TCPS_FIN_WAIT_2) &&
5092 SEQ_GEQ(src->seqhi + MAXACKWINDOW, data_end) &&
5093 /* Within a window forward of the originating packet */
5094 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
5095 /* Within a window backward of the originating packet */
5096
5097 /*
5098 * This currently handles three situations:
5099 * 1) Stupid stacks will shotgun SYNs before their peer
5100 * replies.
5101 * 2) When PF catches an already established stream (the
5102 * firewall rebooted, the state table was flushed, routes
5103 * changed...)
5104 * 3) Packets get funky immediately after the connection
5105 * closes (this should catch Solaris spurious ACK|FINs
5106 * that web servers like to spew after a close)
5107 *
5108 * This must be a little more careful than the above code
5109 * since packet floods will also be caught here. We don't
5110 * update the TTL here to mitigate the damage of a packet
5111 * flood and so the same code can handle awkward establishment
5112 * and a loosened connection close.
5113 * In the establishment case, a correct peer response will
5114 * validate the connection, go through the normal state code
5115 * and keep updating the state TTL.
5116 */
5117
5118 if (pf_status.debug >= LOG_NOTICE) {
5119 log(LOG_NOTICE, "pf: loose state match: ");
5120 pf_print_state(*stp);
5121 pf_print_flags(th->th_flags);
5122 addlog(" seq=%u (%u) ack=%u len=%u ackskew=%d "
5123 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
5124 pd->p_len, ackskew, (*stp)->packets[0],
5125 (*stp)->packets[1],
5126 pd->dir == PF_IN ? "in" : "out",
5127 pd->dir == (*stp)->direction ? "fwd" : "rev");
5128 }
5129
5130 if (dst->scrub || src->scrub) {
5131 if (pf_normalize_tcp_stateful(pd, reason, *stp, src,
5132 dst, copyback))
5133 return (PF_DROP);
5134 }
5135
5136 /* update max window */
5137 if (src->max_win < win)
5138 src->max_win = win;
5139 /* synchronize sequencing */
5140 if (SEQ_GT(end, src->seqlo))
5141 src->seqlo = end;
5142 /* slide the window of what the other end can send */
5143 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
5144 dst->seqhi = ack + MAX((win << sws), 1);
5145
5146 /*
5147 * Cannot set dst->seqhi here since this could be a shotgunned
5148 * SYN and not an already established connection.
5149 */
5150 if (th->th_flags & TH_FIN)
5151 if (src->state < TCPS_CLOSING)
5152 pf_set_protostate(*stp, psrc, TCPS_CLOSING);
5153 if (th->th_flags & TH_RST)
5154 pf_set_protostate(*stp, PF_PEER_BOTH,