| File: | net/pf.c |
| Warning: | line 4534, column 15: 6th function call argument is an uninitialized value |
| 1 | /* $OpenBSD: pf.c,v 1.1193 2024/01/10 16:44:30 bluhm Exp $ */ | |||
| 2 | ||||
| 3 | /* | |||
| 4 | * Copyright (c) 2001 Daniel Hartmeier | |||
| 5 | * Copyright (c) 2002 - 2013 Henning Brauer <henning@openbsd.org> | |||
| 6 | * All rights reserved. | |||
| 7 | * | |||
| 8 | * Redistribution and use in source and binary forms, with or without | |||
| 9 | * modification, are permitted provided that the following conditions | |||
| 10 | * are met: | |||
| 11 | * | |||
| 12 | * - Redistributions of source code must retain the above copyright | |||
| 13 | * notice, this list of conditions and the following disclaimer. | |||
| 14 | * - Redistributions in binary form must reproduce the above | |||
| 15 | * copyright notice, this list of conditions and the following | |||
| 16 | * disclaimer in the documentation and/or other materials provided | |||
| 17 | * with the distribution. | |||
| 18 | * | |||
| 19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| 20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| 21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |||
| 22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | |||
| 23 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | |||
| 24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | |||
| 25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |||
| 26 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | |||
| 27 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |||
| 28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | |||
| 29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |||
| 30 | * POSSIBILITY OF SUCH DAMAGE. | |||
| 31 | * | |||
| 32 | * Effort sponsored in part by the Defense Advanced Research Projects | |||
| 33 | * Agency (DARPA) and Air Force Research Laboratory, Air Force | |||
| 34 | * Materiel Command, USAF, under agreement number F30602-01-2-0537. | |||
| 35 | * | |||
| 36 | */ | |||
| 37 | ||||
| 38 | #include "bpfilter.h" | |||
| 39 | #include "carp.h" | |||
| 40 | #include "pflog.h" | |||
| 41 | #include "pfsync.h" | |||
| 42 | #include "pflow.h" | |||
| 43 | ||||
| 44 | #include <sys/param.h> | |||
| 45 | #include <sys/systm.h> | |||
| 46 | #include <sys/mbuf.h> | |||
| 47 | #include <sys/filio.h> | |||
| 48 | #include <sys/socket.h> | |||
| 49 | #include <sys/socketvar.h> | |||
| 50 | #include <sys/kernel.h> | |||
| 51 | #include <sys/time.h> | |||
| 52 | #include <sys/pool.h> | |||
| 53 | #include <sys/proc.h> | |||
| 54 | #include <sys/rwlock.h> | |||
| 55 | #include <sys/syslog.h> | |||
| 56 | ||||
| 57 | #include <crypto/sha2.h> | |||
| 58 | ||||
| 59 | #include <net/if.h> | |||
| 60 | #include <net/if_var.h> | |||
| 61 | #include <net/if_types.h> | |||
| 62 | #include <net/route.h> | |||
| 63 | #include <net/toeplitz.h> | |||
| 64 | ||||
| 65 | #include <netinet/in.h> | |||
| 66 | #include <netinet/in_var.h> | |||
| 67 | #include <netinet/ip.h> | |||
| 68 | #include <netinet/in_pcb.h> | |||
| 69 | #include <netinet/ip_var.h> | |||
| 70 | #include <netinet/ip_icmp.h> | |||
| 71 | #include <netinet/icmp_var.h> | |||
| 72 | #include <netinet/tcp.h> | |||
| 73 | #include <netinet/tcp_seq.h> | |||
| 74 | #include <netinet/tcp_timer.h> | |||
| 75 | #include <netinet/tcp_var.h> | |||
| 76 | #include <netinet/tcp_fsm.h> | |||
| 77 | #include <netinet/udp.h> | |||
| 78 | #include <netinet/udp_var.h> | |||
| 79 | #include <netinet/ip_divert.h> | |||
| 80 | ||||
| 81 | #ifdef INET6 | |||
| 82 | #include <netinet6/in6_var.h> | |||
| 83 | #include <netinet/ip6.h> | |||
| 84 | #include <netinet6/ip6_var.h> | |||
| 85 | #include <netinet/icmp6.h> | |||
| 86 | #include <netinet6/nd6.h> | |||
| 87 | #include <netinet6/ip6_divert.h> | |||
| 88 | #endif /* INET6 */ | |||
| 89 | ||||
| 90 | #include <net/pfvar.h> | |||
| 91 | #include <net/pfvar_priv.h> | |||
| 92 | ||||
| 93 | #if NPFLOG > 0 | |||
| 94 | #include <net/if_pflog.h> | |||
| 95 | #endif /* NPFLOG > 0 */ | |||
| 96 | ||||
| 97 | #if NPFLOW > 0 | |||
| 98 | #include <net/if_pflow.h> | |||
| 99 | #endif /* NPFLOW > 0 */ | |||
| 100 | ||||
| 101 | #if NPFSYNC > 0 | |||
| 102 | #include <net/if_pfsync.h> | |||
| 103 | #endif /* NPFSYNC > 0 */ | |||
| 104 | ||||
| 105 | /* | |||
| 106 | * Global variables | |||
| 107 | */ | |||
| 108 | struct pf_state_tree pf_statetbl; | |||
| 109 | struct pf_queuehead pf_queues[2]; | |||
| 110 | struct pf_queuehead *pf_queues_active; | |||
| 111 | struct pf_queuehead *pf_queues_inactive; | |||
| 112 | ||||
| 113 | struct pf_status pf_status; | |||
| 114 | ||||
| 115 | struct mutex pf_inp_mtx = MUTEX_INITIALIZER(IPL_SOFTNET); | |||
| 116 | ||||
| 117 | int pf_hdr_limit = 20; /* arbitrary limit, tune in ddb */ | |||
| 118 | ||||
| 119 | SHA2_CTX pf_tcp_secret_ctx; | |||
| 120 | u_char pf_tcp_secret[16]; | |||
| 121 | int pf_tcp_secret_init; | |||
| 122 | int pf_tcp_iss_off; | |||
| 123 | ||||
| 124 | enum pf_test_status { | |||
| 125 | PF_TEST_FAIL = -1, | |||
| 126 | PF_TEST_OK, | |||
| 127 | PF_TEST_QUICK | |||
| 128 | }; | |||
| 129 | ||||
| 130 | struct pf_test_ctx { | |||
| 131 | struct pf_pdesc *pd; | |||
| 132 | struct pf_rule_actions act; | |||
| 133 | u_int8_t icmpcode; | |||
| 134 | u_int8_t icmptype; | |||
| 135 | int icmp_dir; | |||
| 136 | int state_icmp; | |||
| 137 | int tag; | |||
| 138 | u_short reason; | |||
| 139 | struct pf_rule_item *ri; | |||
| 140 | struct pf_src_node *sns[PF_SN_MAX]; | |||
| 141 | struct pf_rule_slist rules; | |||
| 142 | struct pf_rule *nr; | |||
| 143 | struct pf_rule **rm; | |||
| 144 | struct pf_rule *a; | |||
| 145 | struct pf_rule **am; | |||
| 146 | struct pf_ruleset **rsm; | |||
| 147 | struct pf_ruleset *arsm; | |||
| 148 | struct pf_ruleset *aruleset; | |||
| 149 | struct tcphdr *th; | |||
| 150 | }; | |||
| 151 | ||||
| 152 | struct pool pf_src_tree_pl, pf_rule_pl, pf_queue_pl; | |||
| 153 | struct pool pf_state_pl, pf_state_key_pl, pf_state_item_pl; | |||
| 154 | struct pool pf_rule_item_pl, pf_sn_item_pl, pf_pktdelay_pl; | |||
| 155 | ||||
| 156 | void pf_add_threshold(struct pf_threshold *); | |||
| 157 | int pf_check_threshold(struct pf_threshold *); | |||
| 158 | int pf_check_tcp_cksum(struct mbuf *, int, int, | |||
| 159 | sa_family_t); | |||
| 160 | __inline void pf_cksum_fixup(u_int16_t *, u_int16_t, u_int16_t, | |||
| 161 | u_int8_t); | |||
| 162 | void pf_cksum_fixup_a(u_int16_t *, const struct pf_addr *, | |||
| 163 | const struct pf_addr *, sa_family_t, u_int8_t); | |||
| 164 | int pf_modulate_sack(struct pf_pdesc *, | |||
| 165 | struct pf_state_peer *); | |||
| 166 | int pf_icmp_mapping(struct pf_pdesc *, u_int8_t, int *, | |||
| 167 | u_int16_t *, u_int16_t *); | |||
| 168 | int pf_change_icmp_af(struct mbuf *, int, | |||
| 169 | struct pf_pdesc *, struct pf_pdesc *, | |||
| 170 | struct pf_addr *, struct pf_addr *, sa_family_t, | |||
| 171 | sa_family_t); | |||
| 172 | int pf_translate_a(struct pf_pdesc *, struct pf_addr *, | |||
| 173 | struct pf_addr *); | |||
| 174 | void pf_translate_icmp(struct pf_pdesc *, struct pf_addr *, | |||
| 175 | u_int16_t *, struct pf_addr *, struct pf_addr *, | |||
| 176 | u_int16_t); | |||
| 177 | int pf_translate_icmp_af(struct pf_pdesc*, int, void *); | |||
| 178 | void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t, int, | |||
| 179 | sa_family_t, struct pf_rule *, u_int); | |||
| 180 | void pf_detach_state(struct pf_state *); | |||
| 181 | struct pf_state_key *pf_state_key_attach(struct pf_state_key *, | |||
| 182 | struct pf_state *, int); | |||
| 183 | void pf_state_key_detach(struct pf_state *, int); | |||
| 184 | u_int32_t pf_tcp_iss(struct pf_pdesc *); | |||
| 185 | void pf_rule_to_actions(struct pf_rule *, | |||
| 186 | struct pf_rule_actions *); | |||
| 187 | int pf_test_rule(struct pf_pdesc *, struct pf_rule **, | |||
| 188 | struct pf_state **, struct pf_rule **, | |||
| 189 | struct pf_ruleset **, u_short *); | |||
| 190 | static __inline int pf_create_state(struct pf_pdesc *, struct pf_rule *, | |||
| 191 | struct pf_rule *, struct pf_rule *, | |||
| 192 | struct pf_state_key **, struct pf_state_key **, | |||
| 193 | int *, struct pf_state **, int, | |||
| 194 | struct pf_rule_slist *, struct pf_rule_actions *, | |||
| 195 | struct pf_src_node **); | |||
| 196 | static __inline int pf_state_key_addr_setup(struct pf_pdesc *, void *, | |||
| 197 | int, struct pf_addr *, int, struct pf_addr *, | |||
| 198 | int, int); | |||
| 199 | int pf_state_key_setup(struct pf_pdesc *, struct | |||
| 200 | pf_state_key **, struct pf_state_key **, int); | |||
| 201 | int pf_tcp_track_full(struct pf_pdesc *, | |||
| 202 | struct pf_state **, u_short *, int *, int); | |||
| 203 | int pf_tcp_track_sloppy(struct pf_pdesc *, | |||
| 204 | struct pf_state **, u_short *); | |||
| 205 | static __inline int pf_synproxy(struct pf_pdesc *, struct pf_state **, | |||
| 206 | u_short *); | |||
| 207 | int pf_test_state(struct pf_pdesc *, struct pf_state **, | |||
| 208 | u_short *); | |||
| 209 | int pf_icmp_state_lookup(struct pf_pdesc *, | |||
| 210 | struct pf_state_key_cmp *, struct pf_state **, | |||
| 211 | u_int16_t, u_int16_t, int, int *, int, int); | |||
| 212 | int pf_test_state_icmp(struct pf_pdesc *, | |||
| 213 | struct pf_state **, u_short *); | |||
| 214 | u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t, int, | |||
| 215 | u_int16_t); | |||
| 216 | static __inline int pf_set_rt_ifp(struct pf_state *, struct pf_addr *, | |||
| 217 | sa_family_t, struct pf_src_node **); | |||
| 218 | struct pf_divert *pf_get_divert(struct mbuf *); | |||
| 219 | int pf_walk_option(struct pf_pdesc *, struct ip *, | |||
| 220 | int, int, u_short *); | |||
| 221 | int pf_walk_header(struct pf_pdesc *, struct ip *, | |||
| 222 | u_short *); | |||
| 223 | int pf_walk_option6(struct pf_pdesc *, struct ip6_hdr *, | |||
| 224 | int, int, u_short *); | |||
| 225 | int pf_walk_header6(struct pf_pdesc *, struct ip6_hdr *, | |||
| 226 | u_short *); | |||
| 227 | void pf_print_state_parts(struct pf_state *, | |||
| 228 | struct pf_state_key *, struct pf_state_key *); | |||
| 229 | int pf_addr_wrap_neq(struct pf_addr_wrap *, | |||
| 230 | struct pf_addr_wrap *); | |||
| 231 | int pf_compare_state_keys(struct pf_state_key *, | |||
| 232 | struct pf_state_key *, struct pfi_kif *, u_int); | |||
| 233 | u_int16_t pf_pkt_hash(sa_family_t, uint8_t, | |||
| 234 | const struct pf_addr *, const struct pf_addr *, | |||
| 235 | uint16_t, uint16_t); | |||
| 236 | int pf_find_state(struct pf_pdesc *, | |||
| 237 | struct pf_state_key_cmp *, struct pf_state **); | |||
| 238 | int pf_src_connlimit(struct pf_state **); | |||
| 239 | int pf_match_rcvif(struct mbuf *, struct pf_rule *); | |||
| 240 | int pf_step_into_anchor(struct pf_test_ctx *, | |||
| 241 | struct pf_rule *); | |||
| 242 | int pf_match_rule(struct pf_test_ctx *, | |||
| 243 | struct pf_ruleset *); | |||
| 244 | void pf_counters_inc(int, struct pf_pdesc *, | |||
| 245 | struct pf_state *, struct pf_rule *, | |||
| 246 | struct pf_rule *); | |||
| 247 | ||||
| 248 | int pf_state_insert(struct pfi_kif *, | |||
| 249 | struct pf_state_key **, struct pf_state_key **, | |||
| 250 | struct pf_state *); | |||
| 251 | ||||
| 252 | int pf_state_key_isvalid(struct pf_state_key *); | |||
| 253 | struct pf_state_key *pf_state_key_ref(struct pf_state_key *); | |||
| 254 | void pf_state_key_unref(struct pf_state_key *); | |||
| 255 | void pf_state_key_link_reverse(struct pf_state_key *, | |||
| 256 | struct pf_state_key *); | |||
| 257 | void pf_state_key_unlink_reverse(struct pf_state_key *); | |||
| 258 | void pf_state_key_link_inpcb(struct pf_state_key *, | |||
| 259 | struct inpcb *); | |||
| 260 | void pf_state_key_unlink_inpcb(struct pf_state_key *); | |||
| 261 | void pf_pktenqueue_delayed(void *); | |||
| 262 | int32_t pf_state_expires(const struct pf_state *, uint8_t); | |||
| 263 | ||||
| 264 | #if NPFLOG > 0 | |||
| 265 | void pf_log_matches(struct pf_pdesc *, struct pf_rule *, | |||
| 266 | struct pf_rule *, struct pf_ruleset *, | |||
| 267 | struct pf_rule_slist *); | |||
| 268 | #endif /* NPFLOG > 0 */ | |||
| 269 | ||||
| 270 | extern struct pool pfr_ktable_pl; | |||
| 271 | extern struct pool pfr_kentry_pl; | |||
| 272 | ||||
| 273 | struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = { | |||
| 274 | { &pf_state_pl, PFSTATE_HIWAT, PFSTATE_HIWAT }, | |||
| 275 | { &pf_src_tree_pl, PFSNODE_HIWAT, PFSNODE_HIWAT }, | |||
| 276 | { &pf_frent_pl, PFFRAG_FRENT_HIWAT, PFFRAG_FRENT_HIWAT }, | |||
| 277 | { &pfr_ktable_pl, PFR_KTABLE_HIWAT, PFR_KTABLE_HIWAT }, | |||
| 278 | { &pfr_kentry_pl, PFR_KENTRY_HIWAT, PFR_KENTRY_HIWAT }, | |||
| 279 | { &pf_pktdelay_pl, PF_PKTDELAY_MAXPKTS, PF_PKTDELAY_MAXPKTS }, | |||
| 280 | { &pf_anchor_pl, PF_ANCHOR_HIWAT, PF_ANCHOR_HIWAT } | |||
| 281 | }; | |||
| 282 | ||||
| 283 | #define BOUND_IFACE(r, k) \ | |||
| 284 | ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all | |||
| 285 | ||||
| 286 | #define STATE_INC_COUNTERS(s) \ | |||
| 287 | do { \ | |||
| 288 | struct pf_rule_item *mrm; \ | |||
| 289 | s->rule.ptr->states_cur++; \ | |||
| 290 | s->rule.ptr->states_tot++; \ | |||
| 291 | if (s->anchor.ptr != NULL) { \ | |||
| 292 | s->anchor.ptr->states_cur++; \ | |||
| 293 | s->anchor.ptr->states_tot++; \ | |||
| 294 | } \ | |||
| 295 | SLIST_FOREACH(mrm, &s->match_rules, entry) \ | |||
| 296 | mrm->r->states_cur++; \ | |||
| 297 | } while (0) | |||
| 298 | ||||
| 299 | static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *); | |||
| 300 | static inline int pf_state_compare_key(const struct pf_state_key *, | |||
| 301 | const struct pf_state_key *); | |||
| 302 | static inline int pf_state_compare_id(const struct pf_state *, | |||
| 303 | const struct pf_state *); | |||
| 304 | #ifdef INET6 | |||
| 305 | static __inline void pf_cksum_uncover(u_int16_t *, u_int16_t, u_int8_t); | |||
| 306 | static __inline void pf_cksum_cover(u_int16_t *, u_int16_t, u_int8_t); | |||
| 307 | #endif /* INET6 */ | |||
| 308 | static __inline void pf_set_protostate(struct pf_state *, int, u_int8_t); | |||
| 309 | ||||
| 310 | struct pf_src_tree tree_src_tracking; | |||
| 311 | ||||
| 312 | struct pf_state_tree_id tree_id; | |||
| 313 | struct pf_state_list pf_state_list = PF_STATE_LIST_INITIALIZER(pf_state_list); | |||
| 314 | ||||
| 315 | RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare); | |||
| 316 | RBT_GENERATE(pf_state_tree, pf_state_key, sk_entry, pf_state_compare_key); | |||
| 317 | RBT_GENERATE(pf_state_tree_id, pf_state, entry_id, pf_state_compare_id); | |||
| 318 | ||||
| 319 | int | |||
| 320 | pf_addr_compare(const struct pf_addr *a, const struct pf_addr *b, | |||
| 321 | sa_family_t af) | |||
| 322 | { | |||
| 323 | switch (af) { | |||
| 324 | case AF_INET: | |||
| 325 | if (a->addr32[0] > b->addr32[0]) | |||
| 326 | return (1); | |||
| 327 | if (a->addr32[0] < b->addr32[0]) | |||
| 328 | return (-1); | |||
| 329 | break; | |||
| 330 | #ifdef INET6 | |||
| 331 | case AF_INET6: | |||
| 332 | if (a->addr32[3] > b->addr32[3]) | |||
| 333 | return (1); | |||
| 334 | if (a->addr32[3] < b->addr32[3]) | |||
| 335 | return (-1); | |||
| 336 | if (a->addr32[2] > b->addr32[2]) | |||
| 337 | return (1); | |||
| 338 | if (a->addr32[2] < b->addr32[2]) | |||
| 339 | return (-1); | |||
| 340 | if (a->addr32[1] > b->addr32[1]) | |||
| 341 | return (1); | |||
| 342 | if (a->addr32[1] < b->addr32[1]) | |||
| 343 | return (-1); | |||
| 344 | if (a->addr32[0] > b->addr32[0]) | |||
| 345 | return (1); | |||
| 346 | if (a->addr32[0] < b->addr32[0]) | |||
| 347 | return (-1); | |||
| 348 | break; | |||
| 349 | #endif /* INET6 */ | |||
| 350 | } | |||
| 351 | return (0); | |||
| 352 | } | |||
| 353 | ||||
| 354 | static __inline int | |||
| 355 | pf_src_compare(struct pf_src_node *a, struct pf_src_node *b) | |||
| 356 | { | |||
| 357 | int diff; | |||
| 358 | ||||
| 359 | if (a->rule.ptr > b->rule.ptr) | |||
| 360 | return (1); | |||
| 361 | if (a->rule.ptr < b->rule.ptr) | |||
| 362 | return (-1); | |||
| 363 | if ((diff = a->type - b->type) != 0) | |||
| 364 | return (diff); | |||
| 365 | if ((diff = a->af - b->af) != 0) | |||
| 366 | return (diff); | |||
| 367 | if ((diff = pf_addr_compare(&a->addr, &b->addr, a->af)) != 0) | |||
| 368 | return (diff); | |||
| 369 | return (0); | |||
| 370 | } | |||
| 371 | ||||
| 372 | static __inline void | |||
| 373 | pf_set_protostate(struct pf_state *st, int which, u_int8_t newstate) | |||
| 374 | { | |||
| 375 | if (which == PF_PEER_DST || which == PF_PEER_BOTH) | |||
| 376 | st->dst.state = newstate; | |||
| 377 | if (which == PF_PEER_DST) | |||
| 378 | return; | |||
| 379 | ||||
| 380 | if (st->src.state == newstate) | |||
| 381 | return; | |||
| 382 | if (st->creatorid == pf_status.hostid && | |||
| 383 | st->key[PF_SK_STACK]->proto == IPPROTO_TCP && | |||
| 384 | !(TCPS_HAVEESTABLISHED(st->src.state) || | |||
| 385 | st->src.state == TCPS_CLOSED) && | |||
| 386 | (TCPS_HAVEESTABLISHED(newstate) || newstate == TCPS_CLOSED)) | |||
| 387 | pf_status.states_halfopen--; | |||
| 388 | ||||
| 389 | st->src.state = newstate; | |||
| 390 | } | |||
| 391 | ||||
| 392 | void | |||
| 393 | pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af) | |||
| 394 | { | |||
| 395 | switch (af) { | |||
| 396 | case AF_INET: | |||
| 397 | dst->addr32[0] = src->addr32[0]; | |||
| 398 | break; | |||
| 399 | #ifdef INET6 | |||
| 400 | case AF_INET6: | |||
| 401 | dst->addr32[0] = src->addr32[0]; | |||
| 402 | dst->addr32[1] = src->addr32[1]; | |||
| 403 | dst->addr32[2] = src->addr32[2]; | |||
| 404 | dst->addr32[3] = src->addr32[3]; | |||
| 405 | break; | |||
| 406 | #endif /* INET6 */ | |||
| 407 | default: | |||
| 408 | unhandled_af(af); | |||
| 409 | } | |||
| 410 | } | |||
| 411 | ||||
| 412 | void | |||
| 413 | pf_init_threshold(struct pf_threshold *threshold, | |||
| 414 | u_int32_t limit, u_int32_t seconds) | |||
| 415 | { | |||
| 416 | threshold->limit = limit * PF_THRESHOLD_MULT; | |||
| 417 | threshold->seconds = seconds; | |||
| 418 | threshold->count = 0; | |||
| 419 | threshold->last = getuptime(); | |||
| 420 | } | |||
| 421 | ||||
| 422 | void | |||
| 423 | pf_add_threshold(struct pf_threshold *threshold) | |||
| 424 | { | |||
| 425 | u_int32_t t = getuptime(), diff = t - threshold->last; | |||
| 426 | ||||
| 427 | if (diff >= threshold->seconds) | |||
| 428 | threshold->count = 0; | |||
| 429 | else | |||
| 430 | threshold->count -= threshold->count * diff / | |||
| 431 | threshold->seconds; | |||
| 432 | threshold->count += PF_THRESHOLD_MULT; | |||
| 433 | threshold->last = t; | |||
| 434 | } | |||
| 435 | ||||
| 436 | int | |||
| 437 | pf_check_threshold(struct pf_threshold *threshold) | |||
| 438 | { | |||
| 439 | return (threshold->count > threshold->limit); | |||
| 440 | } | |||
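/*
 * Editorial sketch (not part of pf.c): how the threshold fixed point above
 * behaves.  pf_init_threshold() stores the limit scaled by
 * PF_THRESHOLD_MULT (1000), pf_add_threshold() adds 1000 per event while
 * linearly decaying the old count over the configured window, and
 * pf_check_threshold() compares the scaled values, so count / 1000 roughly
 * tracks "events seen within the last 'seconds' seconds".  Assuming a
 * hypothetical rule with "max-src-conn-rate 10/5": limit = 10 * 1000; an
 * 11th connection inside the 5 second window pushes count to 11000 > 10000
 * and pf_check_threshold() reports the limit as exceeded.
 */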
| 441 | ||||
| 442 | void | |||
| 443 | pf_state_list_insert(struct pf_state_list *pfs, struct pf_state *st) | |||
| 444 | { | |||
| 445 | /* | |||
| 446 | * we can always put states on the end of the list. | |||
| 447 | * | |||
| 448 | * things reading the list should take a read lock, then | |||
| 449 | * the mutex, get the head and tail pointers, release the | |||
| 450 | * mutex, and then they can iterate between the head and tail. | |||
| 451 | */ | |||
| 452 | ||||
| 453 | pf_state_ref(st); /* get a ref for the list */ | |||
| 454 | ||||
| 455 | mtx_enter(&pfs->pfs_mtx); | |||
| 456 | TAILQ_INSERT_TAIL(&pfs->pfs_list, st, entry_list); | |||
| 457 | mtx_leave(&pfs->pfs_mtx); | |||
| 458 | } | |||
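/*
 * Editorial sketch (not part of pf.c): the reader protocol described in the
 * comment above, assuming a hypothetical iterator that only needs a stable
 * head/tail snapshot of the list (the TAILQ head type name is assumed here):
 *
 *	rw_enter_read(&pfs->pfs_rwl);
 *	mtx_enter(&pfs->pfs_mtx);
 *	head = TAILQ_FIRST(&pfs->pfs_list);
 *	tail = TAILQ_LAST(&pfs->pfs_list, pf_state_queue);
 *	mtx_leave(&pfs->pfs_mtx);
 *	... iterate from head up to and including tail ...
 *	rw_exit_read(&pfs->pfs_rwl);
 */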
| 459 | ||||
| 460 | void | |||
| 461 | pf_state_list_remove(struct pf_state_list *pfs, struct pf_state *st) | |||
| 462 | { | |||
| 463 | /* states can only be removed when the write lock is held */ | |||
| 464 | rw_assert_wrlock(&pfs->pfs_rwl); | |||
| 465 | ||||
| 466 | mtx_enter(&pfs->pfs_mtx); | |||
| 467 | TAILQ_REMOVE(&pfs->pfs_list, st, entry_list); | |||
| 468 | mtx_leave(&pfs->pfs_mtx); | |||
| 469 | ||||
| 470 | pf_state_unref(st); /* list no longer references the state */ | |||
| 471 | } | |||
| 472 | ||||
| 473 | void | |||
| 474 | pf_update_state_timeout(struct pf_state *st, int to) | |||
| 475 | { | |||
| 476 | mtx_enter(&st->mtx); | |||
| 477 | if (st->timeout != PFTM_UNLINKED) | |||
| 478 | st->timeout = to; | |||
| 479 | mtx_leave(&st->mtx); | |||
| 480 | } | |||
| 481 | ||||
| 482 | int | |||
| 483 | pf_src_connlimit(struct pf_state **stp) | |||
| 484 | { | |||
| 485 | int bad = 0; | |||
| 486 | struct pf_src_node *sn; | |||
| 487 | ||||
| 488 | if ((sn = pf_get_src_node((*stp), PF_SN_NONE)) == NULL) | |||
| 489 | return (0); | |||
| 490 | ||||
| 491 | sn->conn++; | |||
| 492 | (*stp)->src.tcp_est = 1; | |||
| 493 | pf_add_threshold(&sn->conn_rate); | |||
| 494 | ||||
| 495 | if ((*stp)->rule.ptr->max_src_conn && | |||
| 496 | (*stp)->rule.ptr->max_src_conn < sn->conn) { | |||
| 497 | pf_status.lcounters[LCNT_SRCCONN]++; | |||
| 498 | bad++; | |||
| 499 | } | |||
| 500 | ||||
| 501 | if ((*stp)->rule.ptr->max_src_conn_rate.limit && | |||
| 502 | pf_check_threshold(&sn->conn_rate)) { | |||
| 503 | pf_status.lcounters[LCNT_SRCCONNRATE]++; | |||
| 504 | bad++; | |||
| 505 | } | |||
| 506 | ||||
| 507 | if (!bad) | |||
| 508 | return (0); | |||
| 509 | ||||
| 510 | if ((*stp)->rule.ptr->overload_tbl) { | |||
| 511 | struct pfr_addr p; | |||
| 512 | u_int32_t killed = 0; | |||
| 513 | ||||
| 514 | pf_status.lcounters[LCNT_OVERLOAD_TABLE]++; | |||
| 515 | if (pf_status.debug >= LOG_NOTICE) { | |||
| 516 | log(LOG_NOTICE, | |||
| 517 | "pf: pf_src_connlimit: blocking address "); | |||
| 518 | pf_print_host(&sn->addr, 0, | |||
| 519 | (*stp)->key[PF_SK_WIRE]->af); | |||
| 520 | } | |||
| 521 | ||||
| 522 | memset(&p, 0, sizeof(p)); | |||
| 523 | p.pfra_af = (*stp)->key[PF_SK_WIRE]->af; | |||
| 524 | switch ((*stp)->key[PF_SK_WIRE]->af) { | |||
| 525 | case AF_INET: | |||
| 526 | p.pfra_net = 32; | |||
| 527 | p.pfra_ip4addr = sn->addr.v4; | |||
| 528 | break; | |||
| 529 | #ifdef INET6 | |||
| 530 | case AF_INET6: | |||
| 531 | p.pfra_net = 128; | |||
| 532 | p.pfra_ip6addr = sn->addr.v6; | |||
| 533 | break; | |||
| 534 | #endif /* INET6 */ | |||
| 535 | } | |||
| 536 | ||||
| 537 | pfr_insert_kentry((*stp)->rule.ptr->overload_tbl, | |||
| 538 | &p, gettime()); | |||
| 539 | ||||
| 540 | /* kill existing states if that's required. */ | |||
| 541 | if ((*stp)->rule.ptr->flush) { | |||
| 542 | struct pf_state_key *sk; | |||
| 543 | struct pf_state *st; | |||
| 544 | ||||
| 545 | pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++; | |||
| 546 | RBT_FOREACH(st, pf_state_tree_id, &tree_id) { | |||
| 547 | sk = st->key[PF_SK_WIRE]; | |||
| 548 | /* | |||
| 549 | * Kill states from this source. (Only those | |||
| 550 | * from the same rule if PF_FLUSH_GLOBAL is not | |||
| 551 | * set) | |||
| 552 | */ | |||
| 553 | if (sk->af == | |||
| 554 | (*stp)->key[PF_SK_WIRE]->af && | |||
| 555 | (((*stp)->direction == PF_OUT && | |||
| 556 | PF_AEQ(&sn->addr, &sk->addr[1], sk->af)) || | |||
| 557 | ((*stp)->direction == PF_IN && | |||
| 558 | PF_AEQ(&sn->addr, &sk->addr[0], sk->af))) && | |||
| 559 | ((*stp)->rule.ptr->flush & | |||
| 560 | PF_FLUSH_GLOBAL || | |||
| 561 | (*stp)->rule.ptr == st->rule.ptr)) { | |||
| 562 | pf_update_state_timeout(st, PFTM_PURGE); | |||
| 563 | pf_set_protostate(st, PF_PEER_BOTH, | |||
| 564 | TCPS_CLOSED); | |||
| 565 | killed++; | |||
| 566 | } | |||
| 567 | } | |||
| 568 | if (pf_status.debug >= LOG_NOTICE) | |||
| 569 | addlog(", %u states killed", killed); | |||
| 570 | } | |||
| 571 | if (pf_status.debug >= LOG_NOTICE) | |||
| 572 | addlog("\n"); | |||
| 573 | } | |||
| 574 | ||||
| 575 | /* kill this state */ | |||
| 576 | pf_update_state_timeout(*stp, PFTM_PURGE); | |||
| 577 | pf_set_protostate(*stp, PF_PEER_BOTH, TCPS_CLOSED); | |||
| 578 | return (1); | |||
| 579 | } | |||
| 580 | ||||
| 581 | int | |||
| 582 | pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule, | |||
| 583 | enum pf_sn_types type, sa_family_t af, struct pf_addr *src, | |||
| 584 | struct pf_addr *raddr, struct pfi_kif *kif) | |||
| 585 | { | |||
| 586 | struct pf_src_node k; | |||
| 587 | ||||
| 588 | if (*sn == NULL) { | |||
| 589 | k.af = af; | |||
| 590 | k.type = type; | |||
| 591 | pf_addrcpy(&k.addr, src, af); | |||
| 592 | k.rule.ptr = rule; | |||
| 593 | pf_status.scounters[SCNT_SRC_NODE_SEARCH]++; | |||
| 594 | *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k); | |||
| 595 | } | |||
| 596 | if (*sn == NULL) { | |||
| 597 | if (!rule->max_src_nodes || | |||
| 598 | rule->src_nodes < rule->max_src_nodes) | |||
| 599 | (*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT | PR_ZERO); | |||
| 600 | else | |||
| 601 | pf_status.lcounters[LCNT_SRCNODES]++; | |||
| 602 | if ((*sn) == NULL) | |||
| 603 | return (-1); | |||
| 604 | ||||
| 605 | pf_init_threshold(&(*sn)->conn_rate, | |||
| 606 | rule->max_src_conn_rate.limit, | |||
| 607 | rule->max_src_conn_rate.seconds); | |||
| 608 | ||||
| 609 | (*sn)->type = type; | |||
| 610 | (*sn)->af = af; | |||
| 611 | (*sn)->rule.ptr = rule; | |||
| 612 | pf_addrcpy(&(*sn)->addr, src, af); | |||
| 613 | if (raddr) | |||
| 614 | pf_addrcpy(&(*sn)->raddr, raddr, af); | |||
| 615 | if (RB_INSERT(pf_src_tree, | |||
| 616 | &tree_src_tracking, *sn) != NULL) { | |||
| 617 | if (pf_status.debug >= LOG_NOTICE) { | |||
| 618 | log(LOG_NOTICE, | |||
| 619 | "pf: src_tree insert failed: "); | |||
| 620 | pf_print_host(&(*sn)->addr, 0, af); | |||
| 621 | addlog("\n"); | |||
| 622 | } | |||
| 623 | pool_put(&pf_src_tree_pl, *sn); | |||
| 624 | return (-1); | |||
| 625 | } | |||
| 626 | (*sn)->creation = getuptime(); | |||
| 627 | (*sn)->rule.ptr->src_nodes++; | |||
| 628 | if (kif != NULL) { | |||
| 629 | (*sn)->kif = kif; | |||
| 630 | pfi_kif_ref(kif, PFI_KIF_REF_SRCNODE); | |||
| 631 | } | |||
| 632 | pf_status.scounters[SCNT_SRC_NODE_INSERT]++; | |||
| 633 | pf_status.src_nodes++; | |||
| 634 | } else { | |||
| 635 | if (rule->max_src_states && | |||
| 636 | (*sn)->states >= rule->max_src_states) { | |||
| 637 | pf_status.lcounters[LCNT_SRCSTATES]++; | |||
| 638 | return (-1); | |||
| 639 | } | |||
| 640 | } | |||
| 641 | return (0); | |||
| 642 | } | |||
| 643 | ||||
| 644 | void | |||
| 645 | pf_remove_src_node(struct pf_src_node *sn) | |||
| 646 | { | |||
| 647 | if (sn->states > 0 || sn->expire > getuptime()) | |||
| 648 | return; | |||
| 649 | ||||
| 650 | sn->rule.ptr->src_nodes--; | |||
| 651 | if (sn->rule.ptr->states_cur == 0 && | |||
| 652 | sn->rule.ptr->src_nodes == 0) | |||
| 653 | pf_rm_rule(NULL, sn->rule.ptr); | |||
| 654 | RB_REMOVE(pf_src_tree, &tree_src_tracking, sn); | |||
| 655 | pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; | |||
| 656 | pf_status.src_nodes--; | |||
| 657 | pfi_kif_unref(sn->kif, PFI_KIF_REF_SRCNODE); | |||
| 658 | pool_put(&pf_src_tree_pl, sn); | |||
| 659 | } | |||
| 660 | ||||
| 661 | struct pf_src_node * | |||
| 662 | pf_get_src_node(struct pf_state *st, enum pf_sn_types type) | |||
| 663 | { | |||
| 664 | struct pf_sn_item *sni; | |||
| 665 | ||||
| 666 | SLIST_FOREACH(sni, &st->src_nodes, next) | |||
| 667 | if (sni->sn->type == type) | |||
| 668 | return (sni->sn); | |||
| 669 | return (NULL); | |||
| 670 | } | |||
| 671 | ||||
| 672 | void | |||
| 673 | pf_state_rm_src_node(struct pf_state *st, struct pf_src_node *sn) | |||
| 674 | { | |||
| 675 | struct pf_sn_item *sni, *snin, *snip = NULL; | |||
| 676 | ||||
| 677 | for (sni = SLIST_FIRST(&st->src_nodes); sni; sni = snin) { | |||
| 678 | snin = SLIST_NEXT(sni, next); | |||
| 679 | if (sni->sn == sn) { | |||
| 680 | if (snip) | |||
| 681 | SLIST_REMOVE_AFTER(snip, next); | |||
| 682 | else | |||
| 683 | SLIST_REMOVE_HEAD(&st->src_nodes, next); | |||
| 684 | pool_put(&pf_sn_item_pl, sni); | |||
| 685 | sni = NULL; | |||
| 686 | sn->states--; | |||
| 687 | } | |||
| 688 | if (sni != NULL) | |||
| 689 | snip = sni; | |||
| 690 | } | |||
| 691 | } | |||
| 692 | ||||
| 693 | /* state table stuff */ | |||
| 694 | ||||
| 695 | static inline int | |||
| 696 | pf_state_compare_key(const struct pf_state_key *a, | |||
| 697 | const struct pf_state_key *b) | |||
| 698 | { | |||
| 699 | int diff; | |||
| 700 | ||||
| 701 | if ((diff = a->hash - b->hash) != 0) | |||
| 702 | return (diff); | |||
| 703 | if ((diff = a->proto - b->proto) != 0) | |||
| 704 | return (diff); | |||
| 705 | if ((diff = a->af - b->af) != 0) | |||
| 706 | return (diff); | |||
| 707 | if ((diff = pf_addr_compare(&a->addr[0], &b->addr[0], a->af)) != 0) | |||
| 708 | return (diff); | |||
| 709 | if ((diff = pf_addr_compare(&a->addr[1], &b->addr[1], a->af)) != 0) | |||
| 710 | return (diff); | |||
| 711 | if ((diff = a->port[0] - b->port[0]) != 0) | |||
| 712 | return (diff); | |||
| 713 | if ((diff = a->port[1] - b->port[1]) != 0) | |||
| 714 | return (diff); | |||
| 715 | if ((diff = a->rdomain - b->rdomain) != 0) | |||
| 716 | return (diff); | |||
| 717 | return (0); | |||
| 718 | } | |||
| 719 | ||||
| 720 | static inline int | |||
| 721 | pf_state_compare_id(const struct pf_state *a, const struct pf_state *b) | |||
| 722 | { | |||
| 723 | if (a->id > b->id) | |||
| 724 | return (1); | |||
| 725 | if (a->id < b->id) | |||
| 726 | return (-1); | |||
| 727 | if (a->creatorid > b->creatorid) | |||
| 728 | return (1); | |||
| 729 | if (a->creatorid < b->creatorid) | |||
| 730 | return (-1); | |||
| 731 | ||||
| 732 | return (0); | |||
| 733 | } | |||
| 734 | ||||
| 735 | /* | |||
| 736 | * on failure, pf_state_key_attach() releases the pf_state_key | |||
| 737 | * reference and returns NULL. | |||
| 738 | */ | |||
| 739 | struct pf_state_key * | |||
| 740 | pf_state_key_attach(struct pf_state_key *sk, struct pf_state *st, int idx) | |||
| 741 | { | |||
| 742 | struct pf_state_item *si; | |||
| 743 | struct pf_state_key *cur; | |||
| 744 | struct pf_state *oldst = NULL; | |||
| 745 | ||||
| 746 | PF_ASSERT_LOCKED(); | |||
| 747 | ||||
| 748 | KASSERT(st->key[idx] == NULL); | |||
| 749 | sk->sk_removed = 0; | |||
| 750 | cur = RBT_INSERT(pf_state_tree, &pf_statetbl, sk); | |||
| 751 | if (cur != NULL) { | |||
| 752 | sk->sk_removed = 1; | |||
| 753 | /* key exists. check for same kif, if none, add to key */ | |||
| 754 | TAILQ_FOREACH(si, &cur->sk_states, si_entry) { | |||
| 755 | struct pf_state *sist = si->si_st; | |||
| 756 | if (sist->kif == st->kif && | |||
| 757 | ((sist->key[PF_SK_WIRE]->af == sk->af && | |||
| 758 | sist->direction == st->direction) || | |||
| 759 | (sist->key[PF_SK_WIRE]->af != | |||
| 760 | sist->key[PF_SK_STACK]->af && | |||
| 761 | sk->af == sist->key[PF_SK_STACK]->af && | |||
| 762 | sist->direction != st->direction))) { | |||
| 763 | int reuse = 0; | |||
| 764 | ||||
| 765 | if (sk->proto == IPPROTO_TCP && | |||
| 766 | sist->src.state >= TCPS_FIN_WAIT_2 && | |||
| 767 | sist->dst.state >= TCPS_FIN_WAIT_2) | |||
| 768 | reuse = 1; | |||
| 769 | if (pf_status.debug >= LOG_NOTICE) { | |||
| 770 | log(LOG_NOTICE, | |||
| 771 | "pf: %s key attach %s on %s: ", | |||
| 772 | (idx == PF_SK_WIRE) ? | |||
| 773 | "wire" : "stack", | |||
| 774 | reuse ? "reuse" : "failed", | |||
| 775 | st->kif->pfik_name); | |||
| 776 | pf_print_state_parts(st, | |||
| 777 | (idx == PF_SK_WIRE) ? sk : NULL, | |||
| 778 | (idx == PF_SK_STACK) ? sk : NULL); | |||
| 779 | addlog(", existing: "); | |||
| 780 | pf_print_state_parts(sist, | |||
| 781 | (idx == PF_SK_WIRE) ? sk : NULL, | |||
| 782 | (idx == PF_SK_STACK) ? sk : NULL); | |||
| 783 | addlog("\n"); | |||
| 784 | } | |||
| 785 | if (reuse) { | |||
| 786 | pf_set_protostate(sist, PF_PEER_BOTH, | |||
| 787 | TCPS_CLOSED); | |||
| 788 | /* remove late or sks can go away */ | |||
| 789 | oldst = sist; | |||
| 790 | } else { | |||
| 791 | pf_state_key_unref(sk); | |||
| 792 | return (NULL); /* collision! */ | |||
| 793 | } | |||
| 794 | } | |||
| 795 | } | |||
| 796 | ||||
| 797 | /* reuse the existing state key */ | |||
| 798 | pf_state_key_unref(sk); | |||
| 799 | sk = cur; | |||
| 800 | } | |||
| 801 | ||||
| 802 | if ((si = pool_get(&pf_state_item_pl, PR_NOWAIT)) == NULL) { | |||
| 803 | if (TAILQ_EMPTY(&sk->sk_states)) { | |||
| 804 | KASSERT(cur == NULL); | |||
| 805 | RBT_REMOVE(pf_state_tree, &pf_statetbl, sk); | |||
| 806 | sk->sk_removed = 1; | |||
| 807 | pf_state_key_unref(sk); | |||
| 808 | } | |||
| 809 | ||||
| 810 | return (NULL); | |||
| 811 | } | |||
| 812 | ||||
| 813 | st->key[idx] = pf_state_key_ref(sk); /* give a ref to state */ | |||
| 814 | si->si_st = pf_state_ref(st); | |||
| 815 | ||||
| 816 | /* list is sorted, if-bound states before floating */ | |||
| 817 | if (st->kif == pfi_all) | |||
| 818 | TAILQ_INSERT_TAIL(&sk->sk_states, si, si_entry); | |||
| 819 | else | |||
| 820 | TAILQ_INSERT_HEAD(&sk->sk_states, si, si_entry); | |||
| 821 | ||||
| 822 | if (oldst) | |||
| 823 | pf_remove_state(oldst); | |||
| 824 | ||||
| 825 | /* caller owns the pf_state ref, which owns a pf_state_key ref now */ | |||
| 826 | return (sk); | |||
| 827 | } | |||
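/*
 * Editorial note (not part of pf.c): summarizing the ownership rules in
 * pf_state_key_attach() above.  On success the state's key slot takes its
 * own pf_state_key reference and the key's state list takes a reference on
 * the state; on a genuine collision the passed-in key reference is dropped
 * and NULL is returned, while the TCP "reuse" path first links the new
 * state and only then removes the old conflicting one.
 */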
| 828 | ||||
| 829 | void | |||
| 830 | pf_detach_state(struct pf_state *st) | |||
| 831 | { | |||
| 832 | KASSERT(st->key[PF_SK_WIRE] != NULL); | |||
| 833 | pf_state_key_detach(st, PF_SK_WIRE); | |||
| 834 | ||||
| 835 | KASSERT(st->key[PF_SK_STACK] != NULL); | |||
| 836 | if (st->key[PF_SK_STACK] != st->key[PF_SK_WIRE]) | |||
| 837 | pf_state_key_detach(st, PF_SK_STACK); | |||
| 838 | } | |||
| 839 | ||||
| 840 | void | |||
| 841 | pf_state_key_detach(struct pf_state *st, int idx) | |||
| 842 | { | |||
| 843 | struct pf_state_item *si; | |||
| 844 | struct pf_state_key *sk; | |||
| 845 | ||||
| 846 | PF_ASSERT_LOCKED(); | |||
| 847 | ||||
| 848 | sk = st->key[idx]; | |||
| 849 | if (sk == NULL) | |||
| 850 | return; | |||
| 851 | ||||
| 852 | TAILQ_FOREACH(si, &sk->sk_states, si_entry) { | |||
| 853 | if (si->si_st == st) | |||
| 854 | break; | |||
| 855 | } | |||
| 856 | if (si == NULL) | |||
| 857 | return; | |||
| 858 | ||||
| 859 | TAILQ_REMOVE(&sk->sk_states, si, si_entry); | |||
| 860 | pool_put(&pf_state_item_pl, si); | |||
| 861 | ||||
| 862 | if (TAILQ_EMPTY(&sk->sk_states)) { | |||
| 863 | RBT_REMOVE(pf_state_tree, &pf_statetbl, sk); | |||
| 864 | sk->sk_removed = 1; | |||
| 865 | pf_state_key_unlink_reverse(sk); | |||
| 866 | pf_state_key_unlink_inpcb(sk); | |||
| 867 | pf_state_key_unref(sk); | |||
| 868 | } | |||
| 869 | ||||
| 870 | pf_state_unref(st); | |||
| 871 | } | |||
| 872 | ||||
| 873 | struct pf_state_key * | |||
| 874 | pf_alloc_state_key(int pool_flags) | |||
| 875 | { | |||
| 876 | struct pf_state_key *sk; | |||
| 877 | ||||
| 878 | if ((sk = pool_get(&pf_state_key_pl, pool_flags)) == NULL) | |||
| 879 | return (NULL); | |||
| 880 | ||||
| 881 | PF_REF_INIT(sk->sk_refcnt); | |||
| 882 | TAILQ_INIT(&sk->sk_states); | |||
| 883 | sk->sk_removed = 1; | |||
| 884 | ||||
| 885 | return (sk); | |||
| 886 | } | |||
| 887 | ||||
| 888 | static __inline int | |||
| 889 | pf_state_key_addr_setup(struct pf_pdesc *pd, void *arg, int sidx, | |||
| 890 | struct pf_addr *saddr, int didx, struct pf_addr *daddr, int af, int multi) | |||
| 891 | { | |||
| 892 | struct pf_state_key_cmp *key = arg; | |||
| 893 | #ifdef INET6 | |||
| 894 | struct pf_addr *target; | |||
| 895 | ||||
| 896 | if (af == AF_INET || pd->proto != IPPROTO_ICMPV6) | |||
| 897 | goto copy; | |||
| 898 | ||||
| 899 | switch (pd->hdr.icmp6.icmp6_type) { | |||
| 900 | case ND_NEIGHBOR_SOLICIT: | |||
| 901 | if (multi) | |||
| 902 | return (-1); | |||
| 903 | target = (struct pf_addr *)&pd->hdr.nd_ns.nd_ns_target; | |||
| 904 | daddr = target; | |||
| 905 | break; | |||
| 906 | case ND_NEIGHBOR_ADVERT: | |||
| 907 | if (multi) | |||
| 908 | return (-1); | |||
| 909 | target = (struct pf_addr *)&pd->hdr.nd_ns.nd_ns_target; | |||
| 910 | saddr = target; | |||
| 911 | if (IN6_IS_ADDR_MULTICAST(&pd->dst->v6)) { | |||
| 912 | key->addr[didx].addr32[0] = 0; | |||
| 913 | key->addr[didx].addr32[1] = 0; | |||
| 914 | key->addr[didx].addr32[2] = 0; | |||
| 915 | key->addr[didx].addr32[3] = 0; | |||
| 916 | daddr = NULL; /* overwritten */ | |||
| 917 | } | |||
| 918 | break; | |||
| 919 | default: | |||
| 920 | if (multi) { | |||
| 921 | key->addr[sidx].addr32[0] = __IPV6_ADDR_INT32_MLL; | |||
| 922 | key->addr[sidx].addr32[1] = 0; | |||
| 923 | key->addr[sidx].addr32[2] = 0; | |||
| 924 | key->addr[sidx].addr32[3] = __IPV6_ADDR_INT32_ONE; | |||
| 925 | saddr = NULL; /* overwritten */ | |||
| 926 | } | |||
| 927 | } | |||
| 928 | copy: | |||
| 929 | #endif /* INET6 */ | |||
| 930 | if (saddr) | |||
| 931 | pf_addrcpy(&key->addr[sidx], saddr, af); | |||
| 932 | if (daddr) | |||
| 933 | pf_addrcpy(&key->addr[didx], daddr, af); | |||
| 934 | ||||
| 935 | return (0); | |||
| 936 | } | |||
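| | /* | |||
| | * Note on the ICMPv6 cases above: for a neighbor solicitation the key's | |||
| | * destination becomes the ND target, and for a neighbor advertisement | |||
| | * the target replaces the source (a multicast destination is zeroed), | |||
| | * presumably so that a solicitation and the matching advertisement | |||
| | * resolve to the same state key even when the solicitation was sent to | |||
| | * a solicited-node multicast address. | |||
| | */ | |||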
| 937 | ||||
| 938 | int | |||
| 939 | pf_state_key_setup(struct pf_pdesc *pd, struct pf_state_key **skw, | |||
| 940 | struct pf_state_key **sks, int rtableid) | |||
| 941 | { | |||
| 942 | /* if returning error we MUST pool_put state keys ourselves */ | |||
| 943 | struct pf_state_key *sk1, *sk2; | |||
| 944 | u_int wrdom = pd->rdomain; | |||
| 945 | int afto = pd->af != pd->naf; | |||
| 946 | ||||
| 947 | if ((sk1 = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL) | |||
| 948 | return (ENOMEM); | |||
| 949 | ||||
| 950 | pf_state_key_addr_setup(pd, sk1, pd->sidx, pd->src, pd->didx, pd->dst, | |||
| 951 | pd->af, 0); | |||
| 952 | sk1->port[pd->sidx] = pd->osport; | |||
| 953 | sk1->port[pd->didx] = pd->odport; | |||
| 954 | sk1->proto = pd->proto; | |||
| 955 | sk1->af = pd->af; | |||
| 956 | sk1->rdomain = pd->rdomain; | |||
| 957 | sk1->hash = pf_pkt_hash(sk1->af, sk1->proto, | |||
| 958 | &sk1->addr[0], &sk1->addr[1], sk1->port[0], sk1->port[1]); | |||
| 959 | if (rtableid >= 0) | |||
| 960 | wrdom = rtable_l2(rtableid); | |||
| 961 | ||||
| 962 | if (PF_ANEQ(&pd->nsaddr, pd->src, pd->af) || | |||
| 963 | PF_ANEQ(&pd->ndaddr, pd->dst, pd->af) || | |||
| 964 | pd->nsport != pd->osport || pd->ndport != pd->odport || | |||
| 965 | wrdom != pd->rdomain || afto) { /* NAT/NAT64 */ | |||
| 966 | if ((sk2 = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL) { | |||
| 967 | pf_state_key_unref(sk1); | |||
| 968 | return (ENOMEM); | |||
| 969 | } | |||
| 970 | pf_state_key_addr_setup(pd, sk2, afto ? pd->didx : pd->sidx, | |||
| 971 | &pd->nsaddr, afto ? pd->sidx : pd->didx, &pd->ndaddr, | |||
| 972 | pd->naf, 0); | |||
| 973 | sk2->port[afto ? pd->didx : pd->sidx] = pd->nsport; | |||
| 974 | sk2->port[afto ? pd->sidx : pd->didx] = pd->ndport; | |||
| 975 | if (afto) { | |||
| 976 | switch (pd->proto) { | |||
| 977 | case IPPROTO_ICMP: | |||
| 978 | sk2->proto = IPPROTO_ICMPV6; | |||
| 979 | break; | |||
| 980 | case IPPROTO_ICMPV6: | |||
| 981 | sk2->proto = IPPROTO_ICMP; | |||
| 982 | break; | |||
| 983 | default: | |||
| 984 | sk2->proto = pd->proto; | |||
| 985 | } | |||
| 986 | } else | |||
| 987 | sk2->proto = pd->proto; | |||
| 988 | sk2->af = pd->naf; | |||
| 989 | sk2->rdomain = wrdom; | |||
| 990 | sk2->hash = pf_pkt_hash(sk2->af, sk2->proto, | |||
| 991 | &sk2->addr[0], &sk2->addr[1], sk2->port[0], sk2->port[1]); | |||
| 992 | } else | |||
| 993 | sk2 = pf_state_key_ref(sk1); | |||
| 994 | ||||
| 995 | if (pd->dir == PF_IN) { | |||
| 996 | *skw = sk1; | |||
| 997 | *sks = sk2; | |||
| 998 | } else { | |||
| 999 | *sks = sk1; | |||
| 1000 | *skw = sk2; | |||
| 1001 | } | |||
| 1002 | ||||
| 1003 | if (pf_status.debug >= LOG_DEBUG) { | |||
| 1004 | log(LOG_DEBUG, "pf: key setup: "); | |||
| 1005 | pf_print_state_parts(NULL, *skw, *sks); | |||
| 1006 | addlog("\n"); | |||
| 1007 | } | |||
| 1008 | ||||
| 1009 | return (0); | |||
| 1010 | } | |||
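| | /* | |||
| | * Illustration (hypothetical example): for an outbound connection hit by | |||
| | * a nat-to rule, sk1 above carries the original addresses/ports and sk2 | |||
| | * the translated ones, so with pd->dir == PF_OUT the caller receives | |||
| | * *sks = sk1 (stack side) and *skw = sk2 (wire side).  Without any | |||
| | * translation, sk2 is simply a second reference to sk1 and both pointers | |||
| | * name the same key. | |||
| | */ | |||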
| 1011 | ||||
| 1012 | /* | |||
| 1013 | * pf_state_insert() does the following: | |||
| 1014 | * - links the pf_state up with pf_state_key(s). | |||
| 1015 | * - inserts the pf_state_keys into pf_state_tree. | |||
| 1016 | * - inserts the pf_state into the pf_state_tree_id. | |||
| 1017 | * - tells pfsync about the state. | |||
| 1018 | * | |||
| 1019 | * pf_state_insert() owns the references to the pf_state_key structs | |||
| 1020 | * it is given. on failure to insert, these references are released. | |||
| 1021 | * on success, the caller owns a pf_state reference that allows it | |||
| 1022 | * to access the state keys. | |||
| 1023 | */ | |||
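| | /* | |||
| | * Rough caller sketch (illustrative only, error handling abridged; the | |||
| | * real sequence lives in the state-creation path): | |||
| | * | |||
| | *	if (pf_state_key_setup(pd, &skw, &sks, rtableid) != 0) | |||
| | *		goto fail;		(nothing to clean up yet) | |||
| | *	... set up the pf_state "st" ... | |||
| | *	if (pf_state_insert(kif, &skw, &sks, st) != 0) | |||
| | *		goto fail;		(key references already released) | |||
| | *	... success: st->key[PF_SK_WIRE] / st->key[PF_SK_STACK] are valid | |||
| | */ | |||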
| 1024 | ||||
| 1025 | int | |||
| 1026 | pf_state_insert(struct pfi_kif *kif, struct pf_state_key **skwp, | |||
| 1027 | struct pf_state_key **sksp, struct pf_state *st) | |||
| 1028 | { | |||
| 1029 | struct pf_state_key *skw = *skwp; | |||
| 1030 | struct pf_state_key *sks = *sksp; | |||
| 1031 | int same = (skw == sks); | |||
| 1032 | ||||
| 1033 | PF_ASSERT_LOCKED(); | |||
| 1034 | ||||
| 1035 | st->kif = kif; | |||
| 1036 | PF_STATE_ENTER_WRITE(); | |||
| 1037 | ||||
| 1038 | skw = pf_state_key_attach(skw, st, PF_SK_WIRE); | |||
| 1039 | if (skw == NULL) { | |||
| 1040 | pf_state_key_unref(sks); | |||
| 1041 | PF_STATE_EXIT_WRITE(); | |||
| 1042 | return (-1); | |||
| 1043 | } | |||
| 1044 | ||||
| 1045 | if (same) { | |||
| 1046 | /* pf_state_key_attach might have swapped skw */ | |||
| 1047 | pf_state_key_unref(sks); | |||
| 1048 | st->key[PF_SK_STACK] = sks = pf_state_key_ref(skw); | |||
| 1049 | } else if (pf_state_key_attach(sks, st, PF_SK_STACK) == NULL) { | |||
| 1050 | pf_state_key_detach(st, PF_SK_WIRE); | |||
| 1051 | PF_STATE_EXIT_WRITE(); | |||
| 1052 | return (-1); | |||
| 1053 | } | |||
| 1054 | ||||
| 1055 | if (st->id == 0 && st->creatorid == 0) { | |||
| 1056 | st->id = htobe64(pf_status.stateid++); | |||
| 1057 | st->creatorid = pf_status.hostid; | |||
| 1058 | } | |||
| 1059 | if (RBT_INSERT(pf_state_tree_id, &tree_id, st) != NULL) { | |||
| 1060 | if (pf_status.debug >= LOG_NOTICE) { | |||
| 1061 | log(LOG_NOTICE, "pf: state insert failed: " | |||
| 1062 | "id: %016llx creatorid: %08x", | |||
| 1063 | betoh64(st->id), ntohl(st->creatorid)); | |||
| 1064 | addlog("\n"); | |||
| 1065 | } | |||
| 1066 | pf_detach_state(st); | |||
| 1067 | PF_STATE_EXIT_WRITE(); | |||
| 1068 | return (-1); | |||
| 1069 | } | |||
| 1070 | pf_state_list_insert(&pf_state_list, st); | |||
| 1071 | pf_status.fcounters[FCNT_STATE_INSERT]++; | |||
| 1072 | pf_status.states++; | |||
| 1073 | pfi_kif_ref(kif, PFI_KIF_REF_STATE); | |||
| 1074 | PF_STATE_EXIT_WRITE(); | |||
| 1075 | ||||
| 1076 | #if NPFSYNC > 0 | |||
| 1077 | pfsync_insert_state(st); | |||
| 1078 | #endif /* NPFSYNC > 0 */ | |||
| 1079 | ||||
| 1080 | *skwp = skw; | |||
| 1081 | *sksp = sks; | |||
| 1082 | ||||
| 1083 | return (0); | |||
| 1084 | } | |||
| 1085 | ||||
| 1086 | struct pf_state * | |||
| 1087 | pf_find_state_byid(struct pf_state_cmp *key) | |||
| 1088 | { | |||
| 1089 | pf_status.fcounters[FCNT_STATE_SEARCH]++; | |||
| 1090 | ||||
| 1091 | return (RBT_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key)); | |||
| 1092 | } | |||
| 1093 | ||||
| 1094 | int | |||
| 1095 | pf_compare_state_keys(struct pf_state_key *a, struct pf_state_key *b, | |||
| 1096 | struct pfi_kif *kif, u_int dir) | |||
| 1097 | { | |||
| 1098 | /* a (from hdr) and b (new) must be exact opposites of each other */ | |||
| 1099 | if (a->af == b->af && a->proto == b->proto && | |||
| 1100 | PF_AEQ(&a->addr[0], &b->addr[1], a->af) && | |||
| 1101 | PF_AEQ(&a->addr[1], &b->addr[0], a->af) && | |||
| 1102 | a->port[0] == b->port[1] && | |||
| 1103 | a->port[1] == b->port[0] && a->rdomain == b->rdomain) | |||
| 1104 | return (0); | |||
| 1105 | else { | |||
| 1106 | /* mismatch. must not happen. */ | |||
| 1107 | if (pf_status.debug >= LOG_ERR) { | |||
| 1108 | log(LOG_ERR, | |||
| 1109 | "pf: state key linking mismatch! dir=%s, " | |||
| 1110 | "if=%s, stored af=%u, a0: ", | |||
| 1111 | dir == PF_OUT ? "OUT" : "IN", | |||
| 1112 | kif->pfik_name, a->af); | |||
| 1113 | pf_print_host(&a->addr[0], a->port[0], a->af); | |||
| 1114 | addlog(", a1: "); | |||
| 1115 | pf_print_host(&a->addr[1], a->port[1], a->af); | |||
| 1116 | addlog(", proto=%u", a->proto); | |||
| 1117 | addlog(", found af=%u, a0: ", b->af); | |||
| 1118 | pf_print_host(&b->addr[0], b->port[0], b->af); | |||
| 1119 | addlog(", a1: "); | |||
| 1120 | pf_print_host(&b->addr[1], b->port[1], b->af); | |||
| 1121 | addlog(", proto=%u", b->proto); | |||
| 1122 | addlog("\n"); | |||
| 1123 | } | |||
| 1124 | return (-1); | |||
| 1125 | } | |||
| 1126 | } | |||
| 1127 | ||||
| 1128 | int | |||
| 1129 | pf_find_state(struct pf_pdesc *pd, struct pf_state_key_cmp *key, | |||
| 1130 | struct pf_state **stp) | |||
| 1131 | { | |||
| 1132 | struct pf_state_key *sk, *pkt_sk; | |||
| 1133 | struct pf_state_item *si; | |||
| 1134 | struct pf_state *st = NULL; | |||
| 1135 | ||||
| 1136 | pf_status.fcounters[FCNT_STATE_SEARCH]++; | |||
| 1137 | if (pf_status.debug >= LOG_DEBUG) { | |||
| 1138 | log(LOG_DEBUG, "pf: key search, %s on %s: ", | |||
| 1139 | pd->dir == PF_OUT ? "out" : "in", pd->kif->pfik_name); | |||
| 1140 | pf_print_state_parts(NULL, (struct pf_state_key *)key, NULL); | |||
| 1141 | addlog("\n"); | |||
| 1142 | } | |||
| 1143 | ||||
| 1144 | pkt_sk = NULL; | |||
| 1145 | sk = NULL; | |||
| 1146 | if (pd->dir == PF_OUT) { | |||
| 1147 | /* first if block deals with outbound forwarded packet */ | |||
| 1148 | pkt_sk = pd->m->m_pkthdr.pf.statekey; | |||
| 1149 | ||||
| 1150 | if (!pf_state_key_isvalid(pkt_sk)) { | |||
| 1151 | pf_mbuf_unlink_state_key(pd->m); | |||
| 1152 | pkt_sk = NULL; | |||
| 1153 | } | |||
| 1154 | ||||
| 1155 | if (pkt_sk && pf_state_key_isvalid(pkt_sk->sk_reverse)) | |||
| 1156 | sk = pkt_sk->sk_reverse; | |||
| 1157 | ||||
| 1158 | if (pkt_sk == NULL) { | |||
| 1159 | struct inpcb *inp = pd->m->m_pkthdr.pf.inp; | |||
| 1160 | ||||
| 1161 | /* here we deal with local outbound packet */ | |||
| 1162 | if (inp != NULL) { | |||
| 1163 | struct pf_state_key *inp_sk; | |||
| 1164 | ||||
| 1165 | mtx_enter(&pf_inp_mtx); | |||
| 1166 | inp_sk = inp->inp_pf_sk; | |||
| 1167 | if (pf_state_key_isvalid(inp_sk)) { | |||
| 1168 | sk = inp_sk; | |||
| 1169 | mtx_leave(&pf_inp_mtx); | |||
| 1170 | } else if (inp_sk != NULL) { | |||
| 1171 | KASSERT(inp_sk->sk_inp == inp); | |||
| 1172 | inp_sk->sk_inp = NULL; | |||
| 1173 | inp->inp_pf_sk = NULL; | |||
| 1174 | mtx_leave(&pf_inp_mtx); | |||
| 1175 | ||||
| 1176 | pf_state_key_unref(inp_sk); | |||
| 1177 | in_pcbunref(inp); | |||
| 1178 | } else | |||
| 1179 | mtx_leave(&pf_inp_mtx); | |||
| 1180 | } | |||
| 1181 | } | |||
| 1182 | } | |||
| 1183 | ||||
| 1184 | if (sk == NULL) { | |||
| 1185 | if ((sk = RBT_FIND(pf_state_tree, &pf_statetbl, | |||
| 1186 | (struct pf_state_key *)key)) == NULL) | |||
| 1187 | return (PF_DROP); | |||
| 1188 | if (pd->dir == PF_OUT && pkt_sk && | |||
| 1189 | pf_compare_state_keys(pkt_sk, sk, pd->kif, pd->dir) == 0) | |||
| 1190 | pf_state_key_link_reverse(sk, pkt_sk); | |||
| 1191 | else if (pd->dir == PF_OUT) | |||
| 1192 | pf_state_key_link_inpcb(sk, pd->m->m_pkthdr.pf.inp); | |||
| 1193 | } | |||
| 1194 | ||||
| 1195 | /* remove firewall data from outbound packet */ | |||
| 1196 | if (pd->dir == PF_OUT) | |||
| 1197 | pf_pkt_addr_changed(pd->m); | |||
| 1198 | ||||
| 1199 | /* list is sorted, if-bound states before floating ones */ | |||
| 1200 | TAILQ_FOREACH(si, &sk->sk_states, si_entry) { | |||
| 1201 | struct pf_state *sist = si->si_st; | |||
| 1202 | if (sist->timeout != PFTM_PURGE && | |||
| 1203 | (sist->kif == pfi_all || sist->kif == pd->kif) && | |||
| 1204 | ((sist->key[PF_SK_WIRE]->af == sist->key[PF_SK_STACK]->af && | |||
| 1205 | sk == (pd->dir == PF_IN ? sist->key[PF_SK_WIRE] : | |||
| 1206 | sist->key[PF_SK_STACK])) || | |||
| 1207 | (sist->key[PF_SK_WIRE]->af != sist->key[PF_SK_STACK]->af | |||
| 1208 | && pd->dir == PF_IN && (sk == sist->key[PF_SK_STACK] || | |||
| 1209 | sk == sist->key[PF_SK_WIRE])))) { | |||
| 1210 | st = sist; | |||
| 1211 | break; | |||
| 1212 | } | |||
| 1213 | } | |||
| 1214 | ||||
| 1215 | if (st == NULL) | |||
| 1216 | return (PF_DROP); | |||
| 1217 | if (ISSET(st->state_flags, PFSTATE_INP_UNLINKED)) | |||
| 1218 | return (PF_DROP); | |||
| 1219 | ||||
| 1220 | if (st->rule.ptr->pktrate.limit && pd->dir == st->direction) { | |||
| 1221 | pf_add_threshold(&st->rule.ptr->pktrate); | |||
| 1222 | if (pf_check_threshold(&st->rule.ptr->pktrate)) | |||
| 1223 | return (PF_DROP); | |||
| 1224 | } | |||
| 1225 | ||||
| 1226 | *stp = st; | |||
| 1227 | ||||
| 1228 | return (PF_MATCH); | |||
| 1229 | } | |||
| 1230 | ||||
| 1231 | struct pf_state * | |||
| 1232 | pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more) | |||
| 1233 | { | |||
| 1234 | struct pf_state_key *sk; | |||
| 1235 | struct pf_state_item *si, *ret = NULL; | |||
| 1236 | ||||
| 1237 | pf_status.fcounters[FCNT_STATE_SEARCH]++; | |||
| 1238 | ||||
| 1239 | sk = RBT_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key); | |||
| 1240 | ||||
| 1241 | if (sk != NULL) { | |||
| 1242 | TAILQ_FOREACH(si, &sk->sk_states, si_entry) { | |||
| 1243 | struct pf_state *sist = si->si_st; | |||
| 1244 | if (dir == PF_INOUT || | |||
| 1245 | (sk == (dir == PF_IN ? sist->key[PF_SK_WIRE] : | |||
| 1246 | sist->key[PF_SK_STACK]))) { | |||
| 1247 | if (more == NULL) | |||
| 1248 | return (sist); | |||
| 1249 | ||||
| 1250 | if (ret) | |||
| 1251 | (*more)++; | |||
| 1252 | else | |||
| 1253 | ret = si; | |||
| 1254 | } | |||
| 1255 | } | |||
| 1256 | } | |||
| 1257 | return (ret ? ret->si_st : NULL); | |||
| 1258 | } | |||
| 1259 | ||||
| 1260 | void | |||
| 1261 | pf_state_peer_hton(const struct pf_state_peer *s, struct pfsync_state_peer *d) | |||
| 1262 | { | |||
| 1263 | d->seqlo = htonl(s->seqlo); | |||
| 1264 | d->seqhi = htonl(s->seqhi); | |||
| 1265 | d->seqdiff = htonl(s->seqdiff); | |||
| 1266 | d->max_win = htons(s->max_win); | |||
| 1267 | d->mss = htons(s->mss); | |||
| 1268 | d->state = s->state; | |||
| 1269 | d->wscale = s->wscale; | |||
| 1270 | if (s->scrub) { | |||
| 1271 | d->scrub.pfss_flags = | |||
| 1272 | htons(s->scrub->pfss_flags & PFSS_TIMESTAMP); | |||
| 1273 | d->scrub.pfss_ttl = (s)->scrub->pfss_ttl; | |||
| 1274 | d->scrub.pfss_ts_mod = htonl((s)->scrub->pfss_ts_mod); | |||
| 1275 | d->scrub.scrub_flag = PFSYNC_SCRUB_FLAG_VALID; | |||
| 1276 | } | |||
| 1277 | } | |||
| 1278 | ||||
| 1279 | void | |||
| 1280 | pf_state_peer_ntoh(const struct pfsync_state_peer *s, struct pf_state_peer *d) | |||
| 1281 | { | |||
| 1282 | d->seqlo = ntohl(s->seqlo); | |||
| 1283 | d->seqhi = ntohl(s->seqhi); | |||
| 1284 | d->seqdiff = ntohl(s->seqdiff); | |||
| 1285 | d->max_win = ntohs(s->max_win); | |||
| 1286 | d->mss = ntohs(s->mss); | |||
| 1287 | d->state = s->state; | |||
| 1288 | d->wscale = s->wscale; | |||
| 1289 | if (s->scrub.scrub_flag == PFSYNC_SCRUB_FLAG_VALID && | |||
| 1290 | d->scrub != NULL) { | |||
| 1291 | d->scrub->pfss_flags = | |||
| 1292 | ntohs(s->scrub.pfss_flags) & PFSS_TIMESTAMP; | |||
| 1293 | d->scrub->pfss_ttl = s->scrub.pfss_ttl; | |||
| 1294 | d->scrub->pfss_ts_mod = ntohl(s->scrub.pfss_ts_mod); | |||
| 1295 | } | |||
| 1296 | } | |||
| 1297 | ||||
| 1298 | void | |||
| 1299 | pf_state_export(struct pfsync_state *sp, struct pf_state *st) | |||
| 1300 | { | |||
| 1301 | int32_t expire; | |||
| 1302 | ||||
| 1303 | memset(sp, 0, sizeof(struct pfsync_state)); | |||
| 1304 | ||||
| 1305 | /* copy from state key */ | |||
| 1306 | sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; | |||
| 1307 | sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; | |||
| 1308 | sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; | |||
| 1309 | sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; | |||
| 1310 | sp->key[PF_SK_WIRE].rdomain = htons(st->key[PF_SK_WIRE]->rdomain); | |||
| 1311 | sp->key[PF_SK_WIRE].af = st->key[PF_SK_WIRE]->af; | |||
| 1312 | sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; | |||
| 1313 | sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; | |||
| 1314 | sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; | |||
| 1315 | sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; | |||
| 1316 | sp->key[PF_SK_STACK].rdomain = htons(st->key[PF_SK_STACK]->rdomain); | |||
| 1317 | sp->key[PF_SK_STACK].af = st->key[PF_SK_STACK]->af; | |||
| 1318 | sp->rtableid[PF_SK_WIRE] = htonl(st->rtableid[PF_SK_WIRE]); | |||
| 1319 | sp->rtableid[PF_SK_STACK] = htonl(st->rtableid[PF_SK_STACK]); | |||
| 1320 | sp->proto = st->key[PF_SK_WIRE]->proto; | |||
| 1321 | sp->af = st->key[PF_SK_WIRE]->af; | |||
| 1322 | ||||
| 1323 | /* copy from state */ | |||
| 1324 | strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); | |||
| 1325 | sp->rt = st->rt; | |||
| 1326 | sp->rt_addr = st->rt_addr; | |||
| 1327 | sp->creation = htonl(getuptime() - st->creation); | |||
| 1328 | expire = pf_state_expires(st, st->timeout); | |||
| 1329 | if (expire <= getuptime()) | |||
| 1330 | sp->expire = htonl(0); | |||
| 1331 | else | |||
| 1332 | sp->expire = htonl(expire - getuptime()); | |||
| 1333 | ||||
| 1334 | sp->direction = st->direction; | |||
| 1335 | #if NPFLOG > 0 | |||
| 1336 | sp->log = st->log; | |||
| 1337 | #endif /* NPFLOG > 0 */ | |||
| 1338 | sp->timeout = st->timeout; | |||
| 1339 | sp->state_flags = htons(st->state_flags); | |||
| 1340 | if (READ_ONCE(st->sync_defer) != NULL) | |||
| 1341 | sp->state_flags |= htons(PFSTATE_ACK); | |||
| 1342 | if (!SLIST_EMPTY(&st->src_nodes)) | |||
| 1343 | sp->sync_flags |= PFSYNC_FLAG_SRCNODE; | |||
| 1344 | ||||
| 1345 | sp->id = st->id; | |||
| 1346 | sp->creatorid = st->creatorid; | |||
| 1347 | pf_state_peer_hton(&st->src, &sp->src); | |||
| 1348 | pf_state_peer_hton(&st->dst, &sp->dst); | |||
| 1349 | ||||
| 1350 | if (st->rule.ptr == NULL) | |||
| 1351 | sp->rule = htonl(-1); | |||
| 1352 | else | |||
| 1353 | sp->rule = htonl(st->rule.ptr->nr); | |||
| 1354 | if (st->anchor.ptr == NULL) | |||
| 1355 | sp->anchor = htonl(-1); | |||
| 1356 | else | |||
| 1357 | sp->anchor = htonl(st->anchor.ptr->nr); | |||
| 1358 | sp->nat_rule = htonl(-1); /* left for compat, nat_rule is gone */ | |||
| 1359 | ||||
| 1360 | pf_state_counter_hton(st->packets[0], sp->packets[0]); | |||
| 1361 | pf_state_counter_hton(st->packets[1], sp->packets[1]); | |||
| 1362 | pf_state_counter_hton(st->bytes[0], sp->bytes[0]); | |||
| 1363 | pf_state_counter_hton(st->bytes[1], sp->bytes[1]); | |||
| 1364 | ||||
| 1365 | sp->max_mss = htons(st->max_mss); | |||
| 1366 | sp->min_ttl = st->min_ttl; | |||
| 1367 | sp->set_tos = st->set_tos; | |||
| 1368 | sp->set_prio[0] = st->set_prio[0]; | |||
| 1369 | sp->set_prio[1] = st->set_prio[1]; | |||
| 1370 | } | |||
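| | /* | |||
| | * Note: the 64-bit packet/byte counters are exported as two 32-bit | |||
| | * network-order halves via pf_state_counter_hton(), and sp->expire | |||
| | * carries the remaining lifetime in seconds as computed by | |||
| | * pf_state_expires(), i.e. possibly already adaptively scaled, which | |||
| | * pf_state_import() compensates for when rebuilding st->expire. | |||
| | */ | |||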
| 1371 | ||||
| 1372 | int | |||
| 1373 | pf_state_alloc_scrub_memory(const struct pfsync_state_peer *s, | |||
| 1374 | struct pf_state_peer *d) | |||
| 1375 | { | |||
| 1376 | if (s->scrub.scrub_flag && d->scrub == NULL) | |||
| 1377 | return (pf_normalize_tcp_alloc(d)); | |||
| 1378 | ||||
| 1379 | return (0); | |||
| 1380 | } | |||
| 1381 | ||||
| 1382 | #if NPFSYNC > 0 | |||
| 1383 | int | |||
| 1384 | pf_state_import(const struct pfsync_state *sp, int flags) | |||
| 1385 | { | |||
| 1386 | struct pf_state *st = NULL; | |||
| 1387 | struct pf_state_key *skw = NULL, *sks = NULL; | |||
| 1388 | struct pf_rule *r = NULL; | |||
| 1389 | struct pfi_kif *kif; | |||
| 1390 | int pool_flags; | |||
| 1391 | int error = ENOMEM; | |||
| 1392 | int n = 0; | |||
| 1393 | ||||
| 1394 | PF_ASSERT_LOCKED(); | |||
| 1395 | ||||
| 1396 | if (sp->creatorid == 0) { | |||
| 1397 | DPFPRINTF(LOG_NOTICE, "%s: invalid creator id: %08x", __func__, | |||
| 1398 | ntohl(sp->creatorid)); | |||
| 1399 | return (EINVAL); | |||
| 1400 | } | |||
| 1401 | ||||
| 1402 | if ((kif = pfi_kif_get(sp->ifname, NULL)) == NULL) { | |||
| 1403 | DPFPRINTF(LOG_NOTICE, "%s: unknown interface: %s", __func__, | |||
| 1404 | sp->ifname); | |||
| 1405 | if (flags & PFSYNC_SI_IOCTL) | |||
| 1406 | return (EINVAL); | |||
| 1407 | return (0); /* skip this state */ | |||
| 1408 | } | |||
| 1409 | ||||
| 1410 | if (sp->af == 0) | |||
| 1411 | return (0); /* skip this state */ | |||
| 1412 | ||||
| 1413 | /* | |||
| 1414 | * If the ruleset checksums match or the state is coming from the ioctl, | |||
| 1415 | * it's safe to associate the state with the rule of that number. | |||
| 1416 | */ | |||
| 1417 | if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && | |||
| 1418 | (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && | |||
| 1419 | ntohl(sp->rule) < pf_main_ruleset.rules.active.rcount) { | |||
| 1420 | TAILQ_FOREACH(r, pf_main_ruleset.rules.active.ptr, entries) | |||
| 1421 | if (ntohl(sp->rule) == n++) | |||
| 1422 | break; | |||
| 1423 | } else | |||
| 1424 | r = &pf_default_rule; | |||
| 1425 | ||||
| 1426 | if ((r->max_states && r->states_cur >= r->max_states)) | |||
| 1427 | goto cleanup; | |||
| 1428 | ||||
| 1429 | if (flags & PFSYNC_SI_IOCTL) | |||
| 1430 | pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO; | |||
| 1431 | else | |||
| 1432 | pool_flags = PR_NOWAIT | PR_LIMITFAIL | PR_ZERO; | |||
| 1433 | ||||
| 1434 | if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL) | |||
| 1435 | goto cleanup; | |||
| 1436 | ||||
| 1437 | if ((skw = pf_alloc_state_key(pool_flags)) == NULL) | |||
| 1438 | goto cleanup; | |||
| 1439 | ||||
| 1440 | if ((sp->key[PF_SK_WIRE].af && | |||
| 1441 | (sp->key[PF_SK_WIRE].af != sp->key[PF_SK_STACK].af)) || | |||
| 1442 | PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0], | |||
| 1443 | &sp->key[PF_SK_STACK].addr[0], sp->af) || | |||
| 1444 | PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1], | |||
| 1445 | &sp->key[PF_SK_STACK].addr[1], sp->af) || | |||
| 1446 | sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] || | |||
| 1447 | sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1] || | |||
| 1448 | sp->key[PF_SK_WIRE].rdomain != sp->key[PF_SK_STACK].rdomain) { | |||
| 1449 | if ((sks = pf_alloc_state_key(pool_flags)) == NULL) | |||
| 1450 | goto cleanup; | |||
| 1451 | } else | |||
| 1452 | sks = pf_state_key_ref(skw); | |||
| 1453 | ||||
| 1454 | /* allocate memory for scrub info */ | |||
| 1455 | if (pf_state_alloc_scrub_memory(&sp->src, &st->src) || | |||
| 1456 | pf_state_alloc_scrub_memory(&sp->dst, &st->dst)) | |||
| 1457 | goto cleanup; | |||
| 1458 | ||||
| 1459 | /* copy to state key(s) */ | |||
| 1460 | skw->addr[0] = sp->key[PF_SK_WIRE].addr[0]; | |||
| 1461 | skw->addr[1] = sp->key[PF_SK_WIRE].addr[1]; | |||
| 1462 | skw->port[0] = sp->key[PF_SK_WIRE].port[0]; | |||
| 1463 | skw->port[1] = sp->key[PF_SK_WIRE].port[1]; | |||
| 1464 | skw->rdomain = ntohs(sp->key[PF_SK_WIRE].rdomain); | |||
| 1465 | skw->proto = sp->proto; | |||
| 1466 | if (!(skw->af = sp->key[PF_SK_WIRE].af)) | |||
| 1467 | skw->af = sp->af; | |||
| 1468 | skw->hash = pf_pkt_hash(skw->af, skw->proto, | |||
| 1469 | &skw->addr[0], &skw->addr[1], skw->port[0], skw->port[1]); | |||
| 1470 | ||||
| 1471 | if (sks != skw) { | |||
| 1472 | sks->addr[0] = sp->key[PF_SK_STACK].addr[0]; | |||
| 1473 | sks->addr[1] = sp->key[PF_SK_STACK].addr[1]; | |||
| 1474 | sks->port[0] = sp->key[PF_SK_STACK].port[0]; | |||
| 1475 | sks->port[1] = sp->key[PF_SK_STACK].port[1]; | |||
| 1476 | sks->rdomain = ntohs(sp->key[PF_SK_STACK].rdomain); | |||
| 1477 | if (!(sks->af = sp->key[PF_SK_STACK].af)) | |||
| 1478 | sks->af = sp->af; | |||
| 1479 | if (sks->af != skw->af) { | |||
| 1480 | switch (sp->proto) { | |||
| 1481 | case IPPROTO_ICMP: | |||
| 1482 | sks->proto = IPPROTO_ICMPV6; | |||
| 1483 | break; | |||
| 1484 | case IPPROTO_ICMPV6: | |||
| 1485 | sks->proto = IPPROTO_ICMP; | |||
| 1486 | break; | |||
| 1487 | default: | |||
| 1488 | sks->proto = sp->proto; | |||
| 1489 | } | |||
| 1490 | } else | |||
| 1491 | sks->proto = sp->proto; | |||
| 1492 | ||||
| 1493 | if (((sks->af != AF_INET) && (sks->af != AF_INET6)) || | |||
| 1494 | ((skw->af != AF_INET) && (skw->af != AF_INET6))) { | |||
| 1495 | error = EINVAL; | |||
| 1496 | goto cleanup; | |||
| 1497 | } | |||
| 1498 | ||||
| 1499 | sks->hash = pf_pkt_hash(sks->af, sks->proto, | |||
| 1500 | &sks->addr[0], &sks->addr[1], sks->port[0], sks->port[1]); | |||
| 1501 | ||||
| 1502 | } else if ((sks->af != AF_INET) && (sks->af != AF_INET6)) { | |||
| 1503 | error = EINVAL; | |||
| 1504 | goto cleanup; | |||
| 1505 | } | |||
| 1506 | st->rtableid[PF_SK_WIRE] = ntohl(sp->rtableid[PF_SK_WIRE]); | |||
| 1507 | st->rtableid[PF_SK_STACK] = ntohl(sp->rtableid[PF_SK_STACK]); | |||
| 1508 | ||||
| 1509 | /* copy to state */ | |||
| 1510 | st->rt_addr = sp->rt_addr; | |||
| 1511 | st->rt = sp->rt; | |||
| 1512 | st->creation = getuptime() - ntohl(sp->creation); | |||
| 1513 | st->expire = getuptime(); | |||
| 1514 | if (ntohl(sp->expire)) { | |||
| 1515 | u_int32_t timeout; | |||
| 1516 | ||||
| 1517 | timeout = r->timeout[sp->timeout]; | |||
| 1518 | if (!timeout) | |||
| 1519 | timeout = pf_default_rule.timeout[sp->timeout]; | |||
| 1520 | ||||
| 1521 | /* sp->expire may have been adaptively scaled by export. */ | |||
| 1522 | st->expire -= timeout - ntohl(sp->expire); | |||
| 1523 | } | |||
| 1524 | ||||
| 1525 | st->direction = sp->direction; | |||
| 1526 | st->log = sp->log; | |||
| 1527 | st->timeout = sp->timeout; | |||
| 1528 | st->state_flags = ntohs(sp->state_flags); | |||
| 1529 | st->max_mss = ntohs(sp->max_mss); | |||
| 1530 | st->min_ttl = sp->min_ttl; | |||
| 1531 | st->set_tos = sp->set_tos; | |||
| 1532 | st->set_prio[0] = sp->set_prio[0]; | |||
| 1533 | st->set_prio[1] = sp->set_prio[1]; | |||
| 1534 | ||||
| 1535 | st->id = sp->id; | |||
| 1536 | st->creatorid = sp->creatorid; | |||
| 1537 | pf_state_peer_ntoh(&sp->src, &st->src); | |||
| 1538 | pf_state_peer_ntoh(&sp->dst, &st->dst); | |||
| 1539 | ||||
| 1540 | st->rule.ptr = r; | |||
| 1541 | st->anchor.ptr = NULL; | |||
| 1542 | ||||
| 1543 | PF_REF_INIT(st->refcnt); | |||
| 1544 | mtx_init(&st->mtx, IPL_NET); | |||
| 1545 | ||||
| 1546 | /* XXX when we have anchors, use STATE_INC_COUNTERS */ | |||
| 1547 | r->states_cur++; | |||
| 1548 | r->states_tot++; | |||
| 1549 | ||||
| 1550 | st->sync_state = PFSYNC_S_NONE; | |||
| 1551 | st->pfsync_time = getuptime(); | |||
| 1552 | #if NPFSYNC > 0 | |||
| 1553 | pfsync_init_state(st, skw, sks, flags); | |||
| 1554 | #endif | |||
| 1555 | ||||
| 1556 | if (pf_state_insert(kif, &skw, &sks, st) != 0) { | |||
| 1557 | /* XXX when we have anchors, use STATE_DEC_COUNTERS */ | |||
| 1558 | r->states_cur--; | |||
| 1559 | error = EEXIST; | |||
| 1560 | goto cleanup_state; | |||
| 1561 | } | |||
| 1562 | ||||
| 1563 | return (0); | |||
| 1564 | ||||
| 1565 | cleanup: | |||
| 1566 | if (skw != NULL) | |||
| 1567 | pf_state_key_unref(skw); | |||
| 1568 | if (sks != NULL) | |||
| 1569 | pf_state_key_unref(sks); | |||
| 1570 | ||||
| 1571 | cleanup_state: /* pf_state_insert frees the state keys */ | |||
| 1572 | if (st) { | |||
| 1573 | if (st->dst.scrub) | |||
| 1574 | pool_put(&pf_state_scrub_pl, st->dst.scrub); | |||
| 1575 | if (st->src.scrub) | |||
| 1576 | pool_put(&pf_state_scrub_pl, st->src.scrub); | |||
| 1577 | pool_put(&pf_state_pl, st); | |||
| 1578 | } | |||
| 1579 | return (error); | |||
| 1580 | } | |||
| 1581 | #endif /* NPFSYNC > 0 */ | |||
| 1582 | ||||
| 1583 | /* END state table stuff */ | |||
| 1584 | ||||
| 1585 | void pf_purge_states(void *); | |||
| 1586 | struct task pf_purge_states_task = | |||
| 1587 | TASK_INITIALIZER(pf_purge_states, NULL); | |||
| 1588 | ||||
| 1589 | void pf_purge_states_tick(void *); | |||
| 1590 | struct timeout pf_purge_states_to = | |||
| 1591 | TIMEOUT_INITIALIZER(pf_purge_states_tick, NULL); | |||
| 1592 | ||||
| 1593 | unsigned int pf_purge_expired_states(unsigned int, unsigned int); | |||
| 1594 | ||||
| 1595 | /* | |||
| 1596 | * how many states to scan this interval. | |||
| 1597 | * | |||
| 1598 | * this is set when the timeout fires, and reduced by the task. the | |||
| 1599 | * task will reschedule itself until the limit is reduced to zero, | |||
| 1600 | * and then it adds the timeout again. | |||
| 1601 | */ | |||
| 1602 | unsigned int pf_purge_states_limit; | |||
| 1603 | ||||
| 1604 | /* | |||
| 1605 | * limit how many states are processed with locks held per run of | |||
| 1606 | * the state purge task. | |||
| 1607 | */ | |||
| 1608 | unsigned int pf_purge_states_collect = 64; | |||
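| | /* | |||
| | * Worked example (illustrative numbers): with 50000 states and the | |||
| | * default "set timeout interval" of 10 seconds, each tick sets | |||
| | * pf_purge_states_limit to roughly 5000; pf_purge_states() then scans | |||
| | * at most pf_purge_states_collect (64) states per lock acquisition and | |||
| | * requeues itself until that budget is consumed, after which the | |||
| | * one-second timeout is armed again. | |||
| | */ | |||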
| 1609 | ||||
| 1610 | void | |||
| 1611 | pf_purge_states_tick(void *null) | |||
| 1612 | { | |||
| 1613 | unsigned int limit = pf_status.states; | |||
| 1614 | unsigned int interval = pf_default_rule.timeout[PFTM_INTERVAL]; | |||
| 1615 | ||||
| 1616 | if (limit == 0) { | |||
| 1617 | timeout_add_sec(&pf_purge_states_to, 1); | |||
| 1618 | return; | |||
| 1619 | } | |||
| 1620 | ||||
| 1621 | /* | |||
| 1622 | * process a fraction of the state table every second | |||
| 1623 | */ | |||
| 1624 | ||||
| 1625 | if (interval > 1) | |||
| 1626 | limit /= interval; | |||
| 1627 | ||||
| 1628 | pf_purge_states_limit = limit; | |||
| 1629 | task_add(systqmp, &pf_purge_states_task); | |||
| 1630 | } | |||
| 1631 | ||||
| 1632 | void | |||
| 1633 | pf_purge_states(void *null) | |||
| 1634 | { | |||
| 1635 | unsigned int limit; | |||
| 1636 | unsigned int scanned; | |||
| 1637 | ||||
| 1638 | limit = pf_purge_states_limit; | |||
| 1639 | if (limit < pf_purge_states_collect) | |||
| 1640 | limit = pf_purge_states_collect; | |||
| 1641 | ||||
| 1642 | scanned = pf_purge_expired_states(limit, pf_purge_states_collect); | |||
| 1643 | if (scanned >= pf_purge_states_limit) { | |||
| 1644 | /* we've run out of states to scan this "interval" */ | |||
| 1645 | timeout_add_sec(&pf_purge_states_to, 1); | |||
| 1646 | return; | |||
| 1647 | } | |||
| 1648 | ||||
| 1649 | pf_purge_states_limit -= scanned; | |||
| 1650 | task_add(systqmp, &pf_purge_states_task); | |||
| 1651 | } | |||
| 1652 | ||||
| 1653 | void pf_purge_tick(void *); | |||
| 1654 | struct timeout pf_purge_to = | |||
| 1655 | TIMEOUT_INITIALIZER(pf_purge_tick, NULL); | |||
| 1656 | ||||
| 1657 | void pf_purge(void *); | |||
| 1658 | struct task pf_purge_task = | |||
| 1659 | TASK_INITIALIZER(pf_purge, NULL); | |||
| 1660 | ||||
| 1661 | void | |||
| 1662 | pf_purge_tick(void *null) | |||
| 1663 | { | |||
| 1664 | task_add(systqmp, &pf_purge_task); | |||
| 1665 | } | |||
| 1666 | ||||
| 1667 | void | |||
| 1668 | pf_purge(void *null) | |||
| 1669 | { | |||
| 1670 | unsigned int interval = max(1, pf_default_rule.timeout[PFTM_INTERVAL]); | |||
| 1671 | ||||
| 1672 | PF_LOCK(); | |||
| 1673 | ||||
| 1674 | pf_purge_expired_src_nodes(); | |||
| 1675 | ||||
| 1676 | PF_UNLOCK(); | |||
| 1677 | ||||
| 1678 | /* | |||
| 1679 | * Fragments don't require PF_LOCK(), they use their own lock. | |||
| 1680 | */ | |||
| 1681 | pf_purge_expired_fragments(); | |||
| 1682 | ||||
| 1683 | /* interpret the interval as idle time between runs */ | |||
| 1684 | timeout_add_sec(&pf_purge_to, interval); | |||
| 1685 | } | |||
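| | /* | |||
| | * Note: expired states are handled separately by pf_purge_states_to / | |||
| | * pf_purge_states_task above; this task only ages out source nodes | |||
| | * (under the pf lock) and fragments (under their own lock), re-arming | |||
| | * itself every "interval" seconds. | |||
| | */ | |||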
| 1686 | ||||
| 1687 | int32_t | |||
| 1688 | pf_state_expires(const struct pf_state *st, uint8_t stimeout) | |||
| 1689 | { | |||
| 1690 | u_int32_t timeout; | |||
| 1691 | u_int32_t start; | |||
| 1692 | u_int32_t end; | |||
| 1693 | u_int32_t states; | |||
| 1694 | ||||
| 1695 | /* | |||
| 1696 | * pf_state_expires is used by the state purge task to | |||
| 1697 | * decide if a state is a candidate for cleanup, and by the | |||
| 1698 | * pfsync state export code to populate an expiry time. | |||
| 1699 | * | |||
| 1700 | * this function may be called by the state purge task while | |||
| 1701 | * the state is being modified. avoid inconsistent reads of | |||
| 1702 | * state->timeout by having the caller do the read (and any | |||
| 1703 | * checks it needs to do on the same variable) and then pass | |||
| 1704 | * their view of the timeout in here for this function to use. | |||
| 1705 | * the only consequence of using a stale timeout value is | |||
| 1706 | * that the state won't be a candidate for purging until the | |||
| 1707 | * next pass of the purge task. | |||
| 1708 | */ | |||
| 1709 | ||||
| 1710 | /* handle all PFTM_* >= PFTM_MAX here */ | |||
| 1711 | if (stimeout >= PFTM_MAX) | |||
| 1712 | return (0); | |||
| 1713 | ||||
| 1714 | KASSERT(stimeout < PFTM_MAX); | |||
| 1715 | ||||
| 1716 | timeout = st->rule.ptr->timeout[stimeout]; | |||
| 1717 | if (!timeout) | |||
| 1718 | timeout = pf_default_rule.timeout[stimeout]; | |||
| 1719 | ||||
| 1720 | start = st->rule.ptr->timeout[PFTM_ADAPTIVE_START]; | |||
| 1721 | if (start) { | |||
| 1722 | end = st->rule.ptr->timeout[PFTM_ADAPTIVE_END]; | |||
| 1723 | states = st->rule.ptr->states_cur; | |||
| 1724 | } else { | |||
| 1725 | start = pf_default_rule.timeout[PFTM_ADAPTIVE_START]; | |||
| 1726 | end = pf_default_rule.timeout[PFTM_ADAPTIVE_END]; | |||
| 1727 | states = pf_status.states; | |||
| 1728 | } | |||
| 1729 | if (end && states > start && start < end) { | |||
| 1730 | if (states >= end) | |||
| 1731 | return (0); | |||
| 1732 | ||||
| 1733 | timeout = (u_int64_t)timeout * (end - states) / (end - start); | |||
| 1734 | } | |||
| 1735 | ||||
| 1736 | return (st->expire + timeout); | |||
| 1737 | } | |||
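| | /* | |||
| | * Example of the adaptive scaling above (illustrative numbers): with | |||
| | * adaptive.start 6000, adaptive.end 12000, a base timeout of 86400s and | |||
| | * 9000 states, the effective timeout becomes | |||
| | * 86400 * (12000 - 9000) / (12000 - 6000) = 43200s; once the state | |||
| | * count reaches adaptive.end, 0 is returned and the state is an | |||
| | * immediate purge candidate. | |||
| | */ | |||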
| 1738 | ||||
| 1739 | void | |||
| 1740 | pf_purge_expired_src_nodes(void) | |||
| 1741 | { | |||
| 1742 | struct pf_src_node *cur, *next; | |||
| 1743 | ||||
| 1744 | PF_ASSERT_LOCKED(); | |||
| 1745 | ||||
| 1746 | RB_FOREACH_SAFE(cur, pf_src_tree, &tree_src_tracking, next) { | |||
| 1747 | if (cur->states == 0 && cur->expire <= getuptime()) { | |||
| 1748 | pf_remove_src_node(cur); | |||
| 1749 | } | |||
| 1750 | } | |||
| 1751 | } | |||
| 1752 | ||||
| 1753 | void | |||
| 1754 | pf_src_tree_remove_state(struct pf_state *st) | |||
| 1755 | { | |||
| 1756 | u_int32_t timeout; | |||
| 1757 | struct pf_sn_item *sni; | |||
| 1758 | ||||
| 1759 | while ((sni = SLIST_FIRST(&st->src_nodes)) != NULL) { | |||
| 1760 | SLIST_REMOVE_HEAD(&st->src_nodes, next); | |||
| 1761 | if (st->src.tcp_est) | |||
| 1762 | --sni->sn->conn; | |||
| 1763 | if (--sni->sn->states == 0) { | |||
| 1764 | timeout = st->rule.ptr->timeout[PFTM_SRC_NODE]; | |||
| 1765 | if (!timeout) | |||
| 1766 | timeout = | |||
| 1767 | pf_default_rule.timeout[PFTM_SRC_NODE]; | |||
| 1768 | sni->sn->expire = getuptime() + timeout; | |||
| 1769 | } | |||
| 1770 | pool_put(&pf_sn_item_pl, sni); | |||
| 1771 | } | |||
| 1772 | } | |||
| 1773 | ||||
| 1774 | void | |||
| 1775 | pf_remove_state(struct pf_state *st) | |||
| 1776 | { | |||
| 1777 | PF_ASSERT_LOCKED(); | |||
| 1778 | ||||
| 1779 | mtx_enter(&st->mtx); | |||
| 1780 | if (st->timeout == PFTM_UNLINKED) { | |||
| 1781 | mtx_leave(&st->mtx); | |||
| 1782 | return; | |||
| 1783 | } | |||
| 1784 | st->timeout = PFTM_UNLINKED; | |||
| 1785 | mtx_leave(&st->mtx); | |||
| 1786 | ||||
| 1787 | /* handle load balancing related tasks */ | |||
| 1788 | pf_postprocess_addr(st); | |||
| 1789 | ||||
| 1790 | if (st->src.state == PF_TCPS_PROXY_DST) { | |||
| 1791 | pf_send_tcp(st->rule.ptr, st->key[PF_SK_WIRE]->af, | |||
| 1792 | &st->key[PF_SK_WIRE]->addr[1], | |||
| 1793 | &st->key[PF_SK_WIRE]->addr[0], | |||
| 1794 | st->key[PF_SK_WIRE]->port[1], | |||
| 1795 | st->key[PF_SK_WIRE]->port[0], | |||
| 1796 | st->src.seqhi, st->src.seqlo + 1, | |||
| 1797 | TH_RST|TH_ACK, 0, 0, 0, 1, st->tag, | |||
| 1798 | st->key[PF_SK_WIRE]->rdomain); | |||
| 1799 | } | |||
| 1800 | if (st->key[PF_SK_STACK]->proto == IPPROTO_TCP) | |||
| 1801 | pf_set_protostate(st, PF_PEER_BOTH, TCPS_CLOSED); | |||
| 1802 | ||||
| 1803 | RBT_REMOVE(pf_state_tree_id, &tree_id, st); | |||
| 1804 | #if NPFLOW > 0 | |||
| 1805 | if (st->state_flags & PFSTATE_PFLOW) | |||
| 1806 | export_pflow(st); | |||
| 1807 | #endif /* NPFLOW > 0 */ | |||
| 1808 | #if NPFSYNC > 0 | |||
| 1809 | pfsync_delete_state(st); | |||
| 1810 | #endif /* NPFSYNC > 0 */ | |||
| 1811 | pf_src_tree_remove_state(st); | |||
| 1812 | pf_detach_state(st); | |||
| 1813 | } | |||
| 1814 | ||||
| 1815 | void | |||
| 1816 | pf_remove_divert_state(struct inpcb *inp) | |||
| 1817 | { | |||
| 1818 | struct pf_state_key *sk; | |||
| 1819 | struct pf_state_item *si; | |||
| 1820 | ||||
| 1821 | PF_ASSERT_UNLOCKED(); | |||
| 1822 | ||||
| 1823 | if (READ_ONCE(inp->inp_pf_sk) == NULL) | |||
| 1824 | return; | |||
| 1825 | ||||
| 1826 | mtx_enter(&pf_inp_mtx); | |||
| 1827 | sk = pf_state_key_ref(inp->inp_pf_sk); | |||
| 1828 | mtx_leave(&pf_inp_mtx); | |||
| 1829 | if (sk == NULL) | |||
| 1830 | return; | |||
| 1831 | ||||
| 1832 | PF_LOCK(); | |||
| 1833 | PF_STATE_ENTER_WRITE(); | |||
| 1834 | TAILQ_FOREACH(si, &sk->sk_states, si_entry) { | |||
| 1835 | struct pf_state *sist = si->si_st; | |||
| 1836 | if (sk == sist->key[PF_SK_STACK] && sist->rule.ptr && | |||
| 1837 | (sist->rule.ptr->divert.type == PF_DIVERT_TO || | |||
| 1838 | sist->rule.ptr->divert.type == PF_DIVERT_REPLY)) { | |||
| 1839 | if (sist->key[PF_SK_STACK]->proto == IPPROTO_TCP && | |||
| 1840 | sist->key[PF_SK_WIRE] != sist->key[PF_SK_STACK]) { | |||
| 1841 | /* | |||
| 1842 | * If the local address is translated, keep | |||
| 1843 | * the state for "tcp.closed" seconds to | |||
| 1844 | * prevent its source port from being reused. | |||
| 1845 | */ | |||
| 1846 | if (sist->src.state < TCPS_FIN_WAIT_2 || | |||
| 1847 | sist->dst.state < TCPS_FIN_WAIT_2) { | |||
| 1848 | pf_set_protostate(sist, PF_PEER_BOTH, | |||
| 1849 | TCPS_TIME_WAIT); | |||
| 1850 | pf_update_state_timeout(sist, | |||
| 1851 | PFTM_TCP_CLOSED); | |||
| 1852 | sist->expire = getuptime(); | |||
| 1853 | } | |||
| 1854 | sist->state_flags |= PFSTATE_INP_UNLINKED; | |||
| 1855 | } else | |||
| 1856 | pf_remove_state(sist); | |||
| 1857 | break; | |||
| 1858 | } | |||
| 1859 | } | |||
| 1860 | PF_STATE_EXIT_WRITE(); | |||
| 1861 | PF_UNLOCK(); | |||
| 1862 | ||||
| 1863 | pf_state_key_unref(sk); | |||
| 1864 | } | |||
| 1865 | ||||
| 1866 | void | |||
| 1867 | pf_free_state(struct pf_state *st) | |||
| 1868 | { | |||
| 1869 | struct pf_rule_item *ri; | |||
| 1870 | ||||
| 1871 | PF_ASSERT_LOCKED(); | |||
| 1872 | ||||
| 1873 | #if NPFSYNC > 0 | |||
| 1874 | if (pfsync_state_in_use(st)) | |||
| 1875 | return; | |||
| 1876 | #endif /* NPFSYNC > 0 */ | |||
| 1877 | ||||
| 1878 | KASSERT(st->timeout == PFTM_UNLINKED); | |||
| 1879 | if (--st->rule.ptr->states_cur == 0 && | |||
| 1880 | st->rule.ptr->src_nodes == 0) | |||
| 1881 | pf_rm_rule(NULL, st->rule.ptr); | |||
| 1882 | if (st->anchor.ptr != NULL) | |||
| 1883 | if (--st->anchor.ptr->states_cur == 0) | |||
| 1884 | pf_rm_rule(NULL, st->anchor.ptr); | |||
| 1885 | while ((ri = SLIST_FIRST(&st->match_rules))) { | |||
| 1886 | SLIST_REMOVE_HEAD(&st->match_rules, entry); | |||
| 1887 | if (--ri->r->states_cur == 0 && | |||
| 1888 | ri->r->src_nodes == 0) | |||
| 1889 | pf_rm_rule(NULL, ri->r); | |||
| 1890 | pool_put(&pf_rule_item_pl, ri); | |||
| 1891 | } | |||
| 1892 | pf_normalize_tcp_cleanup(st); | |||
| 1893 | pfi_kif_unref(st->kif, PFI_KIF_REF_STATE); | |||
| 1894 | pf_state_list_remove(&pf_state_list, st); | |||
| 1895 | if (st->tag) | |||
| 1896 | pf_tag_unref(st->tag); | |||
| 1897 | pf_state_unref(st); | |||
| 1898 | pf_status.fcounters[FCNT_STATE_REMOVALS]++; | |||
| 1899 | pf_status.states--; | |||
| 1900 | } | |||
| 1901 | ||||
| 1902 | unsigned int | |||
| 1903 | pf_purge_expired_states(const unsigned int limit, const unsigned int collect) | |||
| 1904 | { | |||
| 1905 | /* | |||
| 1906 | * this task/thread/context/whatever is the only thing that | |||
| 1907 | * removes states from the pf_state_list, so the cur reference | |||
| 1908 | * it holds between calls is guaranteed to still be in the | |||
| 1909 | * list. | |||
| 1910 | */ | |||
| 1911 | static struct pf_state *cur = NULL; | |||
| 1912 | ||||
| 1913 | struct pf_state *head, *tail; | |||
| 1914 | struct pf_state *st; | |||
| 1915 | SLIST_HEAD(pf_state_gcl, pf_state) gcl = SLIST_HEAD_INITIALIZER(gcl); | |||
| 1916 | time_t now; | |||
| 1917 | unsigned int scanned; | |||
| 1918 | unsigned int collected = 0; | |||
| 1919 | ||||
| 1920 | PF_ASSERT_UNLOCKED(); | |||
| 1921 | ||||
| 1922 | rw_enter_read(&pf_state_list.pfs_rwl); | |||
| 1923 | ||||
| 1924 | mtx_enter(&pf_state_list.pfs_mtx); | |||
| 1925 | head = TAILQ_FIRST(&pf_state_list.pfs_list); | |||
| 1926 | tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue); | |||
| 1927 | mtx_leave(&pf_state_list.pfs_mtx); | |||
| 1928 | ||||
| 1929 | if (head == NULL) { | |||
| 1930 | /* the list is empty */ | |||
| 1931 | rw_exit_read(&pf_state_list.pfs_rwl); | |||
| 1932 | return (limit); | |||
| 1933 | } | |||
| 1934 | ||||
| 1935 | /* (re)start at the front of the list */ | |||
| 1936 | if (cur == NULL) | |||
| 1937 | cur = head; | |||
| 1938 | ||||
| 1939 | now = getuptime(); | |||
| 1940 | ||||
| 1941 | for (scanned = 0; scanned < limit; scanned++) { | |||
| 1942 | uint8_t stimeout = cur->timeout; | |||
| 1943 | unsigned int limited = 0; | |||
| 1944 | ||||
| 1945 | if ((stimeout == PFTM_UNLINKED) || | |||
| 1946 | (pf_state_expires(cur, stimeout) <= now)) { | |||
| 1947 | st = pf_state_ref(cur); | |||
| 1948 | SLIST_INSERT_HEAD(&gcl, st, gc_list); | |||
| 1949 | ||||
| 1950 | if (++collected >= collect) | |||
| 1951 | limited = 1; | |||
| 1952 | } | |||
| 1953 | ||||
| 1954 | /* don't iterate past the end of our view of the list */ | |||
| 1955 | if (cur == tail) { | |||
| 1956 | cur = NULL; | |||
| 1957 | break; | |||
| 1958 | } | |||
| 1959 | ||||
| 1960 | cur = TAILQ_NEXT(cur, entry_list); | |||
| 1961 | ||||
| 1962 | /* don't spend too much time here. */ | |||
| 1963 | if (ISSET(READ_ONCE(curcpu()->ci_schedstate.spc_schedflags), | |||
| 1964 | SPCF_SHOULDYIELD) || limited) | |||
| 1965 | break; | |||
| 1966 | } | |||
| 1967 | ||||
| 1968 | rw_exit_read(&pf_state_list.pfs_rwl); | |||
| 1969 | ||||
| 1970 | if (SLIST_EMPTY(&gcl)) | |||
| 1971 | return (scanned); | |||
| 1972 | ||||
| 1973 | rw_enter_write(&pf_state_list.pfs_rwl); | |||
| 1974 | PF_LOCK(); | |||
| 1975 | PF_STATE_ENTER_WRITE(); | |||
| 1976 | SLIST_FOREACH(st, &gcl, gc_list) { | |||
| 1977 | if (st->timeout != PFTM_UNLINKED) | |||
| 1978 | pf_remove_state(st); | |||
| 1979 | ||||
| 1980 | pf_free_state(st); | |||
| 1981 | } | |||
| 1982 | PF_STATE_EXIT_WRITE(); | |||
| 1983 | PF_UNLOCK(); | |||
| 1984 | rw_exit_write(&pf_state_list.pfs_rwl); | |||
| 1985 | ||||
| 1986 | while ((st = SLIST_FIRST(&gcl)) != NULL) { | |||
| 1987 | SLIST_REMOVE_HEAD(&gcl, gc_list); | |||
| 1988 | pf_state_unref(st); | |||
| 1989 | } | |||
| 1990 | ||||
| 1991 | return (scanned); | |||
| 1992 | } | |||
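
The purge scan above is deliberately split into two phases: candidates are collected onto a private gc list while only the shared pfs_rwl read lock is held, and the write locks are taken afterwards for just the states actually collected. A minimal userland sketch of that pattern, assuming a pthread rwlock and a hypothetical item array (names are illustrative, not pf's):

#include <pthread.h>
#include <stdio.h>

#define NITEMS 8

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int expired[NITEMS] = { 0, 1, 0, 0, 1, 0, 1, 0 };

int
main(void)
{
	int gc[NITEMS], ngc = 0, i;

	/* phase 1: cheap scan, shared with readers */
	pthread_rwlock_rdlock(&lock);
	for (i = 0; i < NITEMS; i++)
		if (expired[i])
			gc[ngc++] = i;
	pthread_rwlock_unlock(&lock);

	/* phase 2: exclusive access only for the items actually collected */
	pthread_rwlock_wrlock(&lock);
	for (i = 0; i < ngc; i++)
		printf("freeing item %d\n", gc[i]);
	pthread_rwlock_unlock(&lock);

	return (0);
}
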
| 1993 | ||||
| 1994 | int | |||
| 1995 | pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw, int wait) | |||
| 1996 | { | |||
| 1997 | if (aw->type != PF_ADDR_TABLE) | |||
| 1998 | return (0); | |||
| 1999 | if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname, wait)) == NULL) | |||
| 2000 | return (1); | |||
| 2001 | return (0); | |||
| 2002 | } | |||
| 2003 | ||||
| 2004 | void | |||
| 2005 | pf_tbladdr_remove(struct pf_addr_wrap *aw) | |||
| 2006 | { | |||
| 2007 | if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL) | |||
| 2008 | return; | |||
| 2009 | pfr_detach_table(aw->p.tbl); | |||
| 2010 | aw->p.tbl = NULL; | |||
| 2011 | } | |||
| 2012 | ||||
| 2013 | void | |||
| 2014 | pf_tbladdr_copyout(struct pf_addr_wrap *aw) | |||
| 2015 | { | |||
| 2016 | struct pfr_ktable *kt = aw->p.tbl; | |||
| 2017 | ||||
| 2018 | if (aw->type != PF_ADDR_TABLE || kt == NULL) | |||
| 2019 | return; | |||
| 2020 | if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) | |||
| 2021 | kt = kt->pfrkt_root; | |||
| 2022 | aw->p.tbl = NULL; | |||
| 2023 | aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? | |||
| 2024 | kt->pfrkt_cnt : -1; | |||
| 2025 | } | |||
| 2026 | ||||
| 2027 | void | |||
| 2028 | pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af) | |||
| 2029 | { | |||
| 2030 | switch (af) { | |||
| 2031 | case AF_INET: { | |||
| 2032 | u_int32_t a = ntohl(addr->addr32[0]); | |||
| 2033 | addlog("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255, | |||
| 2034 | (a>>8)&255, a&255); | |||
| 2035 | if (p) { | |||
| 2036 | p = ntohs(p); | |||
| 2037 | addlog(":%u", p); | |||
| 2038 | } | |||
| 2039 | break; | |||
| 2040 | } | |||
| 2041 | #ifdef INET6 | |||
| 2042 | case AF_INET6: { | |||
| 2043 | u_int16_t b; | |||
| 2044 | u_int8_t i, curstart, curend, maxstart, maxend; | |||
| 2045 | curstart = curend = maxstart = maxend = 255; | |||
| 2046 | for (i = 0; i < 8; i++) { | |||
| 2047 | if (!addr->addr16[i]) { | |||
| 2048 | if (curstart == 255) | |||
| 2049 | curstart = i; | |||
| 2050 | curend = i; | |||
| 2051 | } else { | |||
| 2052 | if ((curend - curstart) > | |||
| 2053 | (maxend - maxstart)) { | |||
| 2054 | maxstart = curstart; | |||
| 2055 | maxend = curend; | |||
| 2056 | } | |||
| 2057 | curstart = curend = 255; | |||
| 2058 | } | |||
| 2059 | } | |||
| 2060 | if ((curend - curstart) > | |||
| 2061 | (maxend - maxstart)) { | |||
| 2062 | maxstart = curstart; | |||
| 2063 | maxend = curend; | |||
| 2064 | } | |||
| 2065 | for (i = 0; i < 8; i++) { | |||
| 2066 | if (i >= maxstart && i <= maxend) { | |||
| 2067 | if (i == 0) | |||
| 2068 | addlog(":"); | |||
| 2069 | if (i == maxend) | |||
| 2070 | addlog(":"); | |||
| 2071 | } else { | |||
| 2072 | b = ntohs(addr->addr16[i]); | |||
| 2073 | addlog("%x", b); | |||
| 2074 | if (i < 7) | |||
| 2075 | addlog(":"); | |||
| 2076 | } | |||
| 2077 | } | |||
| 2078 | if (p) { | |||
| 2079 | p = ntohs(p); | |||
| 2080 | addlog("[%u]", p); | |||
| 2081 | } | |||
| 2082 | break; | |||
| 2083 | } | |||
| 2084 | #endif /* INET6 */ | |||
| 2085 | } | |||
| 2086 | } | |||
| 2087 | ||||
| 2088 | void | |||
| 2089 | pf_print_state(struct pf_state *st) | |||
| 2090 | { | |||
| 2091 | pf_print_state_parts(st, NULL, NULL); | |||
| 2092 | } | |||
| 2093 | ||||
| 2094 | void | |||
| 2095 | pf_print_state_parts(struct pf_state *st, | |||
| 2096 | struct pf_state_key *skwp, struct pf_state_key *sksp) | |||
| 2097 | { | |||
| 2098 | struct pf_state_key *skw, *sks; | |||
| 2099 | u_int8_t proto, dir; | |||
| 2100 | ||||
| 2101 | /* Do our best to fill these, but they're skipped if NULL */ | |||
| 2102 | skw = skwp ? skwp : (st ? st->key[PF_SK_WIRE] : NULL); | |||
| 2103 | sks = sksp ? sksp : (st ? st->key[PF_SK_STACK] : NULL); | |||
| 2104 | proto = skw ? skw->proto : (sks ? sks->proto : 0); | |||
| 2105 | dir = st ? st->direction : 0; | |||
| 2106 | ||||
| 2107 | switch (proto) { | |||
| 2108 | case IPPROTO_IPV4: | |||
| 2109 | addlog("IPv4"); | |||
| 2110 | break; | |||
| 2111 | case IPPROTO_IPV6: | |||
| 2112 | addlog("IPv6"); | |||
| 2113 | break; | |||
| 2114 | case IPPROTO_TCP: | |||
| 2115 | addlog("TCP"); | |||
| 2116 | break; | |||
| 2117 | case IPPROTO_UDP: | |||
| 2118 | addlog("UDP"); | |||
| 2119 | break; | |||
| 2120 | case IPPROTO_ICMP: | |||
| 2121 | addlog("ICMP"); | |||
| 2122 | break; | |||
| 2123 | case IPPROTO_ICMPV6: | |||
| 2124 | addlog("ICMPv6"); | |||
| 2125 | break; | |||
| 2126 | default: | |||
| 2127 | addlog("%u", proto); | |||
| 2128 | break; | |||
| 2129 | } | |||
| 2130 | switch (dir) { | |||
| 2131 | case PF_IN: | |||
| 2132 | addlog(" in"); | |||
| 2133 | break; | |||
| 2134 | case PF_OUT: | |||
| 2135 | addlog(" out"); | |||
| 2136 | break; | |||
| 2137 | } | |||
| 2138 | if (skw) { | |||
| 2139 | addlog(" wire: (%d) ", skw->rdomain); | |||
| 2140 | pf_print_host(&skw->addr[0], skw->port[0], skw->af); | |||
| 2141 | addlog(" "); | |||
| 2142 | pf_print_host(&skw->addr[1], skw->port[1], skw->af); | |||
| 2143 | } | |||
| 2144 | if (sks) { | |||
| 2145 | addlog(" stack: (%d) ", sks->rdomain); | |||
| 2146 | if (sks != skw) { | |||
| 2147 | pf_print_host(&sks->addr[0], sks->port[0], sks->af); | |||
| 2148 | addlog(" "); | |||
| 2149 | pf_print_host(&sks->addr[1], sks->port[1], sks->af); | |||
| 2150 | } else | |||
| 2151 | addlog("-"); | |||
| 2152 | } | |||
| 2153 | if (st) { | |||
| 2154 | if (proto == IPPROTO_TCP) { | |||
| 2155 | addlog(" [lo=%u high=%u win=%u modulator=%u", | |||
| 2156 | st->src.seqlo, st->src.seqhi, | |||
| 2157 | st->src.max_win, st->src.seqdiff); | |||
| 2158 | if (st->src.wscale && st->dst.wscale) | |||
| 2159 | addlog(" wscale=%u", | |||
| 2160 | st->src.wscale & PF_WSCALE_MASK); | |||
| 2161 | addlog("]"); | |||
| 2162 | addlog(" [lo=%u high=%u win=%u modulator=%u", | |||
| 2163 | st->dst.seqlo, st->dst.seqhi, | |||
| 2164 | st->dst.max_win, st->dst.seqdiff); | |||
| 2165 | if (st->src.wscale && st->dst.wscale) | |||
| 2166 | addlog(" wscale=%u", | |||
| 2167 | st->dst.wscale & PF_WSCALE_MASK); | |||
| 2168 | addlog("]"); | |||
| 2169 | } | |||
| 2170 | addlog(" %u:%u", st->src.state, st->dst.state); | |||
| 2171 | if (st->rule.ptr) | |||
| 2172 | addlog(" @%d", st->rule.ptr->nr); | |||
| 2173 | } | |||
| 2174 | } | |||
| 2175 | ||||
| 2176 | void | |||
| 2177 | pf_print_flags(u_int8_t f) | |||
| 2178 | { | |||
| 2179 | if (f) | |||
| 2180 | addlog(" "); | |||
| 2181 | if (f & TH_FIN) | |||
| 2182 | addlog("F"); | |||
| 2183 | if (f & TH_SYN) | |||
| 2184 | addlog("S"); | |||
| 2185 | if (f & TH_RST) | |||
| 2186 | addlog("R"); | |||
| 2187 | if (f & TH_PUSH) | |||
| 2188 | addlog("P"); | |||
| 2189 | if (f & TH_ACK) | |||
| 2190 | addlog("A"); | |||
| 2191 | if (f & TH_URG) | |||
| 2192 | addlog("U"); | |||
| 2193 | if (f & TH_ECE) | |||
| 2194 | addlog("E"); | |||
| 2195 | if (f & TH_CWR) | |||
| 2196 | addlog("W"); | |||
| 2197 | } | |||
| 2198 | ||||
| 2199 | #define PF_SET_SKIP_STEPS(i) \ | |||
| 2200 | do { \ | |||
| 2201 | while (head[i] != cur) { \ | |||
| 2202 | head[i]->skip[i].ptr = cur; \ | |||
| 2203 | head[i] = TAILQ_NEXT(head[i], entries); \ | |||
| 2204 | } \ | |||
| 2205 | } while (0) | |||
| 2206 | ||||
| 2207 | void | |||
| 2208 | pf_calc_skip_steps(struct pf_rulequeue *rules) | |||
| 2209 | { | |||
| 2210 | struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT]; | |||
| 2211 | int i; | |||
| 2212 | ||||
| 2213 | cur = TAILQ_FIRST(rules); | |||
| 2214 | prev = cur; | |||
| 2215 | for (i = 0; i < PF_SKIP_COUNT; ++i) | |||
| 2216 | head[i] = cur; | |||
| 2217 | while (cur != NULL) { | |||
| 2218 | if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) | |||
| 2219 | PF_SET_SKIP_STEPS(PF_SKIP_IFP); | |||
| 2220 | if (cur->direction != prev->direction) | |||
| 2221 | PF_SET_SKIP_STEPS(PF_SKIP_DIR); | |||
| 2222 | if (cur->onrdomain != prev->onrdomain || | |||
| 2223 | cur->ifnot != prev->ifnot) | |||
| 2224 | PF_SET_SKIP_STEPS(PF_SKIP_RDOM); | |||
| 2225 | if (cur->af != prev->af) | |||
| 2226 | PF_SET_SKIP_STEPS(PF_SKIP_AF); | |||
| 2227 | if (cur->proto != prev->proto) | |||
| 2228 | PF_SET_SKIP_STEPS(PF_SKIP_PROTO); | |||
| 2229 | if (cur->src.neg != prev->src.neg || | |||
| 2230 | pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr)) | |||
| 2231 | PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR); | |||
| 2232 | if (cur->dst.neg != prev->dst.neg || | |||
| 2233 | pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr)) | |||
| 2234 | PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR); | |||
| 2235 | if (cur->src.port[0] != prev->src.port[0] || | |||
| 2236 | cur->src.port[1] != prev->src.port[1] || | |||
| 2237 | cur->src.port_op != prev->src.port_op) | |||
| 2238 | PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT); | |||
| 2239 | if (cur->dst.port[0] != prev->dst.port[0] || | |||
| 2240 | cur->dst.port[1] != prev->dst.port[1] || | |||
| 2241 | cur->dst.port_op != prev->dst.port_op) | |||
| 2242 | PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT); | |||
| 2243 | ||||
| 2244 | prev = cur; | |||
| 2245 | cur = TAILQ_NEXT(cur, entries); | |||
| 2246 | } | |||
| 2247 | for (i = 0; i < PF_SKIP_COUNT; ++i) | |||
| 2248 | PF_SET_SKIP_STEPS(i); | |||
| 2249 | } | |||
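
pf_calc_skip_steps() gives every rule, for each of the nine criteria, a pointer to the first later rule that differs in that criterion, so rule evaluation can leap over a whole run of rules as soon as one criterion fails to match. A simplified sketch of how such skip pointers are built and consumed (the single-criterion array representation here is an illustration, not pf's data structures):

#include <stdio.h>

#define NRULES 6

struct rule {
	int ifidx;	/* criterion: which interface the rule applies to */
	int skip_if;	/* index of the first later rule with a different ifidx */
};

int
main(void)
{
	struct rule r[NRULES] = {
		{ 1, 0 }, { 1, 0 }, { 1, 0 }, { 2, 0 }, { 2, 0 }, { 3, 0 }
	};
	int i, j;

	/* analogue of PF_SET_SKIP_STEPS: point each rule at the next change */
	for (i = 0; i < NRULES; i++) {
		for (j = i + 1; j < NRULES && r[j].ifidx == r[i].ifidx; j++)
			;
		r[i].skip_if = j;
	}

	/* evaluate for a packet on interface 3: the runs on interfaces 1 and 2
	 * are skipped in one step each instead of being tested rule by rule */
	for (i = 0; i < NRULES; ) {
		if (r[i].ifidx != 3) {
			i = r[i].skip_if;
			continue;
		}
		printf("rule %d matches on interface criterion\n", i);
		i++;
	}
	return (0);
}
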
| 2250 | ||||
| 2251 | int | |||
| 2252 | pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2) | |||
| 2253 | { | |||
| 2254 | if (aw1->type != aw2->type) | |||
| 2255 | return (1); | |||
| 2256 | switch (aw1->type) { | |||
| 2257 | case PF_ADDR_ADDRMASK: | |||
| 2258 | case PF_ADDR_RANGE: | |||
| 2259 | if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6)) | |||
| 2260 | return (1); | |||
| 2261 | if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6)) | |||
| 2262 | return (1); | |||
| 2263 | return (0); | |||
| 2264 | case PF_ADDR_DYNIFTL: | |||
| 2265 | return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt); | |||
| 2266 | case PF_ADDR_NONE: | |||
| 2267 | case PF_ADDR_NOROUTE: | |||
| 2268 | case PF_ADDR_URPFFAILED: | |||
| 2269 | return (0); | |||
| 2270 | case PF_ADDR_TABLE: | |||
| 2271 | return (aw1->p.tbl != aw2->p.tbl); | |||
| 2272 | case PF_ADDR_RTLABEL: | |||
| 2273 | return (aw1->v.rtlabel != aw2->v.rtlabel); | |||
| 2274 | default: | |||
| 2275 | addlog("invalid address type: %d\n", aw1->type); | |||
| 2276 | return (1); | |||
| 2277 | } | |||
| 2278 | } | |||
| 2279 | ||||
| 2280 | /* This algorithm computes 'a + b - c' in ones-complement using a trick to | |||
| 2281 | * emulate at most one ones-complement subtraction. This thereby limits net | |||
| 2282 | * carries/borrows to at most one, eliminating a reduction step and saving one | |||
| 2283 | * each of +, >>, & and ~. | |||
| 2284 | * | |||
| 2285 | * def. x mod y = x - (x//y)*y for integer x,y | |||
| 2286 | * def. sum = x mod 2^16 | |||
| 2287 | * def. accumulator = (x >> 16) mod 2^16 | |||
| 2288 | * | |||
| 2289 | * The trick works as follows: subtracting exactly one u_int16_t from the | |||
| 2290 | * u_int32_t x incurs at most one underflow, wrapping its upper 16-bits, the | |||
| 2291 | * accumulator, to 2^16 - 1. Adding this to the 16-bit sum preserves the | |||
| 2292 | * ones-complement borrow: | |||
| 2293 | * | |||
| 2294 | * (sum + accumulator) mod 2^16 | |||
| 2295 | * = { assume underflow: accumulator := 2^16 - 1 } | |||
| 2296 | * (sum + 2^16 - 1) mod 2^16 | |||
| 2297 | * = { mod } | |||
| 2298 | * (sum - 1) mod 2^16 | |||
| 2299 | * | |||
| 2300 | * Although this breaks for sum = 0, giving 0xffff, which is ones-complement's | |||
| 2301 | * other zero, not -1, that cannot occur: the 16-bit sum cannot be underflown | |||
| 2302 | * to zero as that requires subtraction of at least 2^16, which exceeds a | |||
| 2303 | * single u_int16_t's range. | |||
| 2304 | * | |||
| 2305 | * We use the following theorem to derive the implementation: | |||
| 2306 | * | |||
| 2307 | * th. (x + (y mod z)) mod z = (x + y) mod z (0) | |||
| 2308 | * proof. | |||
| 2309 | * (x + (y mod z)) mod z | |||
| 2310 | * = { def mod } | |||
| 2311 | * (x + y - (y//z)*z) mod z | |||
| 2312 | * = { (a + b*c) mod c = a mod c } | |||
| 2313 | * (x + y) mod z [end of proof] | |||
| 2314 | * | |||
| 2315 | * ... and thereby obtain: | |||
| 2316 | * | |||
| 2317 | * (sum + accumulator) mod 2^16 | |||
| 2318 | * = { def. accumulator, def. sum } | |||
| 2319 | * (x mod 2^16 + (x >> 16) mod 2^16) mod 2^16 | |||
| 2320 | * = { (0), twice } | |||
| 2321 | * (x + (x >> 16)) mod 2^16 | |||
| 2322 | * = { x mod 2^n = x & (2^n - 1) } | |||
| 2323 | * (x + (x >> 16)) & 0xffff | |||
| 2324 | * | |||
| 2325 | * Note: this serves also as a reduction step for at most one add (as the | |||
| 2326 | * trailing mod 2^16 prevents further reductions by destroying carries). | |||
| 2327 | */ | |||
| 2328 | __inline void | |||
| 2329 | pf_cksum_fixup(u_int16_t *cksum, u_int16_t was, u_int16_t now, | |||
| 2330 | u_int8_t proto) | |||
| 2331 | { | |||
| 2332 | u_int32_t x; | |||
| 2333 | const int udp = proto == IPPROTO_UDP; | |||
| 2334 | ||||
| 2335 | x = *cksum + was - now; | |||
| 2336 | x = (x + (x >> 16)) & 0xffff; | |||
| 2337 | ||||
| 2338 | /* optimise: eliminate a branch when not udp */ | |||
| 2339 | if (udp && *cksum == 0x0000) | |||
| 2340 | return; | |||
| 2341 | if (udp && x == 0x0000) | |||
| 2342 | x = 0xffff; | |||
| 2343 | ||||
| 2344 | *cksum = (u_int16_t)(x); | |||
| 2345 | } | |||
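
The comment above derives why a single fold suffices when exactly one 16-bit word changes. A small userland check of that update against a full recompute, on made-up data (the helpers are local to the example, not pf API); both printed values come out equal:

#include <stdio.h>
#include <stdint.h>

/* full ones-complement sum over 16-bit words, folded and complemented */
static uint16_t
cksum16(const uint16_t *w, int n)
{
	uint32_t x = 0;
	int i;

	for (i = 0; i < n; i++)
		x += w[i];
	x = (x & 0xffff) + (x >> 16);
	x = (x & 0xffff) + (x >> 16);
	return (~x & 0xffff);
}

/* the same update pf_cksum_fixup() performs for a non-UDP checksum */
static void
fixup(uint16_t *cksum, uint16_t was, uint16_t now)
{
	uint32_t x = *cksum + was - now;

	x = (x + (x >> 16)) & 0xffff;
	*cksum = (uint16_t)x;
}

int
main(void)
{
	uint16_t data[4] = { 0x1234, 0xabcd, 0x0001, 0xff00 };
	uint16_t ck = cksum16(data, 4);

	fixup(&ck, data[2], 0xbeef);	/* patch one word incrementally */
	data[2] = 0xbeef;
	printf("incremental %#x, recomputed %#x\n", ck, cksum16(data, 4));
	return (0);
}
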
| 2346 | ||||
| 2347 | #ifdef INET6 | |||
| 2348 | /* pre: coverage(cksum) is superset of coverage(covered_cksum) */ | |||
| 2349 | static __inline void | |||
| 2350 | pf_cksum_uncover(u_int16_t *cksum, u_int16_t covered_cksum, u_int8_t proto) | |||
| 2351 | { | |||
| 2352 | pf_cksum_fixup(cksum, ~covered_cksum, 0x0, proto); | |||
| 2353 | } | |||
| 2354 | ||||
| 2355 | /* pre: disjoint(coverage(cksum), coverage(uncovered_cksum)) */ | |||
| 2356 | static __inline void | |||
| 2357 | pf_cksum_cover(u_int16_t *cksum, u_int16_t uncovered_cksum, u_int8_t proto) | |||
| 2358 | { | |||
| 2359 | pf_cksum_fixup(cksum, 0x0, ~uncovered_cksum, proto); | |||
| 2360 | } | |||
| 2361 | #endif /* INET6 */ | |||
| 2362 | ||||
| 2363 | /* pre: *a is 16-bit aligned within its packet | |||
| 2364 | * | |||
| 2365 | * This algorithm emulates 16-bit ones-complement sums on a twos-complement | |||
| 2366 | * machine by conserving ones-complement's otherwise discarded carries in the | |||
| 2367 | * upper bits of x. These accumulated carries when added to the lower 16-bits | |||
| 2368 | * over at least zero 'reduction' steps then complete the ones-complement sum. | |||
| 2369 | * | |||
| 2370 | * def. sum = x mod 2^16 | |||
| 2371 | * def. accumulator = (x >> 16) | |||
| 2372 | * | |||
| 2373 | * At most two reduction steps | |||
| 2374 | * | |||
| 2375 | * x := sum + accumulator | |||
| 2376 | * = { def sum, def accumulator } | |||
| 2377 | * x := x mod 2^16 + (x >> 16) | |||
| 2378 | * = { x mod 2^n = x & (2^n - 1) } | |||
| 2379 | * x := (x & 0xffff) + (x >> 16) | |||
| 2380 | * | |||
| 2381 | * are necessary to incorporate the accumulated carries (at most one per add) | |||
| 2382 | * i.e. to reduce x < 2^16 from at most 16 carries in the upper 16 bits. | |||
| 2383 | * | |||
| 2384 | * The function is also invariant over the endian of the host. Why? | |||
| 2385 | * | |||
| 2386 | * Define the unary transpose operator ~ on a bitstring in python slice | |||
| 2387 | * notation as lambda m: m[P:] + m[:P] , for some constant pivot P. | |||
| 2388 | * | |||
| 2389 | * th. ~ distributes over ones-complement addition, denoted by +_1, i.e. | |||
| 2390 | * | |||
| 2391 | * ~m +_1 ~n = ~(m +_1 n) (for all bitstrings m,n of equal length) | |||
| 2392 | * | |||
| 2393 | * proof. Regard the bitstrings in m +_1 n as split at P, forming at most two | |||
| 2394 | * 'half-adds'. Under ones-complement addition, each half-add carries to the | |||
| 2395 | * other, so the sum of each half-add is unaffected by their relative | |||
| 2396 | * order. Therefore: | |||
| 2397 | * | |||
| 2398 | * ~m +_1 ~n | |||
| 2399 | * = { half-adds invariant under transposition } | |||
| 2400 | * ~s | |||
| 2401 | * = { substitute } | |||
| 2402 | * ~(m +_1 n) [end of proof] | |||
| 2403 | * | |||
| 2404 | * th. Summing two in-memory ones-complement 16-bit variables m,n on a machine | |||
| 2405 | * with the converse endian does not alter the result. | |||
| 2406 | * | |||
| 2407 | * proof. | |||
| 2408 | * { converse machine endian: load/store transposes, P := 8 } | |||
| 2409 | * ~(~m +_1 ~n) | |||
| 2410 | * = { ~ over +_1 } | |||
| 2411 | * ~~m +_1 ~~n | |||
| 2412 | * = { ~ is an involution } | |||
| 2413 | * m +_1 n [end of proof] | |||
| 2414 | * | |||
| 2415 | */ | |||
| 2416 | #define NEG(x) ((u_int16_t)~(x)) | |||
| 2417 | void | |||
| 2418 | pf_cksum_fixup_a(u_int16_t *cksum, const struct pf_addr *a, | |||
| 2419 | const struct pf_addr *an, sa_family_t af, u_int8_t proto) | |||
| 2420 | { | |||
| 2421 | u_int32_t x; | |||
| 2422 | const u_int16_t *n = an->addr16; | |||
| 2423 | const u_int16_t *o = a->addr16; | |||
| 2424 | const int udp = proto == IPPROTO_UDP; | |||
| 2425 | ||||
| 2426 | switch (af) { | |||
| 2427 | case AF_INET: | |||
| 2428 | x = *cksum + o[0] + NEG(n[0]) + o[1] + NEG(n[1]); | |||
| 2429 | break; | |||
| 2430 | #ifdef INET6 | |||
| 2431 | case AF_INET6: | |||
| 2432 | x = *cksum + o[0] + NEG(n[0]) + o[1] + NEG(n[1]) +\ | |||
| 2433 | o[2] + NEG(n[2]) + o[3] + NEG(n[3]) +\ | |||
| 2434 | o[4] + NEG(n[4]) + o[5] + NEG(n[5]) +\ | |||
| 2435 | o[6] + NEG(n[6]) + o[7] + NEG(n[7]); | |||
| 2436 | break; | |||
| 2437 | #endif /* INET6 */ | |||
| 2438 | default: | |||
| 2439 | unhandled_af(af); | |||
| 2440 | } | |||
| 2441 | ||||
| 2442 | x = (x & 0xffff) + (x >> 16); | |||
| 2443 | x = (x & 0xffff) + (x >> 16); | |||
| 2444 | ||||
| 2445 | /* optimise: eliminate a branch when not udp */ | |||
| 2446 | if (udp && *cksum == 0x0000) | |||
| 2447 | return; | |||
| 2448 | if (udp && x == 0x0000) | |||
| 2449 | x = 0xffff; | |||
| 2450 | ||||
| 2451 | *cksum = (u_int16_t)(x); | |||
| 2452 | } | |||
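
pf_cksum_fixup_a() folds twice because the IPv6 case sums seventeen 16-bit terms, and a single fold of that total can still leave a 17-bit value. A standalone illustration of one such case (the 0x000f starting value and the all-ones words are chosen specifically to force the second fold):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t x = 0x000f;	/* stands in for the old checksum value */
	int i;

	/* eight old plus eight negated new address words, all 0xffff */
	for (i = 0; i < 16; i++)
		x += 0xffff;

	printf("raw       %#x\n", x);	/* 0xfffff */
	x = (x & 0xffff) + (x >> 16);
	printf("one fold  %#x\n", x);	/* 0x1000e - still carries past 16 bits */
	x = (x & 0xffff) + (x >> 16);
	printf("two folds %#x\n", x);	/* 0xf - now reduced below 2^16 */
	return (0);
}
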
| 2453 | ||||
| 2454 | int | |||
| 2455 | pf_patch_8(struct pf_pdesc *pd, u_int8_t *f, u_int8_t v, bool hi) | |||
| 2456 | { | |||
| 2457 | int rewrite = 0; | |||
| 2458 | ||||
| 2459 | if (*f != v) { | |||
| 2460 | u_int16_t old = htons(hi ? (*f << 8) : *f); | |||
| 2461 | u_int16_t new = htons(hi ? ( v << 8) : v); | |||
| 2462 | ||||
| 2463 | pf_cksum_fixup(pd->pcksum, old, new, pd->proto); | |||
| 2464 | *f = v; | |||
| 2465 | rewrite = 1; | |||
| 2466 | } | |||
| 2467 | ||||
| 2468 | return (rewrite); | |||
| 2469 | } | |||
| 2470 | ||||
| 2471 | /* pre: *f is 16-bit aligned within its packet */ | |||
| 2472 | int | |||
| 2473 | pf_patch_16(struct pf_pdesc *pd, u_int16_t *f, u_int16_t v) | |||
| 2474 | { | |||
| 2475 | int rewrite = 0; | |||
| 2476 | ||||
| 2477 | if (*f != v) { | |||
| 2478 | pf_cksum_fixup(pd->pcksum, *f, v, pd->proto); | |||
| 2479 | *f = v; | |||
| 2480 | rewrite = 1; | |||
| 2481 | } | |||
| 2482 | ||||
| 2483 | return (rewrite); | |||
| 2484 | } | |||
| 2485 | ||||
| 2486 | int | |||
| 2487 | pf_patch_16_unaligned(struct pf_pdesc *pd, void *f, u_int16_t v, bool hi) | |||
| 2488 | { | |||
| 2489 | int rewrite = 0; | |||
| 2490 | u_int8_t *fb = (u_int8_t*)f; | |||
| 2491 | u_int8_t *vb = (u_int8_t*)&v; | |||
| 2492 | ||||
| 2493 | if (hi && ALIGNED_POINTER(f, u_int16_t)) { | |||
| 2494 | return (pf_patch_16(pd, f, v)); /* optimise */ | |||
| 2495 | } | |||
| 2496 | ||||
| 2497 | rewrite += pf_patch_8(pd, fb++, *vb++, hi); | |||
| 2498 | rewrite += pf_patch_8(pd, fb++, *vb++,!hi); | |||
| 2499 | ||||
| 2500 | return (rewrite); | |||
| 2501 | } | |||
| 2502 | ||||
| 2503 | /* pre: *f is 16-bit aligned within its packet */ | |||
| 2504 | /* pre: pd->proto != IPPROTO_UDP */ | |||
| 2505 | int | |||
| 2506 | pf_patch_32(struct pf_pdesc *pd, u_int32_t *f, u_int32_t v) | |||
| 2507 | { | |||
| 2508 | int rewrite = 0; | |||
| 2509 | u_int16_t *pc = pd->pcksum; | |||
| 2510 | u_int8_t proto = pd->proto; | |||
| 2511 | ||||
| 2512 | /* optimise: inline udp fixup code is unused; let compiler scrub it */ | |||
| 2513 | if (proto == IPPROTO_UDP) | |||
| 2514 | panic("%s: udp", __func__); | |||
| 2515 | ||||
| 2516 | /* optimise: skip *f != v guard; true for all use-cases */ | |||
| 2517 | pf_cksum_fixup(pc, *f / (1 << 16), v / (1 << 16), proto); | |||
| 2518 | pf_cksum_fixup(pc, *f % (1 << 16), v % (1 << 16), proto); | |||
| 2519 | ||||
| 2520 | *f = v; | |||
| 2521 | rewrite = 1; | |||
| 2522 | ||||
| 2523 | return (rewrite); | |||
| 2524 | } | |||
| 2525 | ||||
| 2526 | int | |||
| 2527 | pf_patch_32_unaligned(struct pf_pdesc *pd, void *f, u_int32_t v, bool hi) | |||
| 2528 | { | |||
| 2529 | int rewrite = 0; | |||
| 2530 | u_int8_t *fb = (u_int8_t*)f; | |||
| 2531 | u_int8_t *vb = (u_int8_t*)&v; | |||
| 2532 | ||||
| 2533 | if (hi && ALIGNED_POINTER(f, u_int32_t)) { | |||
| 2534 | return (pf_patch_32(pd, f, v)); /* optimise */ | |||
| 2535 | } | |||
| 2536 | ||||
| 2537 | rewrite += pf_patch_8(pd, fb++, *vb++, hi); | |||
| 2538 | rewrite += pf_patch_8(pd, fb++, *vb++,!hi); | |||
| 2539 | rewrite += pf_patch_8(pd, fb++, *vb++, hi); | |||
| 2540 | rewrite += pf_patch_8(pd, fb++, *vb++,!hi); | |||
| 2541 | ||||
| 2542 | return (rewrite); | |||
| 2543 | } | |||
| 2544 | ||||
| 2545 | int | |||
| 2546 | pf_icmp_mapping(struct pf_pdesc *pd, u_int8_t type, int *icmp_dir, | |||
| 2547 | u_int16_t *virtual_id, u_int16_t *virtual_type) | |||
| 2548 | { | |||
| 2549 | /* | |||
| 2550 | * ICMP types marked with PF_OUT are typically responses to | |||
| 2551 | * PF_IN, and will match states in the opposite direction. | |||
| 2552 | * PF_IN ICMP types need to match a state with that type. | |||
| 2553 | */ | |||
| 2554 | *icmp_dir = PF_OUT; | |||
| 2555 | ||||
| 2556 | /* Queries (and responses) */ | |||
| 2557 | switch (pd->af) { | |||
| 2558 | case AF_INET: | |||
| 2559 | switch (type) { | |||
| 2560 | case ICMP_ECHO: | |||
| 2561 | *icmp_dir = PF_IN; | |||
| 2562 | /* FALLTHROUGH */ | |||
| 2563 | case ICMP_ECHOREPLY: | |||
| 2564 | *virtual_type = ICMP_ECHO; | |||
| 2565 | *virtual_id = pd->hdr.icmp.icmp_id; | |||
| 2566 | break; | |||
| 2567 | ||||
| 2568 | case ICMP_TSTAMP: | |||
| 2569 | *icmp_dir = PF_IN; | |||
| 2570 | /* FALLTHROUGH */ | |||
| 2571 | case ICMP_TSTAMPREPLY: | |||
| 2572 | *virtual_type = ICMP_TSTAMP; | |||
| 2573 | *virtual_id = pd->hdr.icmp.icmp_id; | |||
| 2574 | break; | |||
| 2575 | ||||
| 2576 | case ICMP_IREQ: | |||
| 2577 | *icmp_dir = PF_IN; | |||
| 2578 | /* FALLTHROUGH */ | |||
| 2579 | case ICMP_IREQREPLY: | |||
| 2580 | *virtual_type = ICMP_IREQ; | |||
| 2581 | *virtual_id = pd->hdr.icmp.icmp_id; | |||
| 2582 | break; | |||
| 2583 | ||||
| 2584 | case ICMP_MASKREQ: | |||
| 2585 | *icmp_dir = PF_IN; | |||
| 2586 | /* FALLTHROUGH */ | |||
| 2587 | case ICMP_MASKREPLY: | |||
| 2588 | *virtual_type = ICMP_MASKREQ; | |||
| 2589 | *virtual_id = pd->hdr.icmp.icmp_id; | |||
| 2590 | break; | |||
| 2591 | ||||
| 2592 | case ICMP_IPV6_WHEREAREYOU: | |||
| 2593 | *icmp_dir = PF_IN; | |||
| 2594 | /* FALLTHROUGH */ | |||
| 2595 | case ICMP_IPV6_IAMHERE: | |||
| 2596 | *virtual_type = ICMP_IPV6_WHEREAREYOU; | |||
| 2597 | *virtual_id = 0; /* Nothing sane to match on! */ | |||
| 2598 | break; | |||
| 2599 | ||||
| 2600 | case ICMP_MOBILE_REGREQUEST: | |||
| 2601 | *icmp_dir = PF_IN; | |||
| 2602 | /* FALLTHROUGH */ | |||
| 2603 | case ICMP_MOBILE_REGREPLY: | |||
| 2604 | *virtual_type = ICMP_MOBILE_REGREQUEST; | |||
| 2605 | *virtual_id = 0; /* Nothing sane to match on! */ | |||
| 2606 | break; | |||
| 2607 | ||||
| 2608 | case ICMP_ROUTERSOLICIT: | |||
| 2609 | *icmp_dir = PF_IN; | |||
| 2610 | /* FALLTHROUGH */ | |||
| 2611 | case ICMP_ROUTERADVERT: | |||
| 2612 | *virtual_type = ICMP_ROUTERSOLICIT; | |||
| 2613 | *virtual_id = 0; /* Nothing sane to match on! */ | |||
| 2614 | break; | |||
| 2615 | ||||
| 2616 | /* These ICMP types map to other connections */ | |||
| 2617 | case ICMP_UNREACH: | |||
| 2618 | case ICMP_SOURCEQUENCH: | |||
| 2619 | case ICMP_REDIRECT: | |||
| 2620 | case ICMP_TIMXCEED: | |||
| 2621 | case ICMP_PARAMPROB: | |||
| 2622 | /* These will not be used, but set them anyway */ | |||
| 2623 | *icmp_dir = PF_IN; | |||
| 2624 | *virtual_type = htons(type); | |||
| 2625 | *virtual_id = 0; | |||
| 2626 | return (1); /* These types match to another state */ | |||
| 2627 | ||||
| 2628 | /* | |||
| 2629 | * All remaining ICMP types get their own states, | |||
| 2630 | * and will only match in one direction. | |||
| 2631 | */ | |||
| 2632 | default: | |||
| 2633 | *icmp_dir = PF_IN; | |||
| 2634 | *virtual_type = type; | |||
| 2635 | *virtual_id = 0; | |||
| 2636 | break; | |||
| 2637 | } | |||
| 2638 | break; | |||
| 2639 | #ifdef INET6 | |||
| 2640 | case AF_INET6: | |||
| 2641 | switch (type) { | |||
| 2642 | case ICMP6_ECHO_REQUEST: | |||
| 2643 | *icmp_dir = PF_IN; | |||
| 2644 | /* FALLTHROUGH */ | |||
| 2645 | case ICMP6_ECHO_REPLY: | |||
| 2646 | *virtual_type = ICMP6_ECHO_REQUEST; | |||
| 2647 | *virtual_id = pd->hdr.icmp6.icmp6_id; | |||
| 2648 | break; | |||
| 2649 | ||||
| 2650 | case MLD_LISTENER_QUERY: | |||
| 2651 | case MLD_LISTENER_REPORT: { | |||
| 2652 | struct mld_hdr *mld = &pd->hdr.mld; | |||
| 2653 | u_int32_t h; | |||
| 2654 | ||||
| 2655 | /* | |||
| 2656 | * Listener Report can be sent by clients | |||
| 2657 | * without an associated Listener Query. | |||
| 2658 | * In addition to that, when Report is sent as a | |||
| 2659 | * reply to a Query its source and destination | |||
| 2660 | * address are different. | |||
| 2661 | */ | |||
| 2662 | *icmp_dir = PF_IN; | |||
| 2663 | *virtual_type = MLD_LISTENER_QUERY; | |||
| 2664 | /* generate fake id for these messages */ | |||
| 2665 | h = mld->mld_addr.s6_addr32[0] ^ | |||
| 2666 | mld->mld_addr.s6_addr32[1] ^ | |||
| 2667 | mld->mld_addr.s6_addr32[2] ^ | |||
| 2668 | mld->mld_addr.s6_addr32[3]; | |||
| 2669 | *virtual_id = (h >> 16) ^ (h & 0xffff); | |||
| 2670 | break; | |||
| 2671 | } | |||
| 2672 | ||||
| 2673 | /* | |||
| 2674 | * ICMP6_FQDN and ICMP6_NI query/reply are the same type as | |||
| 2675 | * ICMP6_WRU | |||
| 2676 | */ | |||
| 2677 | case ICMP6_WRUREQUEST: | |||
| 2678 | *icmp_dir = PF_IN; | |||
| 2679 | /* FALLTHROUGH */ | |||
| 2680 | case ICMP6_WRUREPLY: | |||
| 2681 | *virtual_type = ICMP6_WRUREQUEST; | |||
| 2682 | *virtual_id = 0; /* Nothing sane to match on! */ | |||
| 2683 | break; | |||
| 2684 | ||||
| 2685 | case MLD_MTRACE: | |||
| 2686 | *icmp_dir = PF_IN; | |||
| 2687 | /* FALLTHROUGH */ | |||
| 2688 | case MLD_MTRACE_RESP: | |||
| 2689 | *virtual_type = MLD_MTRACE; | |||
| 2690 | *virtual_id = 0; /* Nothing sane to match on! */ | |||
| 2691 | break; | |||
| 2692 | ||||
| 2693 | case ND_NEIGHBOR_SOLICIT: | |||
| 2694 | *icmp_dir = PF_IN; | |||
| 2695 | /* FALLTHROUGH */ | |||
| 2696 | case ND_NEIGHBOR_ADVERT: { | |||
| 2697 | struct nd_neighbor_solicit *nd = &pd->hdr.nd_ns; | |||
| 2698 | u_int32_t h; | |||
| 2699 | ||||
| 2700 | *virtual_type = ND_NEIGHBOR_SOLICIT; | |||
| 2701 | /* generate fake id for these messages */ | |||
| 2702 | h = nd->nd_ns_target.s6_addr32[0] ^ | |||
| 2703 | nd->nd_ns_target.s6_addr32[1] ^ | |||
| 2704 | nd->nd_ns_target.s6_addr32[2] ^ | |||
| 2705 | nd->nd_ns_target.s6_addr32[3]; | |||
| 2706 | *virtual_id = (h >> 16) ^ (h & 0xffff); | |||
| 2707 | /* | |||
| 2708 | * the extra work here deals with 'keep state' option | |||
| 2709 | * at pass rule for unsolicited advertisement. By | |||
| 2710 | * returning 1 (state_icmp = 1) we override 'keep | |||
| 2711 | * state' to 'no state' so we don't create state for | |||
| 2712 | * unsolicited advertisements. No one expects answer to | |||
| 2713 | * unsolicited advertisements so we should be good. | |||
| 2714 | */ | |||
| 2715 | if (type == ND_NEIGHBOR_ADVERT) { | |||
| 2716 | *virtual_type = htons(*virtual_type); | |||
| 2717 | return (1); | |||
| 2718 | } | |||
| 2719 | break; | |||
| 2720 | } | |||
| 2721 | ||||
| 2722 | /* | |||
| 2723 | * These ICMP types map to other connections. | |||
| 2724 | * ND_REDIRECT can't be in this list because the triggering | |||
| 2725 | * packet header is optional. | |||
| 2726 | */ | |||
| 2727 | case ICMP6_DST_UNREACH: | |||
| 2728 | case ICMP6_PACKET_TOO_BIG: | |||
| 2729 | case ICMP6_TIME_EXCEEDED: | |||
| 2730 | case ICMP6_PARAM_PROB: | |||
| 2731 | /* These will not be used, but set them anyway */ | |||
| 2732 | *icmp_dir = PF_IN; | |||
| 2733 | *virtual_type = htons(type); | |||
| 2734 | *virtual_id = 0; | |||
| 2735 | return (1); /* These types match to another state */ | |||
| 2736 | /* | |||
| 2737 | * All remaining ICMP6 types get their own states, | |||
| 2738 | * and will only match in one direction. | |||
| 2739 | */ | |||
| 2740 | default: | |||
| 2741 | *icmp_dir = PF_IN; | |||
| 2742 | *virtual_type = type; | |||
| 2743 | *virtual_id = 0; | |||
| 2744 | break; | |||
| 2745 | } | |||
| 2746 | break; | |||
| 2747 | #endif /* INET6 */ | |||
| 2748 | } | |||
| 2749 | *virtual_type = htons(*virtual_type); | |||
| 2750 | return (0); /* These types match to their own state */ | |||
| 2751 | } | |||
| 2752 | ||||
| 2753 | void | |||
| 2754 | pf_translate_icmp(struct pf_pdesc *pd, struct pf_addr *qa, u_int16_t *qp, | |||
| 2755 | struct pf_addr *oa, struct pf_addr *na, u_int16_t np) | |||
| 2756 | { | |||
| 2757 | /* note: doesn't trouble to fixup quoted checksums, if any */ | |||
| 2758 | ||||
| 2759 | /* change quoted protocol port */ | |||
| 2760 | if (qp != NULL) | |||
| 2761 | pf_patch_16(pd, qp, np); | |||
| 2762 | ||||
| 2763 | /* change quoted ip address */ | |||
| 2764 | pf_cksum_fixup_a(pd->pcksum, qa, na, pd->af, pd->proto); | |||
| 2765 | pf_addrcpy(qa, na, pd->af); | |||
| 2766 | ||||
| 2767 | /* change network-header's ip address */ | |||
| 2768 | if (oa) | |||
| 2769 | pf_translate_a(pd, oa, na); | |||
| 2770 | } | |||
| 2771 | ||||
| 2772 | /* pre: *a is 16-bit aligned within its packet */ | |||
| 2773 | /* *a is a network header src/dst address */ | |||
| 2774 | int | |||
| 2775 | pf_translate_a(struct pf_pdesc *pd, struct pf_addr *a, struct pf_addr *an) | |||
| 2776 | { | |||
| 2777 | int rewrite = 0; | |||
| 2778 | ||||
| 2779 | /* warning: !PF_ANEQ != PF_AEQ */ | |||
| 2780 | if (!PF_ANEQ(a, an, pd->af)) | |||
| 2781 | return (0); | |||
| 2782 | ||||
| 2783 | /* fixup transport pseudo-header, if any */ | |||
| 2784 | switch (pd->proto) { | |||
| 2785 | case IPPROTO_TCP: /* FALLTHROUGH */ | |||
| 2786 | case IPPROTO_UDP: /* FALLTHROUGH */ | |||
| 2787 | case IPPROTO_ICMPV6: | |||
| 2788 | pf_cksum_fixup_a(pd->pcksum, a, an, pd->af, pd->proto); | |||
| 2789 | break; | |||
| 2790 | default: | |||
| 2791 | break; /* assume no pseudo-header */ | |||
| 2792 | } | |||
| 2793 | ||||
| 2794 | pf_addrcpy(a, an, pd->af); | |||
| 2795 | rewrite = 1; | |||
| 2796 | ||||
| 2797 | return (rewrite); | |||
| 2798 | } | |||
| 2799 | ||||
| 2800 | #ifdef INET6 | |||
| 2801 | /* pf_translate_af() may change pd->m, adjust local copies after calling */ | |||
| 2802 | int | |||
| 2803 | pf_translate_af(struct pf_pdesc *pd) | |||
| 2804 | { | |||
| 2805 | static const struct pf_addr zero; | |||
| 2806 | struct ip *ip4; | |||
| 2807 | struct ip6_hdr *ip6; | |||
| 2808 | int copyback = 0; | |||
| 2809 | u_int hlen, ohlen, dlen; | |||
| 2810 | u_int16_t *pc; | |||
| 2811 | u_int8_t af_proto, naf_proto; | |||
| 2812 | ||||
| 2813 | hlen = (pd->naf == AF_INET) ? sizeof(*ip4) : sizeof(*ip6); | |||
| 2814 | ohlen = pd->off; | |||
| 2815 | dlen = pd->tot_len - pd->off; | |||
| 2816 | pc = pd->pcksum; | |||
| 2817 | ||||
| 2818 | af_proto = naf_proto = pd->proto; | |||
| 2819 | if (naf_proto == IPPROTO_ICMP) | |||
| 2820 | af_proto = IPPROTO_ICMPV6; | |||
| 2821 | if (naf_proto == IPPROTO_ICMPV6) | |||
| 2822 | af_proto = IPPROTO_ICMP; | |||
| 2823 | ||||
| 2824 | /* uncover stale pseudo-header */ | |||
| 2825 | switch (af_proto) { | |||
| 2826 | case IPPROTO_ICMPV6: | |||
| 2827 | /* optimise: unchanged for TCP/UDP */ | |||
| 2828 | pf_cksum_fixup(pc, htons(af_proto), 0x0, af_proto); | |||
| 2829 | pf_cksum_fixup(pc, htons(dlen), 0x0, af_proto); | |||
| 2830 | /* FALLTHROUGH */ | |||
| 2831 | case IPPROTO_UDP: /* FALLTHROUGH */ | |||
| 2832 | case IPPROTO_TCP: | |||
| 2833 | pf_cksum_fixup_a(pc, pd->src, &zero, pd->af, af_proto); | |||
| 2834 | pf_cksum_fixup_a(pc, pd->dst, &zero, pd->af, af_proto); | |||
| 2835 | copyback = 1; | |||
| 2836 | break; | |||
| 2837 | default: | |||
| 2838 | break; /* assume no pseudo-header */ | |||
| 2839 | } | |||
| 2840 | ||||
| 2841 | /* replace the network header */ | |||
| 2842 | m_adj(pd->m, pd->off); | |||
| 2843 | pd->src = NULL; | |||
| 2844 | pd->dst = NULL; | |||
| 2845 | ||||
| 2846 | if ((M_PREPEND(pd->m, hlen, M_DONTWAIT)) == NULL) { | |||
| 2847 | pd->m = NULL; | |||
| 2848 | return (-1); | |||
| 2849 | } | |||
| 2850 | ||||
| 2851 | pd->off = hlen; | |||
| 2852 | pd->tot_len += hlen - ohlen; | |||
| 2853 | ||||
| 2854 | switch (pd->naf) { | |||
| 2855 | case AF_INET: | |||
| 2856 | ip4 = mtod(pd->m, struct ip *); | |||
| 2857 | memset(ip4, 0, hlen); | |||
| 2858 | ip4->ip_v = IPVERSION; | |||
| 2859 | ip4->ip_hl = hlen >> 2; | |||
| 2860 | ip4->ip_tos = pd->tos; | |||
| 2861 | ip4->ip_len = htons(hlen + dlen); | |||
| 2862 | ip4->ip_id = htons(ip_randomid()); | |||
| 2863 | ip4->ip_off = htons(IP_DF); | |||
| 2864 | ip4->ip_ttl = pd->ttl; | |||
| 2865 | ip4->ip_p = pd->proto; | |||
| 2866 | ip4->ip_src = pd->nsaddr.v4; | |||
| 2867 | ip4->ip_dst = pd->ndaddr.v4; | |||
| 2868 | break; | |||
| 2869 | case AF_INET6: | |||
| 2870 | ip6 = mtod(pd->m, struct ip6_hdr *); | |||
| 2871 | memset(ip6, 0, hlen); | |||
| 2872 | ip6->ip6_vfc = IPV6_VERSION; | |||
| 2873 | ip6->ip6_flow |= htonl((u_int32_t)pd->tos << 20); | |||
| 2874 | ip6->ip6_plen = htons(dlen); | |||
| 2875 | ip6->ip6_nxt = pd->proto; | |||
| 2876 | if (!pd->ttl || pd->ttl > IPV6_DEFHLIM) | |||
| 2877 | ip6->ip6_hlim = IPV6_DEFHLIM; | |||
| 2878 | else | |||
| 2879 | ip6->ip6_hlim = pd->ttl; | |||
| 2880 | ip6->ip6_src = pd->nsaddr.v6; | |||
| 2881 | ip6->ip6_dst = pd->ndaddr.v6; | |||
| 2882 | break; | |||
| 2883 | default: | |||
| 2884 | unhandled_af(pd->naf); | |||
| 2885 | } | |||
| 2886 | ||||
| 2887 | /* UDP over IPv6 must be checksummed per rfc2460 p27 */ | |||
| 2888 | if (naf_proto == IPPROTO_UDP && *pc == 0x0000 && | |||
| 2889 | pd->naf == AF_INET6) { | |||
| 2890 | pd->m->m_pkthdr.csum_flags |= M_UDP_CSUM_OUT; | |||
| 2891 | } | |||
| 2892 | ||||
| 2893 | /* cover fresh pseudo-header */ | |||
| 2894 | switch (naf_proto) { | |||
| 2895 | case IPPROTO_ICMPV6: | |||
| 2896 | /* optimise: unchanged for TCP/UDP */ | |||
| 2897 | pf_cksum_fixup(pc, 0x0, htons(naf_proto), naf_proto); | |||
| 2898 | pf_cksum_fixup(pc, 0x0, htons(dlen), naf_proto); | |||
| 2899 | /* FALLTHROUGH */ | |||
| 2900 | case IPPROTO_UDP: /* FALLTHROUGH */ | |||
| 2901 | case IPPROTO_TCP: | |||
| 2902 | pf_cksum_fixup_a(pc, &zero, &pd->nsaddr, pd->naf, naf_proto); | |||
| 2903 | pf_cksum_fixup_a(pc, &zero, &pd->ndaddr, pd->naf, naf_proto); | |||
| 2904 | copyback = 1; | |||
| 2905 | break; | |||
| 2906 | default: | |||
| 2907 | break; /* assume no pseudo-header */ | |||
| 2908 | } | |||
| 2909 | ||||
| 2910 | /* flush pd->pcksum */ | |||
| 2911 | if (copyback) | |||
| 2912 | m_copyback(pd->m, pd->off, pd->hdrlen, &pd->hdr, M_NOWAIT); | |||
| 2913 | ||||
| 2914 | return (0); | |||
| 2915 | } | |||
| 2916 | ||||
| 2917 | int | |||
| 2918 | pf_change_icmp_af(struct mbuf *m, int ipoff2, struct pf_pdesc *pd, | |||
| 2919 | struct pf_pdesc *pd2, struct pf_addr *src, struct pf_addr *dst, | |||
| 2920 | sa_family_t af, sa_family_t naf) | |||
| 2921 | { | |||
| 2922 | struct mbuf *n = NULL; | |||
| 2923 | struct ip *ip4; | |||
| 2924 | struct ip6_hdr *ip6; | |||
| 2925 | u_int hlen, ohlen, dlen; | |||
| 2926 | int d; | |||
| 2927 | ||||
| 2928 | if (af == naf || (af != AF_INET && af != AF_INET6) || | |||
| 2929 | (naf != AF_INET && naf != AF_INET6)) | |||
| 2930 | return (-1); | |||
| 2931 | ||||
| 2932 | /* split the mbuf chain on the quoted ip/ip6 header boundary */ | |||
| 2933 | if ((n = m_split(m, ipoff2, M_DONTWAIT)) == NULL) | |||
| 2934 | return (-1); | |||
| 2935 | ||||
| 2936 | /* new quoted header */ | |||
| 2937 | hlen = naf == AF_INET ? sizeof(*ip4) : sizeof(*ip6); | |||
| 2938 | /* old quoted header */ | |||
| 2939 | ohlen = pd2->off - ipoff2; | |||
| 2940 | ||||
| 2941 | /* trim old quoted header */ | |||
| 2942 | pf_cksum_uncover(pd->pcksum, in_cksum(n, ohlen), pd->proto); | |||
| 2943 | m_adj(n, ohlen); | |||
| 2944 | ||||
| 2945 | /* prepend a new, translated, quoted header */ | |||
| 2946 | if ((M_PREPEND(n, hlen, M_DONTWAIT)) == NULL) | |||
| 2947 | return (-1); | |||
| 2948 | ||||
| 2949 | switch (naf) { | |||
| 2950 | case AF_INET: | |||
| 2951 | ip4 = mtod(n, struct ip *); | |||
| 2952 | memset(ip4, 0, sizeof(*ip4)); | |||
| 2953 | ip4->ip_v = IPVERSION; | |||
| 2954 | ip4->ip_hl = sizeof(*ip4) >> 2; | |||
| 2955 | ip4->ip_len = htons(sizeof(*ip4) + pd2->tot_len - ohlen); | |||
| 2956 | ip4->ip_id = htons(ip_randomid()); | |||
| 2957 | ip4->ip_off = htons(IP_DF); | |||
| 2958 | ip4->ip_ttl = pd2->ttl; | |||
| 2959 | if (pd2->proto == IPPROTO_ICMPV6) | |||
| 2960 | ip4->ip_p = IPPROTO_ICMP; | |||
| 2961 | else | |||
| 2962 | ip4->ip_p = pd2->proto; | |||
| 2963 | ip4->ip_src = src->v4; | |||
| 2964 | ip4->ip_dst = dst->v4; | |||
| 2965 | in_hdr_cksum_out(n, NULL); | |||
| 2966 | break; | |||
| 2967 | case AF_INET6: | |||
| 2968 | ip6 = mtod(n, struct ip6_hdr *); | |||
| 2969 | memset(ip6, 0, sizeof(*ip6)); | |||
| 2970 | ip6->ip6_vfc = IPV6_VERSION; | |||
| 2971 | ip6->ip6_plen = htons(pd2->tot_len - ohlen); | |||
| 2972 | if (pd2->proto == IPPROTO_ICMP) | |||
| 2973 | ip6->ip6_nxt = IPPROTO_ICMPV6; | |||
| 2974 | else | |||
| 2975 | ip6->ip6_nxt = pd2->proto; | |||
| 2976 | if (!pd2->ttl || pd2->ttl > IPV6_DEFHLIM) | |||
| 2977 | ip6->ip6_hlim = IPV6_DEFHLIM; | |||
| 2978 | else | |||
| 2979 | ip6->ip6_hlim = pd2->ttl; | |||
| 2980 | ip6->ip6_src = src->v6; | |||
| 2981 | ip6->ip6_dst = dst->v6; | |||
| 2982 | break; | |||
| 2983 | } | |||
| 2984 | ||||
| 2985 | /* cover new quoted header */ | |||
| 2986 | /* optimise: any new AF_INET header of ours sums to zero */ | |||
| 2987 | if (naf != AF_INET) { | |||
| 2988 | pf_cksum_cover(pd->pcksum, in_cksum(n, hlen), pd->proto); | |||
| 2989 | } | |||
| 2990 | ||||
| 2991 | /* reattach modified quoted packet to outer header */ | |||
| 2992 | { | |||
| 2993 | int nlen = n->m_pkthdr.len; | |||
| 2994 | m_cat(m, n); | |||
| 2995 | m->m_pkthdr.len += nlen; | |||
| 2996 | } | |||
| 2997 | ||||
| 2998 | /* account for altered length */ | |||
| 2999 | d = hlen - ohlen; | |||
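| | /* e.g. a bare quoted IPv6 header (40 bytes) replaced by an IPv4 header (20 bytes) gives d = -20 */ | |||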
| 3000 | ||||
| 3001 | if (pd->proto == IPPROTO_ICMPV6) { | |||
| 3002 | /* fixup pseudo-header */ | |||
| 3003 | dlen = pd->tot_len - pd->off; | |||
| 3004 | pf_cksum_fixup(pd->pcksum, | |||
| 3005 | htons(dlen), htons(dlen + d), pd->proto); | |||
| 3006 | } | |||
| 3007 | ||||
| 3008 | pd->tot_len += d; | |||
| 3009 | pd2->tot_len += d; | |||
| 3010 | pd2->off += d; | |||
| 3011 | ||||
| 3012 | /* note: not bothering to update network headers as | |||
| 3013 | these are due for rewrite by pf_translate_af() */ | |||
| 3014 | ||||
| 3015 | return (0); | |||
| 3016 | } | |||
| 3017 | ||||
| 3018 | ||||
| 3019 | #define PTR_IP(field) (offsetof(struct ip, field)) | |||
| 3020 | #define PTR_IP6(field) (offsetof(struct ip6_hdr, field)) | |||
| 3021 | ||||
| 3022 | int | |||
| 3023 | pf_translate_icmp_af(struct pf_pdesc *pd, int af, void *arg) | |||
| 3024 | { | |||
| 3025 | struct icmp *icmp4; | |||
| 3026 | struct icmp6_hdr *icmp6; | |||
| 3027 | u_int32_t mtu; | |||
| 3028 | int32_t ptr = -1; | |||
| 3029 | u_int8_t type; | |||
| 3030 | u_int8_t code; | |||
| 3031 | ||||
| 3032 | switch (af) { | |||
| 3033 | case AF_INET: | |||
| 3034 | icmp6 = arg; | |||
| 3035 | type = icmp6->icmp6_type; | |||
| 3036 | code = icmp6->icmp6_code; | |||
| 3037 | mtu = ntohl(icmp6->icmp6_mtu); | |||
| 3038 | ||||
| 3039 | switch (type) { | |||
| 3040 | case ICMP6_ECHO_REQUEST: | |||
| 3041 | type = ICMP_ECHO; | |||
| 3042 | break; | |||
| 3043 | case ICMP6_ECHO_REPLY: | |||
| 3044 | type = ICMP_ECHOREPLY; | |||
| 3045 | break; | |||
| 3046 | case ICMP6_DST_UNREACH: | |||
| 3047 | type = ICMP_UNREACH; | |||
| 3048 | switch (code) { | |||
| 3049 | case ICMP6_DST_UNREACH_NOROUTE: | |||
| 3050 | case ICMP6_DST_UNREACH_BEYONDSCOPE: | |||
| 3051 | case ICMP6_DST_UNREACH_ADDR: | |||
| 3052 | code = ICMP_UNREACH_HOST; | |||
| 3053 | break; | |||
| 3054 | case ICMP6_DST_UNREACH_ADMIN: | |||
| 3055 | code = ICMP_UNREACH_HOST_PROHIB; | |||
| 3056 | break; | |||
| 3057 | case ICMP6_DST_UNREACH_NOPORT: | |||
| 3058 | code = ICMP_UNREACH_PORT; | |||
| 3059 | break; | |||
| 3060 | default: | |||
| 3061 | return (-1); | |||
| 3062 | } | |||
| 3063 | break; | |||
| 3064 | case ICMP6_PACKET_TOO_BIG: | |||
| 3065 | type = ICMP_UNREACH; | |||
| 3066 | code = ICMP_UNREACH_NEEDFRAG; | |||
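| | /* note: the advertised MTU shrinks by 20, the size difference between the IPv6 (40-byte) and IPv4 (20-byte) headers */ | |||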
| 3067 | mtu -= 20; | |||
| 3068 | break; | |||
| 3069 | case ICMP6_TIME_EXCEEDED: | |||
| 3070 | type = ICMP_TIMXCEED; | |||
| 3071 | break; | |||
| 3072 | case ICMP6_PARAM_PROB: | |||
| 3073 | switch (code) { | |||
| 3074 | case ICMP6_PARAMPROB_HEADER: | |||
| 3075 | type = ICMP_PARAMPROB; | |||
| 3076 | code = ICMP_PARAMPROB_ERRATPTR; | |||
| 3077 | ptr = ntohl(icmp6->icmp6_pptr); | |||
| 3078 | ||||
| 3079 | if (ptr == PTR_IP6(ip6_vfc)) | |||
| 3080 | ; /* preserve */ | |||
| 3081 | else if (ptr == PTR_IP6(ip6_vfc) + 1) | |||
| 3082 | ptr = PTR_IP(ip_tos); | |||
| 3083 | else if (ptr == PTR_IP6(ip6_plen) || | |||
| 3084 | ptr == PTR_IP6(ip6_plen) + 1) | |||
| 3085 | ptr = PTR_IP(ip_len); | |||
| 3086 | else if (ptr == PTR_IP6(ip6_nxt)) | |||
| 3087 | ptr = PTR_IP(ip_p); | |||
| 3088 | else if (ptr == PTR_IP6(ip6_hlim)) | |||
| 3089 | ptr = PTR_IP(ip_ttl); | |||
| 3090 | else if (ptr >= PTR_IP6(ip6_src) && | |||
| 3091 | ptr < PTR_IP6(ip6_dst)) | |||
| 3092 | ptr = PTR_IP(ip_src); | |||
| 3093 | else if (ptr >= PTR_IP6(ip6_dst) && | |||
| 3094 | ptr < sizeof(struct ip6_hdr)) | |||
| 3095 | ptr = PTR_IP(ip_dst); | |||
| 3096 | else { | |||
| 3097 | return (-1); | |||
| 3098 | } | |||
| 3099 | break; | |||
| 3100 | case ICMP6_PARAMPROB_NEXTHEADER: | |||
| 3101 | type = ICMP_UNREACH; | |||
| 3102 | code = ICMP_UNREACH_PROTOCOL; | |||
| 3103 | break; | |||
| 3104 | default: | |||
| 3105 | return (-1); | |||
| 3106 | } | |||
| 3107 | break; | |||
| 3108 | default: | |||
| 3109 | return (-1); | |||
| 3110 | } | |||
| 3111 | ||||
| 3112 | pf_patch_8(pd, &icmp6->icmp6_type, type, PF_HI); | |||
| 3113 | pf_patch_8(pd, &icmp6->icmp6_code, code, PF_LO); | |||
| 3114 | ||||
| 3115 | /* aligns well with an icmpv4 nextmtu */ | |||
| 3116 | pf_patch_32(pd, &icmp6->icmp6_mtu, htonl(mtu)); | |||
| 3117 | ||||
| 3118 | /* icmpv4 pptr is a single byte, the most significant one */ | |||
| 3119 | if (ptr >= 0) | |||
| 3120 | pf_patch_32(pd, &icmp6->icmp6_pptr, htonl(ptr << 24)); | |||
| 3121 | break; | |||
| 3122 | case AF_INET6: | |||
| 3123 | icmp4 = arg; | |||
| 3124 | type = icmp4->icmp_type; | |||
| 3125 | code = icmp4->icmp_code; | |||
| 3126 | mtu = ntohs(icmp4->icmp_nextmtu); | |||
| 3127 | ||||
| 3128 | switch (type) { | |||
| 3129 | case ICMP_ECHO: | |||
| 3130 | type = ICMP6_ECHO_REQUEST; | |||
| 3131 | break; | |||
| 3132 | case ICMP_ECHOREPLY: | |||
| 3133 | type = ICMP6_ECHO_REPLY; | |||
| 3134 | break; | |||
| 3135 | case ICMP_UNREACH: | |||
| 3136 | type = ICMP6_DST_UNREACH; | |||
| 3137 | switch (code) { | |||
| 3138 | case ICMP_UNREACH_NET: | |||
| 3139 | case ICMP_UNREACH_HOST: | |||
| 3140 | case ICMP_UNREACH_NET_UNKNOWN: | |||
| 3141 | case ICMP_UNREACH_HOST_UNKNOWN: | |||
| 3142 | case ICMP_UNREACH_ISOLATED: | |||
| 3143 | case ICMP_UNREACH_TOSNET: | |||
| 3144 | case ICMP_UNREACH_TOSHOST: | |||
| 3145 | code = ICMP6_DST_UNREACH_NOROUTE; | |||
| 3146 | break; | |||
| 3147 | case ICMP_UNREACH_PORT: | |||
| 3148 | code = ICMP6_DST_UNREACH_NOPORT; | |||
| 3149 | break; | |||
| 3150 | case ICMP_UNREACH_NET_PROHIB: | |||
| 3151 | case ICMP_UNREACH_HOST_PROHIB: | |||
| 3152 | case ICMP_UNREACH_FILTER_PROHIB: | |||
| 3153 | case ICMP_UNREACH_PRECEDENCE_CUTOFF: | |||
| 3154 | code = ICMP6_DST_UNREACH_ADMIN; | |||
| 3155 | break; | |||
| 3156 | case ICMP_UNREACH_PROTOCOL: | |||
| 3157 | type = ICMP6_PARAM_PROB; | |||
| 3158 | code = ICMP6_PARAMPROB_NEXTHEADER; | |||
| 3159 | ptr = offsetof(struct ip6_hdr, ip6_nxt); | |||
| 3160 | break; | |||
| 3161 | case ICMP_UNREACH_NEEDFRAG: | |||
| 3162 | type = ICMP6_PACKET_TOO_BIG; | |||
| 3163 | code = 0; | |||
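| | /* note: the reverse of the v6->v4 case above, adding back the 20-byte header size difference */ | |||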
| 3164 | mtu += 20; | |||
| 3165 | break; | |||
| 3166 | default: | |||
| 3167 | return (-1); | |||
| 3168 | } | |||
| 3169 | break; | |||
| 3170 | case ICMP_TIMXCEED: | |||
| 3171 | type = ICMP6_TIME_EXCEEDED; | |||
| 3172 | break; | |||
| 3173 | case ICMP_PARAMPROB: | |||
| 3174 | type = ICMP6_PARAM_PROB; | |||
| 3175 | switch (code) { | |||
| 3176 | case ICMP_PARAMPROB_ERRATPTR: | |||
| 3177 | code = ICMP6_PARAMPROB_HEADER; | |||
| 3178 | break; | |||
| 3179 | case ICMP_PARAMPROB_LENGTH: | |||
| 3180 | code = ICMP6_PARAMPROB_HEADER; | |||
| 3181 | break; | |||
| 3182 | default: | |||
| 3183 | return (-1); | |||
| 3184 | } | |||
| 3185 | ||||
| 3186 | ptr = icmp4->icmp_pptr; | |||
| 3187 | if (ptr == 0 || ptr == PTR_IP(ip_tos)) | |||
| 3188 | ; /* preserve */ | |||
| 3189 | else if (ptr == PTR_IP(ip_len) || | |||
| 3190 | ptr == PTR_IP(ip_len) + 1) | |||
| 3191 | ptr = PTR_IP6(ip6_plen); | |||
| 3192 | else if (ptr == PTR_IP(ip_ttl)) | |||
| 3193 | ptr = PTR_IP6(ip6_hlim); | |||
| 3194 | else if (ptr == PTR_IP(ip_p)) | |||
| 3195 | ptr = PTR_IP6(ip6_nxt); | |||
| 3196 | else if (ptr >= PTR_IP(ip_src) && | |||
| 3197 | ptr < PTR_IP(ip_dst)) | |||
| 3198 | ptr = PTR_IP6(ip6_src); | |||
| 3199 | else if (ptr >= PTR_IP(ip_dst) && | |||
| 3200 | ptr < sizeof(struct ip)) | |||
| 3201 | ptr = PTR_IP6(ip6_dst); | |||
| 3202 | else { | |||
| 3203 | return (-1); | |||
| 3204 | } | |||
| 3205 | break; | |||
| 3206 | default: | |||
| 3207 | return (-1); | |||
| 3208 | } | |||
| 3209 | ||||
| 3210 | pf_patch_8(pd, &icmp4->icmp_type, type, PF_HI); | |||
| 3211 | pf_patch_8(pd, &icmp4->icmp_code, code, PF_LO); | |||
| 3212 | pf_patch_16(pd, &icmp4->icmp_nextmtu, htons(mtu)); | |||
| 3213 | if (ptr >= 0) | |||
| 3214 | pf_patch_32(pd, &icmp4->icmp_void, htonl(ptr)); | |||
| 3215 | break; | |||
| 3216 | } | |||
| 3217 | ||||
| 3218 | return (0); | |||
| 3219 | } | |||
| 3220 | #endif /* INET6 */ | |||
| 3221 | ||||
| 3222 | /* | |||
| 3223 | * Need to modulate the sequence numbers in the TCP SACK option | |||
| 3224 | * (credits to Krzysztof Pfaff for report and patch) | |||
| 3225 | */ | |||
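| | /* | |||
| | * Note: a SACK option is kind 5, a length byte, then up to four pairs of | |||
| | * 32-bit left/right sequence edges; each edge is shifted by the same | |||
| | * seqdiff already applied to th_seq/th_ack so the peer's view stays | |||
| | * consistent. | |||
| | */ | |||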
| 3226 | int | |||
| 3227 | pf_modulate_sack(struct pf_pdesc *pd, struct pf_state_peer *dst) | |||
| 3228 | { | |||
| 3229 | struct sackblk sack; | |||
| 3230 | int copyback = 0, i; | |||
| 3231 | int olen, optsoff; | |||
| 3232 | u_int8_t opts[MAX_TCPOPTLEN], *opt, *eoh; | |||
| 3233 | ||||
| 3234 | olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr); | |||
| 3235 | optsoff = pd->off + sizeof(struct tcphdr); | |||
| 3236 | #define TCPOLEN_MINSACK (TCPOLEN_SACK + 2) | |||
| 3237 | if (olen < TCPOLEN_MINSACK || | |||
| 3238 | !pf_pull_hdr(pd->m, optsoff, opts, olen, NULL, pd->af)) | |||
| 3239 | return (0); | |||
| 3240 | ||||
| 3241 | eoh = opts + olen; | |||
| 3242 | opt = opts; | |||
| 3243 | while ((opt = pf_find_tcpopt(opt, opts, olen, | |||
| 3244 | TCPOPT_SACK, TCPOLEN_MINSACK)) != NULL) | |||
| 3245 | { | |||
| 3246 | size_t safelen = MIN(opt[1], (eoh - opt)); | |||
| 3247 | for (i = 2; i + TCPOLEN_SACK <= safelen; i += TCPOLEN_SACK) { | |||
| 3248 | size_t startoff = (opt + i) - opts; | |||
| 3249 | memcpy(&sack, &opt[i], sizeof(sack)); | |||
| 3250 | pf_patch_32_unaligned(pd, &sack.start, | |||
| 3251 | htonl(ntohl(sack.start) - dst->seqdiff), | |||
| 3252 | PF_ALGNMNT(startoff)); | |||
| 3253 | pf_patch_32_unaligned(pd, &sack.end, | |||
| 3254 | htonl(ntohl(sack.end) - dst->seqdiff), | |||
| 3255 | PF_ALGNMNT(startoff + sizeof(sack.start))); | |||
| 3256 | memcpy(&opt[i], &sack, sizeof(sack)); | |||
| 3257 | } | |||
| 3258 | copyback = 1; | |||
| 3259 | opt += opt[1]; | |||
| 3260 | } | |||
| 3261 | ||||
| 3262 | if (copyback) | |||
| 3263 | m_copyback(pd->m, optsoff, olen, opts, M_NOWAIT); | |||
| 3264 | return (copyback); | |||
| 3265 | } | |||
| 3266 | ||||
| 3267 | struct mbuf * | |||
| 3268 | pf_build_tcp(const struct pf_rule *r, sa_family_t af, | |||
| 3269 | const struct pf_addr *saddr, const struct pf_addr *daddr, | |||
| 3270 | u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack, | |||
| 3271 | u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag, | |||
| 3272 | u_int16_t rtag, u_int sack, u_int rdom) | |||
| 3273 | { | |||
| 3274 | struct mbuf *m; | |||
| 3275 | int len, tlen; | |||
| 3276 | struct ip *h; | |||
| 3277 | #ifdef INET6 | |||
| 3278 | struct ip6_hdr *h6; | |||
| 3279 | #endif /* INET6 */ | |||
| 3280 | struct tcphdr *th; | |||
| 3281 | char *opt; | |||
| 3282 | ||||
| 3283 | /* maximum segment size tcp option */ | |||
| 3284 | tlen = sizeof(struct tcphdr); | |||
| 3285 | if (mss) | |||
| 3286 | tlen += 4; | |||
| 3287 | if (sack) | |||
| 3288 | tlen += 2; | |||
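| | /* note: the MSS option is 4 bytes (kind, len, 16-bit value); SACK-permitted is 2 bytes (kind, len) */ | |||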
| 3289 | ||||
| 3290 | switch (af) { | |||
| 3291 | case AF_INET: | |||
| 3292 | len = sizeof(struct ip) + tlen; | |||
| 3293 | break; | |||
| 3294 | #ifdef INET6 | |||
| 3295 | case AF_INET6: | |||
| 3296 | len = sizeof(struct ip6_hdr) + tlen; | |||
| 3297 | break; | |||
| 3298 | #endif /* INET6 */ | |||
| 3299 | default: | |||
| 3300 | unhandled_af(af); | |||
| 3301 | } | |||
| 3302 | ||||
| 3303 | /* create outgoing mbuf */ | |||
| 3304 | m = m_gethdr(M_DONTWAIT, MT_HEADER); | |||
| 3305 | if (m == NULL) | |||
| 3306 | return (NULL); | |||
| 3307 | if (tag) | |||
| 3308 | m->m_pkthdr.pf.flags |= PF_TAG_GENERATED; | |||
| 3309 | m->m_pkthdr.pf.tag = rtag; | |||
| 3310 | m->m_pkthdr.ph_rtableid = rdom; | |||
| 3311 | if (r && (r->scrub_flags & PFSTATE_SETPRIO)) | |||
| 3312 | m->m_pkthdr.pf.prio = r->set_prio[0]; | |||
| 3313 | if (r && r->qid) | |||
| 3314 | m->m_pkthdr.pf.qid = r->qid; | |||
| 3315 | m->m_data += max_linkhdr; | |||
| 3316 | m->m_pkthdr.len = m->m_len = len; | |||
| 3317 | m->m_pkthdr.ph_ifidx = 0; | |||
| 3318 | m->m_pkthdr.csum_flags |= M_TCP_CSUM_OUT; | |||
| 3319 | memset(m->m_data, 0, len); | |||
| 3320 | switch (af) { | |||
| 3321 | case AF_INET: | |||
| 3322 | h = mtod(m, struct ip *); | |||
| 3323 | h->ip_p = IPPROTO_TCP; | |||
| 3324 | h->ip_len = htons(tlen); | |||
| 3325 | h->ip_v = 4; | |||
| 3326 | h->ip_hl = sizeof(*h) >> 2; | |||
| 3327 | h->ip_tos = IPTOS_LOWDELAY; | |||
| 3328 | h->ip_len = htons(len); | |||
| 3329 | h->ip_off = htons(ip_mtudisc ? IP_DF : 0); | |||
| 3330 | h->ip_ttl = ttl ? ttl : ip_defttl; | |||
| 3331 | h->ip_sum = 0; | |||
| 3332 | h->ip_src.s_addr = saddr->v4.s_addr; | |||
| 3333 | h->ip_dst.s_addr = daddr->v4.s_addr; | |||
| 3334 | ||||
| 3335 | th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip)); | |||
| 3336 | break; | |||
| 3337 | #ifdef INET6 | |||
| 3338 | case AF_INET6: | |||
| 3339 | h6 = mtod(m, struct ip6_hdr *); | |||
| 3340 | h6->ip6_nxt = IPPROTO_TCP; | |||
| 3341 | h6->ip6_plen = htons(tlen); | |||
| 3342 | h6->ip6_vfc |= IPV6_VERSION; | |||
| 3343 | h6->ip6_hlim = IPV6_DEFHLIM; | |||
| 3344 | memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr)); | |||
| 3345 | memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr)); | |||
| 3346 | ||||
| 3347 | th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr)); | |||
| 3348 | break; | |||
| 3349 | #endif /* INET6 */ | |||
| 3350 | default: | |||
| 3351 | unhandled_af(af); | |||
| 3352 | } | |||
| 3353 | ||||
| 3354 | /* TCP header */ | |||
| 3355 | th->th_sport = sport; | |||
| 3356 | th->th_dport = dport; | |||
| 3357 | th->th_seq = htonl(seq); | |||
| 3358 | th->th_ack = htonl(ack); | |||
| 3359 | th->th_off = tlen >> 2; | |||
| 3360 | th->th_flags = flags; | |||
| 3361 | th->th_win = htons(win); | |||
| 3362 | ||||
| 3363 | opt = (char *)(th + 1); | |||
| 3364 | if (mss) { | |||
| 3365 | opt[0] = TCPOPT_MAXSEG; | |||
| 3366 | opt[1] = 4; | |||
| 3367 | mss = htons(mss); | |||
| 3368 | memcpy((opt + 2), &mss, 2); | |||
| 3369 | opt += 4; | |||
| 3370 | } | |||
| 3371 | if (sack) { | |||
| 3372 | opt[0] = TCPOPT_SACK_PERMITTED; | |||
| 3373 | opt[1] = 2; | |||
| 3374 | opt += 2; | |||
| 3375 | } | |||
| 3376 | ||||
| 3377 | return (m); | |||
| 3378 | } | |||
| 3379 | ||||
| 3380 | void | |||
| 3381 | pf_send_tcp(const struct pf_rule *r, sa_family_t af, | |||
| 3382 | const struct pf_addr *saddr, const struct pf_addr *daddr, | |||
| 3383 | u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack, | |||
| 3384 | u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag, | |||
| 3385 | u_int16_t rtag, u_int rdom) | |||
| 3386 | { | |||
| 3387 | struct mbuf *m; | |||
| 3388 | ||||
| 3389 | if ((m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack, | |||
| 3390 | flags, win, mss, ttl, tag, rtag, 0, rdom)) == NULL) | |||
| 3391 | return; | |||
| 3392 | ||||
| 3393 | switch (af) { | |||
| 3394 | case AF_INET: | |||
| 3395 | ip_send(m); | |||
| 3396 | break; | |||
| 3397 | #ifdef INET6 | |||
| 3398 | case AF_INET6: | |||
| 3399 | ip6_send(m); | |||
| 3400 | break; | |||
| 3401 | #endif /* INET6 */ | |||
| 3402 | } | |||
| 3403 | } | |||
| 3404 | ||||
| 3405 | static void | |||
| 3406 | pf_send_challenge_ack(struct pf_pdesc *pd, struct pf_state *st, | |||
| 3407 | struct pf_state_peer *src, struct pf_state_peer *dst) | |||
| 3408 | { | |||
| 3409 | /* | |||
| 3410 | * We are sending challenge ACK as a response to SYN packet, which | |||
| 3411 | * matches existing state (modulo TCP window check). Therefore packet | |||
| 3412 | * must be sent on behalf of destination. | |||
| 3413 | * | |||
| 3414 | * We expect sender to remain either silent, or send RST packet | |||
| 3415 | * so both, firewall and remote peer, can purge dead state from | |||
| 3416 | * memory. | |||
| 3417 | */ | |||
| 3418 | pf_send_tcp(st->rule.ptr, pd->af, pd->dst, pd->src, | |||
| 3419 | pd->hdr.tcp.th_dport, pd->hdr.tcp.th_sport, dst->seqlo, | |||
| 3420 | src->seqlo, TH_ACK, 0, 0, st->rule.ptr->return_ttl, 1, 0, | |||
| 3421 | pd->rdomain); | |||
| 3422 | } | |||
| 3423 | ||||
| 3424 | void | |||
| 3425 | pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, int param, | |||
| 3426 | sa_family_t af, struct pf_rule *r, u_int rdomain) | |||
| 3427 | { | |||
| 3428 | struct mbuf *m0; | |||
| 3429 | ||||
| 3430 | if ((m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT)) == NULL) | |||
| 3431 | return; | |||
| 3432 | ||||
| 3433 | m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED; | |||
| 3434 | m0->m_pkthdr.ph_rtableid = rdomain; | |||
| 3435 | if (r && (r->scrub_flags & PFSTATE_SETPRIO)) | |||
| 3436 | m0->m_pkthdr.pf.prio = r->set_prio[0]; | |||
| 3437 | if (r && r->qid) | |||
| 3438 | m0->m_pkthdr.pf.qid = r->qid; | |||
| 3439 | ||||
| 3440 | switch (af) { | |||
| 3441 | case AF_INET: | |||
| 3442 | icmp_error(m0, type, code, 0, param); | |||
| 3443 | break; | |||
| 3444 | #ifdef INET6 | |||
| 3445 | case AF_INET6: | |||
| 3446 | icmp6_error(m0, type, code, param); | |||
| 3447 | break; | |||
| 3448 | #endif /* INET6 */ | |||
| 3449 | } | |||
| 3450 | } | |||
| 3451 | ||||
| 3452 | /* | |||
| 3453 | * Return ((n = 0) == (a = b [with mask m])) | |||
| 3454 | * Note: n != 0 => returns (a != b [with mask m]) | |||
| 3455 | */ | |||
| 3456 | int | |||
| 3457 | pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m, | |||
| 3458 | struct pf_addr *b, sa_family_t af) | |||
| 3459 | { | |||
| 3460 | switch (af) { | |||
| 3461 | case AF_INET: | |||
| 3462 | if ((a->addr32[0] & m->addr32[0]) == | |||
| 3463 | (b->addr32[0] & m->addr32[0])) | |||
| 3464 | return (n == 0); | |||
| 3465 | break; | |||
| 3466 | #ifdef INET6 | |||
| 3467 | case AF_INET6: | |||
| 3468 | if (((a->addr32[0] & m->addr32[0]) == | |||
| 3469 | (b->addr32[0] & m->addr32[0])) && | |||
| 3470 | ((a->addr32[1] & m->addr32[1]) == | |||
| 3471 | (b->addr32[1] & m->addr32[1])) && | |||
| 3472 | ((a->addr32[2] & m->addr32[2]) == | |||
| 3473 | (b->addr32[2] & m->addr32[2])) && | |||
| 3474 | ((a->addr32[3] & m->addr32[3]) == | |||
| 3475 | (b->addr32[3] & m->addr32[3]))) | |||
| 3476 | return (n == 0); | |||
| 3477 | break; | |||
| 3478 | #endif /* INET6 */ | |||
| 3479 | } | |||
| 3480 | ||||
| 3481 | return (n != 0); | |||
| 3482 | } | |||
| 3483 | ||||
| 3484 | /* | |||
| 3485 | * Return 1 if b <= a <= e, otherwise return 0. | |||
| 3486 | */ | |||
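| | /* Note: for IPv6 the comparison below is lexicographic over the four 32-bit words, most significant word first. */ | |||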
| 3487 | int | |||
| 3488 | pf_match_addr_range(struct pf_addr *b, struct pf_addr *e, | |||
| 3489 | struct pf_addr *a, sa_family_t af) | |||
| 3490 | { | |||
| 3491 | switch (af) { | |||
| 3492 | case AF_INET: | |||
| 3493 | if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) || | |||
| 3494 | (ntohl(a->addr32[0]) > ntohl(e->addr32[0]))) | |||
| 3495 | return (0); | |||
| 3496 | break; | |||
| 3497 | #ifdef INET6 | |||
| 3498 | case AF_INET6: { | |||
| 3499 | int i; | |||
| 3500 | ||||
| 3501 | /* check a >= b */ | |||
| 3502 | for (i = 0; i < 4; ++i) | |||
| 3503 | if (ntohl(a->addr32[i]) > ntohl(b->addr32[i])) | |||
| 3504 | break; | |||
| 3505 | else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i])) | |||
| 3506 | return (0); | |||
| 3507 | /* check a <= e */ | |||
| 3508 | for (i = 0; i < 4; ++i) | |||
| 3509 | if (ntohl(a->addr32[i]) < ntohl(e->addr32[i])) | |||
| 3510 | break; | |||
| 3511 | else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i])) | |||
| 3512 | return (0); | |||
| 3513 | break; | |||
| 3514 | } | |||
| 3515 | #endif /* INET6 */ | |||
| 3516 | } | |||
| 3517 | return (1); | |||
| 3518 | } | |||
| 3519 | ||||
| 3520 | int | |||
| 3521 | pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p) | |||
| 3522 | { | |||
| 3523 | switch (op) { | |||
| 3524 | case PF_OP_IRG: | |||
| 3525 | return ((p > a1) && (p < a2)); | |||
| 3526 | case PF_OP_XRG: | |||
| 3527 | return ((p < a1) || (p > a2)); | |||
| 3528 | case PF_OP_RRG: | |||
| 3529 | return ((p >= a1) && (p <= a2)); | |||
| 3530 | case PF_OP_EQ: | |||
| 3531 | return (p == a1); | |||
| 3532 | case PF_OP_NE: | |||
| 3533 | return (p != a1); | |||
| 3534 | case PF_OP_LT: | |||
| 3535 | return (p < a1); | |||
| 3536 | case PF_OP_LE: | |||
| 3537 | return (p <= a1); | |||
| 3538 | case PF_OP_GT: | |||
| 3539 | return (p > a1); | |||
| 3540 | case PF_OP_GE: | |||
| 3541 | return (p >= a1); | |||
| 3542 | } | |||
| 3543 | return (0); /* never reached */ | |||
| 3544 | } | |||
| 3545 | ||||
| 3546 | int | |||
| 3547 | pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p) | |||
| 3548 | { | |||
| 3549 | return (pf_match(op, ntohs(a1), ntohs(a2), ntohs(p))); | |||
| 3550 | } | |||
| 3551 | ||||
| 3552 | int | |||
| 3553 | pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u) | |||
| 3554 | { | |||
| 3555 | if (u == -1 && op != PF_OP_EQ && op != PF_OP_NE) | |||
| 3556 | return (0); | |||
| 3557 | return (pf_match(op, a1, a2, u)); | |||
| 3558 | } | |||
| 3559 | ||||
| 3560 | int | |||
| 3561 | pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g) | |||
| 3562 | { | |||
| 3563 | if (g == -1 && op != PF_OP_EQ && op != PF_OP_NE) | |||
| 3564 | return (0); | |||
| 3565 | return (pf_match(op, a1, a2, g)); | |||
| 3566 | } | |||
| 3567 | ||||
| 3568 | int | |||
| 3569 | pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag) | |||
| 3570 | { | |||
| 3571 | if (*tag == -1) | |||
| 3572 | *tag = m->m_pkthdr.pf.tag; | |||
| 3573 | ||||
| 3574 | return ((!r->match_tag_not && r->match_tag == *tag) || | |||
| 3575 | (r->match_tag_not && r->match_tag != *tag)); | |||
| 3576 | } | |||
| 3577 | ||||
| 3578 | int | |||
| 3579 | pf_match_rcvif(struct mbuf *m, struct pf_rule *r) | |||
| 3580 | { | |||
| 3581 | struct ifnet *ifp; | |||
| 3582 | #if NCARP > 0 | |||
| 3583 | struct ifnet *ifp0; | |||
| 3584 | #endif | |||
| 3585 | struct pfi_kif *kif; | |||
| 3586 | ||||
| 3587 | ifp = if_get(m->m_pkthdr.ph_ifidx); | |||
| 3588 | if (ifp == NULL) | |||
| 3589 | return (0); | |||
| 3590 | ||||
| 3591 | #if NCARP > 0 | |||
| 3592 | if (ifp->if_type == IFT_CARP && | |||
| 3593 | (ifp0 = if_get(ifp->if_carpdevidx)) != NULL) { | |||
| 3594 | kif = (struct pfi_kif *)ifp0->if_pf_kif; | |||
| 3595 | if_put(ifp0); | |||
| 3596 | } else | |||
| 3597 | #endif /* NCARP */ | |||
| 3598 | kif = (struct pfi_kif *)ifp->if_pf_kif; | |||
| 3599 | ||||
| 3600 | if_put(ifp); | |||
| 3601 | ||||
| 3602 | if (kif == NULL) { | |||
| 3603 | DPFPRINTF(LOG_ERR, | |||
| 3604 | "%s: kif == NULL, @%d via %s", __func__, | |||
| 3605 | r->nr, r->rcv_ifname); | |||
| 3606 | return (0); | |||
| 3607 | } | |||
| 3608 | ||||
| 3609 | return (pfi_kif_match(r->rcv_kif, kif)); | |||
| 3610 | } | |||
| 3611 | ||||
| 3612 | void | |||
| 3613 | pf_tag_packet(struct mbuf *m, int tag, int rtableid) | |||
| 3614 | { | |||
| 3615 | if (tag > 0) | |||
| 3616 | m->m_pkthdr.pf.tag = tag; | |||
| 3617 | if (rtableid >= 0) | |||
| 3618 | m->m_pkthdr.ph_rtableid = (u_int)rtableid; | |||
| 3619 | } | |||
| 3620 | ||||
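| | /* | |||
| | * Note: the per-CPU anchor stack holds PF_ANCHOR_STACK_MAX frames plus one | |||
| | * extra slot at index PF_ANCHOR_STACK_MAX; that slot is not a real frame, | |||
| | * its sf_stack_top member just records the current top-of-stack pointer. | |||
| | */ | |||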
| 3621 | void | |||
| 3622 | pf_anchor_stack_init(void) | |||
| 3623 | { | |||
| 3624 | struct pf_anchor_stackframe *stack; | |||
| 3625 | ||||
| 3626 | stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack); | |||
| 3627 | stack[PF_ANCHOR_STACK_MAX].sf_stack_top = &stack[0]; | |||
| 3628 | cpumem_leave(pf_anchor_stack, stack); | |||
| 3629 | } | |||
| 3630 | ||||
| 3631 | int | |||
| 3632 | pf_anchor_stack_is_full(struct pf_anchor_stackframe *sf) | |||
| 3633 | { | |||
| 3634 | struct pf_anchor_stackframe *stack; | |||
| 3635 | int rv; | |||
| 3636 | ||||
| 3637 | stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack); | |||
| 3638 | rv = (sf == &stack[PF_ANCHOR_STACK_MAX]); | |||
| 3639 | cpumem_leave(pf_anchor_stack, stack); | |||
| 3640 | ||||
| 3641 | return (rv); | |||
| 3642 | } | |||
| 3643 | ||||
| 3644 | int | |||
| 3645 | pf_anchor_stack_is_empty(struct pf_anchor_stackframe *sf) | |||
| 3646 | { | |||
| 3647 | struct pf_anchor_stackframe *stack; | |||
| 3648 | int rv; | |||
| 3649 | ||||
| 3650 | stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack); | |||
| 3651 | rv = (sf == &stack[0]); | |||
| 3652 | cpumem_leave(pf_anchor_stack, stack); | |||
| 3653 | ||||
| 3654 | return (rv); | |||
| 3655 | } | |||
| 3656 | ||||
| 3657 | struct pf_anchor_stackframe * | |||
| 3658 | pf_anchor_stack_top(void) | |||
| 3659 | { | |||
| 3660 | struct pf_anchor_stackframe *stack; | |||
| 3661 | struct pf_anchor_stackframe *top_sf; | |||
| 3662 | ||||
| 3663 | stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack); | |||
| 3664 | top_sf = stack[PF_ANCHOR_STACK_MAX].sf_stack_top; | |||
| 3665 | cpumem_leave(pf_anchor_stack, stack); | |||
| 3666 | ||||
| 3667 | return (top_sf); | |||
| 3668 | } | |||
| 3669 | ||||
| 3670 | int | |||
| 3671 | pf_anchor_stack_push(struct pf_ruleset *rs, struct pf_rule *r, | |||
| 3672 | struct pf_anchor *child, int jump_target) | |||
| 3673 | { | |||
| 3674 | struct pf_anchor_stackframe *stack; | |||
| 3675 | struct pf_anchor_stackframe *top_sf = pf_anchor_stack_top(); | |||
| 3676 | ||||
| 3677 | top_sf++; | |||
| 3678 | if (pf_anchor_stack_is_full(top_sf)) | |||
| 3679 | return (-1); | |||
| 3680 | ||||
| 3681 | top_sf->sf_rs = rs; | |||
| 3682 | top_sf->sf_r = r; | |||
| 3683 | top_sf->sf_child = child; | |||
| 3684 | top_sf->sf_jump_target = jump_target; | |||
| 3685 | ||||
| 3686 | stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack); | |||
| 3687 | ||||
| 3688 | if ((top_sf <= &stack[0]) || (top_sf >= &stack[PF_ANCHOR_STACK_MAX])) | |||
| 3689 | panic("%s: top frame outside of anchor stack range", __func__); | |||
| 3690 | ||||
| 3691 | stack[PF_ANCHOR_STACK_MAX].sf_stack_top = top_sf; | |||
| 3692 | cpumem_leave(pf_anchor_stack, stack); | |||
| 3693 | ||||
| 3694 | return (0); | |||
| 3695 | } | |||
| 3696 | ||||
| 3697 | int | |||
| 3698 | pf_anchor_stack_pop(struct pf_ruleset **rs, struct pf_rule **r, | |||
| 3699 | struct pf_anchor **child, int *jump_target) | |||
| 3700 | { | |||
| 3701 | struct pf_anchor_stackframe *top_sf = pf_anchor_stack_top(); | |||
| 3702 | struct pf_anchor_stackframe *stack; | |||
| 3703 | int on_top; | |||
| 3704 | ||||
| 3705 | stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack); | |||
| 3706 | if (pf_anchor_stack_is_empty(top_sf)) { | |||
| 3707 | on_top = -1; | |||
| 3708 | } else { | |||
| 3709 | if ((top_sf <= &stack[0]) || | |||
| 3710 | (top_sf >= &stack[PF_ANCHOR_STACK_MAX])) | |||
| 3711 | panic("%s: top frame outside of anchor stack range", | |||
| 3712 | __func__); | |||
| 3713 | ||||
| 3714 | *rs = top_sf->sf_rs; | |||
| 3715 | *r = top_sf->sf_r; | |||
| 3716 | *child = top_sf->sf_child; | |||
| 3717 | *jump_target = top_sf->sf_jump_target; | |||
| 3718 | top_sf--; | |||
| 3719 | stack[PF_ANCHOR_STACK_MAX].sf_stack_top = top_sf; | |||
| 3720 | on_top = 0; | |||
| 3721 | } | |||
| 3722 | cpumem_leave(pf_anchor_stack, stack); | |||
| 3723 | ||||
| 3724 | return (on_top); | |||
| 3725 | } | |||
| 3726 | ||||
| 3727 | void | |||
| 3728 | pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr, | |||
| 3729 | struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af) | |||
| 3730 | { | |||
| 3731 | switch (af) { | |||
| 3732 | case AF_INET: | |||
| 3733 | naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | | |||
| 3734 | ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); | |||
| 3735 | break; | |||
| 3736 | #ifdef INET6 | |||
| 3737 | case AF_INET6: | |||
| 3738 | naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | | |||
| 3739 | ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); | |||
| 3740 | naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) | | |||
| 3741 | ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]); | |||
| 3742 | naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) | | |||
| 3743 | ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]); | |||
| 3744 | naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) | | |||
| 3745 | ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]); | |||
| 3746 | break; | |||
| 3747 | #endif /* INET6 */ | |||
| 3748 | default: | |||
| 3749 | unhandled_af(af); | |||
| 3750 | } | |||
| 3751 | } | |||
| 3752 | ||||
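| | /* | |||
| | * Note: pf_addr_inc() advances an address by one; for IPv6 this is a | |||
| | * 128-bit increment with carry from addr32[3] up to addr32[0], done on | |||
| | * host-order values, hence the ntohl()/htonl() round trips. | |||
| | */ | |||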
| 3753 | void | |||
| 3754 | pf_addr_inc(struct pf_addr *addr, sa_family_t af) | |||
| 3755 | { | |||
| 3756 | switch (af) { | |||
| 3757 | case AF_INET: | |||
| 3758 | addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1); | |||
| 3759 | break; | |||
| 3760 | #ifdef INET6 | |||
| 3761 | case AF_INET6: | |||
| 3762 | if (addr->addr32[3] == 0xffffffff) { | |||
| 3763 | addr->addr32[3] = 0; | |||
| 3764 | if (addr->addr32[2] == 0xffffffff) { | |||
| 3765 | addr->addr32[2] = 0; | |||
| 3766 | if (addr->addr32[1] == 0xffffffff) { | |||
| 3767 | addr->addr32[1] = 0; | |||
| 3768 | addr->addr32[0] = | |||
| 3769 | htonl(ntohl(addr->addr32[0]) + 1); | |||
| 3770 | } else | |||
| 3771 | addr->addr32[1] = | |||
| 3772 | htonl(ntohl(addr->addr32[1]) + 1); | |||
| 3773 | } else | |||
| 3774 | addr->addr32[2] = | |||
| 3775 | htonl(ntohl(addr->addr32[2]) + 1); | |||
| 3776 | } else | |||
| 3777 | addr->addr32[3] = | |||
| 3778 | htonl(ntohl(addr->addr32[3]) + 1); | |||
| 3779 | break; | |||
| 3780 | #endif /* INET6 */ | |||
| 3781 | default: | |||
| 3782 | unhandled_af(af); | |||
| 3783 | } | |||
| 3784 | } | |||
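The branch above bumps one 32-bit word of the address in host byte order and stores it back in network order; the surrounding if/else chain carries into the next-higher word whenever a word wraps. A minimal userland sketch of the same increment-with-carry idea, assuming the address is held as four big-endian 32-bit words (helper name is hypothetical, not part of pf.c):

    #include <stdint.h>
    #include <arpa/inet.h>          /* htonl()/ntohl() */

    /* Bump the lowest word; propagate the carry toward word 0 on wrap. */
    static void
    addr6_inc(uint32_t w[4])
    {
            int i;

            for (i = 3; i >= 0; i--) {
                    w[i] = htonl(ntohl(w[i]) + 1);
                    if (w[i] != 0)          /* no wrap, so no carry */
                            break;
            }
    }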
| 3785 | ||||
| 3786 | int | |||
| 3787 | pf_socket_lookup(struct pf_pdesc *pd) | |||
| 3788 | { | |||
| 3789 | struct pf_addr *saddr, *daddr; | |||
| 3790 | u_int16_t sport, dport; | |||
| 3791 | struct inpcbtable *tb; | |||
| 3792 | struct inpcb *inp; | |||
| 3793 | ||||
| 3794 | pd->lookup.uid = -1; | |||
| 3795 | pd->lookup.gid = -1; | |||
| 3796 | pd->lookup.pid = NO_PID; | |||
| 3797 | switch (pd->virtual_proto) { | |||
| 3798 | case IPPROTO_TCP: | |||
| 3799 | sport = pd->hdr.tcp.th_sport; | |||
| 3800 | dport = pd->hdr.tcp.th_dport; | |||
| 3801 | PF_ASSERT_LOCKED(); | |||
| 3802 | NET_ASSERT_LOCKED(); | |||
| 3803 | tb = &tcbtable; | |||
| 3804 | break; | |||
| 3805 | case IPPROTO_UDP: | |||
| 3806 | sport = pd->hdr.udp.uh_sport; | |||
| 3807 | dport = pd->hdr.udp.uh_dport; | |||
| 3808 | PF_ASSERT_LOCKED(); | |||
| 3809 | NET_ASSERT_LOCKED(); | |||
| 3810 | tb = &udbtable; | |||
| 3811 | break; | |||
| 3812 | default: | |||
| 3813 | return (-1); | |||
| 3814 | } | |||
| 3815 | if (pd->dir == PF_IN) { | |||
| 3816 | saddr = pd->src; | |||
| 3817 | daddr = pd->dst; | |||
| 3818 | } else { | |||
| 3819 | u_int16_t p; | |||
| 3820 | ||||
| 3821 | p = sport; | |||
| 3822 | sport = dport; | |||
| 3823 | dport = p; | |||
| 3824 | saddr = pd->dst; | |||
| 3825 | daddr = pd->src; | |||
| 3826 | } | |||
| 3827 | switch (pd->af) { | |||
| 3828 | case AF_INET: | |||
| 3829 | /* | |||
| 3830 | * Fails when rtable is changed while evaluating the ruleset | |||
| 3831 | * The socket looked up will not match the one hit in the end. | |||
| 3832 | */ | |||
| 3833 | inp = in_pcblookup(tb, saddr->v4, sport, daddr->v4, dport, | |||
| 3834 | pd->rdomain); | |||
| 3835 | if (inp == NULL) { | |||
| 3836 | inp = in_pcblookup_listen(tb, daddr->v4, dport, | |||
| 3837 | NULL, pd->rdomain); | |||
| 3838 | if (inp == NULL) | |||
| 3839 | return (-1); | |||
| 3840 | } | |||
| 3841 | break; | |||
| 3842 | #ifdef INET6 | |||
| 3843 | case AF_INET6: | |||
| 3844 | if (pd->virtual_proto == IPPROTO_UDP) | |||
| 3845 | tb = &udb6table; | |||
| 3846 | inp = in6_pcblookup(tb, &saddr->v6, sport, &daddr->v6, | |||
| 3847 | dport, pd->rdomain); | |||
| 3848 | if (inp == NULL) { | |||
| 3849 | inp = in6_pcblookup_listen(tb, &daddr->v6, dport, | |||
| 3850 | NULL, pd->rdomain); | |||
| 3851 | if (inp == NULL) | |||
| 3852 | return (-1); | |||
| 3853 | } | |||
| 3854 | break; | |||
| 3855 | #endif /* INET6 */ | |||
| 3856 | default: | |||
| 3857 | unhandled_af(pd->af); | |||
| 3858 | } | |||
| 3859 | pd->lookup.uid = inp->inp_socket->so_euid; | |||
| 3860 | pd->lookup.gid = inp->inp_socket->so_egid; | |||
| 3861 | pd->lookup.pid = inp->inp_socket->so_cpid; | |||
| 3862 | in_pcbunref(inp); | |||
| 3863 | return (1); | |||
| 3864 | } | |||
| 3865 | ||||
| 3866 | /* post: r => (r[0] == type /\ r[1] >= min_typelen >= 2 "validity" | |||
| 3867 | * /\ (eoh - r) >= min_typelen >= 2 "safety" ) | |||
| 3868 | * | |||
| 3869 | * warning: r + r[1] may exceed opts bounds for r[1] > min_typelen | |||
| 3870 | */ | |||
| 3871 | u_int8_t* | |||
| 3872 | pf_find_tcpopt(u_int8_t *opt, u_int8_t *opts, size_t hlen, u_int8_t type, | |||
| 3873 | u_int8_t min_typelen) | |||
| 3874 | { | |||
| 3875 | u_int8_t *eoh = opts + hlen; | |||
| 3876 | ||||
| 3877 | if (min_typelen < 2) | |||
| 3878 | return (NULL); | |||
| 3879 | ||||
| 3880 | while ((eoh - opt) >= min_typelen) { | |||
| 3881 | switch (*opt) { | |||
| 3882 | case TCPOPT_EOL: | |||
| 3883 | /* FALLTHROUGH - Workaround the failure of some | |||
| 3884 | systems to NOP-pad their bzero'd option buffers, | |||
| 3885 | producing spurious EOLs */ | |||
| 3886 | case TCPOPT_NOP: | |||
| 3887 | opt++; | |||
| 3888 | continue; | |||
| 3889 | default: | |||
| 3890 | if (opt[0] == type && | |||
| 3891 | opt[1] >= min_typelen) | |||
| 3892 | return (opt); | |||
| 3893 | } | |||
| 3894 | ||||
| 3895 | opt += MAX(opt[1], 2); /* evade infinite loops */ | |||
| 3896 | } | |||
| 3897 | ||||
| 3898 | return (NULL); | |||
| 3899 | } | |||
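The helper above only guarantees that the returned pointer names a well-formed option of at least min_typelen bytes inside opts; stepping past it is the caller's job. A hedged usage sketch, scanning for SACK-permitted (kind 4, length 2) the same way pf_get_wscale() and pf_get_mss() below walk their option buffers (sack_ok is a hypothetical local; opts/olen come from the caller):

    int sack_ok = 0;
    u_int8_t *opt = opts;

    while ((opt = pf_find_tcpopt(opt, opts, olen,
        TCPOPT_SACK_PERMITTED, TCPOLEN_SACK_PERMITTED)) != NULL) {
            sack_ok = 1;            /* opt[0] is the kind, opt[1] the length */
            opt += opt[1];          /* step past it, as the callers below do */
    }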
| 3900 | ||||
| 3901 | u_int8_t | |||
| 3902 | pf_get_wscale(struct pf_pdesc *pd) | |||
| 3903 | { | |||
| 3904 | int olen; | |||
| 3905 | u_int8_t opts[MAX_TCPOPTLEN], *opt; | |||
| 3906 | u_int8_t wscale = 0; | |||
| 3907 | ||||
| 3908 | olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr); | |||
| 3909 | if (olen < TCPOLEN_WINDOW || !pf_pull_hdr(pd->m, | |||
| 3910 | pd->off + sizeof(struct tcphdr), opts, olen, NULL, pd->af)) | |||
| 3911 | return (0); | |||
| 3912 | ||||
| 3913 | opt = opts; | |||
| 3914 | while ((opt = pf_find_tcpopt(opt, opts, olen, | |||
| 3915 | TCPOPT_WINDOW, TCPOLEN_WINDOW)) != NULL) { | |||
| 3916 | wscale = opt[2]; | |||
| 3917 | wscale = MIN(wscale, TCP_MAX_WINSHIFT); | |||
| 3918 | wscale |= PF_WSCALE_FLAG; | |||
| 3919 | ||||
| 3920 | opt += opt[1]; | |||
| 3921 | } | |||
| 3922 | ||||
| 3923 | return (wscale); | |||
| 3924 | } | |||
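The return value packs presence and shift count into one byte: PF_WSCALE_FLAG (0x80 above) records that the option was seen, and the low bits carry the shift. A hedged sketch of how a consumer might apply it, assuming the usual PF_WSCALE_MASK (0x0f) companion definition from pfvar.h:

    u_int8_t wscale = pf_get_wscale(pd);
    u_int32_t win = ntohs(pd->hdr.tcp.th_win);

    if (wscale & PF_WSCALE_FLAG)
            win <<= (wscale & PF_WSCALE_MASK);  /* scaled receive window */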
| 3925 | ||||
| 3926 | u_int16_t | |||
| 3927 | pf_get_mss(struct pf_pdesc *pd) | |||
| 3928 | { | |||
| 3929 | int olen; | |||
| 3930 | u_int8_t opts[MAX_TCPOPTLEN], *opt; | |||
| 3931 | u_int16_t mss = tcp_mssdflt; | |||
| 3932 | ||||
| 3933 | olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr); | |||
| 3934 | if (olen < TCPOLEN_MAXSEG || !pf_pull_hdr(pd->m, | |||
| 3935 | pd->off + sizeof(struct tcphdr), opts, olen, NULL, pd->af)) | |||
| 3936 | return (0); | |||
| 3937 | ||||
| 3938 | opt = opts; | |||
| 3939 | while ((opt = pf_find_tcpopt(opt, opts, olen, | |||
| 3940 | TCPOPT_MAXSEG, TCPOLEN_MAXSEG)) != NULL) { | |||
| 3941 | memcpy(&mss, (opt + 2), 2); | |||
| 3942 | mss = ntohs(mss); | |||
| 3943 | ||||
| 3944 | opt += opt[1]; | |||
| 3945 | } | |||
| 3946 | return (mss); | |||
| 3947 | } | |||
| 3948 | ||||
| 3949 | u_int16_t | |||
| 3950 | pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer) | |||
| 3951 | { | |||
| 3952 | struct ifnet *ifp; | |||
| 3953 | struct sockaddr_in *dst; | |||
| 3954 | #ifdef INET61 | |||
| 3955 | struct sockaddr_in6 *dst6; | |||
| 3956 | #endif /* INET6 */ | |||
| 3957 | struct rtentry *rt = NULL((void *)0); | |||
| 3958 | struct sockaddr_storage ss; | |||
| 3959 | int hlen; | |||
| 3960 | u_int16_t mss = tcp_mssdflt; | |||
| 3961 | ||||
| 3962 | memset(&ss, 0, sizeof(ss)); | |||
| 3963 | ||||
| 3964 | switch (af) { | |||
| 3965 | case AF_INET2: | |||
| 3966 | hlen = sizeof(struct ip); | |||
| 3967 | dst = (struct sockaddr_in *)&ss; | |||
| 3968 | dst->sin_family = AF_INET2; | |||
| 3969 | dst->sin_len = sizeof(*dst); | |||
| 3970 | dst->sin_addr = addr->v4pfa.v4; | |||
| 3971 | rt = rtalloc(sintosa(dst), 0, rtableid); | |||
| 3972 | break; | |||
| 3973 | #ifdef INET61 | |||
| 3974 | case AF_INET624: | |||
| 3975 | hlen = sizeof(struct ip6_hdr); | |||
| 3976 | dst6 = (struct sockaddr_in6 *)&ss; | |||
| 3977 | dst6->sin6_family = AF_INET624; | |||
| 3978 | dst6->sin6_len = sizeof(*dst6); | |||
| 3979 | dst6->sin6_addr = addr->v6pfa.v6; | |||
| 3980 | rt = rtalloc(sin6tosa(dst6), 0, rtableid); | |||
| 3981 | break; | |||
| 3982 | #endif /* INET6 */ | |||
| 3983 | } | |||
| 3984 | ||||
| 3985 | if (rt != NULL && (ifp = if_get(rt->rt_ifidx)) != NULL) { | |||
| 3986 | mss = ifp->if_mtu - hlen - sizeof(struct tcphdr); | |||
| 3987 | mss = max(tcp_mssdflt, mss); | |||
| 3988 | if_put(ifp); | |||
| 3989 | } | |||
| 3990 | rtfree(rt); | |||
| 3991 | mss = min(mss, offer); | |||
| 3992 | mss = max(mss, 64); /* sanity - at least max opt space */ | |||
| 3993 | return (mss); | |||
| 3994 | } | |||
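Worked example with assumed values: an IPv4 route out a 1500-byte MTU interface, a peer offering an MSS of 1460, and tcp_mssdflt left at its customary 512:

    mss = 1500 - sizeof(struct ip) - sizeof(struct tcphdr); /* 1460 */
    mss = max(tcp_mssdflt, mss);    /* max(512, 1460)  -> 1460 */
    mss = min(mss, offer);          /* min(1460, 1460) -> 1460 */
    mss = max(mss, 64);             /* floor of 64     -> 1460 */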
| 3995 | ||||
| 3996 | static __inline int | |||
| 3997 | pf_set_rt_ifp(struct pf_state *st, struct pf_addr *saddr, sa_family_t af, | |||
| 3998 | struct pf_src_node **sns) | |||
| 3999 | { | |||
| 4000 | struct pf_rule *r = st->rule.ptr; | |||
| 4001 | int rv; | |||
| 4002 | ||||
| 4003 | if (!r->rt) | |||
| 4004 | return (0); | |||
| 4005 | ||||
| 4006 | rv = pf_map_addr(af, r, saddr, &st->rt_addr, NULL((void *)0), sns, | |||
| 4007 | &r->route, PF_SN_ROUTE); | |||
| 4008 | if (rv == 0) | |||
| 4009 | st->rt = r->rt; | |||
| 4010 | ||||
| 4011 | return (rv); | |||
| 4012 | } | |||
| 4013 | ||||
| 4014 | u_int32_t | |||
| 4015 | pf_tcp_iss(struct pf_pdesc *pd) | |||
| 4016 | { | |||
| 4017 | SHA2_CTX ctx; | |||
| 4018 | union { | |||
| 4019 | uint8_t bytes[SHA512_DIGEST_LENGTH64]; | |||
| 4020 | uint32_t words[1]; | |||
| 4021 | } digest; | |||
| 4022 | ||||
| 4023 | if (pf_tcp_secret_init == 0) { | |||
| 4024 | arc4random_buf(pf_tcp_secret, sizeof(pf_tcp_secret)); | |||
| 4025 | SHA512Init(&pf_tcp_secret_ctx); | |||
| 4026 | SHA512Update(&pf_tcp_secret_ctx, pf_tcp_secret, | |||
| 4027 | sizeof(pf_tcp_secret)); | |||
| 4028 | pf_tcp_secret_init = 1; | |||
| 4029 | } | |||
| 4030 | ctx = pf_tcp_secret_ctx; | |||
| 4031 | ||||
| 4032 | SHA512Update(&ctx, &pd->rdomain, sizeof(pd->rdomain)); | |||
| 4033 | SHA512Update(&ctx, &pd->hdr.tcp.th_sport, sizeof(u_short)); | |||
| 4034 | SHA512Update(&ctx, &pd->hdr.tcp.th_dport, sizeof(u_short)); | |||
| 4035 | switch (pd->af) { | |||
| 4036 | case AF_INET2: | |||
| 4037 | SHA512Update(&ctx, &pd->src->v4pfa.v4, sizeof(struct in_addr)); | |||
| 4038 | SHA512Update(&ctx, &pd->dst->v4pfa.v4, sizeof(struct in_addr)); | |||
| 4039 | break; | |||
| 4040 | #ifdef INET61 | |||
| 4041 | case AF_INET624: | |||
| 4042 | SHA512Update(&ctx, &pd->src->v6pfa.v6, sizeof(struct in6_addr)); | |||
| 4043 | SHA512Update(&ctx, &pd->dst->v6pfa.v6, sizeof(struct in6_addr)); | |||
| 4044 | break; | |||
| 4045 | #endif /* INET6 */ | |||
| 4046 | } | |||
| 4047 | SHA512Final(digest.bytes, &ctx); | |||
| 4048 | pf_tcp_iss_off += 4096; | |||
| 4049 | return (digest.words[0] + READ_ONCE(tcp_iss) + pf_tcp_iss_off); | |||
| 4050 | } | |||
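In short, the initial sequence number is a keyed digest of the connection identifiers plus two moving terms, so repeated state creations for the same 4-tuple still diverge:

    /* sketch of the value returned above */
    iss = digest.words[0]       /* SHA-512 over secret + rdomain + ports + addresses */
        + READ_ONCE(tcp_iss)    /* global ISS base */
        + pf_tcp_iss_off;       /* grows by 4096 on every call */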
| 4051 | ||||
| 4052 | void | |||
| 4053 | pf_rule_to_actions(struct pf_rule *r, struct pf_rule_actions *a) | |||
| 4054 | { | |||
| 4055 | if (r->qid) | |||
| 4056 | a->qid = r->qid; | |||
| 4057 | if (r->pqid) | |||
| 4058 | a->pqid = r->pqid; | |||
| 4059 | if (r->rtableid >= 0) | |||
| 4060 | a->rtableid = r->rtableid; | |||
| 4061 | #if NPFLOG1 > 0 | |||
| 4062 | a->log |= r->log; | |||
| 4063 | #endif /* NPFLOG > 0 */ | |||
| 4064 | if (r->scrub_flags & PFSTATE_SETTOS0x0040) | |||
| 4065 | a->set_tos = r->set_tos; | |||
| 4066 | if (r->min_ttl) | |||
| 4067 | a->min_ttl = r->min_ttl; | |||
| 4068 | if (r->max_mss) | |||
| 4069 | a->max_mss = r->max_mss; | |||
| 4070 | a->flags |= (r->scrub_flags & (PFSTATE_NODF0x0020|PFSTATE_RANDOMID0x0080| | |||
| 4071 | PFSTATE_SETTOS0x0040|PFSTATE_SCRUB_TCP0x0100|PFSTATE_SETPRIO0x0200)); | |||
| 4072 | if (r->scrub_flags & PFSTATE_SETPRIO0x0200) { | |||
| 4073 | a->set_prio[0] = r->set_prio[0]; | |||
| 4074 | a->set_prio[1] = r->set_prio[1]; | |||
| 4075 | } | |||
| 4076 | if (r->rule_flag & PFRULE_SETDELAY0x0080) | |||
| 4077 | a->delay = r->delay; | |||
| 4078 | } | |||
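Because each field is copied only when the rule actually sets it, actions accumulate across all matching rules, with later rules overriding earlier ones and the log/scrub flags ORed in. A small hedged sketch with two hypothetical match rules:

    struct pf_rule_actions act;

    memset(&act, 0, sizeof(act));
    pf_rule_to_actions(&match_rule_a, &act);   /* say this sets act.qid = 1 */
    pf_rule_to_actions(&match_rule_b, &act);   /* qid = 2 replaces it; log bits OR in */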
| 4079 | ||||
| 4080 | #define PF_TEST_ATTRIB(t, a) \ | |||
| 4081 | if (t) { \ | |||
| 4082 | r = a; \ | |||
| 4083 | continue; \ | |||
| 4084 | } else do { \ | |||
| 4085 | } while (0) | |||
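Spelled out, the macro is just a guarded skip step inside the rule loop below; for example the address-family test used later,

    PF_TEST_ATTRIB((r->af && r->af != ctx->pd->af),
        r->skip[PF_SKIP_AF].ptr);

expands to:

    if (r->af && r->af != ctx->pd->af) {
            r = r->skip[PF_SKIP_AF].ptr;
            continue;
    } else do {
    } while (0);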
| 4086 | ||||
| 4087 | enum pf_test_status | |||
| 4088 | pf_match_rule(struct pf_test_ctx *ctx, struct pf_ruleset *ruleset) | |||
| 4089 | { | |||
| 4090 | struct pf_rule *r; | |||
| 4091 | struct pf_anchor *child = NULL((void *)0); | |||
| 4092 | int target; | |||
| 4093 | ||||
| 4094 | pf_anchor_stack_init(); | |||
| 4095 | enter_ruleset: | |||
| 4096 | r = TAILQ_FIRST(ruleset->rules.active.ptr); | |||
| 4097 | while (r != NULL) { | |||
| 4098 | PF_TEST_ATTRIB(r->rule_flag & PFRULE_EXPIRED, | |||
| 4099 | TAILQ_NEXT(r, entries)); | |||
| 4100 | r->evaluations++; | |||
| 4101 | PF_TEST_ATTRIB( | |||
| 4102 | (pfi_kif_match(r->kif, ctx->pd->kif) == r->ifnot), | |||
| 4103 | r->skip[PF_SKIP_IFP].ptr); | |||
| 4104 | PF_TEST_ATTRIB((r->direction && r->direction != ctx->pd->dir), | |||
| 4105 | r->skip[PF_SKIP_DIR].ptr); | |||
| 4106 | PF_TEST_ATTRIB((r->onrdomain >= 0 && | |||
| 4107 | (r->onrdomain == ctx->pd->rdomain) == r->ifnot), | |||
| 4108 | r->skip[PF_SKIP_RDOM].ptr); | |||
| 4109 | PF_TEST_ATTRIB((r->af && r->af != ctx->pd->af), | |||
| 4110 | r->skip[PF_SKIP_AF].ptr); | |||
| 4111 | PF_TEST_ATTRIB((r->proto && r->proto != ctx->pd->proto), | |||
| 4112 | r->skip[PF_SKIP_PROTO].ptr); | |||
| 4113 | PF_TEST_ATTRIB((PF_MISMATCHAW(&r->src.addr, &ctx->pd->nsaddr, | |||
| 4114 | ctx->pd->naf, r->src.neg, ctx->pd->kif, | |||
| 4115 | ctx->act.rtableid)), | |||
| 4116 | r->skip[PF_SKIP_SRC_ADDR].ptr); | |||
| 4117 | PF_TEST_ATTRIB((PF_MISMATCHAW(&r->dst.addr, &ctx->pd->ndaddr, | |||
| 4118 | ctx->pd->af, r->dst.neg, NULL, ctx->act.rtableid)), | |||
| 4119 | r->skip[PF_SKIP_DST_ADDR].ptr); | |||
| 4120 | ||||
| 4121 | switch (ctx->pd->virtual_proto) { | |||
| 4122 | case PF_VPROTO_FRAGMENT: | |||
| 4123 | /* tcp/udp only. port_op always 0 in other cases */ | |||
| 4124 | PF_TEST_ATTRIB((r->src.port_op || r->dst.port_op), | |||
| 4125 | TAILQ_NEXT(r, entries)); | |||
| 4126 | PF_TEST_ATTRIB((ctx->pd->proto == IPPROTO_TCP && | |||
| 4127 | r->flagset), | |||
| 4128 | TAILQ_NEXT(r, entries)); | |||
| 4129 | /* icmp only. type/code always 0 in other cases */ | |||
| 4130 | PF_TEST_ATTRIB((r->type || r->code), | |||
| 4131 | TAILQ_NEXT(r, entries)); | |||
| 4132 | /* tcp/udp only. {uid|gid}.op always 0 in other cases */ | |||
| 4133 | PF_TEST_ATTRIB((r->gid.op || r->uid.op), | |||
| 4134 | TAILQ_NEXT(r, entries)); | |||
| 4135 | break; | |||
| 4136 | ||||
| 4137 | case IPPROTO_TCP: | |||
| 4138 | PF_TEST_ATTRIB(((r->flagset & ctx->th->th_flags) != | |||
| 4139 | r->flags), | |||
| 4140 | TAILQ_NEXT(r, entries)); | |||
| 4141 | PF_TEST_ATTRIB((r->os_fingerprint != PF_OSFP_ANY && | |||
| 4142 | !pf_osfp_match(pf_osfp_fingerprint(ctx->pd), | |||
| 4143 | r->os_fingerprint)), | |||
| 4144 | TAILQ_NEXT(r, entries)); | |||
| 4145 | /* FALLTHROUGH */ | |||
| 4146 | ||||
| 4147 | case IPPROTO_UDP: | |||
| 4148 | /* tcp/udp only. port_op always 0 in other cases */ | |||
| 4149 | PF_TEST_ATTRIB((r->src.port_op && | |||
| 4150 | !pf_match_port(r->src.port_op, r->src.port[0], | |||
| 4151 | r->src.port[1], ctx->pd->nsport)), | |||
| 4152 | r->skip[PF_SKIP_SRC_PORT].ptr); | |||
| 4153 | PF_TEST_ATTRIB((r->dst.port_op && | |||
| 4154 | !pf_match_port(r->dst.port_op, r->dst.port[0], | |||
| 4155 | r->dst.port[1], ctx->pd->ndport)), | |||
| 4156 | r->skip[PF_SKIP_DST_PORT].ptr); | |||
| 4157 | /* tcp/udp only. uid.op always 0 in other cases */ | |||
| 4158 | PF_TEST_ATTRIB((r->uid.op && (ctx->pd->lookup.done || | |||
| 4159 | (ctx->pd->lookup.done = | |||
| 4160 | pf_socket_lookup(ctx->pd), 1)) && | |||
| 4161 | !pf_match_uid(r->uid.op, r->uid.uid[0], | |||
| 4162 | r->uid.uid[1], ctx->pd->lookup.uid)), | |||
| 4163 | TAILQ_NEXT(r, entries)); | |||
| 4164 | /* tcp/udp only. gid.op always 0 in other cases */ | |||
| 4165 | PF_TEST_ATTRIB((r->gid.op && (ctx->pd->lookup.done || | |||
| 4166 | (ctx->pd->lookup.done = | |||
| 4167 | pf_socket_lookup(ctx->pd), 1)) && | |||
| 4168 | !pf_match_gid(r->gid.op, r->gid.gid[0], | |||
| 4169 | r->gid.gid[1], ctx->pd->lookup.gid)), | |||
| 4170 | TAILQ_NEXT(r, entries)); | |||
| 4171 | break; | |||
| 4172 | ||||
| 4173 | case IPPROTO_ICMP: | |||
| 4174 | /* icmp only. type always 0 in other cases */ | |||
| 4175 | PF_TEST_ATTRIB((r->type && | |||
| 4176 | r->type != ctx->icmptype + 1), | |||
| 4177 | TAILQ_NEXT(r, entries)); | |||
| 4178 | /* icmp only. type always 0 in other cases */ | |||
| 4179 | PF_TEST_ATTRIB((r->code && | |||
| 4180 | r->code != ctx->icmpcode + 1), | |||
| 4181 | TAILQ_NEXT(r, entries)); | |||
| 4182 | /* icmp only. don't create states on replies */ | |||
| 4183 | PF_TEST_ATTRIB((r->keep_state && !ctx->state_icmp && | |||
| 4184 | (r->rule_flag & PFRULE_STATESLOPPY) == 0 && | |||
| 4185 | ctx->icmp_dir != PF_IN), | |||
| 4186 | TAILQ_NEXT(r, entries)); | |||
| 4187 | break; | |||
| 4188 | ||||
| 4189 | case IPPROTO_ICMPV6: | |||
| 4190 | /* icmp only. type always 0 in other cases */ | |||
| 4191 | PF_TEST_ATTRIB((r->type && | |||
| 4192 | r->type != ctx->icmptype + 1), | |||
| 4193 | TAILQ_NEXT(r, entries)); | |||
| 4194 | /* icmp only. type always 0 in other cases */ | |||
| 4195 | PF_TEST_ATTRIB((r->code && | |||
| 4196 | r->code != ctx->icmpcode + 1), | |||
| 4197 | TAILQ_NEXT(r, entries)); | |||
| 4198 | /* icmp only. don't create states on replies */ | |||
| 4199 | PF_TEST_ATTRIB((r->keep_state && !ctx->state_icmp && | |||
| 4200 | (r->rule_flag & PFRULE_STATESLOPPY) == 0 && | |||
| 4201 | ctx->icmp_dir != PF_IN && | |||
| 4202 | ctx->icmptype != ND_NEIGHBOR_ADVERT), | |||
| 4203 | TAILQ_NEXT(r, entries)); | |||
| 4204 | break; | |||
| 4205 | ||||
| 4206 | default: | |||
| 4207 | break; | |||
| 4208 | } | |||
| 4209 | ||||
| 4210 | PF_TEST_ATTRIB((r->rule_flag & PFRULE_FRAGMENT && | |||
| 4211 | ctx->pd->virtual_proto != PF_VPROTO_FRAGMENT), | |||
| 4212 | TAILQ_NEXT(r, entries)); | |||
| 4213 | PF_TEST_ATTRIB((r->tos && !(r->tos == ctx->pd->tos)), | |||
| 4214 | TAILQ_NEXT(r, entries)); | |||
| 4215 | PF_TEST_ATTRIB((r->prob && | |||
| 4216 | r->prob <= arc4random_uniform(UINT_MAX - 1) + 1), | |||
| 4217 | TAILQ_NEXT(r, entries)); | |||
| 4218 | PF_TEST_ATTRIB((r->match_tag && | |||
| 4219 | !pf_match_tag(ctx->pd->m, r, &ctx->tag)), | |||
| 4220 | TAILQ_NEXT(r, entries)); | |||
| 4221 | PF_TEST_ATTRIB((r->rcv_kif && pf_match_rcvif(ctx->pd->m, r) == | |||
| 4222 | r->rcvifnot), | |||
| 4223 | TAILQ_NEXT(r, entries)); | |||
| 4224 | PF_TEST_ATTRIB((r->prio && | |||
| 4225 | (r->prio == PF_PRIO_ZERO ? 0 : r->prio) != | |||
| 4226 | ctx->pd->m->m_pkthdr.pf.prio), | |||
| 4227 | TAILQ_NEXT(r, entries)); | |||
| 4228 | ||||
| 4229 | /* must be last! */ | |||
| 4230 | if (r->pktrate.limit) { | |||
| 4231 | pf_add_threshold(&r->pktrate); | |||
| 4232 | PF_TEST_ATTRIB((pf_check_threshold(&r->pktrate)), | |||
| 4233 | TAILQ_NEXT(r, entries)); | |||
| 4234 | } | |||
| 4235 | ||||
| 4236 | /* FALLTHROUGH */ | |||
| 4237 | if (r->tag) | |||
| 4238 | ctx->tag = r->tag; | |||
| 4239 | if (r->anchor == NULL((void *)0)) { | |||
| 4240 | ||||
| 4241 | if (r->rule_flag & PFRULE_ONCE0x00100000) { | |||
| 4242 | u_int32_t rule_flag; | |||
| 4243 | ||||
| 4244 | rule_flag = r->rule_flag; | |||
| 4245 | if (((rule_flag & PFRULE_EXPIRED) == 0) && | |||
| 4246 | atomic_cas_uint(&r->rule_flag, rule_flag, | |||
| 4247 | rule_flag | PFRULE_EXPIRED) == rule_flag) { | |||
| 4248 | r->exptime = gettime(); | |||
| 4249 | } else { | |||
| 4250 | r = TAILQ_NEXT(r, entries); | |||
| 4251 | continue; | |||
| 4252 | } | |||
| 4253 | } | |||
| 4254 | ||||
| 4255 | if (r->action == PF_MATCH) { | |||
| 4256 | if ((ctx->ri = pool_get(&pf_rule_item_pl, | |||
| 4257 | PR_NOWAIT)) == NULL) { | |||
| 4258 | REASON_SET(&ctx->reason, PFRES_MEMORY); | |||
| 4259 | return (PF_TEST_FAIL); | |||
| 4260 | } | |||
| 4261 | ctx->ri->r = r; | |||
| 4262 | /* order is irrelevant */ | |||
| 4263 | SLIST_INSERT_HEAD(&ctx->rules, ctx->ri, entry); | |||
| 4264 | ctx->ri = NULL; | |||
| 4265 | pf_rule_to_actions(r, &ctx->act); | |||
| 4266 | if (r->rule_flag & PFRULE_AFTO0x00200000) | |||
| 4267 | ctx->pd->naf = r->naf; | |||
| 4268 | if (pf_get_transaddr(r, ctx->pd, ctx->sns, | |||
| 4269 | &ctx->nr) == -1) { | |||
| 4270 | REASON_SET(&ctx->reason, | |||
| 4271 | PFRES_TRANSLATE); | |||
| 4272 | return (PF_TEST_FAIL); | |||
| 4273 | } | |||
| 4274 | #if NPFLOG1 > 0 | |||
| 4275 | if (r->log) { | |||
| 4276 | REASON_SET(&ctx->reason, PFRES_MATCH); | |||
| 4277 | pflog_packet(ctx->pd, ctx->reason, r, | |||
| 4278 | ctx->a, ruleset, NULL((void *)0)); | |||
| 4279 | } | |||
| 4280 | #endif /* NPFLOG > 0 */ | |||
| 4281 | } else { | |||
| 4282 | /* | |||
| 4283 | * found matching r | |||
| 4284 | */ | |||
| 4285 | *ctx->rm = r; | |||
| 4286 | /* | |||
| 4287 | * anchor, with ruleset, where r belongs to | |||
| 4288 | */ | |||
| 4289 | *ctx->am = ctx->a; | |||
| 4290 | /* | |||
| 4291 | * ruleset where r belongs to | |||
| 4292 | */ | |||
| 4293 | *ctx->rsm = ruleset; | |||
| 4294 | /* | |||
| 4295 | * ruleset, where anchor belongs to. | |||
| 4296 | */ | |||
| 4297 | ctx->arsm = ctx->aruleset; | |||
| 4298 | } | |||
| 4299 | ||||
| 4300 | #if NPFLOG1 > 0 | |||
| 4301 | if (ctx->act.log & PF_LOG_MATCHES0x10) | |||
| 4302 | pf_log_matches(ctx->pd, r, ctx->a, ruleset, | |||
| 4303 | &ctx->rules); | |||
| 4304 | #endif /* NPFLOG > 0 */ | |||
| 4305 | ||||
| 4306 | if (r->quick) | |||
| 4307 | return (PF_TEST_QUICK); | |||
| 4308 | } else { | |||
| 4309 | ctx->a = r; | |||
| 4310 | ctx->aruleset = &r->anchor->ruleset; | |||
| 4311 | if (r->anchor_wildcard) { | |||
| 4312 | RB_FOREACH(child, pf_anchor_node, | |||
| 4313 | &r->anchor->children) { | |||
| 4314 | if (pf_anchor_stack_push(ruleset, r, | |||
| 4315 | child, PF_NEXT_CHILD) != 0) | |||
| 4316 | return (PF_TEST_FAIL); | |||
| 4317 | ||||
| 4318 | ruleset = &child->ruleset; | |||
| 4319 | goto enter_ruleset; | |||
| 4320 | next_child: | |||
| 4321 | continue; /* with RB_FOREACH() */ | |||
| 4322 | } | |||
| 4323 | } else { | |||
| 4324 | if (pf_anchor_stack_push(ruleset, r, child, | |||
| 4325 | PF_NEXT_RULE) != 0) | |||
| 4326 | return (PF_TEST_FAIL); | |||
| 4327 | ||||
| 4328 | ruleset = &r->anchor->ruleset; | |||
| 4329 | child = NULL((void *)0); | |||
| 4330 | goto enter_ruleset; | |||
| 4331 | next_rule: | |||
| 4332 | ; | |||
| 4333 | } | |||
| 4334 | } | |||
| 4335 | r = TAILQ_NEXT(r, entries); | |||
| 4336 | } | |||
| 4337 | ||||
| 4338 | if (pf_anchor_stack_pop(&ruleset, &r, &child, &target) == 0) { | |||
| 4339 | /* stop if any rule matched within quick anchors. */ | |||
| 4340 | if (r->quick == PF_TEST_QUICK && *ctx->am == r) | |||
| 4341 | return (PF_TEST_QUICK); | |||
| 4342 | ||||
| 4343 | switch (target) { | |||
| 4344 | case PF_NEXT_CHILD: | |||
| 4345 | goto next_child; | |||
| 4346 | case PF_NEXT_RULE: | |||
| 4347 | goto next_rule; | |||
| 4348 | default: | |||
| 4349 | panic("%s: unknown jump target", __func__); | |||
| 4350 | } | |||
| 4351 | } | |||
| 4352 | ||||
| 4353 | return (PF_TEST_OK); | |||
| 4354 | } | |||
| 4355 | ||||
| 4356 | int | |||
| 4357 | pf_test_rule(struct pf_pdesc *pd, struct pf_rule **rm, struct pf_state **sm, | |||
| 4358 | struct pf_rule **am, struct pf_ruleset **rsm, u_short *reason) | |||
| 4359 | { | |||
| 4360 | struct pf_rule *r = NULL((void *)0); | |||
| 4361 | struct pf_rule *a = NULL((void *)0); | |||
| 4362 | struct pf_ruleset *ruleset = NULL((void *)0); | |||
| 4363 | struct pf_state_key *skw = NULL((void *)0), *sks = NULL((void *)0); | |||
| 4364 | int rewrite = 0; | |||
| 4365 | u_int16_t virtual_type, virtual_id; | |||
| 4366 | int action = PF_DROP; | |||
| 4367 | struct pf_test_ctx ctx; | |||
| 4368 | int rv; | |||
| 4369 | ||||
| 4370 | PF_ASSERT_LOCKED(); | |||
| 4371 | ||||
| 4372 | memset(&ctx, 0, sizeof(ctx)); | |||
| 4373 | ctx.pd = pd; | |||
| 4374 | ctx.rm = rm; | |||
| 4375 | ctx.am = am; | |||
| 4376 | ctx.rsm = rsm; | |||
| 4377 | ctx.th = &pd->hdr.tcp; | |||
| 4378 | ctx.act.rtableid = pd->rdomain; | |||
| 4379 | ctx.tag = -1; | |||
| 4380 | SLIST_INIT(&ctx.rules); | |||
| 4381 | ||||
| 4382 | if (pd->dir == PF_IN && if_congested()) { | |||
| 4383 | REASON_SET(&ctx.reason, PFRES_CONGEST); | |||
| 4384 | return (PF_DROP); | |||
| 4385 | } | |||
| 4386 | ||||
| 4387 | switch (pd->virtual_proto) { | |||
| 4388 | case IPPROTO_ICMP1: | |||
| 4389 | ctx.icmptype = pd->hdr.icmp.icmp_type; | |||
| 4390 | ctx.icmpcode = pd->hdr.icmp.icmp_code; | |||
| 4391 | ctx.state_icmp = pf_icmp_mapping(pd, ctx.icmptype, | |||
| 4392 | &ctx.icmp_dir, &virtual_id, &virtual_type); | |||
| 4393 | if (ctx.icmp_dir == PF_IN) { | |||
| 4394 | pd->osport = pd->nsport = virtual_id; | |||
| 4395 | pd->odport = pd->ndport = virtual_type; | |||
| 4396 | } else { | |||
| 4397 | pd->osport = pd->nsport = virtual_type; | |||
| 4398 | pd->odport = pd->ndport = virtual_id; | |||
| 4399 | } | |||
| 4400 | break; | |||
| 4401 | #ifdef INET61 | |||
| 4402 | case IPPROTO_ICMPV658: | |||
| 4403 | ctx.icmptype = pd->hdr.icmp6.icmp6_type; | |||
| 4404 | ctx.icmpcode = pd->hdr.icmp6.icmp6_code; | |||
| 4405 | ctx.state_icmp = pf_icmp_mapping(pd, ctx.icmptype, | |||
| 4406 | &ctx.icmp_dir, &virtual_id, &virtual_type); | |||
| 4407 | if (ctx.icmp_dir == PF_IN) { | |||
| 4408 | pd->osport = pd->nsport = virtual_id; | |||
| 4409 | pd->odport = pd->ndport = virtual_type; | |||
| 4410 | } else { | |||
| 4411 | pd->osport = pd->nsport = virtual_type; | |||
| 4412 | pd->odport = pd->ndport = virtual_id; | |||
| 4413 | } | |||
| 4414 | break; | |||
| 4415 | #endif /* INET6 */ | |||
| 4416 | } | |||
| 4417 | ||||
| 4418 | ruleset = &pf_main_ruleset; | |||
| 4419 | rv = pf_match_rule(&ctx, ruleset); | |||
| 4420 | if (rv == PF_TEST_FAIL) { | |||
| 4421 | /* | |||
| 4422 | * Reason has been set in pf_match_rule() already. | |||
| 4423 | */ | |||
| 4424 | goto cleanup; | |||
| 4425 | } | |||
| 4426 | ||||
| 4427 | r = *ctx.rm; /* matching rule */ | |||
| 4428 | a = *ctx.am; /* rule that defines an anchor containing 'r' */ | |||
| 4429 | ruleset = *ctx.rsm;/* ruleset of the anchor defined by the rule 'a' */ | |||
| 4430 | ctx.aruleset = ctx.arsm;/* ruleset of the 'a' rule itself */ | |||
| 4431 | ||||
| 4432 | /* apply actions for last matching pass/block rule */ | |||
| 4433 | pf_rule_to_actions(r, &ctx.act); | |||
| 4434 | if (r->rule_flag & PFRULE_AFTO0x00200000) | |||
| 4435 | pd->naf = r->naf; | |||
| 4436 | if (pf_get_transaddr(r, pd, ctx.sns, &ctx.nr) == -1) { | |||
| 4437 | REASON_SET(&ctx.reason, PFRES_TRANSLATE); | |||
| 4438 | goto cleanup; | |||
| 4439 | } | |||
| 4440 | REASON_SET(&ctx.reason, PFRES_MATCH); | |||
| 4441 | ||||
| 4442 | #if NPFLOG1 > 0 | |||
| 4443 | if (r->log) | |||
| 4444 | pflog_packet(pd, ctx.reason, r, a, ruleset, NULL((void *)0)); | |||
| 4445 | if (ctx.act.log & PF_LOG_MATCHES0x10) | |||
| 4446 | pf_log_matches(pd, r, a, ruleset, &ctx.rules); | |||
| 4447 | #endif /* NPFLOG > 0 */ | |||
| 4448 | ||||
| 4449 | if (pd->virtual_proto != PF_VPROTO_FRAGMENT256 && | |||
| 4450 | (r->action == PF_DROP) && | |||
| 4451 | ((r->rule_flag & PFRULE_RETURNRST0x0001) || | |||
| 4452 | (r->rule_flag & PFRULE_RETURNICMP0x0004) || | |||
| 4453 | (r->rule_flag & PFRULE_RETURN0x0008))) { | |||
| 4454 | if (pd->proto == IPPROTO_TCP6 && | |||
| 4455 | ((r->rule_flag & PFRULE_RETURNRST0x0001) || | |||
| 4456 | (r->rule_flag & PFRULE_RETURN0x0008)) && | |||
| 4457 | !(ctx.th->th_flags & TH_RST0x04)) { | |||
| 4458 | u_int32_t ack = | |||
| 4459 | ntohl(ctx.th->th_seq) + pd->p_len; | |||
| 4460 | ||||
| 4461 | if (pf_check_tcp_cksum(pd->m, pd->off, | |||
| 4462 | pd->tot_len - pd->off, pd->af)) | |||
| 4463 | REASON_SET(&ctx.reason, PFRES_PROTCKSUM); | |||
| 4464 | else { | |||
| 4465 | if (ctx.th->th_flags & TH_SYN0x02) | |||
| 4466 | ack++; | |||
| 4467 | if (ctx.th->th_flags & TH_FIN0x01) | |||
| 4468 | ack++; | |||
| 4469 | pf_send_tcp(r, pd->af, pd->dst, | |||
| 4470 | pd->src, ctx.th->th_dport, | |||
| 4471 | ctx.th->th_sport, ntohl(ctx.th->th_ack), | |||
| 4472 | ack, TH_RST|TH_ACK, 0, 0, r->return_ttl, | |||
| 4473 | 1, 0, pd->rdomain); | |||
| 4474 | } | |||
| 4475 | } else if ((pd->proto != IPPROTO_ICMP || | |||
| 4476 | ICMP_INFOTYPE(ctx.icmptype)) && pd->af == AF_INET && | |||
| 4477 | r->return_icmp) | |||
| 4478 | pf_send_icmp(pd->m, r->return_icmp >> 8, | |||
| 4479 | r->return_icmp & 255, 0, pd->af, r, pd->rdomain); | |||
| 4480 | else if ((pd->proto != IPPROTO_ICMPV658 || | |||
| 4481 | (ctx.icmptype >= ICMP6_ECHO_REQUEST128 && | |||
| 4482 | ctx.icmptype != ND_REDIRECT137)) && pd->af == AF_INET624 && | |||
| 4483 | r->return_icmp6) | |||
| 4484 | pf_send_icmp(pd->m, r->return_icmp6 >> 8, | |||
| 4485 | r->return_icmp6 & 255, 0, pd->af, r, pd->rdomain); | |||
| 4486 | } | |||
| 4487 | ||||
| 4488 | if (r->action == PF_DROP) | |||
| 4489 | goto cleanup; | |||
| 4490 | ||||
| 4491 | pf_tag_packet(pd->m, ctx.tag, ctx.act.rtableid); | |||
| 4492 | if (ctx.act.rtableid >= 0 && | |||
| 4493 | rtable_l2(ctx.act.rtableid) != pd->rdomain) | |||
| 4494 | pd->destchg = 1; | |||
| 4495 | ||||
| 4496 | if (r->action == PF_PASS && pd->badopts != 0 && !r->allow_opts) { | |||
| 4497 | REASON_SET(&ctx.reason, PFRES_IPOPTIONS); | |||
| 4498 | #if NPFLOG1 > 0 | |||
| 4499 | pd->pflog |= PF_LOG_FORCE0x08; | |||
| 4500 | #endif /* NPFLOG > 0 */ | |||
| 4501 | DPFPRINTF(LOG_NOTICE, "dropping packet with " | |||
| 4502 | "ip/ipv6 options in pf_test_rule()"); | |||
| 4503 | goto cleanup; | |||
| 4504 | } | |||
| 4505 | ||||
| 4506 | if (pd->virtual_proto != PF_VPROTO_FRAGMENT | |||
| 4507 | && !ctx.state_icmp && r->keep_state) { | |||
| 4508 | ||||
| 4509 | if (r->rule_flag & PFRULE_SRCTRACK0x0020 && | |||
| 4510 | pf_insert_src_node(&ctx.sns[PF_SN_NONE], r, PF_SN_NONE, | |||
| 4511 | pd->af, pd->src, NULL((void *)0), NULL((void *)0)) != 0) { | |||
| 4512 | REASON_SET(&ctx.reason, PFRES_SRCLIMIT); | |||
| 4513 | goto cleanup; | |||
| 4514 | } | |||
| 4515 | ||||
| 4516 | if (r->max_states && (r->states_cur >= r->max_states)) { | |||
| 4517 | pf_status.lcounters[LCNT_STATES0]++; | |||
| 4518 | REASON_SET(&ctx.reason, PFRES_MAXSTATES); | |||
| 4519 | goto cleanup; | |||
| 4520 | } | |||
| 4521 | ||||
| 4522 | action = pf_create_state(pd, r, a, ctx.nr, &skw, &sks, | |||
| 4523 | &rewrite, sm, ctx.tag, &ctx.rules, &ctx.act, ctx.sns); | |||
| 4524 | ||||
| 4525 | if (action != PF_PASS) | |||
| 4526 | goto cleanup; | |||
| 4527 | if (sks != skw) { | |||
| 4528 | struct pf_state_key *sk; | |||
| 4529 | ||||
| 4530 | if (pd->dir == PF_IN) | |||
| 4531 | sk = sks; | |||
| 4532 | else | |||
| 4533 | sk = skw; | |||
| 4534 | rewrite += pf_translate(pd, | |||
| 4535 | &sk->addr[pd->af == pd->naf ? pd->sidx : pd->didx], | |||
| 4536 | sk->port[pd->af == pd->naf ? pd->sidx : pd->didx], | |||
| 4537 | &sk->addr[pd->af == pd->naf ? pd->didx : pd->sidx], | |||
| 4538 | sk->port[pd->af == pd->naf ? pd->didx : pd->sidx], | |||
| 4539 | virtual_type, ctx.icmp_dir); | |||
| 4540 | } | |||
| 4541 | ||||
| 4542 | #ifdef INET61 | |||
| 4543 | if (rewrite && skw->af != sks->af) | |||
| 4544 | action = PF_AFRT; | |||
| 4545 | #endif /* INET6 */ | |||
| 4546 | ||||
| 4547 | } else { | |||
| 4548 | action = PF_PASS; | |||
| 4549 | ||||
| 4550 | while ((ctx.ri = SLIST_FIRST(&ctx.rules)((&ctx.rules)->slh_first))) { | |||
| 4551 | SLIST_REMOVE_HEAD(&ctx.rules, entry)do { (&ctx.rules)->slh_first = (&ctx.rules)->slh_first ->entry.sle_next; } while (0); | |||
| 4552 | pool_put(&pf_rule_item_pl, ctx.ri); | |||
| 4553 | } | |||
| 4554 | } | |||
| 4555 | ||||
| 4556 | /* copy back packet headers if needed */ | |||
| 4557 | if (rewrite && pd->hdrlen) { | |||
| 4558 | m_copyback(pd->m, pd->off, pd->hdrlen, &pd->hdr, M_NOWAIT0x0002); | |||
| 4559 | } | |||
| 4560 | ||||
| 4561 | #if NPFSYNC1 > 0 | |||
| 4562 | if (*sm != NULL((void *)0) && !ISSET((*sm)->state_flags, PFSTATE_NOSYNC)(((*sm)->state_flags) & (0x0008)) && | |||
| 4563 | pd->dir == PF_OUT && pfsync_is_up()) { | |||
| 4564 | /* | |||
| 4565 | * We want the state created, but we don't | |||
| 4566 | * want to send this in case a partner | |||
| 4567 | * firewall has to know about it to allow | |||
| 4568 | * replies through it. | |||
| 4569 | */ | |||
| 4570 | if (pfsync_defer(*sm, pd->m)) | |||
| 4571 | return (PF_DEFER); | |||
| 4572 | } | |||
| 4573 | #endif /* NPFSYNC > 0 */ | |||
| 4574 | ||||
| 4575 | return (action); | |||
| 4576 | ||||
| 4577 | cleanup: | |||
| 4578 | while ((ctx.ri = SLIST_FIRST(&ctx.rules)((&ctx.rules)->slh_first))) { | |||
| 4579 | SLIST_REMOVE_HEAD(&ctx.rules, entry)do { (&ctx.rules)->slh_first = (&ctx.rules)->slh_first ->entry.sle_next; } while (0); | |||
| 4580 | pool_put(&pf_rule_item_pl, ctx.ri); | |||
| 4581 | } | |||
| 4582 | ||||
| 4583 | return (action); | |||
| 4584 | } | |||
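Both the success path and the cleanup label above drain ctx.rules with the same head-first loop before returning. The fragment below is a minimal userland sketch of that <sys/queue.h> idiom; the struct, list and function names are illustrative only, and pf releases its elements with pool_put() rather than free().

	#include <sys/queue.h>
	#include <stdlib.h>

	struct item {
		SLIST_ENTRY(item) entry;	/* list linkage, like pf_rule_item's entry */
		int value;
	};
	SLIST_HEAD(item_list, item);

	static void
	drain_items(struct item_list *list)
	{
		struct item *it;

		/* unlink the head first, then release it, until the list is empty */
		while ((it = SLIST_FIRST(list)) != NULL) {
			SLIST_REMOVE_HEAD(list, entry);
			free(it);
		}
	}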
| 4585 | ||||
| 4586 | static __inline int | |||
| 4587 | pf_create_state(struct pf_pdesc *pd, struct pf_rule *r, struct pf_rule *a, | |||
| 4588 | struct pf_rule *nr, struct pf_state_key **skw, struct pf_state_key **sks, | |||
| 4589 | int *rewrite, struct pf_state **sm, int tag, struct pf_rule_slist *rules, | |||
| 4590 | struct pf_rule_actions *act, struct pf_src_node *sns[PF_SN_MAX]) | |||
| 4591 | { | |||
| 4592 | struct pf_state *st = NULL((void *)0); | |||
| 4593 | struct tcphdr *th = &pd->hdr.tcp; | |||
| 4594 | u_int16_t mss = tcp_mssdflt; | |||
| 4595 | u_short reason; | |||
| 4596 | u_int i; | |||
| 4597 | ||||
| 4598 | st = pool_get(&pf_state_pl, PR_NOWAIT0x0002 | PR_ZERO0x0008); | |||
| 4599 | if (st == NULL((void *)0)) { | |||
| 4600 | REASON_SET(&reason, PFRES_MEMORY)do { if ((void *)(&reason) != ((void *)0)) { *(&reason ) = (5); if (5 < 17) pf_status.counters[5]++; } } while (0 ); | |||
| 4601 | goto csfailed; | |||
| 4602 | } | |||
| 4603 | st->rule.ptr = r; | |||
| 4604 | st->anchor.ptr = a; | |||
| 4605 | st->natrule.ptr = nr; | |||
| 4606 | if (r->allow_opts) | |||
| 4607 | st->state_flags |= PFSTATE_ALLOWOPTS0x0001; | |||
| 4608 | if (r->rule_flag & PFRULE_STATESLOPPY0x00020000) | |||
| 4609 | st->state_flags |= PFSTATE_SLOPPY0x0002; | |||
| 4610 | if (r->rule_flag & PFRULE_PFLOW0x00040000) | |||
| 4611 | st->state_flags |= PFSTATE_PFLOW0x0004; | |||
| 4612 | if (r->rule_flag & PFRULE_NOSYNC0x0010) | |||
| 4613 | st->state_flags |= PFSTATE_NOSYNC0x0008; | |||
| 4614 | #if NPFLOG1 > 0 | |||
| 4615 | st->log = act->log & PF_LOG_ALL0x02; | |||
| 4616 | #endif /* NPFLOG > 0 */ | |||
| 4617 | st->qid = act->qid; | |||
| 4618 | st->pqid = act->pqid; | |||
| 4619 | st->rtableid[pd->didx] = act->rtableid; | |||
| 4620 | st->rtableid[pd->sidx] = -1; /* return traffic is routed normally */ | |||
| 4621 | st->min_ttl = act->min_ttl; | |||
| 4622 | st->set_tos = act->set_tos; | |||
| 4623 | st->max_mss = act->max_mss; | |||
| 4624 | st->state_flags |= act->flags; | |||
| 4625 | #if NPFSYNC1 > 0 | |||
| 4626 | st->sync_state = PFSYNC_S_NONE0xd0; | |||
| 4627 | #endif /* NPFSYNC > 0 */ | |||
| 4628 | st->set_prio[0] = act->set_prio[0]; | |||
| 4629 | st->set_prio[1] = act->set_prio[1]; | |||
| 4630 | st->delay = act->delay; | |||
| 4631 | SLIST_INIT(&st->src_nodes){ ((&st->src_nodes)->slh_first) = ((void *)0); }; | |||
| 4632 | ||||
| 4633 | /* | |||
| 4634 | * must initialize refcnt before pf_state_insert() gets called. | |||
| 4635 | * pf_state_insert() grabs a reference for pfsync! | |||
| 4636 | */ | |||
| 4637 | PF_REF_INIT(st->refcnt)refcnt_init(&(st->refcnt)); | |||
| 4638 | mtx_init(&st->mtx, IPL_NET)do { (void)(((void *)0)); (void)(0); __mtx_init((&st-> mtx), ((((0x4)) > 0x0 && ((0x4)) < 0x9) ? 0x9 : ((0x4)))); } while (0); | |||
| 4639 | ||||
| 4640 | switch (pd->proto) { | |||
| 4641 | case IPPROTO_TCP6: | |||
| 4642 | st->src.seqlo = ntohl(th->th_seq)(__uint32_t)(__builtin_constant_p(th->th_seq) ? (__uint32_t )(((__uint32_t)(th->th_seq) & 0xff) << 24 | ((__uint32_t )(th->th_seq) & 0xff00) << 8 | ((__uint32_t)(th-> th_seq) & 0xff0000) >> 8 | ((__uint32_t)(th->th_seq ) & 0xff000000) >> 24) : __swap32md(th->th_seq)); | |||
| 4643 | st->src.seqhi = st->src.seqlo + pd->p_len + 1; | |||
| 4644 | if ((th->th_flags & (TH_SYN0x02|TH_ACK0x10)) == TH_SYN0x02 && | |||
| 4645 | r->keep_state == PF_STATE_MODULATE0x2) { | |||
| 4646 | /* Generate sequence number modulator */ | |||
| 4647 | st->src.seqdiff = pf_tcp_iss(pd) - st->src.seqlo; | |||
| 4648 | if (st->src.seqdiff == 0) | |||
| 4649 | st->src.seqdiff = 1; | |||
| 4650 | pf_patch_32(pd, &th->th_seq, | |||
| 4651 | htonl(st->src.seqlo + st->src.seqdiff)(__uint32_t)(__builtin_constant_p(st->src.seqlo + st->src .seqdiff) ? (__uint32_t)(((__uint32_t)(st->src.seqlo + st-> src.seqdiff) & 0xff) << 24 | ((__uint32_t)(st->src .seqlo + st->src.seqdiff) & 0xff00) << 8 | ((__uint32_t )(st->src.seqlo + st->src.seqdiff) & 0xff0000) >> 8 | ((__uint32_t)(st->src.seqlo + st->src.seqdiff) & 0xff000000) >> 24) : __swap32md(st->src.seqlo + st-> src.seqdiff))); | |||
| 4652 | *rewrite = 1; | |||
| 4653 | } else | |||
| 4654 | st->src.seqdiff = 0; | |||
| 4655 | if (th->th_flags & TH_SYN0x02) { | |||
| 4656 | st->src.seqhi++; | |||
| 4657 | st->src.wscale = pf_get_wscale(pd); | |||
| 4658 | } | |||
| 4659 | st->src.max_win = MAX(ntohs(th->th_win), 1)((((__uint16_t)(__builtin_constant_p(th->th_win) ? (__uint16_t )(((__uint16_t)(th->th_win) & 0xffU) << 8 | ((__uint16_t )(th->th_win) & 0xff00U) >> 8) : __swap16md(th-> th_win)))>(1))?((__uint16_t)(__builtin_constant_p(th->th_win ) ? (__uint16_t)(((__uint16_t)(th->th_win) & 0xffU) << 8 | ((__uint16_t)(th->th_win) & 0xff00U) >> 8) : __swap16md(th->th_win))):(1)); | |||
| 4660 | if (st->src.wscale & PF_WSCALE_MASK0x0f) { | |||
| 4661 | /* Remove scale factor from initial window */ | |||
| 4662 | int win = st->src.max_win; | |||
| 4663 | win += 1 << (st->src.wscale & PF_WSCALE_MASK0x0f); | |||
| 4664 | st->src.max_win = (win - 1) >> | |||
| 4665 | (st->src.wscale & PF_WSCALE_MASK0x0f); | |||
| 4666 | } | |||
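The "remove scale factor" step above is a ceiling division: the window advertised in the SYN is not yet scaled, so it is divided by 2^wscale rounded up, which guarantees that a later max_win << wscale never under-estimates what the peer offered. A worked example with hypothetical numbers:

	/*
	 * Hypothetical SYN: th_win = 1000, wscale = 3.
	 *   win     = 1000 + (1 << 3) = 1008
	 *   max_win = (1008 - 1) >> 3 = 125  == ceil(1000 / 8)
	 *   later:    125 << 3        = 1000, so nothing is under-estimated
	 */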
| 4667 | if (th->th_flags & TH_FIN0x01) | |||
| 4668 | st->src.seqhi++; | |||
| 4669 | st->dst.seqhi = 1; | |||
| 4670 | st->dst.max_win = 1; | |||
| 4671 | pf_set_protostate(st, PF_PEER_SRC, TCPS_SYN_SENT2); | |||
| 4672 | pf_set_protostate(st, PF_PEER_DST, TCPS_CLOSED0); | |||
| 4673 | st->timeout = PFTM_TCP_FIRST_PACKET; | |||
| 4674 | pf_status.states_halfopen++; | |||
| 4675 | break; | |||
| 4676 | case IPPROTO_UDP17: | |||
| 4677 | pf_set_protostate(st, PF_PEER_SRC, PFUDPS_SINGLE1); | |||
| 4678 | pf_set_protostate(st, PF_PEER_DST, PFUDPS_NO_TRAFFIC0); | |||
| 4679 | st->timeout = PFTM_UDP_FIRST_PACKET; | |||
| 4680 | break; | |||
| 4681 | case IPPROTO_ICMP1: | |||
| 4682 | #ifdef INET61 | |||
| 4683 | case IPPROTO_ICMPV658: | |||
| 4684 | #endif /* INET6 */ | |||
| 4685 | st->timeout = PFTM_ICMP_FIRST_PACKET; | |||
| 4686 | break; | |||
| 4687 | default: | |||
| 4688 | pf_set_protostate(st, PF_PEER_SRC, PFOTHERS_SINGLE1); | |||
| 4689 | pf_set_protostate(st, PF_PEER_DST, PFOTHERS_NO_TRAFFIC0); | |||
| 4690 | st->timeout = PFTM_OTHER_FIRST_PACKET; | |||
| 4691 | } | |||
| 4692 | ||||
| 4693 | st->creation = getuptime(); | |||
| 4694 | st->expire = getuptime(); | |||
| 4695 | ||||
| 4696 | if (pd->proto == IPPROTO_TCP6) { | |||
| 4697 | if (st->state_flags & PFSTATE_SCRUB_TCP0x0100 && | |||
| 4698 | pf_normalize_tcp_init(pd, &st->src)) { | |||
| 4699 | REASON_SET(&reason, PFRES_MEMORY)do { if ((void *)(&reason) != ((void *)0)) { *(&reason ) = (5); if (5 < 17) pf_status.counters[5]++; } } while (0 ); | |||
| 4700 | goto csfailed; | |||
| 4701 | } | |||
| 4702 | if (st->state_flags & PFSTATE_SCRUB_TCP0x0100 && st->src.scrub && | |||
| 4703 | pf_normalize_tcp_stateful(pd, &reason, st, | |||
| 4704 | &st->src, &st->dst, rewrite)) { | |||
| 4705 | /* This really shouldn't happen!!! */ | |||
| 4706 | DPFPRINTF(LOG_ERR,do { if (pf_status.debug >= (3)) { log(3, "pf: "); addlog( "%s: tcp normalize failed on first pkt", __func__); addlog("\n" ); } } while (0) | |||
| 4707 | "%s: tcp normalize failed on first pkt", __func__)do { if (pf_status.debug >= (3)) { log(3, "pf: "); addlog( "%s: tcp normalize failed on first pkt", __func__); addlog("\n" ); } } while (0); | |||
| 4708 | goto csfailed; | |||
| 4709 | } | |||
| 4710 | } | |||
| 4711 | st->direction = pd->dir; | |||
| 4712 | ||||
| 4713 | if (pf_state_key_setup(pd, skw, sks, act->rtableid)) { | |||
| 4714 | REASON_SET(&reason, PFRES_MEMORY)do { if ((void *)(&reason) != ((void *)0)) { *(&reason ) = (5); if (5 < 17) pf_status.counters[5]++; } } while (0 ); | |||
| 4715 | goto csfailed; | |||
| 4716 | } | |||
| 4717 | ||||
| 4718 | if (pf_set_rt_ifp(st, pd->src, (*skw)->af, sns) != 0) { | |||
| 4719 | REASON_SET(&reason, PFRES_NOROUTE)do { if ((void *)(&reason) != ((void *)0)) { *(&reason ) = (16); if (16 < 17) pf_status.counters[16]++; } } while (0); | |||
| 4720 | goto csfailed; | |||
| 4721 | } | |||
| 4722 | ||||
| 4723 | for (i = 0; i < PF_SN_MAX; i++) | |||
| 4724 | if (sns[i] != NULL((void *)0)) { | |||
| 4725 | struct pf_sn_item *sni; | |||
| 4726 | ||||
| 4727 | sni = pool_get(&pf_sn_item_pl, PR_NOWAIT0x0002); | |||
| 4728 | if (sni == NULL((void *)0)) { | |||
| 4729 | REASON_SET(&reason, PFRES_MEMORY)do { if ((void *)(&reason) != ((void *)0)) { *(&reason ) = (5); if (5 < 17) pf_status.counters[5]++; } } while (0 ); | |||
| 4730 | goto csfailed; | |||
| 4731 | } | |||
| 4732 | sni->sn = sns[i]; | |||
| 4733 | SLIST_INSERT_HEAD(&st->src_nodes, sni, next)do { (sni)->next.sle_next = (&st->src_nodes)->slh_first ; (&st->src_nodes)->slh_first = (sni); } while (0); | |||
| 4734 | sni->sn->states++; | |||
| 4735 | } | |||
| 4736 | ||||
| 4737 | #if NPFSYNC1 > 0 | |||
| 4738 | pfsync_init_state(st, *skw, *sks, 0); | |||
| 4739 | #endif | |||
| 4740 | ||||
| 4741 | if (pf_state_insert(BOUND_IFACE(r, pd->kif)((r)->rule_flag & 0x00010000) ? (pd->kif) : pfi_all, skw, sks, st)) { | |||
| 4742 | *sks = *skw = NULL((void *)0); | |||
| 4743 | REASON_SET(&reason, PFRES_STATEINS)do { if ((void *)(&reason) != ((void *)0)) { *(&reason ) = (11); if (11 < 17) pf_status.counters[11]++; } } while (0); | |||
| 4744 | goto csfailed; | |||
| 4745 | } else | |||
| 4746 | *sm = st; | |||
| 4747 | ||||
| 4748 | /* | |||
| 4749 | * Make state responsible for rules it binds here. | |||
| 4750 | */ | |||
| 4751 | memcpy(&st->match_rules, rules, sizeof(st->match_rules))__builtin_memcpy((&st->match_rules), (rules), (sizeof( st->match_rules))); | |||
| 4752 | memset(rules, 0, sizeof(*rules))__builtin_memset((rules), (0), (sizeof(*rules))); | |||
| 4753 | STATE_INC_COUNTERS(st)do { struct pf_rule_item *mrm; st->rule.ptr->states_cur ++; st->rule.ptr->states_tot++; if (st->anchor.ptr != ((void *)0)) { st->anchor.ptr->states_cur++; st->anchor .ptr->states_tot++; } for((mrm) = ((&st->match_rules )->slh_first); (mrm) != ((void *)0); (mrm) = ((mrm)->entry .sle_next)) mrm->r->states_cur++; } while (0); | |||
| 4754 | ||||
| 4755 | if (tag > 0) { | |||
| 4756 | pf_tag_ref(tag); | |||
| 4757 | st->tag = tag; | |||
| 4758 | } | |||
| 4759 | if (pd->proto == IPPROTO_TCP6 && (th->th_flags & (TH_SYN0x02|TH_ACK0x10)) == | |||
| 4760 | TH_SYN0x02 && r->keep_state == PF_STATE_SYNPROXY0x3 && pd->dir == PF_IN) { | |||
| 4761 | int rtid = pd->rdomain; | |||
| 4762 | if (act->rtableid >= 0) | |||
| 4763 | rtid = act->rtableid; | |||
| 4764 | pf_set_protostate(st, PF_PEER_SRC, PF_TCPS_PROXY_SRC((11)+0)); | |||
| 4765 | st->src.seqhi = arc4random(); | |||
| 4766 | /* Find mss option */ | |||
| 4767 | mss = pf_get_mss(pd); | |||
| 4768 | mss = pf_calc_mss(pd->src, pd->af, rtid, mss); | |||
| 4769 | mss = pf_calc_mss(pd->dst, pd->af, rtid, mss); | |||
| 4770 | st->src.mss = mss; | |||
| 4771 | pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport, | |||
| 4772 | th->th_sport, st->src.seqhi, ntohl(th->th_seq)(__uint32_t)(__builtin_constant_p(th->th_seq) ? (__uint32_t )(((__uint32_t)(th->th_seq) & 0xff) << 24 | ((__uint32_t )(th->th_seq) & 0xff00) << 8 | ((__uint32_t)(th-> th_seq) & 0xff0000) >> 8 | ((__uint32_t)(th->th_seq ) & 0xff000000) >> 24) : __swap32md(th->th_seq)) + 1, | |||
| 4773 | TH_SYN0x02|TH_ACK0x10, 0, st->src.mss, 0, 1, 0, pd->rdomain); | |||
| 4774 | REASON_SET(&reason, PFRES_SYNPROXY)do { if ((void *)(&reason) != ((void *)0)) { *(&reason ) = (14); if (14 < 17) pf_status.counters[14]++; } } while (0); | |||
| 4775 | return (PF_SYNPROXY_DROP); | |||
| 4776 | } | |||
| 4777 | ||||
| 4778 | return (PF_PASS); | |||
| 4779 | ||||
| 4780 | csfailed: | |||
| 4781 | if (st) { | |||
| 4782 | pf_normalize_tcp_cleanup(st); /* safe even w/o init */ | |||
| 4783 | pf_src_tree_remove_state(st); | |||
| 4784 | pool_put(&pf_state_pl, st); | |||
| 4785 | } | |||
| 4786 | ||||
| 4787 | for (i = 0; i < PF_SN_MAX; i++) | |||
| 4788 | if (sns[i] != NULL((void *)0)) | |||
| 4789 | pf_remove_src_node(sns[i]); | |||
| 4790 | ||||
| 4791 | return (PF_DROP); | |||
| 4792 | } | |||
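For the PF_STATE_MODULATE branch in pf_create_state() the mechanics are easiest to see in isolation: a random, non-zero offset (seqdiff) is added to the initiator's sequence numbers on the wire and subtracted from the peer's acknowledgements on the way back, hiding a weak ISN generator behind the firewall. The sketch below is a standalone illustration of that arithmetic; the struct and function names are ours, not pf's.

	#include <stdint.h>

	struct seq_mod {
		uint32_t seqdiff;	/* random, non-zero offset picked when the SYN is seen */
	};

	/* outbound: shift the host's sequence numbers into the modulated wire space */
	static uint32_t
	mod_seq_out(const struct seq_mod *m, uint32_t seq)
	{
		return (seq + m->seqdiff);	/* 32-bit wrap-around is intended */
	}

	/* inbound: shift the peer's acknowledgements back into the host's space */
	static uint32_t
	mod_ack_in(const struct seq_mod *m, uint32_t ack)
	{
		return (ack - m->seqdiff);
	}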
| 4793 | ||||
| 4794 | int | |||
| 4795 | pf_translate(struct pf_pdesc *pd, struct pf_addr *saddr, u_int16_t sport, | |||
| 4796 | struct pf_addr *daddr, u_int16_t dport, u_int16_t virtual_type, | |||
| 4797 | int icmp_dir) | |||
| 4798 | { | |||
| 4799 | int rewrite = 0; | |||
| 4800 | int afto = pd->af != pd->naf; | |||
| 4801 | ||||
| 4802 | if (afto || PF_ANEQ(daddr, pd->dst, pd->af)((pd->af == 2 && (daddr)->pfa.addr32[0] != (pd-> dst)->pfa.addr32[0]) || (pd->af == 24 && ((daddr )->pfa.addr32[3] != (pd->dst)->pfa.addr32[3] || (daddr )->pfa.addr32[2] != (pd->dst)->pfa.addr32[2] || (daddr )->pfa.addr32[1] != (pd->dst)->pfa.addr32[1] || (daddr )->pfa.addr32[0] != (pd->dst)->pfa.addr32[0])))) | |||
| 4803 | pd->destchg = 1; | |||
| 4804 | ||||
| 4805 | switch (pd->proto) { | |||
| 4806 | case IPPROTO_TCP6: /* FALLTHROUGH */ | |||
| 4807 | case IPPROTO_UDP17: | |||
| 4808 | rewrite += pf_patch_16(pd, pd->sport, sport); | |||
| 4809 | rewrite += pf_patch_16(pd, pd->dport, dport); | |||
| 4810 | break; | |||
| 4811 | ||||
| 4812 | case IPPROTO_ICMP1: | |||
| 4813 | if (pd->af != AF_INET2) | |||
| 4814 | return (0); | |||
| 4815 | ||||
| 4816 | #ifdef INET61 | |||
| 4817 | if (afto) { | |||
| 4818 | if (pf_translate_icmp_af(pd, AF_INET624, &pd->hdr.icmp)) | |||
| 4819 | return (0); | |||
| 4820 | pd->proto = IPPROTO_ICMPV658; | |||
| 4821 | rewrite = 1; | |||
| 4822 | } | |||
| 4823 | #endif /* INET6 */ | |||
| 4824 | if (virtual_type == htons(ICMP_ECHO)(__uint16_t)(__builtin_constant_p(8) ? (__uint16_t)(((__uint16_t )(8) & 0xffU) << 8 | ((__uint16_t)(8) & 0xff00U ) >> 8) : __swap16md(8))) { | |||
| 4825 | u_int16_t icmpid = (icmp_dir == PF_IN) ? sport : dport; | |||
| 4826 | rewrite += pf_patch_16(pd, | |||
| 4827 | &pd->hdr.icmp.icmp_idicmp_hun.ih_idseq.icd_id, icmpid); | |||
| 4828 | } | |||
| 4829 | break; | |||
| 4830 | ||||
| 4831 | #ifdef INET61 | |||
| 4832 | case IPPROTO_ICMPV658: | |||
| 4833 | if (pd->af != AF_INET624) | |||
| 4834 | return (0); | |||
| 4835 | ||||
| 4836 | if (afto) { | |||
| 4837 | if (pf_translate_icmp_af(pd, AF_INET2, &pd->hdr.icmp6)) | |||
| 4838 | return (0); | |||
| 4839 | pd->proto = IPPROTO_ICMP1; | |||
| 4840 | rewrite = 1; | |||
| 4841 | } | |||
| 4842 | if (virtual_type == htons(ICMP6_ECHO_REQUEST)(__uint16_t)(__builtin_constant_p(128) ? (__uint16_t)(((__uint16_t )(128) & 0xffU) << 8 | ((__uint16_t)(128) & 0xff00U ) >> 8) : __swap16md(128))) { | |||
| 4843 | u_int16_t icmpid = (icmp_dir == PF_IN) ? sport : dport; | |||
| 4844 | rewrite += pf_patch_16(pd, | |||
| 4845 | &pd->hdr.icmp6.icmp6_idicmp6_dataun.icmp6_un_data16[0], icmpid); | |||
| 4846 | } | |||
| 4847 | break; | |||
| 4848 | #endif /* INET6 */ | |||
| 4849 | } | |||
| 4850 | ||||
| 4851 | if (!afto) { | |||
| 4852 | rewrite += pf_translate_a(pd, pd->src, saddr); | |||
| 4853 | rewrite += pf_translate_a(pd, pd->dst, daddr); | |||
| 4854 | } | |||
| 4855 | ||||
| 4856 | return (rewrite); | |||
| 4857 | } | |||
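pf_translate() rewrites ports, ICMP identifiers and addresses in place through pf_patch_16()/pf_patch_32() and pf_translate_a(), which is typically done by fixing the transport checksum up incrementally rather than recomputing it over the whole packet. The function below is a minimal userland sketch of that general technique for a single 16-bit field, following RFC 1624; the name and signature are ours and do not claim to match pf's internal helpers.

	#include <stdint.h>

	static uint16_t
	cksum_fixup_16(uint16_t cksum, uint16_t old_val, uint16_t new_val)
	{
		uint32_t sum;

		/* HC' = ~(~HC + ~m + m')   -- RFC 1624, equation 3 */
		sum = (uint16_t)~cksum + (uint16_t)~old_val + new_val;
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}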
| 4858 | ||||
| 4859 | int | |||
| 4860 | pf_tcp_track_full(struct pf_pdesc *pd, struct pf_state **stp, u_short *reason, | |||
| 4861 | int *copyback, int reverse) | |||
| 4862 | { | |||
| 4863 | struct tcphdr *th = &pd->hdr.tcp; | |||
| 4864 | struct pf_state_peer *src, *dst; | |||
| 4865 | u_int16_t win = ntohs(th->th_win)(__uint16_t)(__builtin_constant_p(th->th_win) ? (__uint16_t )(((__uint16_t)(th->th_win) & 0xffU) << 8 | ((__uint16_t )(th->th_win) & 0xff00U) >> 8) : __swap16md(th-> th_win)); | |||
| 4866 | u_int32_t ack, end, data_end, seq, orig_seq; | |||
| 4867 | u_int8_t sws, dws, psrc, pdst; | |||
| 4868 | int ackskew; | |||
| 4869 | ||||
| 4870 | if ((pd->dir == (*stp)->direction && !reverse) || | |||
| 4871 | (pd->dir != (*stp)->direction && reverse)) { | |||
| 4872 | src = &(*stp)->src; | |||
| 4873 | dst = &(*stp)->dst; | |||
| 4874 | psrc = PF_PEER_SRC; | |||
| 4875 | pdst = PF_PEER_DST; | |||
| 4876 | } else { | |||
| 4877 | src = &(*stp)->dst; | |||
| 4878 | dst = &(*stp)->src; | |||
| 4879 | psrc = PF_PEER_DST; | |||
| 4880 | pdst = PF_PEER_SRC; | |||
| 4881 | } | |||
| 4882 | ||||
| 4883 | if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN0x02)) { | |||
| 4884 | sws = src->wscale & PF_WSCALE_MASK0x0f; | |||
| 4885 | dws = dst->wscale & PF_WSCALE_MASK0x0f; | |||
| 4886 | } else | |||
| 4887 | sws = dws = 0; | |||
| 4888 | ||||
| 4889 | /* | |||
| 4890 | * Sequence tracking algorithm from Guido van Rooij's paper: | |||
| 4891 | * http://www.madison-gurkha.com/publications/tcp_filtering/ | |||
| 4892 | * tcp_filtering.ps | |||
| 4893 | */ | |||
| 4894 | ||||
| 4895 | orig_seq = seq = ntohl(th->th_seq)(__uint32_t)(__builtin_constant_p(th->th_seq) ? (__uint32_t )(((__uint32_t)(th->th_seq) & 0xff) << 24 | ((__uint32_t )(th->th_seq) & 0xff00) << 8 | ((__uint32_t)(th-> th_seq) & 0xff0000) >> 8 | ((__uint32_t)(th->th_seq ) & 0xff000000) >> 24) : __swap32md(th->th_seq)); | |||
| 4896 | if (src->seqlo == 0) { | |||
| 4897 | /* First packet from this end. Set its state */ | |||
| 4898 | ||||
| 4899 | if (((*stp)->state_flags & PFSTATE_SCRUB_TCP0x0100 || dst->scrub) && | |||
| 4900 | src->scrub == NULL((void *)0)) { | |||
| 4901 | if (pf_normalize_tcp_init(pd, src)) { | |||
| 4902 | REASON_SET(reason, PFRES_MEMORY)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (5); if (5 < 17) pf_status.counters[5]++; } } while (0); | |||
| 4903 | return (PF_DROP); | |||
| 4904 | } | |||
| 4905 | } | |||
| 4906 | ||||
| 4907 | /* Deferred generation of sequence number modulator */ | |||
| 4908 | if (dst->seqdiff && !src->seqdiff) { | |||
| 4909 | /* use random iss for the TCP server */ | |||
| 4910 | while ((src->seqdiff = arc4random() - seq) == 0) | |||
| 4911 | continue; | |||
| 4912 | ack = ntohl(th->th_ack)(__uint32_t)(__builtin_constant_p(th->th_ack) ? (__uint32_t )(((__uint32_t)(th->th_ack) & 0xff) << 24 | ((__uint32_t )(th->th_ack) & 0xff00) << 8 | ((__uint32_t)(th-> th_ack) & 0xff0000) >> 8 | ((__uint32_t)(th->th_ack ) & 0xff000000) >> 24) : __swap32md(th->th_ack)) - dst->seqdiff; | |||
| 4913 | pf_patch_32(pd, &th->th_seq, htonl(seq + src->seqdiff)(__uint32_t)(__builtin_constant_p(seq + src->seqdiff) ? (__uint32_t )(((__uint32_t)(seq + src->seqdiff) & 0xff) << 24 | ((__uint32_t)(seq + src->seqdiff) & 0xff00) << 8 | ((__uint32_t)(seq + src->seqdiff) & 0xff0000) >> 8 | ((__uint32_t)(seq + src->seqdiff) & 0xff000000) >> 24) : __swap32md(seq + src->seqdiff))); | |||
| 4914 | pf_patch_32(pd, &th->th_ack, htonl(ack)(__uint32_t)(__builtin_constant_p(ack) ? (__uint32_t)(((__uint32_t )(ack) & 0xff) << 24 | ((__uint32_t)(ack) & 0xff00 ) << 8 | ((__uint32_t)(ack) & 0xff0000) >> 8 | ((__uint32_t)(ack) & 0xff000000) >> 24) : __swap32md (ack))); | |||
| 4915 | *copyback = 1; | |||
| 4916 | } else { | |||
| 4917 | ack = ntohl(th->th_ack)(__uint32_t)(__builtin_constant_p(th->th_ack) ? (__uint32_t )(((__uint32_t)(th->th_ack) & 0xff) << 24 | ((__uint32_t )(th->th_ack) & 0xff00) << 8 | ((__uint32_t)(th-> th_ack) & 0xff0000) >> 8 | ((__uint32_t)(th->th_ack ) & 0xff000000) >> 24) : __swap32md(th->th_ack)); | |||
| 4918 | } | |||
| 4919 | ||||
| 4920 | end = seq + pd->p_len; | |||
| 4921 | if (th->th_flags & TH_SYN0x02) { | |||
| 4922 | end++; | |||
| 4923 | if (dst->wscale & PF_WSCALE_FLAG0x80) { | |||
| 4924 | src->wscale = pf_get_wscale(pd); | |||
| 4925 | if (src->wscale & PF_WSCALE_FLAG0x80) { | |||
| 4926 | /* Remove scale factor from initial | |||
| 4927 | * window */ | |||
| 4928 | sws = src->wscale & PF_WSCALE_MASK0x0f; | |||
| 4929 | win = ((u_int32_t)win + (1 << sws) - 1) | |||
| 4930 | >> sws; | |||
| 4931 | dws = dst->wscale & PF_WSCALE_MASK0x0f; | |||
| 4932 | } else { | |||
| 4933 | /* fixup other window */ | |||
| 4934 | dst->max_win = MIN(TCP_MAXWIN,(((65535)<((u_int32_t)dst->max_win << (dst->wscale & 0x0f)))?(65535):((u_int32_t)dst->max_win << ( dst->wscale & 0x0f))) | |||
| 4935 | (u_int32_t)dst->max_win <<(((65535)<((u_int32_t)dst->max_win << (dst->wscale & 0x0f)))?(65535):((u_int32_t)dst->max_win << ( dst->wscale & 0x0f))) | |||
| 4936 | (dst->wscale & PF_WSCALE_MASK))(((65535)<((u_int32_t)dst->max_win << (dst->wscale & 0x0f)))?(65535):((u_int32_t)dst->max_win << ( dst->wscale & 0x0f))); | |||
| 4937 | /* in case of a retrans SYN|ACK */ | |||
| 4938 | dst->wscale = 0; | |||
| 4939 | } | |||
| 4940 | } | |||
| 4941 | } | |||
| 4942 | data_end = end; | |||
| 4943 | if (th->th_flags & TH_FIN0x01) | |||
| 4944 | end++; | |||
| 4945 | ||||
| 4946 | src->seqlo = seq; | |||
| 4947 | if (src->state < TCPS_SYN_SENT2) | |||
| 4948 | pf_set_protostate(*stp, psrc, TCPS_SYN_SENT2); | |||
| 4949 | ||||
| 4950 | /* | |||
| 4951 | * May need to slide the window (seqhi may have been set by | |||
| 4952 | * the crappy stack check or if we picked up the connection | |||
| 4953 | * after establishment) | |||
| 4954 | */ | |||
| 4955 | if (src->seqhi == 1 || | |||
| 4956 | SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi)((int)((end + (((1)>(dst->max_win << dws))?(1):(dst ->max_win << dws)))-(src->seqhi)) >= 0)) | |||
| 4957 | src->seqhi = end + MAX(1, dst->max_win << dws)(((1)>(dst->max_win << dws))?(1):(dst->max_win << dws)); | |||
| 4958 | if (win > src->max_win) | |||
| 4959 | src->max_win = win; | |||
| 4960 | ||||
| 4961 | } else { | |||
| 4962 | ack = ntohl(th->th_ack)(__uint32_t)(__builtin_constant_p(th->th_ack) ? (__uint32_t )(((__uint32_t)(th->th_ack) & 0xff) << 24 | ((__uint32_t )(th->th_ack) & 0xff00) << 8 | ((__uint32_t)(th-> th_ack) & 0xff0000) >> 8 | ((__uint32_t)(th->th_ack ) & 0xff000000) >> 24) : __swap32md(th->th_ack)) - dst->seqdiff; | |||
| 4963 | if (src->seqdiff) { | |||
| 4964 | /* Modulate sequence numbers */ | |||
| 4965 | pf_patch_32(pd, &th->th_seq, htonl(seq + src->seqdiff)(__uint32_t)(__builtin_constant_p(seq + src->seqdiff) ? (__uint32_t )(((__uint32_t)(seq + src->seqdiff) & 0xff) << 24 | ((__uint32_t)(seq + src->seqdiff) & 0xff00) << 8 | ((__uint32_t)(seq + src->seqdiff) & 0xff0000) >> 8 | ((__uint32_t)(seq + src->seqdiff) & 0xff000000) >> 24) : __swap32md(seq + src->seqdiff))); | |||
| 4966 | pf_patch_32(pd, &th->th_ack, htonl(ack)(__uint32_t)(__builtin_constant_p(ack) ? (__uint32_t)(((__uint32_t )(ack) & 0xff) << 24 | ((__uint32_t)(ack) & 0xff00 ) << 8 | ((__uint32_t)(ack) & 0xff0000) >> 8 | ((__uint32_t)(ack) & 0xff000000) >> 24) : __swap32md (ack))); | |||
| 4967 | *copyback = 1; | |||
| 4968 | } | |||
| 4969 | end = seq + pd->p_len; | |||
| 4970 | if (th->th_flags & TH_SYN0x02) | |||
| 4971 | end++; | |||
| 4972 | data_end = end; | |||
| 4973 | if (th->th_flags & TH_FIN0x01) | |||
| 4974 | end++; | |||
| 4975 | } | |||
| 4976 | ||||
| 4977 | if ((th->th_flags & TH_ACK0x10) == 0) { | |||
| 4978 | /* Let it pass through the ack skew check */ | |||
| 4979 | ack = dst->seqlo; | |||
| 4980 | } else if ((ack == 0 && | |||
| 4981 | (th->th_flags & (TH_ACK0x10|TH_RST0x04)) == (TH_ACK0x10|TH_RST0x04)) || | |||
| 4982 | /* broken tcp stacks do not set ack */ | |||
| 4983 | (dst->state < TCPS_SYN_SENT2)) { | |||
| 4984 | /* | |||
| 4985 | * Many stacks (ours included) will set the ACK number in an | |||
| 4986 | * FIN|ACK if the SYN times out -- no sequence to ACK. | |||
| 4987 | */ | |||
| 4988 | ack = dst->seqlo; | |||
| 4989 | } | |||
| 4990 | ||||
| 4991 | if (seq == end) { | |||
| 4992 | /* Ease sequencing restrictions on no data packets */ | |||
| 4993 | seq = src->seqlo; | |||
| 4994 | data_end = end = seq; | |||
| 4995 | } | |||
| 4996 | ||||
| 4997 | ackskew = dst->seqlo - ack; | |||
| 4998 | ||||
| 4999 | ||||
| 5000 | /* | |||
| 5001 | * Need to demodulate the sequence numbers in any TCP SACK options | |||
| 5002 | * (Selective ACK). We could optionally validate the SACK values | |||
| 5003 | * against the current ACK window, either forwards or backwards, but | |||
| 5004 | * I'm not confident that SACK has been implemented properly | |||
| 5005 | * everywhere. It wouldn't surprise me if several stacks accidentally | |||
| 5006 | * SACK too far backwards of previously ACKed data. There really aren't | |||
| 5007 | * any security implications of bad SACKing unless the target stack | |||
| 5008 | * doesn't validate the option length correctly. Someone trying to | |||
| 5009 | * spoof into a TCP connection won't bother blindly sending SACK | |||
| 5010 | * options anyway. | |||
| 5011 | */ | |||
| 5012 | if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) { | |||
| 5013 | if (pf_modulate_sack(pd, dst)) | |||
| 5014 | *copyback = 1; | |||
| 5015 | } | |||
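The long comment above explains why the SACK blocks need the same seqdiff treatment as th_seq/th_ack. The fragment below sketches that idea in userland: it walks the TCP options of a segment held in a flat buffer and shifts both edges of every SACK block by a fixed offset. It is illustrative only and is not pf_modulate_sack(); the names and the buffer-based interface are assumptions of this sketch.

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	#define OPT_EOL		0
	#define OPT_NOP		1
	#define OPT_SACK	5

	static int
	demod_sack(uint8_t *opts, int optlen, uint32_t seqdiff)
	{
		int i = 0, changed = 0;

		while (i < optlen) {
			uint8_t kind = opts[i];

			if (kind == OPT_EOL)
				break;
			if (kind == OPT_NOP) {
				i++;
				continue;
			}
			/* every other option carries a length byte covering kind+len */
			if (i + 1 >= optlen || opts[i + 1] < 2 || i + opts[i + 1] > optlen)
				break;
			if (kind == OPT_SACK) {
				int j, k;
				uint32_t edge;

				/* each SACK block is a pair of 32-bit sequence edges */
				for (j = i + 2; j + 8 <= i + opts[i + 1]; j += 8) {
					for (k = 0; k < 2; k++) {
						memcpy(&edge, &opts[j + 4 * k], 4);
						edge = htonl(ntohl(edge) - seqdiff);
						memcpy(&opts[j + 4 * k], &edge, 4);
					}
					changed = 1;
				}
			}
			i += opts[i + 1];
		}
		return (changed);
	}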
| 5016 | ||||
| 5017 | ||||
| 5018 | #define MAXACKWINDOW(0xffff + 1500) (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ | |||
| 5019 | if (SEQ_GEQ(src->seqhi, data_end)((int)((src->seqhi)-(data_end)) >= 0) && | |||
| 5020 | /* Last octet inside other's window space */ | |||
| 5021 | SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws))((int)((seq)-(src->seqlo - (dst->max_win << dws)) ) >= 0) && | |||
| 5022 | /* Retrans: not more than one window back */ | |||
| 5023 | (ackskew >= -MAXACKWINDOW(0xffff + 1500)) && | |||
| 5024 | /* Acking not more than one reassembled fragment backwards */ | |||
| 5025 | (ackskew <= (MAXACKWINDOW(0xffff + 1500) << sws)) && | |||
| 5026 | /* Acking not more than one window forward */ | |||
| 5027 | ((th->th_flags & TH_RST0x04) == 0 || orig_seq == src->seqlo || | |||
| 5028 | (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo))) { | |||
| 5029 | /* Require an exact/+1 sequence match on resets when possible */ | |||
| 5030 | ||||
| 5031 | if (dst->scrub || src->scrub) { | |||
| 5032 | if (pf_normalize_tcp_stateful(pd, reason, *stp, src, | |||
| 5033 | dst, copyback)) | |||
| 5034 | return (PF_DROP); | |||
| 5035 | } | |||
| 5036 | ||||
| 5037 | /* update max window */ | |||
| 5038 | if (src->max_win < win) | |||
| 5039 | src->max_win = win; | |||
| 5040 | /* synchronize sequencing */ | |||
| 5041 | if (SEQ_GT(end, src->seqlo)((int)((end)-(src->seqlo)) > 0)) | |||
| 5042 | src->seqlo = end; | |||
| 5043 | /* slide the window of what the other end can send */ | |||
| 5044 | if (SEQ_GEQ(ack + (win << sws), dst->seqhi)((int)((ack + (win << sws))-(dst->seqhi)) >= 0)) | |||
| 5045 | dst->seqhi = ack + MAX((win << sws), 1)((((win << sws))>(1))?((win << sws)):(1)); | |||
| 5046 | ||||
| 5047 | /* update states */ | |||
| 5048 | if (th->th_flags & TH_SYN0x02) | |||
| 5049 | if (src->state < TCPS_SYN_SENT2) | |||
| 5050 | pf_set_protostate(*stp, psrc, TCPS_SYN_SENT2); | |||
| 5051 | if (th->th_flags & TH_FIN0x01) | |||
| 5052 | if (src->state < TCPS_CLOSING7) | |||
| 5053 | pf_set_protostate(*stp, psrc, TCPS_CLOSING7); | |||
| 5054 | if (th->th_flags & TH_ACK0x10) { | |||
| 5055 | if (dst->state == TCPS_SYN_SENT2) { | |||
| 5056 | pf_set_protostate(*stp, pdst, | |||
| 5057 | TCPS_ESTABLISHED4); | |||
| 5058 | if (src->state == TCPS_ESTABLISHED4 && | |||
| 5059 | !SLIST_EMPTY(&(*stp)->src_nodes)(((&(*stp)->src_nodes)->slh_first) == ((void *)0)) && | |||
| 5060 | pf_src_connlimit(stp)) { | |||
| 5061 | REASON_SET(reason, PFRES_SRCLIMIT)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (13); if (13 < 17) pf_status.counters[13]++; } } while (0); | |||
| 5062 | return (PF_DROP); | |||
| 5063 | } | |||
| 5064 | } else if (dst->state == TCPS_CLOSING7) | |||
| 5065 | pf_set_protostate(*stp, pdst, | |||
| 5066 | TCPS_FIN_WAIT_29); | |||
| 5067 | } | |||
| 5068 | if (th->th_flags & TH_RST0x04) | |||
| 5069 | pf_set_protostate(*stp, PF_PEER_BOTH, TCPS_TIME_WAIT10); | |||
| 5070 | ||||
| 5071 | /* update expire time */ | |||
| 5072 | (*stp)->expire = getuptime(); | |||
| 5073 | if (src->state >= TCPS_FIN_WAIT_29 && | |||
| 5074 | dst->state >= TCPS_FIN_WAIT_29) | |||
| 5075 | pf_update_state_timeout(*stp, PFTM_TCP_CLOSED); | |||
| 5076 | else if (src->state >= TCPS_CLOSING7 && | |||
| 5077 | dst->state >= TCPS_CLOSING7) | |||
| 5078 | pf_update_state_timeout(*stp, PFTM_TCP_FIN_WAIT); | |||
| 5079 | else if (src->state < TCPS_ESTABLISHED4 || | |||
| 5080 | dst->state < TCPS_ESTABLISHED4) | |||
| 5081 | pf_update_state_timeout(*stp, PFTM_TCP_OPENING); | |||
| 5082 | else if (src->state >= TCPS_CLOSING7 || | |||
| 5083 | dst->state >= TCPS_CLOSING7) | |||
| 5084 | pf_update_state_timeout(*stp, PFTM_TCP_CLOSING); | |||
| 5085 | else | |||
| 5086 | pf_update_state_timeout(*stp, PFTM_TCP_ESTABLISHED); | |||
| 5087 | ||||
| 5088 | /* Fall through to PASS packet */ | |||
| 5089 | } else if ((dst->state < TCPS_SYN_SENT2 || | |||
| 5090 | dst->state >= TCPS_FIN_WAIT_29 || | |||
| 5091 | src->state >= TCPS_FIN_WAIT_29) && | |||
| 5092 | SEQ_GEQ(src->seqhi + MAXACKWINDOW, data_end)((int)((src->seqhi + (0xffff + 1500))-(data_end)) >= 0) && | |||
| 5093 | /* Within a window forward of the originating packet */ | |||
| 5094 | SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)((int)((seq)-(src->seqlo - (0xffff + 1500))) >= 0)) { | |||
| 5095 | /* Within a window backward of the originating packet */ | |||
| 5096 | ||||
| 5097 | /* | |||
| 5098 | * This currently handles three situations: | |||
| 5099 | * 1) Stupid stacks will shotgun SYNs before their peer | |||
| 5100 | * replies. | |||
| 5101 | * 2) When PF catches an already established stream (the | |||
| 5102 | * firewall rebooted, the state table was flushed, routes | |||
| 5103 | * changed...) | |||
| 5104 | * 3) Packets get funky immediately after the connection | |||
| 5105 | * closes (this should catch Solaris spurious ACK|FINs | |||
| 5106 | * that web servers like to spew after a close) | |||
| 5107 | * | |||
| 5108 | * This must be a little more careful than the above code | |||
| 5109 | * since packet floods will also be caught here. We don't | |||
| 5110 | * update the TTL here to mitigate the damage of a packet | |||
| 5111 | * flood and so the same code can handle awkward establishment | |||
| 5112 | * and a loosened connection close. | |||
| 5113 | * In the establishment case, a correct peer response will | |||
| 5114 | * validate the connection, go through the normal state code | |||
| 5115 | * and keep updating the state TTL. | |||
| 5116 | */ | |||
| 5117 | ||||
| 5118 | if (pf_status.debug >= LOG_NOTICE5) { | |||
| 5119 | log(LOG_NOTICE5, "pf: loose state match: "); | |||
| 5120 | pf_print_state(*stp); | |||
| 5121 | pf_print_flags(th->th_flags); | |||
| 5122 | addlog(" seq=%u (%u) ack=%u len=%u ackskew=%d " | |||
| 5123 | "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, | |||
| 5124 | pd->p_len, ackskew, (*stp)->packets[0], | |||
| 5125 | (*stp)->packets[1], | |||
| 5126 | pd->dir == PF_IN ? "in" : "out", | |||
| 5127 | pd->dir == (*stp)->direction ? "fwd" : "rev"); | |||
| 5128 | } | |||
| 5129 | ||||
| 5130 | if (dst->scrub || src->scrub) { | |||
| 5131 | if (pf_normalize_tcp_stateful(pd, reason, *stp, src, | |||
| 5132 | dst, copyback)) | |||
| 5133 | return (PF_DROP); | |||
| 5134 | } | |||
| 5135 | ||||
| 5136 | /* update max window */ | |||
| 5137 | if (src->max_win < win) | |||
| 5138 | src->max_win = win; | |||
| 5139 | /* synchronize sequencing */ | |||
| 5140 | if (SEQ_GT(end, src->seqlo)((int)((end)-(src->seqlo)) > 0)) | |||
| 5141 | src->seqlo = end; | |||
| 5142 | /* slide the window of what the other end can send */ | |||
| 5143 | if (SEQ_GEQ(ack + (win << sws), dst->seqhi)((int)((ack + (win << sws))-(dst->seqhi)) >= 0)) | |||
| 5144 | dst->seqhi = ack + MAX((win << sws), 1)((((win << sws))>(1))?((win << sws)):(1)); | |||
| 5145 | ||||
| 5146 | /* | |||
| 5147 | * Cannot set dst->seqhi here since this could be a shotgunned | |||
| 5148 | * SYN and not an already established connection. | |||
| 5149 | */ | |||
| 5150 | if (th->th_flags & TH_FIN0x01) | |||
| 5151 | if (src->state < TCPS_CLOSING7) | |||
| 5152 | pf_set_protostate(*stp, psrc, TCPS_CLOSING7); | |||
| 5153 | if (th->th_flags & TH_RST0x04) | |||
| 5154 | pf_set_protostate(*stp, PF_PEER_BOTH, TCPS_TIME_WAIT10); | |||
| 5155 | ||||
| 5156 | /* Fall through to PASS packet */ | |||
| 5157 | } else { | |||
| 5158 | if ((*stp)->dst.state == TCPS_SYN_SENT2 && | |||
| 5159 | (*stp)->src.state == TCPS_SYN_SENT2) { | |||
| 5160 | /* Send RST for state mismatches during handshake */ | |||
| 5161 | if (!(th->th_flags & TH_RST0x04)) | |||
| 5162 | pf_send_tcp((*stp)->rule.ptr, pd->af, | |||
| 5163 | pd->dst, pd->src, th->th_dport, | |||
| 5164 | th->th_sport, ntohl(th->th_ack)(__uint32_t)(__builtin_constant_p(th->th_ack) ? (__uint32_t )(((__uint32_t)(th->th_ack) & 0xff) << 24 | ((__uint32_t )(th->th_ack) & 0xff00) << 8 | ((__uint32_t)(th-> th_ack) & 0xff0000) >> 8 | ((__uint32_t)(th->th_ack ) & 0xff000000) >> 24) : __swap32md(th->th_ack)), 0, | |||
| 5165 | TH_RST0x04, 0, 0, | |||
| 5166 | (*stp)->rule.ptr->return_ttl, 1, 0, | |||
| 5167 | pd->rdomain); | |||
| 5168 | src->seqlo = 0; | |||
| 5169 | src->seqhi = 1; | |||
| 5170 | src->max_win = 1; | |||
| 5171 | } else if (pf_status.debug >= LOG_NOTICE5) { | |||
| 5172 | log(LOG_NOTICE5, "pf: BAD state: "); | |||
| 5173 | pf_print_state(*stp); | |||
| 5174 | pf_print_flags(th->th_flags); | |||
| 5175 | addlog(" seq=%u (%u) ack=%u len=%u ackskew=%d " | |||
| 5176 | "pkts=%llu:%llu dir=%s,%s\n", | |||
| 5177 | seq, orig_seq, ack, pd->p_len, ackskew, | |||
| 5178 | (*stp)->packets[0], (*stp)->packets[1], | |||
| 5179 | pd->dir == PF_IN ? "in" : "out", | |||
| 5180 | pd->dir == (*stp)->direction ? "fwd" : "rev"); | |||
| 5181 | addlog("pf: State failure on: %c %c %c %c | %c %c\n", | |||
| 5182 | SEQ_GEQ(src->seqhi, data_end)((int)((src->seqhi)-(data_end)) >= 0) ? ' ' : '1', | |||
| 5183 | SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws))((int)((seq)-(src->seqlo - (dst->max_win << dws)) ) >= 0) ? | |||
| 5184 | ' ': '2', | |||
| 5185 | (ackskew >= -MAXACKWINDOW(0xffff + 1500)) ? ' ' : '3', | |||
| 5186 | (ackskew <= (MAXACKWINDOW(0xffff + 1500) << sws)) ? ' ' : '4', | |||
| 5187 | SEQ_GEQ(src->seqhi + MAXACKWINDOW, data_end)((int)((src->seqhi + (0xffff + 1500))-(data_end)) >= 0) ? | |||
| 5188 | ' ' :'5', | |||
| 5189 | SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)((int)((seq)-(src->seqlo - (0xffff + 1500))) >= 0) ?' ' :'6'); | |||
| 5190 | } | |||
| 5191 | REASON_SET(reason, PFRES_BADSTATE)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (10); if (10 < 17) pf_status.counters[10]++; } } while (0); | |||
| 5192 | return (PF_DROP); | |||
| 5193 | } | |||
| 5194 | ||||
| 5195 | return (PF_PASS); | |||
| 5196 | } | |||
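The '1'..'4' failure codes printed in the "BAD state" branch map to the sequence-window conditions tested at the top of the strict path. Pulled out of the surrounding state machine, that acceptability test looks roughly like the sketch below (our naming; FUDGE plays the role of MAXACKWINDOW defined above).

	#include <stdint.h>

	#define SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)
	#define FUDGE		(0xffff + 1500)		/* same spirit as MAXACKWINDOW */

	struct peer_view {
		uint32_t seqlo;		/* lowest sequence number still expected from this peer */
		uint32_t seqhi;		/* highest sequence number the other side will accept   */
		uint32_t max_win;	/* largest window this peer has advertised              */
	};

	static int
	seq_window_ok(const struct peer_view *src, const struct peer_view *dst,
	    uint32_t seq, uint32_t data_end, uint32_t ack, int sws, int dws)
	{
		int32_t ackskew = dst->seqlo - ack;

		return (SEQ_GEQ(src->seqhi, data_end) &&			/* 1: data fits in window   */
		    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&	/* 2: not too far back      */
		    ackskew >= -FUDGE &&					/* 3: ack skew within bounds */
		    ackskew <= (FUDGE << sws));					/* 4: ack skew within bounds */
	}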
| 5197 | ||||
| 5198 | int | |||
| 5199 | pf_tcp_track_sloppy(struct pf_pdesc *pd, struct pf_state **stp, | |||
| 5200 | u_short *reason) | |||
| 5201 | { | |||
| 5202 | struct tcphdr *th = &pd->hdr.tcp; | |||
| 5203 | struct pf_state_peer *src, *dst; | |||
| 5204 | u_int8_t psrc, pdst; | |||
| 5205 | ||||
| 5206 | if (pd->dir == (*stp)->direction) { | |||
| 5207 | src = &(*stp)->src; | |||
| 5208 | dst = &(*stp)->dst; | |||
| 5209 | psrc = PF_PEER_SRC; | |||
| 5210 | pdst = PF_PEER_DST; | |||
| 5211 | } else { | |||
| 5212 | src = &(*stp)->dst; | |||
| 5213 | dst = &(*stp)->src; | |||
| 5214 | psrc = PF_PEER_DST; | |||
| 5215 | pdst = PF_PEER_SRC; | |||
| 5216 | } | |||
| 5217 | ||||
| 5218 | if (th->th_flags & TH_SYN0x02) | |||
| 5219 | if (src->state < TCPS_SYN_SENT2) | |||
| 5220 | pf_set_protostate(*stp, psrc, TCPS_SYN_SENT2); | |||
| 5221 | if (th->th_flags & TH_FIN0x01) | |||
| 5222 | if (src->state < TCPS_CLOSING7) | |||
| 5223 | pf_set_protostate(*stp, psrc, TCPS_CLOSING7); | |||
| 5224 | if (th->th_flags & TH_ACK0x10) { | |||
| 5225 | if (dst->state == TCPS_SYN_SENT2) { | |||
| 5226 | pf_set_protostate(*stp, pdst, TCPS_ESTABLISHED4); | |||
| 5227 | if (src->state == TCPS_ESTABLISHED4 && | |||
| 5228 | !SLIST_EMPTY(&(*stp)->src_nodes)(((&(*stp)->src_nodes)->slh_first) == ((void *)0)) && | |||
| 5229 | pf_src_connlimit(stp)) { | |||
| 5230 | REASON_SET(reason, PFRES_SRCLIMIT)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (13); if (13 < 17) pf_status.counters[13]++; } } while (0); | |||
| 5231 | return (PF_DROP); | |||
| 5232 | } | |||
| 5233 | } else if (dst->state == TCPS_CLOSING7) { | |||
| 5234 | pf_set_protostate(*stp, pdst, TCPS_FIN_WAIT_29); | |||
| 5235 | } else if (src->state == TCPS_SYN_SENT2 && | |||
| 5236 | dst->state < TCPS_SYN_SENT2) { | |||
| 5237 | /* | |||
| 5238 | * Handle a special sloppy case where we only see one | |||
| 5239 | * half of the connection. If there is an ACK after | |||
| 5240 | * the initial SYN without ever seeing a packet from | |||
| 5241 | * the destination, set the connection to established. | |||
| 5242 | */ | |||
| 5243 | pf_set_protostate(*stp, PF_PEER_BOTH, | |||
| 5244 | TCPS_ESTABLISHED4); | |||
| 5245 | if (!SLIST_EMPTY(&(*stp)->src_nodes)(((&(*stp)->src_nodes)->slh_first) == ((void *)0)) && | |||
| 5246 | pf_src_connlimit(stp)) { | |||
| 5247 | REASON_SET(reason, PFRES_SRCLIMIT)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (13); if (13 < 17) pf_status.counters[13]++; } } while (0); | |||
| 5248 | return (PF_DROP); | |||
| 5249 | } | |||
| 5250 | } else if (src->state == TCPS_CLOSING7 && | |||
| 5251 | dst->state == TCPS_ESTABLISHED4 && | |||
| 5252 | dst->seqlo == 0) { | |||
| 5253 | /* | |||
| 5254 | * Handle the closing of half connections where we | |||
| 5255 | * don't see the full bidirectional FIN/ACK+ACK | |||
| 5256 | * handshake. | |||
| 5257 | */ | |||
| 5258 | pf_set_protostate(*stp, pdst, TCPS_CLOSING7); | |||
| 5259 | } | |||
| 5260 | } | |||
| 5261 | if (th->th_flags & TH_RST0x04) | |||
| 5262 | pf_set_protostate(*stp, PF_PEER_BOTH, TCPS_TIME_WAIT10); | |||
| 5263 | ||||
| 5264 | /* update expire time */ | |||
| 5265 | (*stp)->expire = getuptime(); | |||
| 5266 | if (src->state >= TCPS_FIN_WAIT_29 && | |||
| 5267 | dst->state >= TCPS_FIN_WAIT_29) | |||
| 5268 | pf_update_state_timeout(*stp, PFTM_TCP_CLOSED); | |||
| 5269 | else if (src->state >= TCPS_CLOSING7 && | |||
| 5270 | dst->state >= TCPS_CLOSING7) | |||
| 5271 | pf_update_state_timeout(*stp, PFTM_TCP_FIN_WAIT); | |||
| 5272 | else if (src->state < TCPS_ESTABLISHED4 || | |||
| 5273 | dst->state < TCPS_ESTABLISHED4) | |||
| 5274 | pf_update_state_timeout(*stp, PFTM_TCP_OPENING); | |||
| 5275 | else if (src->state >= TCPS_CLOSING7 || | |||
| 5276 | dst->state >= TCPS_CLOSING7) | |||
| 5277 | pf_update_state_timeout(*stp, PFTM_TCP_CLOSING); | |||
| 5278 | else | |||
| 5279 | pf_update_state_timeout(*stp, PFTM_TCP_ESTABLISHED); | |||
| 5280 | ||||
| 5281 | return (PF_PASS); | |||
| 5282 | } | |||
| 5283 | ||||
| 5284 | static __inline int | |||
| 5285 | pf_synproxy(struct pf_pdesc *pd, struct pf_state **stp, u_short *reason) | |||
| 5286 | { | |||
| 5287 | struct pf_state_key *sk = (*stp)->key[pd->didx]; | |||
| 5288 | ||||
| 5289 | if ((*stp)->src.state == PF_TCPS_PROXY_SRC((11)+0)) { | |||
| 5290 | struct tcphdr *th = &pd->hdr.tcp; | |||
| 5291 | ||||
| 5292 | if (pd->dir != (*stp)->direction) { | |||
| 5293 | REASON_SET(reason, PFRES_SYNPROXY)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (14); if (14 < 17) pf_status.counters[14]++; } } while (0); | |||
| 5294 | return (PF_SYNPROXY_DROP); | |||
| 5295 | } | |||
| 5296 | if (th->th_flags & TH_SYN0x02) { | |||
| 5297 | if (ntohl(th->th_seq)(__uint32_t)(__builtin_constant_p(th->th_seq) ? (__uint32_t )(((__uint32_t)(th->th_seq) & 0xff) << 24 | ((__uint32_t )(th->th_seq) & 0xff00) << 8 | ((__uint32_t)(th-> th_seq) & 0xff0000) >> 8 | ((__uint32_t)(th->th_seq ) & 0xff000000) >> 24) : __swap32md(th->th_seq)) != (*stp)->src.seqlo) { | |||
| 5298 | REASON_SET(reason, PFRES_SYNPROXY)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (14); if (14 < 17) pf_status.counters[14]++; } } while (0); | |||
| 5299 | return (PF_DROP); | |||
| 5300 | } | |||
| 5301 | pf_send_tcp((*stp)->rule.ptr, pd->af, pd->dst, | |||
| 5302 | pd->src, th->th_dport, th->th_sport, | |||
| 5303 | (*stp)->src.seqhi, ntohl(th->th_seq)(__uint32_t)(__builtin_constant_p(th->th_seq) ? (__uint32_t )(((__uint32_t)(th->th_seq) & 0xff) << 24 | ((__uint32_t )(th->th_seq) & 0xff00) << 8 | ((__uint32_t)(th-> th_seq) & 0xff0000) >> 8 | ((__uint32_t)(th->th_seq ) & 0xff000000) >> 24) : __swap32md(th->th_seq)) + 1, | |||
| 5304 | TH_SYN0x02|TH_ACK0x10, 0, (*stp)->src.mss, 0, 1, | |||
| 5305 | 0, pd->rdomain); | |||
| 5306 | REASON_SET(reason, PFRES_SYNPROXY)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (14); if (14 < 17) pf_status.counters[14]++; } } while (0); | |||
| 5307 | return (PF_SYNPROXY_DROP); | |||
| 5308 | } else if ((th->th_flags & (TH_ACK0x10|TH_RST0x04|TH_FIN0x01)) != TH_ACK0x10 || | |||
| 5309 | (ntohl(th->th_ack)(__uint32_t)(__builtin_constant_p(th->th_ack) ? (__uint32_t )(((__uint32_t)(th->th_ack) & 0xff) << 24 | ((__uint32_t )(th->th_ack) & 0xff00) << 8 | ((__uint32_t)(th-> th_ack) & 0xff0000) >> 8 | ((__uint32_t)(th->th_ack ) & 0xff000000) >> 24) : __swap32md(th->th_ack)) != (*stp)->src.seqhi + 1) || | |||
| 5310 | (ntohl(th->th_seq)(__uint32_t)(__builtin_constant_p(th->th_seq) ? (__uint32_t )(((__uint32_t)(th->th_seq) & 0xff) << 24 | ((__uint32_t )(th->th_seq) & 0xff00) << 8 | ((__uint32_t)(th-> th_seq) & 0xff0000) >> 8 | ((__uint32_t)(th->th_seq ) & 0xff000000) >> 24) : __swap32md(th->th_seq)) != (*stp)->src.seqlo + 1)) { | |||
| 5311 | REASON_SET(reason, PFRES_SYNPROXY)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (14); if (14 < 17) pf_status.counters[14]++; } } while (0); | |||
| 5312 | return (PF_DROP); | |||
| 5313 | } else if (!SLIST_EMPTY(&(*stp)->src_nodes)(((&(*stp)->src_nodes)->slh_first) == ((void *)0)) && | |||
| 5314 | pf_src_connlimit(stp)) { | |||
| 5315 | REASON_SET(reason, PFRES_SRCLIMIT)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (13); if (13 < 17) pf_status.counters[13]++; } } while (0); | |||
| 5316 | return (PF_DROP); | |||
| 5317 | } else | |||
| 5318 | pf_set_protostate(*stp, PF_PEER_SRC, | |||
| 5319 | PF_TCPS_PROXY_DST((11)+1)); | |||
| 5320 | } | |||
| 5321 | if ((*stp)->src.state == PF_TCPS_PROXY_DST((11)+1)) { | |||
| 5322 | struct tcphdr *th = &pd->hdr.tcp; | |||
| 5323 | ||||
| 5324 | if (pd->dir == (*stp)->direction) { | |||
| 5325 | if (((th->th_flags & (TH_SYN0x02|TH_ACK0x10)) != TH_ACK0x10) || | |||
| 5326 | (ntohl(th->th_ack)(__uint32_t)(__builtin_constant_p(th->th_ack) ? (__uint32_t )(((__uint32_t)(th->th_ack) & 0xff) << 24 | ((__uint32_t )(th->th_ack) & 0xff00) << 8 | ((__uint32_t)(th-> th_ack) & 0xff0000) >> 8 | ((__uint32_t)(th->th_ack ) & 0xff000000) >> 24) : __swap32md(th->th_ack)) != (*stp)->src.seqhi + 1) || | |||
| 5327 | (ntohl(th->th_seq)(__uint32_t)(__builtin_constant_p(th->th_seq) ? (__uint32_t )(((__uint32_t)(th->th_seq) & 0xff) << 24 | ((__uint32_t )(th->th_seq) & 0xff00) << 8 | ((__uint32_t)(th-> th_seq) & 0xff0000) >> 8 | ((__uint32_t)(th->th_seq ) & 0xff000000) >> 24) : __swap32md(th->th_seq)) != (*stp)->src.seqlo + 1)) { | |||
| 5328 | REASON_SET(reason, PFRES_SYNPROXY)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (14); if (14 < 17) pf_status.counters[14]++; } } while (0); | |||
| 5329 | return (PF_DROP); | |||
| 5330 | } | |||
| 5331 | (*stp)->src.max_win = MAX(ntohs(th->th_win), 1)((((__uint16_t)(__builtin_constant_p(th->th_win) ? (__uint16_t )(((__uint16_t)(th->th_win) & 0xffU) << 8 | ((__uint16_t )(th->th_win) & 0xff00U) >> 8) : __swap16md(th-> th_win)))>(1))?((__uint16_t)(__builtin_constant_p(th->th_win ) ? (__uint16_t)(((__uint16_t)(th->th_win) & 0xffU) << 8 | ((__uint16_t)(th->th_win) & 0xff00U) >> 8) : __swap16md(th->th_win))):(1)); | |||
| 5332 | if ((*stp)->dst.seqhi == 1) | |||
| 5333 | (*stp)->dst.seqhi = arc4random(); | |||
| 5334 | pf_send_tcp((*stp)->rule.ptr, pd->af, | |||
| 5335 | &sk->addr[pd->sidx], &sk->addr[pd->didx], | |||
| 5336 | sk->port[pd->sidx], sk->port[pd->didx], | |||
| 5337 | (*stp)->dst.seqhi, 0, TH_SYN0x02, 0, | |||
| 5338 | (*stp)->src.mss, 0, 0, (*stp)->tag, | |||
| 5339 | sk->rdomain); | |||
| 5340 | REASON_SET(reason, PFRES_SYNPROXY)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (14); if (14 < 17) pf_status.counters[14]++; } } while (0); | |||
| 5341 | return (PF_SYNPROXY_DROP); | |||
| 5342 | } else if (((th->th_flags & (TH_SYN0x02|TH_ACK0x10)) != | |||
| 5343 | (TH_SYN0x02|TH_ACK0x10)) || | |||
| 5344 | (ntohl(th->th_ack)(__uint32_t)(__builtin_constant_p(th->th_ack) ? (__uint32_t )(((__uint32_t)(th->th_ack) & 0xff) << 24 | ((__uint32_t )(th->th_ack) & 0xff00) << 8 | ((__uint32_t)(th-> th_ack) & 0xff0000) >> 8 | ((__uint32_t)(th->th_ack ) & 0xff000000) >> 24) : __swap32md(th->th_ack)) != (*stp)->dst.seqhi + 1)) { | |||
| 5345 | REASON_SET(reason, PFRES_SYNPROXY)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (14); if (14 < 17) pf_status.counters[14]++; } } while (0); | |||
| 5346 | return (PF_DROP); | |||
| 5347 | } else { | |||
| 5348 | (*stp)->dst.max_win = MAX(ntohs(th->th_win), 1)((((__uint16_t)(__builtin_constant_p(th->th_win) ? (__uint16_t )(((__uint16_t)(th->th_win) & 0xffU) << 8 | ((__uint16_t )(th->th_win) & 0xff00U) >> 8) : __swap16md(th-> th_win)))>(1))?((__uint16_t)(__builtin_constant_p(th->th_win ) ? (__uint16_t)(((__uint16_t)(th->th_win) & 0xffU) << 8 | ((__uint16_t)(th->th_win) & 0xff00U) >> 8) : __swap16md(th->th_win))):(1)); | |||
| 5349 | (*stp)->dst.seqlo = ntohl(th->th_seq)(__uint32_t)(__builtin_constant_p(th->th_seq) ? (__uint32_t )(((__uint32_t)(th->th_seq) & 0xff) << 24 | ((__uint32_t )(th->th_seq) & 0xff00) << 8 | ((__uint32_t)(th-> th_seq) & 0xff0000) >> 8 | ((__uint32_t)(th->th_seq ) & 0xff000000) >> 24) : __swap32md(th->th_seq)); | |||
| 5350 | pf_send_tcp((*stp)->rule.ptr, pd->af, pd->dst, | |||
| 5351 | pd->src, th->th_dport, th->th_sport, | |||
| 5352 | ntohl(th->th_ack)(__uint32_t)(__builtin_constant_p(th->th_ack) ? (__uint32_t )(((__uint32_t)(th->th_ack) & 0xff) << 24 | ((__uint32_t )(th->th_ack) & 0xff00) << 8 | ((__uint32_t)(th-> th_ack) & 0xff0000) >> 8 | ((__uint32_t)(th->th_ack ) & 0xff000000) >> 24) : __swap32md(th->th_ack)), ntohl(th->th_seq)(__uint32_t)(__builtin_constant_p(th->th_seq) ? (__uint32_t )(((__uint32_t)(th->th_seq) & 0xff) << 24 | ((__uint32_t )(th->th_seq) & 0xff00) << 8 | ((__uint32_t)(th-> th_seq) & 0xff0000) >> 8 | ((__uint32_t)(th->th_seq ) & 0xff000000) >> 24) : __swap32md(th->th_seq)) + 1, | |||
| 5353 | TH_ACK0x10, (*stp)->src.max_win, 0, 0, 0, | |||
| 5354 | (*stp)->tag, pd->rdomain); | |||
| 5355 | pf_send_tcp((*stp)->rule.ptr, pd->af, | |||
| 5356 | &sk->addr[pd->sidx], &sk->addr[pd->didx], | |||
| 5357 | sk->port[pd->sidx], sk->port[pd->didx], | |||
| 5358 | (*stp)->src.seqhi + 1, (*stp)->src.seqlo + 1, | |||
| 5359 | TH_ACK0x10, (*stp)->dst.max_win, 0, 0, 1, | |||
| 5360 | 0, sk->rdomain); | |||
| 5361 | (*stp)->src.seqdiff = (*stp)->dst.seqhi - | |||
| 5362 | (*stp)->src.seqlo; | |||
| 5363 | (*stp)->dst.seqdiff = (*stp)->src.seqhi - | |||
| 5364 | (*stp)->dst.seqlo; | |||
| 5365 | (*stp)->src.seqhi = (*stp)->src.seqlo + | |||
| 5366 | (*stp)->dst.max_win; | |||
| 5367 | (*stp)->dst.seqhi = (*stp)->dst.seqlo + | |||
| 5368 | (*stp)->src.max_win; | |||
| 5369 | (*stp)->src.wscale = (*stp)->dst.wscale = 0; | |||
| 5370 | pf_set_protostate(*stp, PF_PEER_BOTH, | |||
| 5371 | TCPS_ESTABLISHED4); | |||
| 5372 | REASON_SET(reason, PFRES_SYNPROXY)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (14); if (14 < 17) pf_status.counters[14]++; } } while (0); | |||
| 5373 | return (PF_SYNPROXY_DROP); | |||
| 5374 | } | |||
| 5375 | } | |||
| 5376 | return (PF_PASS); | |||
| 5377 | } | |||
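Once pf_synproxy() has completed both handshakes it falls back to plain sequence-number translation: each direction gets a seqdiff equal to the ISN the firewall used minus the ISN the real endpoint used. The sketch below restates that bookkeeping with self-describing names; it is an illustration of the assignments above, not pf code.

	#include <stdint.h>

	struct synproxy_map {
		uint32_t to_server_seqdiff;	/* applied to client segments toward the server */
		uint32_t to_client_seqdiff;	/* applied to server segments toward the client */
	};

	static void
	synproxy_complete(struct synproxy_map *m,
	    uint32_t client_isn,		/* ISN in the client's real SYN              */
	    uint32_t proxy_isn_to_client,	/* ISN the firewall put in its SYN|ACK        */
	    uint32_t server_isn,		/* ISN in the server's real SYN|ACK           */
	    uint32_t proxy_isn_to_server)	/* ISN the firewall put in its SYN to server  */
	{
		/* mirrors "src.seqdiff = dst.seqhi - src.seqlo" and its counterpart above */
		m->to_server_seqdiff = proxy_isn_to_server - client_isn;
		m->to_client_seqdiff = proxy_isn_to_client - server_isn;
	}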
| 5378 | ||||
| 5379 | int | |||
| 5380 | pf_test_state(struct pf_pdesc *pd, struct pf_state **stp, u_short *reason) | |||
| 5381 | { | |||
| 5382 | int copyback = 0; | |||
| 5383 | struct pf_state_peer *src, *dst; | |||
| 5384 | int action; | |||
| 5385 | struct inpcb *inp = pd->m->m_pkthdrM_dat.MH.MH_pkthdr.pf.inp; | |||
| 5386 | u_int8_t psrc, pdst; | |||
| 5387 | ||||
| 5388 | action = PF_PASS; | |||
| 5389 | if (pd->dir == (*stp)->direction) { | |||
| 5390 | src = &(*stp)->src; | |||
| 5391 | dst = &(*stp)->dst; | |||
| 5392 | psrc = PF_PEER_SRC; | |||
| 5393 | pdst = PF_PEER_DST; | |||
| 5394 | } else { | |||
| 5395 | src = &(*stp)->dst; | |||
| 5396 | dst = &(*stp)->src; | |||
| 5397 | psrc = PF_PEER_DST; | |||
| 5398 | pdst = PF_PEER_SRC; | |||
| 5399 | } | |||
| 5400 | ||||
| 5401 | switch (pd->virtual_proto) { | |||
| 5402 | case IPPROTO_TCP6: | |||
| 5403 | if ((action = pf_synproxy(pd, stp, reason)) != PF_PASS) | |||
| 5404 | return (action); | |||
| 5405 | if ((pd->hdr.tcp.th_flags & (TH_SYN0x02|TH_ACK0x10)) == TH_SYN0x02) { | |||
| 5406 | ||||
| 5407 | if (dst->state >= TCPS_FIN_WAIT_29 && | |||
| 5408 | src->state >= TCPS_FIN_WAIT_29) { | |||
| 5409 | if (pf_status.debug >= LOG_NOTICE5) { | |||
| 5410 | log(LOG_NOTICE5, "pf: state reuse "); | |||
| 5411 | pf_print_state(*stp); | |||
| 5412 | pf_print_flags(pd->hdr.tcp.th_flags); | |||
| 5413 | addlog("\n"); | |||
| 5414 | } | |||
| 5415 | /* XXX make sure it's the same direction ?? */ | |||
| 5416 | pf_update_state_timeout(*stp, PFTM_PURGE); | |||
| 5417 | pf_state_unref(*stp); | |||
| 5418 | *stp = NULL((void *)0); | |||
| 5419 | pf_mbuf_link_inpcb(pd->m, inp); | |||
| 5420 | return (PF_DROP); | |||
| 5421 | } else if (dst->state >= TCPS_ESTABLISHED4 && | |||
| 5422 | src->state >= TCPS_ESTABLISHED4) { | |||
| 5423 | /* | |||
| 5424 | * SYN matches existing state??? | |||
| 5425 | * Typically happens when sender boots up after | |||
| 5426 | * sudden panic. Certain protocols (NFSv3) are | |||
| 5427 | * always using the same port numbers. Challenge | |||
| 5428 | * ACK enables all parties (firewall and peers) | |||
| 5429 | * to get in sync again. | |||
| 5430 | */ | |||
| 5431 | pf_send_challenge_ack(pd, *stp, src, dst); | |||
| 5432 | return (PF_DROP); | |||
| 5433 | } | |||
| 5434 | } | |||
| 5435 | ||||
| 5436 | if ((*stp)->state_flags & PFSTATE_SLOPPY0x0002) { | |||
| 5437 | if (pf_tcp_track_sloppy(pd, stp, reason) == PF_DROP) | |||
| 5438 | return (PF_DROP); | |||
| 5439 | } else { | |||
| 5440 | if (pf_tcp_track_full(pd, stp, reason, ©back, | |||
| 5441 | PF_REVERSED_KEY((*stp)->key, pd->af)(((*stp)->key[PF_SK_WIRE]->af != (*stp)->key[PF_SK_STACK ]->af) && ((*stp)->key[PF_SK_WIRE]->af != (pd ->af)))) == PF_DROP) | |||
| 5442 | return (PF_DROP); | |||
| 5443 | } | |||
| 5444 | break; | |||
| 5445 | case IPPROTO_UDP17: | |||
| 5446 | /* update states */ | |||
| 5447 | if (src->state < PFUDPS_SINGLE1) | |||
| 5448 | pf_set_protostate(*stp, psrc, PFUDPS_SINGLE1); | |||
| 5449 | if (dst->state == PFUDPS_SINGLE1) | |||
| 5450 | pf_set_protostate(*stp, pdst, PFUDPS_MULTIPLE2); | |||
| 5451 | ||||
| 5452 | /* update expire time */ | |||
| 5453 | (*stp)->expire = getuptime(); | |||
| 5454 | if (src->state == PFUDPS_MULTIPLE2 && | |||
| 5455 | dst->state == PFUDPS_MULTIPLE2) | |||
| 5456 | pf_update_state_timeout(*stp, PFTM_UDP_MULTIPLE); | |||
| 5457 | else | |||
| 5458 | pf_update_state_timeout(*stp, PFTM_UDP_SINGLE); | |||
| 5459 | break; | |||
| 5460 | default: | |||
| 5461 | /* update states */ | |||
| 5462 | if (src->state < PFOTHERS_SINGLE1) | |||
| 5463 | pf_set_protostate(*stp, psrc, PFOTHERS_SINGLE1); | |||
| 5464 | if (dst->state == PFOTHERS_SINGLE1) | |||
| 5465 | pf_set_protostate(*stp, pdst, PFOTHERS_MULTIPLE2); | |||
| 5466 | ||||
| 5467 | /* update expire time */ | |||
| 5468 | (*stp)->expire = getuptime(); | |||
| 5469 | if (src->state == PFOTHERS_MULTIPLE2 && | |||
| 5470 | dst->state == PFOTHERS_MULTIPLE2) | |||
| 5471 | pf_update_state_timeout(*stp, PFTM_OTHER_MULTIPLE); | |||
| 5472 | else | |||
| 5473 | pf_update_state_timeout(*stp, PFTM_OTHER_SINGLE); | |||
| 5474 | break; | |||
| 5475 | } | |||
| 5476 | ||||
| 5477 | /* translate source/destination address, if necessary */ | |||
| 5478 | if ((*stp)->key[PF_SK_WIRE] != (*stp)->key[PF_SK_STACK]) { | |||
| 5479 | struct pf_state_key *nk; | |||
| 5480 | int afto, sidx, didx; | |||
| 5481 | ||||
| 5482 | if (PF_REVERSED_KEY((*stp)->key, pd->af)(((*stp)->key[PF_SK_WIRE]->af != (*stp)->key[PF_SK_STACK ]->af) && ((*stp)->key[PF_SK_WIRE]->af != (pd ->af)))) | |||
| 5483 | nk = (*stp)->key[pd->sidx]; | |||
| 5484 | else | |||
| 5485 | nk = (*stp)->key[pd->didx]; | |||
| 5486 | ||||
| 5487 | afto = pd->af != nk->af; | |||
| 5488 | sidx = afto ? pd->didx : pd->sidx; | |||
| 5489 | didx = afto ? pd->sidx : pd->didx; | |||
| 5490 | ||||
| 5491 | #ifdef INET61 | |||
| 5492 | if (afto) { | |||
| 5493 | pf_addrcpy(&pd->nsaddr, &nk->addr[sidx], nk->af); | |||
| 5494 | pf_addrcpy(&pd->ndaddr, &nk->addr[didx], nk->af); | |||
| 5495 | pd->naf = nk->af; | |||
| 5496 | action = PF_AFRT; | |||
| 5497 | } | |||
| 5498 | #endif /* INET6 */ | |||
| 5499 | ||||
| 5500 | if (!afto) | |||
| 5501 | pf_translate_a(pd, pd->src, &nk->addr[sidx]); | |||
| 5502 | ||||
| 5503 | if (pd->sport != NULL((void *)0)) | |||
| 5504 | pf_patch_16(pd, pd->sport, nk->port[sidx]); | |||
| 5505 | ||||
| 5506 | if (afto || PF_ANEQ(pd->dst, &nk->addr[didx], pd->af)((pd->af == 2 && (pd->dst)->pfa.addr32[0] != (&nk->addr[didx])->pfa.addr32[0]) || (pd->af == 24 && ((pd->dst)->pfa.addr32[3] != (&nk-> addr[didx])->pfa.addr32[3] || (pd->dst)->pfa.addr32[ 2] != (&nk->addr[didx])->pfa.addr32[2] || (pd->dst )->pfa.addr32[1] != (&nk->addr[didx])->pfa.addr32 [1] || (pd->dst)->pfa.addr32[0] != (&nk->addr[didx ])->pfa.addr32[0]))) || | |||
| 5507 | pd->rdomain != nk->rdomain) | |||
| 5508 | pd->destchg = 1; | |||
| 5509 | ||||
| 5510 | if (!afto) | |||
| 5511 | pf_translate_a(pd, pd->dst, &nk->addr[didx]); | |||
| 5512 | ||||
| 5513 | if (pd->dport != NULL((void *)0)) | |||
| 5514 | pf_patch_16(pd, pd->dport, nk->port[didx]); | |||
| 5515 | ||||
| 5516 | pd->m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = nk->rdomain; | |||
| 5517 | copyback = 1; | |||
| 5518 | } | |||
| 5519 | ||||
| 5520 | if (copyback && pd->hdrlen > 0) { | |||
| 5521 | m_copyback(pd->m, pd->off, pd->hdrlen, &pd->hdr, M_NOWAIT0x0002); | |||
| 5522 | } | |||
| 5523 | ||||
| 5524 | return (action); | |||
| 5525 | } | |||
| 5526 | ||||
| 5527 | int | |||
| 5528 | pf_icmp_state_lookup(struct pf_pdesc *pd, struct pf_state_key_cmp *key, | |||
| 5529 | struct pf_state **stp, u_int16_t icmpid, u_int16_t type, | |||
| 5530 | int icmp_dir, int *iidx, int multi, int inner) | |||
| 5531 | { | |||
| 5532 | int direction, action; | |||
| 5533 | ||||
| 5534 | key->af = pd->af; | |||
| 5535 | key->proto = pd->proto; | |||
| 5536 | key->rdomain = pd->rdomain; | |||
| 5537 | if (icmp_dir == PF_IN) { | |||
| 5538 | *iidx = pd->sidx; | |||
| 5539 | key->port[pd->sidx] = icmpid; | |||
| 5540 | key->port[pd->didx] = type; | |||
| 5541 | } else { | |||
| 5542 | *iidx = pd->didx; | |||
| 5543 | key->port[pd->sidx] = type; | |||
| 5544 | key->port[pd->didx] = icmpid; | |||
| 5545 | } | |||
| 5546 | ||||
| 5547 | if (pf_state_key_addr_setup(pd, key, pd->sidx, pd->src, pd->didx, | |||
| 5548 | pd->dst, pd->af, multi)) | |||
| 5549 | return (PF_DROP); | |||
| 5550 | ||||
| 5551 | key->hash = pf_pkt_hash(key->af, key->proto, | |||
| 5552 | &key->addr[0], &key->addr[1], 0, 0); | |||
| 5553 | ||||
| 5554 | action = pf_find_state(pd, key, stp); | |||
| 5555 | if (action != PF_MATCH) | |||
| 5556 | return (action); | |||
| 5557 | ||||
| 5558 | if ((*stp)->state_flags & PFSTATE_SLOPPY) | |||
| 5559 | return (-1); | |||
| 5560 | ||||
| 5561 | /* Is this ICMP message flowing in right direction? */ | |||
| 5562 | if ((*stp)->key[PF_SK_WIRE]->af != (*stp)->key[PF_SK_STACK]->af) | |||
| 5563 | direction = (pd->af == (*stp)->key[PF_SK_WIRE]->af) ? | |||
| 5564 | PF_IN : PF_OUT; | |||
| 5565 | else | |||
| 5566 | direction = (*stp)->direction; | |||
| 5567 | if ((((!inner && direction == pd->dir) || | |||
| 5568 | (inner && direction != pd->dir)) ? | |||
| 5569 | PF_IN : PF_OUT) != icmp_dir) { | |||
| 5570 | if (pf_status.debug >= LOG_NOTICE) { | |||
| 5571 | log(LOG_NOTICE, | |||
| 5572 | "pf: icmp type %d in wrong direction (%d): ", | |||
| 5573 | ntohs(type), icmp_dir); | |||
| 5574 | pf_print_state(*stp); | |||
| 5575 | addlog("\n"); | |||
| 5576 | } | |||
| 5577 | return (PF_DROP); | |||
| 5578 | } | |||
| 5579 | return (-1); | |||
| 5580 | } | |||
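/*
 * Illustrative sketch, not part of pf.c: how the lookup key built by
 * pf_icmp_state_lookup() encodes an ICMP query.  The id is stored in the
 * port slot of the querying side and the virtual type in the other slot,
 * so the reply (opposite icmp_dir) maps onto the same state.  The toy_*
 * helper is hypothetical.
 */
static void
toy_icmp_key_ports(u_int16_t port[2], int sidx, int didx, int dir_is_in,
    u_int16_t icmpid, u_int16_t virtual_type)
{
	if (dir_is_in) {		/* query travelling PF_IN */
		port[sidx] = icmpid;
		port[didx] = virtual_type;
	} else {			/* query travelling PF_OUT */
		port[sidx] = virtual_type;
		port[didx] = icmpid;
	}
}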
| 5581 | ||||
| 5582 | int | |||
| 5583 | pf_test_state_icmp(struct pf_pdesc *pd, struct pf_state **stp, | |||
| 5584 | u_short *reason) | |||
| 5585 | { | |||
| 5586 | u_int16_t virtual_id, virtual_type; | |||
| 5587 | u_int8_t icmptype, icmpcode; | |||
| 5588 | int icmp_dir, iidx, ret, copyback = 0; | |||
| 5589 | ||||
| 5590 | struct pf_state_key_cmp key; | |||
| 5591 | ||||
| 5592 | switch (pd->proto) { | |||
| 5593 | case IPPROTO_ICMP: | |||
| 5594 | icmptype = pd->hdr.icmp.icmp_type; | |||
| 5595 | icmpcode = pd->hdr.icmp.icmp_code; | |||
| 5596 | break; | |||
| 5597 | #ifdef INET6 | |||
| 5598 | case IPPROTO_ICMPV6: | |||
| 5599 | icmptype = pd->hdr.icmp6.icmp6_type; | |||
| 5600 | icmpcode = pd->hdr.icmp6.icmp6_code; | |||
| 5601 | break; | |||
| 5602 | #endif /* INET6 */ | |||
| 5603 | default: | |||
| 5604 | panic("unhandled proto %d", pd->proto); | |||
| 5605 | } | |||
| 5606 | ||||
| 5607 | if (pf_icmp_mapping(pd, icmptype, &icmp_dir, &virtual_id, | |||
| 5608 | &virtual_type) == 0) { | |||
| 5609 | /* | |||
| 5610 | * ICMP query/reply message not related to a TCP/UDP packet. | |||
| 5611 | * Search for an ICMP state. | |||
| 5612 | */ | |||
| 5613 | ret = pf_icmp_state_lookup(pd, &key, stp, | |||
| 5614 | virtual_id, virtual_type, icmp_dir, &iidx, | |||
| 5615 | 0, 0); | |||
| 5616 | /* IPv6? try matching a multicast address */ | |||
| 5617 | if (ret == PF_DROP && pd->af == AF_INET6 && icmp_dir == PF_OUT) | |||
| 5618 | ret = pf_icmp_state_lookup(pd, &key, stp, virtual_id, | |||
| 5619 | virtual_type, icmp_dir, &iidx, 1, 0); | |||
| 5620 | if (ret >= 0) | |||
| 5621 | return (ret); | |||
| 5622 | ||||
| 5623 | (*stp)->expire = getuptime(); | |||
| 5624 | pf_update_state_timeout(*stp, PFTM_ICMP_ERROR_REPLY); | |||
| 5625 | ||||
| 5626 | /* translate source/destination address, if necessary */ | |||
| 5627 | if ((*stp)->key[PF_SK_WIRE] != (*stp)->key[PF_SK_STACK]) { | |||
| 5628 | struct pf_state_key *nk; | |||
| 5629 | int afto, sidx, didx; | |||
| 5630 | ||||
| 5631 | if (PF_REVERSED_KEY((*stp)->key, pd->af)) | |||
| 5632 | nk = (*stp)->key[pd->sidx]; | |||
| 5633 | else | |||
| 5634 | nk = (*stp)->key[pd->didx]; | |||
| 5635 | ||||
| 5636 | afto = pd->af != nk->af; | |||
| 5637 | sidx = afto ? pd->didx : pd->sidx; | |||
| 5638 | didx = afto ? pd->sidx : pd->didx; | |||
| 5639 | iidx = afto ? !iidx : iidx; | |||
| 5640 | #ifdef INET6 | |||
| 5641 | if (afto) { | |||
| 5642 | pf_addrcpy(&pd->nsaddr, &nk->addr[sidx], | |||
| 5643 | nk->af); | |||
| 5644 | pf_addrcpy(&pd->ndaddr, &nk->addr[didx], | |||
| 5645 | nk->af); | |||
| 5646 | pd->naf = nk->af; | |||
| 5647 | } | |||
| 5648 | #endif /* INET6 */ | |||
| 5649 | if (!afto) { | |||
| 5650 | pf_translate_a(pd, pd->src, &nk->addr[sidx]); | |||
| 5651 | pf_translate_a(pd, pd->dst, &nk->addr[didx]); | |||
| 5652 | } | |||
| 5653 | ||||
| 5654 | if (pd->rdomain != nk->rdomain) | |||
| 5655 | pd->destchg = 1; | |||
| 5656 | if (!afto && PF_ANEQ(pd->dst, | |||
| 5657 | &nk->addr[didx], pd->af)) | |||
| 5658 | pd->destchg = 1; | |||
| 5659 | pd->m->m_pkthdr.ph_rtableid = nk->rdomain; | |||
| 5660 | ||||
| 5661 | switch (pd->af) { | |||
| 5662 | case AF_INET: | |||
| 5663 | #ifdef INET6 | |||
| 5664 | if (afto) { | |||
| 5665 | if (pf_translate_icmp_af(pd, AF_INET6, | |||
| 5666 | &pd->hdr.icmp)) | |||
| 5667 | return (PF_DROP); | |||
| 5668 | pd->proto = IPPROTO_ICMPV6; | |||
| 5669 | } | |||
| 5670 | #endif /* INET6 */ | |||
| 5671 | pf_patch_16(pd, | |||
| 5672 | &pd->hdr.icmp.icmp_id, nk->port[iidx]); | |||
| 5673 | ||||
| 5674 | m_copyback(pd->m, pd->off, ICMP_MINLEN, | |||
| 5675 | &pd->hdr.icmp, M_NOWAIT); | |||
| 5676 | copyback = 1; | |||
| 5677 | break; | |||
| 5678 | #ifdef INET6 | |||
| 5679 | case AF_INET6: | |||
| 5680 | if (afto) { | |||
| 5681 | if (pf_translate_icmp_af(pd, AF_INET, | |||
| 5682 | &pd->hdr.icmp6)) | |||
| 5683 | return (PF_DROP); | |||
| 5684 | pd->proto = IPPROTO_ICMP; | |||
| 5685 | } | |||
| 5686 | ||||
| 5687 | pf_patch_16(pd, | |||
| 5688 | &pd->hdr.icmp6.icmp6_id, nk->port[iidx]); | |||
| 5689 | ||||
| 5690 | m_copyback(pd->m, pd->off, | |||
| 5691 | sizeof(struct icmp6_hdr), &pd->hdr.icmp6, | |||
| 5692 | M_NOWAIT); | |||
| 5693 | copyback = 1; | |||
| 5694 | break; | |||
| 5695 | #endif /* INET6 */ | |||
| 5696 | } | |||
| 5697 | #ifdef INET6 | |||
| 5698 | if (afto) | |||
| 5699 | return (PF_AFRT); | |||
| 5700 | #endif /* INET6 */ | |||
| 5701 | } | |||
| 5702 | } else { | |||
| 5703 | /* | |||
| 5704 | * ICMP error message in response to a TCP/UDP packet. | |||
| 5705 | * Extract the inner TCP/UDP header and search for that state. | |||
| 5706 | */ | |||
| 5707 | struct pf_pdesc pd2; | |||
| 5708 | struct ip h2; | |||
| 5709 | #ifdef INET6 | |||
| 5710 | struct ip6_hdr h2_6; | |||
| 5711 | #endif /* INET6 */ | |||
| 5712 | int ipoff2; | |||
| 5713 | ||||
| 5714 | /* Initialize pd2 fields valid for both packets with pd. */ | |||
| 5715 | memset(&pd2, 0, sizeof(pd2)); | |||
| 5716 | pd2.af = pd->af; | |||
| 5717 | pd2.dir = pd->dir; | |||
| 5718 | pd2.kif = pd->kif; | |||
| 5719 | pd2.m = pd->m; | |||
| 5720 | pd2.rdomain = pd->rdomain; | |||
| 5721 | /* Payload packet is from the opposite direction. */ | |||
| 5722 | pd2.sidx = (pd2.dir == PF_IN) ? 1 : 0; | |||
| 5723 | pd2.didx = (pd2.dir == PF_IN) ? 0 : 1; | |||
| 5724 | switch (pd->af) { | |||
| 5725 | case AF_INET: | |||
| 5726 | /* offset of h2 in mbuf chain */ | |||
| 5727 | ipoff2 = pd->off + ICMP_MINLEN; | |||
| 5728 | ||||
| 5729 | if (!pf_pull_hdr(pd2.m, ipoff2, &h2, sizeof(h2), | |||
| 5730 | reason, pd2.af)) { | |||
| 5731 | DPFPRINTF(LOG_NOTICE, | |||
| 5732 | "ICMP error message too short (ip)"); | |||
| 5733 | return (PF_DROP); | |||
| 5734 | } | |||
| 5735 | /* | |||
| 5736 | * ICMP error messages don't refer to non-first | |||
| 5737 | * fragments | |||
| 5738 | */ | |||
| 5739 | if (h2.ip_off & htons(IP_OFFMASK)) { | |||
| 5740 | REASON_SET(reason, PFRES_FRAG); | |||
| 5741 | return (PF_DROP); | |||
| 5742 | } | |||
| 5743 | ||||
| 5744 | /* offset of protocol header that follows h2 */ | |||
| 5745 | pd2.off = ipoff2; | |||
| 5746 | if (pf_walk_header(&pd2, &h2, reason) != PF_PASS) | |||
| 5747 | return (PF_DROP); | |||
| 5748 | ||||
| 5749 | pd2.tot_len = ntohs(h2.ip_len); | |||
| 5750 | pd2.src = (struct pf_addr *)&h2.ip_src; | |||
| 5751 | pd2.dst = (struct pf_addr *)&h2.ip_dst; | |||
| 5752 | break; | |||
| 5753 | #ifdef INET6 | |||
| 5754 | case AF_INET6: | |||
| 5755 | ipoff2 = pd->off + sizeof(struct icmp6_hdr); | |||
| 5756 | ||||
| 5757 | if (!pf_pull_hdr(pd2.m, ipoff2, &h2_6, sizeof(h2_6), | |||
| 5758 | reason, pd2.af)) { | |||
| 5759 | DPFPRINTF(LOG_NOTICE, | |||
| 5760 | "ICMP error message too short (ip6)"); | |||
| 5761 | return (PF_DROP); | |||
| 5762 | } | |||
| 5763 | ||||
| 5764 | pd2.off = ipoff2; | |||
| 5765 | if (pf_walk_header6(&pd2, &h2_6, reason) != PF_PASS) | |||
| 5766 | return (PF_DROP); | |||
| 5767 | ||||
| 5768 | pd2.tot_len = ntohs(h2_6.ip6_plen) + | |||
| 5769 | sizeof(struct ip6_hdr); | |||
| 5770 | pd2.src = (struct pf_addr *)&h2_6.ip6_src; | |||
| 5771 | pd2.dst = (struct pf_addr *)&h2_6.ip6_dst; | |||
| 5772 | break; | |||
| 5773 | #endif /* INET6 */ | |||
| 5774 | default: | |||
| 5775 | unhandled_af(pd->af); | |||
| 5776 | } | |||
| 5777 | ||||
| 5778 | if (PF_ANEQ(pd->dst, pd2.src, pd->af)) { | |||
| 5779 | if (pf_status.debug >= LOG_NOTICE) { | |||
| 5780 | log(LOG_NOTICE, | |||
| 5781 | "pf: BAD ICMP %d:%d outer dst: ", | |||
| 5782 | icmptype, icmpcode); | |||
| 5783 | pf_print_host(pd->src, 0, pd->af); | |||
| 5784 | addlog(" -> "); | |||
| 5785 | pf_print_host(pd->dst, 0, pd->af); | |||
| 5786 | addlog(" inner src: "); | |||
| 5787 | pf_print_host(pd2.src, 0, pd2.af); | |||
| 5788 | addlog(" -> "); | |||
| 5789 | pf_print_host(pd2.dst, 0, pd2.af); | |||
| 5790 | addlog("\n"); | |||
| 5791 | } | |||
| 5792 | REASON_SET(reason, PFRES_BADSTATE); | |||
| 5793 | return (PF_DROP); | |||
| 5794 | } | |||
| 5795 | ||||
| 5796 | switch (pd2.proto) { | |||
| 5797 | case IPPROTO_TCP: { | |||
| 5798 | struct tcphdr *th = &pd2.hdr.tcp; | |||
| 5799 | u_int32_t seq; | |||
| 5800 | struct pf_state_peer *src, *dst; | |||
| 5801 | u_int8_t dws; | |||
| 5802 | int action; | |||
| 5803 | ||||
| 5804 | /* | |||
| 5805 | * Only the first 8 bytes of the TCP header can be | |||
| 5806 | * expected. Don't access any TCP header fields after | |||
| 5807 | * th_seq, an ackskew test is not possible. | |||
| 5808 | */ | |||
| 5809 | if (!pf_pull_hdr(pd2.m, pd2.off, th, 8, reason, | |||
| 5810 | pd2.af)) { | |||
| 5811 | DPFPRINTF(LOG_NOTICE, | |||
| 5812 | "ICMP error message too short (tcp)"); | |||
| 5813 | return (PF_DROP); | |||
| 5814 | } | |||
| 5815 | ||||
| 5816 | key.af = pd2.af; | |||
| 5817 | key.proto = IPPROTO_TCP; | |||
| 5818 | key.rdomain = pd2.rdomain; | |||
| 5819 | pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af); | |||
| 5820 | pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af); | |||
| 5821 | key.port[pd2.sidx] = th->th_sport; | |||
| 5822 | key.port[pd2.didx] = th->th_dport; | |||
| 5823 | key.hash = pf_pkt_hash(pd2.af, pd2.proto, | |||
| 5824 | pd2.src, pd2.dst, th->th_sport, th->th_dport); | |||
| 5825 | ||||
| 5826 | action = pf_find_state(&pd2, &key, stp); | |||
| 5827 | if (action != PF_MATCH) | |||
| 5828 | return (action); | |||
| 5829 | ||||
| 5830 | if (pd2.dir == (*stp)->direction) { | |||
| 5831 | if (PF_REVERSED_KEY((*stp)->key, pd->af)) { | |||
| 5832 | src = &(*stp)->src; | |||
| 5833 | dst = &(*stp)->dst; | |||
| 5834 | } else { | |||
| 5835 | src = &(*stp)->dst; | |||
| 5836 | dst = &(*stp)->src; | |||
| 5837 | } | |||
| 5838 | } else { | |||
| 5839 | if (PF_REVERSED_KEY((*stp)->key, pd->af)) { | |||
| 5840 | src = &(*stp)->dst; | |||
| 5841 | dst = &(*stp)->src; | |||
| 5842 | } else { | |||
| 5843 | src = &(*stp)->src; | |||
| 5844 | dst = &(*stp)->dst; | |||
| 5845 | } | |||
| 5846 | } | |||
| 5847 | ||||
| 5848 | if (src->wscale && dst->wscale) | |||
| 5849 | dws = dst->wscale & PF_WSCALE_MASK; | |||
| 5850 | else | |||
| 5851 | dws = 0; | |||
| 5852 | ||||
| 5853 | /* Demodulate sequence number */ | |||
| 5854 | seq = ntohl(th->th_seq) - src->seqdiff; | |||
| 5855 | if (src->seqdiff) { | |||
| 5856 | pf_patch_32(pd, &th->th_seq, htonl(seq)); | |||
| 5857 | copyback = 1; | |||
| 5858 | } | |||
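/*
 * Illustration only, not part of pf.c: "demodulate" undoes the random
 * offset (seqdiff) that pf added to this connection's sequence numbers
 * when the state was created, so the quoted header can be compared
 * against the stored window:
 *
 *	u_int32_t
 *	toy_demodulate(u_int32_t wire_seq, u_int32_t seqdiff)
 *	{
 *		return (wire_seq - seqdiff);
 *	}
 *
 * The inverse (adding seqdiff back) is applied to traffic leaving the
 * modulated side.
 */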
| 5859 | ||||
| 5860 | if (!((*stp)->state_flags & PFSTATE_SLOPPY) && | |||
| 5861 | (!SEQ_GEQ(src->seqhi, seq) || !SEQ_GEQ(seq, | |||
| 5862 | src->seqlo - (dst->max_win << dws)))) { | |||
| 5863 | if (pf_status.debug >= LOG_NOTICE) { | |||
| 5864 | log(LOG_NOTICE, | |||
| 5865 | "pf: BAD ICMP %d:%d ", | |||
| 5866 | icmptype, icmpcode); | |||
| 5867 | pf_print_host(pd->src, 0, pd->af); | |||
| 5868 | addlog(" -> "); | |||
| 5869 | pf_print_host(pd->dst, 0, pd->af); | |||
| 5870 | addlog(" state: "); | |||
| 5871 | pf_print_state(*stp); | |||
| 5872 | addlog(" seq=%u\n", seq); | |||
| 5873 | } | |||
| 5874 | REASON_SET(reason, PFRES_BADSTATE); | |||
| 5875 | return (PF_DROP); | |||
| 5876 | } else { | |||
| 5877 | if (pf_status.debug >= LOG_DEBUG) { | |||
| 5878 | log(LOG_DEBUG, | |||
| 5879 | "pf: OK ICMP %d:%d ", | |||
| 5880 | icmptype, icmpcode); | |||
| 5881 | pf_print_host(pd->src, 0, pd->af); | |||
| 5882 | addlog(" -> "); | |||
| 5883 | pf_print_host(pd->dst, 0, pd->af); | |||
| 5884 | addlog(" state: "); | |||
| 5885 | pf_print_state(*stp); | |||
| 5886 | addlog(" seq=%u\n", seq); | |||
| 5887 | } | |||
| 5888 | } | |||
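/*
 * Illustration only, not part of pf.c: the window test above spelled
 * out.  SEQ_GEQ() is a wraparound-safe serial-number compare, and the
 * quoted sequence must lie in [seqlo - (max_win << dws), seqhi]:
 *
 *	static int
 *	toy_seq_geq(u_int32_t a, u_int32_t b)
 *	{
 *		return ((int32_t)(a - b) >= 0);
 *	}
 *
 *	static int
 *	toy_in_window(u_int32_t seq, u_int32_t seqlo, u_int32_t seqhi,
 *	    u_int32_t max_win, u_int8_t dws)
 *	{
 *		return (toy_seq_geq(seqhi, seq) &&
 *		    toy_seq_geq(seq, seqlo - (max_win << dws)));
 *	}
 */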
| 5889 | ||||
| 5890 | /* translate source/destination address, if necessary */ | |||
| 5891 | if ((*stp)->key[PF_SK_WIRE] != | |||
| 5892 | (*stp)->key[PF_SK_STACK]) { | |||
| 5893 | struct pf_state_key *nk; | |||
| 5894 | int afto, sidx, didx; | |||
| 5895 | ||||
| 5896 | if (PF_REVERSED_KEY((*stp)->key, pd->af)) | |||
| 5897 | nk = (*stp)->key[pd->sidx]; | |||
| 5898 | else | |||
| 5899 | nk = (*stp)->key[pd->didx]; | |||
| 5900 | ||||
| 5901 | afto = pd->af != nk->af; | |||
| 5902 | sidx = afto ? pd2.didx : pd2.sidx; | |||
| 5903 | didx = afto ? pd2.sidx : pd2.didx; | |||
| 5904 | ||||
| 5905 | #ifdef INET6 | |||
| 5906 | if (afto) { | |||
| 5907 | if (pf_translate_icmp_af(pd, nk->af, | |||
| 5908 | &pd->hdr.icmp)) | |||
| 5909 | return (PF_DROP); | |||
| 5910 | m_copyback(pd->m, pd->off, | |||
| 5911 | sizeof(struct icmp6_hdr), | |||
| 5912 | &pd->hdr.icmp6, M_NOWAIT); | |||
| 5913 | if (pf_change_icmp_af(pd->m, ipoff2, | |||
| 5914 | pd, &pd2, &nk->addr[sidx], | |||
| 5915 | &nk->addr[didx], pd->af, nk->af)) | |||
| 5916 | return (PF_DROP); | |||
| 5917 | if (nk->af == AF_INET) | |||
| 5918 | pd->proto = IPPROTO_ICMP; | |||
| 5919 | else | |||
| 5920 | pd->proto = IPPROTO_ICMPV6; | |||
| 5921 | pd->m->m_pkthdr.ph_rtableid = | |||
| 5922 | nk->rdomain; | |||
| 5923 | pd->destchg = 1; | |||
| 5924 | pf_addrcpy(&pd->nsaddr, | |||
| 5925 | &nk->addr[pd2.sidx], nk->af); | |||
| 5926 | pf_addrcpy(&pd->ndaddr, | |||
| 5927 | &nk->addr[pd2.didx], nk->af); | |||
| 5928 | pd->naf = nk->af; | |||
| 5929 | ||||
| 5930 | pf_patch_16(pd, | |||
| 5931 | &th->th_sport, nk->port[sidx]); | |||
| 5932 | pf_patch_16(pd, | |||
| 5933 | &th->th_dport, nk->port[didx]); | |||
| 5934 | ||||
| 5935 | m_copyback(pd2.m, pd2.off, 8, th, | |||
| 5936 | M_NOWAIT); | |||
| 5937 | return (PF_AFRT); | |||
| 5938 | } | |||
| 5939 | #endif /* INET6 */ | |||
| 5940 | if (PF_ANEQ(pd2.src, | |||
| 5941 | &nk->addr[pd2.sidx], pd2.af) || | |||
| 5942 | nk->port[pd2.sidx] != th->th_sport) | |||
| 5943 | pf_translate_icmp(pd, pd2.src, | |||
| 5944 | &th->th_sport, pd->dst, | |||
| 5945 | &nk->addr[pd2.sidx], | |||
| 5946 | nk->port[pd2.sidx]); | |||
| 5947 | ||||
| 5948 | if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], | |||
| 5949 | pd2.af) || pd2.rdomain != nk->rdomain) | |||
| 5950 | pd->destchg = 1; | |||
| 5951 | pd->m->m_pkthdr.ph_rtableid = nk->rdomain; | |||
| 5952 | ||||
| 5953 | if (PF_ANEQ(pd2.dst, | |||
| 5954 | &nk->addr[pd2.didx], pd2.af) || | |||
| 5955 | nk->port[pd2.didx] != th->th_dport) | |||
| 5956 | pf_translate_icmp(pd, pd2.dst, | |||
| 5957 | &th->th_dport, pd->src, | |||
| 5958 | &nk->addr[pd2.didx], | |||
| 5959 | nk->port[pd2.didx]); | |||
| 5960 | copyback = 1; | |||
| 5961 | } | |||
| 5962 | ||||
| 5963 | if (copyback) { | |||
| 5964 | switch (pd2.af) { | |||
| 5965 | case AF_INET: | |||
| 5966 | m_copyback(pd->m, pd->off, ICMP_MINLEN, | |||
| 5967 | &pd->hdr.icmp, M_NOWAIT); | |||
| 5968 | m_copyback(pd2.m, ipoff2, sizeof(h2), | |||
| 5969 | &h2, M_NOWAIT); | |||
| 5970 | break; | |||
| 5971 | #ifdef INET6 | |||
| 5972 | case AF_INET6: | |||
| 5973 | m_copyback(pd->m, pd->off, | |||
| 5974 | sizeof(struct icmp6_hdr), | |||
| 5975 | &pd->hdr.icmp6, M_NOWAIT); | |||
| 5976 | m_copyback(pd2.m, ipoff2, sizeof(h2_6), | |||
| 5977 | &h2_6, M_NOWAIT); | |||
| 5978 | break; | |||
| 5979 | #endif /* INET6 */ | |||
| 5980 | } | |||
| 5981 | m_copyback(pd2.m, pd2.off, 8, th, M_NOWAIT); | |||
| 5982 | } | |||
| 5983 | break; | |||
| 5984 | } | |||
| 5985 | case IPPROTO_UDP: { | |||
| 5986 | struct udphdr *uh = &pd2.hdr.udp; | |||
| 5987 | int action; | |||
| 5988 | ||||
| 5989 | if (!pf_pull_hdr(pd2.m, pd2.off, uh, sizeof(*uh), | |||
| 5990 | reason, pd2.af)) { | |||
| 5991 | DPFPRINTF(LOG_NOTICE, | |||
| 5992 | "ICMP error message too short (udp)"); | |||
| 5993 | return (PF_DROP); | |||
| 5994 | } | |||
| 5995 | ||||
| 5996 | key.af = pd2.af; | |||
| 5997 | key.proto = IPPROTO_UDP; | |||
| 5998 | key.rdomain = pd2.rdomain; | |||
| 5999 | pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af); | |||
| 6000 | pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af); | |||
| 6001 | key.port[pd2.sidx] = uh->uh_sport; | |||
| 6002 | key.port[pd2.didx] = uh->uh_dport; | |||
| 6003 | key.hash = pf_pkt_hash(pd2.af, pd2.proto, | |||
| 6004 | pd2.src, pd2.dst, uh->uh_sport, uh->uh_dport); | |||
| 6005 | ||||
| 6006 | action = pf_find_state(&pd2, &key, stp); | |||
| 6007 | if (action != PF_MATCH) | |||
| 6008 | return (action); | |||
| 6009 | ||||
| 6010 | /* translate source/destination address, if necessary */ | |||
| 6011 | if ((*stp)->key[PF_SK_WIRE] != | |||
| 6012 | (*stp)->key[PF_SK_STACK]) { | |||
| 6013 | struct pf_state_key *nk; | |||
| 6014 | int afto, sidx, didx; | |||
| 6015 | ||||
| 6016 | if (PF_REVERSED_KEY((*stp)->key, pd->af)) | |||
| 6017 | nk = (*stp)->key[pd->sidx]; | |||
| 6018 | else | |||
| 6019 | nk = (*stp)->key[pd->didx]; | |||
| 6020 | ||||
| 6021 | afto = pd->af != nk->af; | |||
| 6022 | sidx = afto ? pd2.didx : pd2.sidx; | |||
| 6023 | didx = afto ? pd2.sidx : pd2.didx; | |||
| 6024 | ||||
| 6025 | #ifdef INET6 | |||
| 6026 | if (afto) { | |||
| 6027 | if (pf_translate_icmp_af(pd, nk->af, | |||
| 6028 | &pd->hdr.icmp)) | |||
| 6029 | return (PF_DROP); | |||
| 6030 | m_copyback(pd->m, pd->off, | |||
| 6031 | sizeof(struct icmp6_hdr), | |||
| 6032 | &pd->hdr.icmp6, M_NOWAIT); | |||
| 6033 | if (pf_change_icmp_af(pd->m, ipoff2, | |||
| 6034 | pd, &pd2, &nk->addr[sidx], | |||
| 6035 | &nk->addr[didx], pd->af, nk->af)) | |||
| 6036 | return (PF_DROP); | |||
| 6037 | if (nk->af == AF_INET) | |||
| 6038 | pd->proto = IPPROTO_ICMP; | |||
| 6039 | else | |||
| 6040 | pd->proto = IPPROTO_ICMPV6; | |||
| 6041 | pd->m->m_pkthdr.ph_rtableid = | |||
| 6042 | nk->rdomain; | |||
| 6043 | pd->destchg = 1; | |||
| 6044 | pf_addrcpy(&pd->nsaddr, | |||
| 6045 | &nk->addr[pd2.sidx], nk->af); | |||
| 6046 | pf_addrcpy(&pd->ndaddr, | |||
| 6047 | &nk->addr[pd2.didx], nk->af); | |||
| 6048 | pd->naf = nk->af; | |||
| 6049 | ||||
| 6050 | pf_patch_16(pd, | |||
| 6051 | &uh->uh_sport, nk->port[sidx]); | |||
| 6052 | pf_patch_16(pd, | |||
| 6053 | &uh->uh_dport, nk->port[didx]); | |||
| 6054 | ||||
| 6055 | m_copyback(pd2.m, pd2.off, sizeof(*uh), | |||
| 6056 | uh, M_NOWAIT); | |||
| 6057 | return (PF_AFRT); | |||
| 6058 | } | |||
| 6059 | #endif /* INET6 */ | |||
| 6060 | ||||
| 6061 | if (PF_ANEQ(pd2.src, | |||
| 6062 | &nk->addr[pd2.sidx], pd2.af) || | |||
| 6063 | nk->port[pd2.sidx] != uh->uh_sport) | |||
| 6064 | pf_translate_icmp(pd, pd2.src, | |||
| 6065 | &uh->uh_sport, pd->dst, | |||
| 6066 | &nk->addr[pd2.sidx], | |||
| 6067 | nk->port[pd2.sidx]); | |||
| 6068 | ||||
| 6069 | if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], | |||
| 6070 | pd2.af) || pd2.rdomain != nk->rdomain) | |||
| 6071 | pd->destchg = 1; | |||
| 6072 | pd->m->m_pkthdr.ph_rtableid = nk->rdomain; | |||
| 6073 | ||||
| 6074 | if (PF_ANEQ(pd2.dst, | |||
| 6075 | &nk->addr[pd2.didx], pd2.af) || | |||
| 6076 | nk->port[pd2.didx] != uh->uh_dport) | |||
| 6077 | pf_translate_icmp(pd, pd2.dst, | |||
| 6078 | &uh->uh_dport, pd->src, | |||
| 6079 | &nk->addr[pd2.didx], | |||
| 6080 | nk->port[pd2.didx]); | |||
| 6081 | ||||
| 6082 | switch (pd2.af) { | |||
| 6083 | case AF_INET: | |||
| 6084 | m_copyback(pd->m, pd->off, ICMP_MINLEN, | |||
| 6085 | &pd->hdr.icmp, M_NOWAIT); | |||
| 6086 | m_copyback(pd2.m, ipoff2, sizeof(h2), | |||
| 6087 | &h2, M_NOWAIT); | |||
| 6088 | break; | |||
| 6089 | #ifdef INET6 | |||
| 6090 | case AF_INET6: | |||
| 6091 | m_copyback(pd->m, pd->off, | |||
| 6092 | sizeof(struct icmp6_hdr), | |||
| 6093 | &pd->hdr.icmp6, M_NOWAIT); | |||
| 6094 | m_copyback(pd2.m, ipoff2, sizeof(h2_6), | |||
| 6095 | &h2_6, M_NOWAIT); | |||
| 6096 | break; | |||
| 6097 | #endif /* INET6 */ | |||
| 6098 | } | |||
| 6099 | /* Avoid recomputing quoted UDP checksum. | |||
| 6100 | * note: udp6 0 csum invalid per rfc2460 p27. | |||
| 6101 | * but presumed nothing cares in this context */ | |||
| 6102 | pf_patch_16(pd, &uh->uh_sum, 0); | |||
| 6103 | m_copyback(pd2.m, pd2.off, sizeof(*uh), uh, | |||
| 6104 | M_NOWAIT); | |||
| 6105 | copyback = 1; | |||
| 6106 | } | |||
| 6107 | break; | |||
| 6108 | } | |||
| 6109 | case IPPROTO_ICMP: { | |||
| 6110 | struct icmp *iih = &pd2.hdr.icmp; | |||
| 6111 | ||||
| 6112 | if (pd2.af != AF_INET) { | |||
| 6113 | REASON_SET(reason, PFRES_NORM); | |||
| 6114 | return (PF_DROP); | |||
| 6115 | } | |||
| 6116 | ||||
| 6117 | if (!pf_pull_hdr(pd2.m, pd2.off, iih, ICMP_MINLEN, | |||
| 6118 | reason, pd2.af)) { | |||
| 6119 | DPFPRINTF(LOG_NOTICE, | |||
| 6120 | "ICMP error message too short (icmp)"); | |||
| 6121 | return (PF_DROP); | |||
| 6122 | } | |||
| 6123 | ||||
| 6124 | pf_icmp_mapping(&pd2, iih->icmp_type, | |||
| 6125 | &icmp_dir, &virtual_id, &virtual_type); | |||
| 6126 | ||||
| 6127 | ret = pf_icmp_state_lookup(&pd2, &key, stp, | |||
| 6128 | virtual_id, virtual_type, icmp_dir, &iidx, 0, 1); | |||
| 6129 | if (ret >= 0) | |||
| 6130 | return (ret); | |||
| 6131 | ||||
| 6132 | /* translate source/destination address, if necessary */ | |||
| 6133 | if ((*stp)->key[PF_SK_WIRE] != | |||
| 6134 | (*stp)->key[PF_SK_STACK]) { | |||
| 6135 | struct pf_state_key *nk; | |||
| 6136 | int afto, sidx, didx; | |||
| 6137 | ||||
| 6138 | if (PF_REVERSED_KEY((*stp)->key, pd->af)) | |||
| 6139 | nk = (*stp)->key[pd->sidx]; | |||
| 6140 | else | |||
| 6141 | nk = (*stp)->key[pd->didx]; | |||
| 6142 | ||||
| 6143 | afto = pd->af != nk->af; | |||
| 6144 | sidx = afto ? pd2.didx : pd2.sidx; | |||
| 6145 | didx = afto ? pd2.sidx : pd2.didx; | |||
| 6146 | iidx = afto ? !iidx : iidx; | |||
| 6147 | ||||
| 6148 | #ifdef INET6 | |||
| 6149 | if (afto) { | |||
| 6150 | if (nk->af != AF_INET6) | |||
| 6151 | return (PF_DROP); | |||
| 6152 | if (pf_translate_icmp_af(pd, nk->af, | |||
| 6153 | &pd->hdr.icmp)) | |||
| 6154 | return (PF_DROP); | |||
| 6155 | m_copyback(pd->m, pd->off, | |||
| 6156 | sizeof(struct icmp6_hdr), | |||
| 6157 | &pd->hdr.icmp6, M_NOWAIT); | |||
| 6158 | if (pf_change_icmp_af(pd->m, ipoff2, | |||
| 6159 | pd, &pd2, &nk->addr[sidx], | |||
| 6160 | &nk->addr[didx], pd->af, nk->af)) | |||
| 6161 | return (PF_DROP); | |||
| 6162 | pd->proto = IPPROTO_ICMPV6; | |||
| 6163 | if (pf_translate_icmp_af(pd, | |||
| 6164 | nk->af, iih)) | |||
| 6165 | return (PF_DROP); | |||
| 6166 | if (virtual_type == htons(ICMP_ECHO)) | |||
| 6167 | pf_patch_16(pd, &iih->icmp_id, | |||
| 6168 | nk->port[iidx]); | |||
| 6169 | m_copyback(pd2.m, pd2.off, ICMP_MINLEN, | |||
| 6170 | iih, M_NOWAIT); | |||
| 6171 | pd->m->m_pkthdr.ph_rtableid = | |||
| 6172 | nk->rdomain; | |||
| 6173 | pd->destchg = 1; | |||
| 6174 | pf_addrcpy(&pd->nsaddr, | |||
| 6175 | &nk->addr[pd2.sidx], nk->af); | |||
| 6176 | pf_addrcpy(&pd->ndaddr, | |||
| 6177 | &nk->addr[pd2.didx], nk->af); | |||
| 6178 | pd->naf = nk->af; | |||
| 6179 | return (PF_AFRT); | |||
| 6180 | } | |||
| 6181 | #endif /* INET6 */ | |||
| 6182 | ||||
| 6183 | if (PF_ANEQ(pd2.src, | |||
| 6184 | &nk->addr[pd2.sidx], pd2.af) || | |||
| 6185 | (virtual_type == htons(ICMP_ECHO) && | |||
| 6186 | nk->port[iidx] != iih->icmp_id)) | |||
| 6187 | pf_translate_icmp(pd, pd2.src, | |||
| 6188 | (virtual_type == htons(ICMP_ECHO)) ? | |||
| 6189 | &iih->icmp_id : NULL, | |||
| 6190 | pd->dst, &nk->addr[pd2.sidx], | |||
| 6191 | (virtual_type == htons(ICMP_ECHO)) ? | |||
| 6192 | nk->port[iidx] : 0); | |||
| 6193 | ||||
| 6194 | if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], | |||
| 6195 | pd2.af) || pd2.rdomain != nk->rdomain) | |||
| 6196 | pd->destchg = 1; | |||
| 6197 | pd->m->m_pkthdr.ph_rtableid = nk->rdomain; | |||
| 6198 | ||||
| 6199 | if (PF_ANEQ(pd2.dst, | |||
| 6200 | &nk->addr[pd2.didx], pd2.af)) | |||
| 6201 | pf_translate_icmp(pd, pd2.dst, NULL, | |||
| 6202 | pd->src, &nk->addr[pd2.didx], 0); | |||
| 6203 | ||||
| 6204 | m_copyback(pd->m, pd->off, ICMP_MINLEN, | |||
| 6205 | &pd->hdr.icmp, M_NOWAIT); | |||
| 6206 | m_copyback(pd2.m, ipoff2, sizeof(h2), &h2, | |||
| 6207 | M_NOWAIT); | |||
| 6208 | m_copyback(pd2.m, pd2.off, ICMP_MINLEN, iih, | |||
| 6209 | M_NOWAIT); | |||
| 6210 | copyback = 1; | |||
| 6211 | } | |||
| 6212 | break; | |||
| 6213 | } | |||
| 6214 | #ifdef INET6 | |||
| 6215 | case IPPROTO_ICMPV6: { | |||
| 6216 | struct icmp6_hdr *iih = &pd2.hdr.icmp6; | |||
| 6217 | ||||
| 6218 | if (pd2.af != AF_INET6) { | |||
| 6219 | REASON_SET(reason, PFRES_NORM); | |||
| 6220 | return (PF_DROP); | |||
| 6221 | } | |||
| 6222 | ||||
| 6223 | if (!pf_pull_hdr(pd2.m, pd2.off, iih, | |||
| 6224 | sizeof(struct icmp6_hdr), reason, pd2.af)) { | |||
| 6225 | DPFPRINTF(LOG_NOTICE, | |||
| 6226 | "ICMP error message too short (icmp6)"); | |||
| 6227 | return (PF_DROP); | |||
| 6228 | } | |||
| 6229 | ||||
| 6230 | pf_icmp_mapping(&pd2, iih->icmp6_type, | |||
| 6231 | &icmp_dir, &virtual_id, &virtual_type); | |||
| 6232 | ret = pf_icmp_state_lookup(&pd2, &key, stp, | |||
| 6233 | virtual_id, virtual_type, icmp_dir, &iidx, 0, 1); | |||
| 6234 | /* IPv6? try matching a multicast address */ | |||
| 6235 | if (ret == PF_DROP && pd2.af == AF_INET6 && | |||
| 6236 | icmp_dir == PF_OUT) | |||
| 6237 | ret = pf_icmp_state_lookup(&pd2, &key, stp, | |||
| 6238 | virtual_id, virtual_type, icmp_dir, &iidx, | |||
| 6239 | 1, 1); | |||
| 6240 | if (ret >= 0) | |||
| 6241 | return (ret); | |||
| 6242 | ||||
| 6243 | /* translate source/destination address, if necessary */ | |||
| 6244 | if ((*stp)->key[PF_SK_WIRE] != | |||
| 6245 | (*stp)->key[PF_SK_STACK]) { | |||
| 6246 | struct pf_state_key *nk; | |||
| 6247 | int afto, sidx, didx; | |||
| 6248 | ||||
| 6249 | if (PF_REVERSED_KEY((*stp)->key, pd->af)) | |||
| 6250 | nk = (*stp)->key[pd->sidx]; | |||
| 6251 | else | |||
| 6252 | nk = (*stp)->key[pd->didx]; | |||
| 6253 | ||||
| 6254 | afto = pd->af != nk->af; | |||
| 6255 | sidx = afto ? pd2.didx : pd2.sidx; | |||
| 6256 | didx = afto ? pd2.sidx : pd2.didx; | |||
| 6257 | iidx = afto ? !iidx : iidx; | |||
| 6258 | ||||
| 6259 | if (afto) { | |||
| 6260 | if (nk->af != AF_INET) | |||
| 6261 | return (PF_DROP); | |||
| 6262 | if (pf_translate_icmp_af(pd, nk->af, | |||
| 6263 | &pd->hdr.icmp)) | |||
| 6264 | return (PF_DROP); | |||
| 6265 | m_copyback(pd->m, pd->off, | |||
| 6266 | sizeof(struct icmp6_hdr), | |||
| 6267 | &pd->hdr.icmp6, M_NOWAIT); | |||
| 6268 | if (pf_change_icmp_af(pd->m, ipoff2, | |||
| 6269 | pd, &pd2, &nk->addr[sidx], | |||
| 6270 | &nk->addr[didx], pd->af, nk->af)) | |||
| 6271 | return (PF_DROP); | |||
| 6272 | pd->proto = IPPROTO_ICMP; | |||
| 6273 | if (pf_translate_icmp_af(pd, | |||
| 6274 | nk->af, iih)) | |||
| 6275 | return (PF_DROP); | |||
| 6276 | if (virtual_type == | |||
| 6277 | htons(ICMP6_ECHO_REQUEST)) | |||
| 6278 | pf_patch_16(pd, &iih->icmp6_id, | |||
| 6279 | nk->port[iidx]); | |||
| 6280 | m_copyback(pd2.m, pd2.off, | |||
| 6281 | sizeof(struct icmp6_hdr), iih, | |||
| 6282 | M_NOWAIT); | |||
| 6283 | pd->m->m_pkthdr.ph_rtableid = | |||
| 6284 | nk->rdomain; | |||
| 6285 | pd->destchg = 1; | |||
| 6286 | pf_addrcpy(&pd->nsaddr, | |||
| 6287 | &nk->addr[pd2.sidx], nk->af); | |||
| 6288 | pf_addrcpy(&pd->ndaddr, | |||
| 6289 | &nk->addr[pd2.didx], nk->af); | |||
| 6290 | pd->naf = nk->af; | |||
| 6291 | return (PF_AFRT); | |||
| 6292 | } | |||
| 6293 | ||||
| 6294 | if (PF_ANEQ(pd2.src, | |||
| 6295 | &nk->addr[pd2.sidx], pd2.af) || | |||
| 6296 | ((virtual_type == | |||
| 6297 | htons(ICMP6_ECHO_REQUEST)) && | |||
| 6298 | nk->port[pd2.sidx] != iih->icmp6_id)) | |||
| 6299 | pf_translate_icmp(pd, pd2.src, | |||
| 6300 | (virtual_type == | |||
| 6301 | htons(ICMP6_ECHO_REQUEST)) | |||
| 6302 | ? &iih->icmp6_id : NULL, | |||
| 6303 | pd->dst, &nk->addr[pd2.sidx], | |||
| 6304 | (virtual_type == | |||
| 6305 | htons(ICMP6_ECHO_REQUEST)) | |||
| 6306 | ? nk->port[iidx] : 0); | |||
| 6307 | ||||
| 6308 | if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], | |||
| 6309 | pd2.af) || pd2.rdomain != nk->rdomain) | |||
| 6310 | pd->destchg = 1; | |||
| 6311 | pd->m->m_pkthdr.ph_rtableid = nk->rdomain; | |||
| 6312 | ||||
| 6313 | if (PF_ANEQ(pd2.dst, | |||
| 6314 | &nk->addr[pd2.didx], pd2.af)) | |||
| 6315 | pf_translate_icmp(pd, pd2.dst, NULL, | |||
| 6316 | pd->src, &nk->addr[pd2.didx], 0); | |||
| 6317 | ||||
| 6318 | m_copyback(pd->m, pd->off, | |||
| 6319 | sizeof(struct icmp6_hdr), &pd->hdr.icmp6, | |||
| 6320 | M_NOWAIT); | |||
| 6321 | m_copyback(pd2.m, ipoff2, sizeof(h2_6), &h2_6, | |||
| 6322 | M_NOWAIT); | |||
| 6323 | m_copyback(pd2.m, pd2.off, | |||
| 6324 | sizeof(struct icmp6_hdr), iih, M_NOWAIT); | |||
| 6325 | copyback = 1; | |||
| 6326 | } | |||
| 6327 | break; | |||
| 6328 | } | |||
| 6329 | #endif /* INET6 */ | |||
| 6330 | default: { | |||
| 6331 | int action; | |||
| 6332 | ||||
| 6333 | key.af = pd2.af; | |||
| 6334 | key.proto = pd2.proto; | |||
| 6335 | key.rdomain = pd2.rdomain; | |||
| 6336 | pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af); | |||
| 6337 | pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af); | |||
| 6338 | key.port[0] = key.port[1] = 0; | |||
| 6339 | key.hash = pf_pkt_hash(pd2.af, pd2.proto, | |||
| 6340 | pd2.src, pd2.dst, 0, 0); | |||
| 6341 | ||||
| 6342 | action = pf_find_state(&pd2, &key, stp); | |||
| 6343 | if (action != PF_MATCH) | |||
| 6344 | return (action); | |||
| 6345 | ||||
| 6346 | /* translate source/destination address, if necessary */ | |||
| 6347 | if ((*stp)->key[PF_SK_WIRE] != | |||
| 6348 | (*stp)->key[PF_SK_STACK]) { | |||
| 6349 | struct pf_state_key *nk = | |||
| 6350 | (*stp)->key[pd->didx]; | |||
| 6351 | ||||
| 6352 | if (PF_ANEQ(pd2.src, | |||
| 6353 | &nk->addr[pd2.sidx], pd2.af)) | |||
| 6354 | pf_translate_icmp(pd, pd2.src, NULL, | |||
| 6355 | pd->dst, &nk->addr[pd2.sidx], 0); | |||
| 6356 | ||||
| 6357 | if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx], | |||
| 6358 | pd2.af) || pd2.rdomain != nk->rdomain) | |||
| 6359 | pd->destchg = 1; | |||
| 6360 | pd->m->m_pkthdr.ph_rtableid = nk->rdomain; | |||
| 6361 | ||||
| 6362 | if (PF_ANEQ(pd2.dst, | |||
| 6363 | &nk->addr[pd2.didx], pd2.af)) | |||
| 6364 | pf_translate_icmp(pd, pd2.dst, NULL, | |||
| 6365 | pd->src, &nk->addr[pd2.didx], 0); | |||
| 6366 | ||||
| 6367 | switch (pd2.af) { | |||
| 6368 | case AF_INET: | |||
| 6369 | m_copyback(pd->m, pd->off, ICMP_MINLEN, | |||
| 6370 | &pd->hdr.icmp, M_NOWAIT); | |||
| 6371 | m_copyback(pd2.m, ipoff2, sizeof(h2), | |||
| 6372 | &h2, M_NOWAIT); | |||
| 6373 | break; | |||
| 6374 | #ifdef INET6 | |||
| 6375 | case AF_INET6: | |||
| 6376 | m_copyback(pd->m, pd->off, | |||
| 6377 | sizeof(struct icmp6_hdr), | |||
| 6378 | &pd->hdr.icmp6, M_NOWAIT); | |||
| 6379 | m_copyback(pd2.m, ipoff2, sizeof(h2_6), | |||
| 6380 | &h2_6, M_NOWAIT); | |||
| 6381 | break; | |||
| 6382 | #endif /* INET6 */ | |||
| 6383 | } | |||
| 6384 | copyback = 1; | |||
| 6385 | } | |||
| 6386 | break; | |||
| 6387 | } | |||
| 6388 | } | |||
| 6389 | } | |||
| 6390 | if (copyback) { | |||
| 6391 | m_copyback(pd->m, pd->off, pd->hdrlen, &pd->hdr, M_NOWAIT); | |||
| 6392 | } | |||
| 6393 | ||||
| 6394 | return (PF_PASS); | |||
| 6395 | } | |||
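/*
 * Illustrative sketch, not part of pf.c: the error branch of
 * pf_test_state_icmp() in miniature.  Given a contiguous buffer starting
 * at the outer ICMP header of an IPv4 error, skip ICMP_MINLEN, re-read
 * the quoted IP header length, and pick up the first 8 bytes of the
 * quoted TCP/UDP header, which carry the ports used for the inner state
 * lookup.  Assumes no mbuf chaining; the toy_* name is hypothetical.
 */
static int
toy_quoted_ports(const u_int8_t *icmp, int len, u_int16_t *sport,
    u_int16_t *dport)
{
	int	ipoff2 = ICMP_MINLEN;	/* quoted IP header follows */
	int	hlen;

	if (len < ipoff2 + (int)sizeof(struct ip) + 8)
		return (-1);		/* "too short", as in PF_DROP above */
	hlen = (icmp[ipoff2] & 0x0f) << 2;	/* quoted ip_hl, in bytes */
	if (len < ipoff2 + hlen + 8)
		return (-1);
	/* the ports stay in network byte order, as in the real lookup key */
	memcpy(sport, icmp + ipoff2 + hlen, sizeof(*sport));
	memcpy(dport, icmp + ipoff2 + hlen + 2, sizeof(*dport));
	return (0);
}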
| 6396 | ||||
| 6397 | /* | |||
| 6398 | * ipoff and off are measured from the start of the mbuf chain. | |||
| 6399 | * h must be at "ipoff" on the mbuf chain. | |||
| 6400 | */ | |||
| 6401 | void * | |||
| 6402 | pf_pull_hdr(struct mbuf *m, int off, void *p, int len, | |||
| 6403 | u_short *reasonp, sa_family_t af) | |||
| 6404 | { | |||
| 6405 | int iplen = 0; | |||
| 6406 | ||||
| 6407 | switch (af) { | |||
| 6408 | case AF_INET: { | |||
| 6409 | struct ip *h = mtod(m, struct ip *); | |||
| 6410 | u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3; | |||
| 6411 | ||||
| 6412 | if (fragoff) { | |||
| 6413 | REASON_SET(reasonp, PFRES_FRAG); | |||
| 6414 | return (NULL); | |||
| 6415 | } | |||
| 6416 | iplen = ntohs(h->ip_len); | |||
| 6417 | break; | |||
| 6418 | } | |||
| 6419 | #ifdef INET6 | |||
| 6420 | case AF_INET6: { | |||
| 6421 | struct ip6_hdr *h = mtod(m, struct ip6_hdr *); | |||
| 6422 | ||||
| 6423 | iplen = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr); | |||
| 6424 | break; | |||
| 6425 | } | |||
| 6426 | #endif /* INET6 */ | |||
| 6427 | } | |||
| 6428 | if (m->m_pkthdr.len < off + len || iplen < off + len) { | |||
| 6429 | REASON_SET(reasonp, PFRES_SHORT); | |||
| 6430 | return (NULL); | |||
| 6431 | } | |||
| 6432 | m_copydata(m, off, len, p); | |||
| 6433 | return (p); | |||
| 6434 | } | |||
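/*
 * Usage sketch for pf_pull_hdr(), hypothetical caller, not in pf.c:
 * copy a header out of the mbuf chain into a stack buffer before
 * touching its fields; the reason code is filled in on failure.
 */
static int
toy_pull_udp(struct mbuf *m, int off, struct udphdr *uh, u_short *reason)
{
	if (pf_pull_hdr(m, off, uh, sizeof(*uh), reason, AF_INET) == NULL)
		return (PF_DROP);
	return (PF_PASS);
}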
| 6435 | ||||
| 6436 | int | |||
| 6437 | pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif, | |||
| 6438 | int rtableid) | |||
| 6439 | { | |||
| 6440 | struct sockaddr_storage ss; | |||
| 6441 | struct sockaddr_in *dst; | |||
| 6442 | int ret = 1; | |||
| 6443 | int check_mpath; | |||
| 6444 | #ifdef INET6 | |||
| 6445 | struct sockaddr_in6 *dst6; | |||
| 6446 | #endif /* INET6 */ | |||
| 6447 | struct rtentry *rt = NULL; | |||
| 6448 | ||||
| 6449 | check_mpath = 0; | |||
| 6450 | memset(&ss, 0, sizeof(ss)); | |||
| 6451 | switch (af) { | |||
| 6452 | case AF_INET: | |||
| 6453 | dst = (struct sockaddr_in *)&ss; | |||
| 6454 | dst->sin_family = AF_INET; | |||
| 6455 | dst->sin_len = sizeof(*dst); | |||
| 6456 | dst->sin_addr = addr->v4; | |||
| 6457 | if (ipmultipath) | |||
| 6458 | check_mpath = 1; | |||
| 6459 | break; | |||
| 6460 | #ifdef INET6 | |||
| 6461 | case AF_INET6: | |||
| 6462 | /* | |||
| 6463 | * Skip check for addresses with embedded interface scope, | |||
| 6464 | * as they would always match anyway. | |||
| 6465 | */ | |||
| 6466 | if (IN6_IS_SCOPE_EMBED(&addr->v6)) | |||
| 6467 | goto out; | |||
| 6468 | dst6 = (struct sockaddr_in6 *)&ss; | |||
| 6469 | dst6->sin6_family = AF_INET6; | |||
| 6470 | dst6->sin6_len = sizeof(*dst6); | |||
| 6471 | dst6->sin6_addr = addr->v6; | |||
| 6472 | if (ip6_multipath) | |||
| 6473 | check_mpath = 1; | |||
| 6474 | break; | |||
| 6475 | #endif /* INET6 */ | |||
| 6476 | } | |||
| 6477 | ||||
| 6478 | /* Skip checks for ipsec interfaces */ | |||
| 6479 | if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) | |||
| 6480 | goto out; | |||
| 6481 | ||||
| 6482 | rt = rtalloc(sstosa(&ss), 0, rtableid); | |||
| 6483 | if (rt != NULL) { | |||
| 6484 | /* No interface given, this is a no-route check */ | |||
| 6485 | if (kif == NULL) | |||
| 6486 | goto out; | |||
| 6487 | ||||
| 6488 | if (kif->pfik_ifp == NULL) { | |||
| 6489 | ret = 0; | |||
| 6490 | goto out; | |||
| 6491 | } | |||
| 6492 | ||||
| 6493 | /* Perform uRPF check if passed input interface */ | |||
| 6494 | ret = 0; | |||
| 6495 | do { | |||
| 6496 | if (rt->rt_ifidx == kif->pfik_ifp->if_index) { | |||
| 6497 | ret = 1; | |||
| 6498 | #if NCARP > 0 | |||
| 6499 | } else { | |||
| 6500 | struct ifnet *ifp; | |||
| 6501 | ||||
| 6502 | ifp = if_get(rt->rt_ifidx); | |||
| 6503 | if (ifp != NULL && ifp->if_type == IFT_CARP && | |||
| 6504 | ifp->if_carpdevidx == | |||
| 6505 | kif->pfik_ifp->if_index) | |||
| 6506 | ret = 1; | |||
| 6507 | if_put(ifp); | |||
| 6508 | #endif /* NCARP */ | |||
| 6509 | } | |||
| 6510 | ||||
| 6511 | rt = rtable_iterate(rt); | |||
| 6512 | } while (check_mpath == 1 && rt != NULL && ret == 0); | |||
| 6513 | } else | |||
| 6514 | ret = 0; | |||
| 6515 | out: | |||
| 6516 | rtfree(rt); | |||
| 6517 | return (ret); | |||
| 6518 | } | |||
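/*
 * Illustrative sketch, not part of pf.c: the uRPF rule pf_routable()
 * applies above.  A source address passes only if some route back to it,
 * possibly one of several multipath routes, leaves via the interface the
 * packet came in on (or its carp parent).  Names are hypothetical.
 */
static int
toy_urpf_ok(const unsigned int *route_ifidx, int nroutes,
    unsigned int rcv_ifidx)
{
	int	i;

	for (i = 0; i < nroutes; i++)
		if (route_ifidx[i] == rcv_ifidx)
			return (1);
	return (0);
}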
| 6519 | ||||
| 6520 | int | |||
| 6521 | pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw, | |||
| 6522 | int rtableid) | |||
| 6523 | { | |||
| 6524 | struct sockaddr_storage ss; | |||
| 6525 | struct sockaddr_in *dst; | |||
| 6526 | #ifdef INET6 | |||
| 6527 | struct sockaddr_in6 *dst6; | |||
| 6528 | #endif /* INET6 */ | |||
| 6529 | struct rtentry *rt; | |||
| 6530 | int ret = 0; | |||
| 6531 | ||||
| 6532 | memset(&ss, 0, sizeof(ss)); | |||
| 6533 | switch (af) { | |||
| 6534 | case AF_INET: | |||
| 6535 | dst = (struct sockaddr_in *)&ss; | |||
| 6536 | dst->sin_family = AF_INET; | |||
| 6537 | dst->sin_len = sizeof(*dst); | |||
| 6538 | dst->sin_addr = addr->v4; | |||
| 6539 | break; | |||
| 6540 | #ifdef INET6 | |||
| 6541 | case AF_INET6: | |||
| 6542 | dst6 = (struct sockaddr_in6 *)&ss; | |||
| 6543 | dst6->sin6_family = AF_INET6; | |||
| 6544 | dst6->sin6_len = sizeof(*dst6); | |||
| 6545 | dst6->sin6_addr = addr->v6; | |||
| 6546 | break; | |||
| 6547 | #endif /* INET6 */ | |||
| 6548 | } | |||
| 6549 | ||||
| 6550 | rt = rtalloc(sstosa(&ss), RT_RESOLVE, rtableid); | |||
| 6551 | if (rt != NULL) { | |||
| 6552 | if (rt->rt_labelid == aw->v.rtlabel) | |||
| 6553 | ret = 1; | |||
| 6554 | rtfree(rt); | |||
| 6555 | } | |||
| 6556 | ||||
| 6557 | return (ret); | |||
| 6558 | } | |||
| 6559 | ||||
| 6560 | /* pf_route() may change pd->m, adjust local copies after calling */ | |||
| 6561 | void | |||
| 6562 | pf_route(struct pf_pdesc *pd, struct pf_state *st) | |||
| 6563 | { | |||
| 6564 | struct mbuf *m0; | |||
| 6565 | struct mbuf_list ml; | |||
| 6566 | struct sockaddr_in *dst, sin; | |||
| 6567 | struct rtentry *rt = NULL; | |||
| 6568 | struct ip *ip; | |||
| 6569 | struct ifnet *ifp = NULL; | |||
| 6570 | unsigned int rtableid; | |||
| 6571 | ||||
| 6572 | if (pd->m->m_pkthdr.pf.routed++ > 3) { | |||
| 6573 | m_freem(pd->m); | |||
| 6574 | pd->m = NULL; | |||
| 6575 | return; | |||
| 6576 | } | |||
| 6577 | ||||
| 6578 | if (st->rt == PF_DUPTO) { | |||
| 6579 | if ((m0 = m_dup_pkt(pd->m, max_linkhdr, M_NOWAIT)) == NULL) | |||
| 6580 | return; | |||
| 6581 | } else { | |||
| 6582 | if ((st->rt == PF_REPLYTO) == (st->direction == pd->dir)) | |||
| 6583 | return; | |||
| 6584 | m0 = pd->m; | |||
| 6585 | pd->m = NULL; | |||
| 6586 | } | |||
| 6587 | ||||
| 6588 | if (m0->m_len < sizeof(struct ip)) { | |||
| 6589 | DPFPRINTF(LOG_ERR, | |||
| 6590 | "%s: m0->m_len < sizeof(struct ip)", __func__); | |||
| 6591 | goto bad; | |||
| 6592 | } | |||
| 6593 | ||||
| 6594 | ip = mtod(m0, struct ip *); | |||
| 6595 | ||||
| 6596 | if (pd->dir == PF_IN) { | |||
| 6597 | if (ip->ip_ttl <= IPTTLDEC) { | |||
| 6598 | if (st->rt != PF_DUPTO) { | |||
| 6599 | pf_send_icmp(m0, ICMP_TIMXCEED, | |||
| 6600 | ICMP_TIMXCEED_INTRANS, 0, | |||
| 6601 | pd->af, st->rule.ptr, pd->rdomain); | |||
| 6602 | } | |||
| 6603 | goto bad; | |||
| 6604 | } | |||
| 6605 | ip->ip_ttl -= IPTTLDEC; | |||
| 6606 | } | |||
| 6607 | ||||
| 6608 | memset(&sin, 0, sizeof(sin)); | |||
| 6609 | dst = &sin; | |||
| 6610 | dst->sin_family = AF_INET; | |||
| 6611 | dst->sin_len = sizeof(*dst); | |||
| 6612 | dst->sin_addr = st->rt_addr.v4; | |||
| 6613 | rtableid = m0->m_pkthdr.ph_rtableid; | |||
| 6614 | ||||
| 6615 | rt = rtalloc_mpath(sintosa(dst), &ip->ip_src.s_addr, rtableid); | |||
| 6616 | if (!rtisvalid(rt)) { | |||
| 6617 | if (st->rt != PF_DUPTO) { | |||
| 6618 | pf_send_icmp(m0, ICMP_UNREACH, ICMP_UNREACH_HOST, | |||
| 6619 | 0, pd->af, st->rule.ptr, pd->rdomain); | |||
| 6620 | } | |||
| 6621 | ipstat_inc(ips_noroute); | |||
| 6622 | goto bad; | |||
| 6623 | } | |||
| 6624 | ||||
| 6625 | ifp = if_get(rt->rt_ifidx); | |||
| 6626 | if (ifp == NULL) | |||
| 6627 | goto bad; | |||
| 6628 | ||||
| 6629 | /* A locally generated packet may have invalid source address. */ | |||
| 6630 | if ((ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET && | |||
| 6631 | (ifp->if_flags & IFF_LOOPBACK) == 0) | |||
| 6632 | ip->ip_src = ifatoia(rt->rt_ifa)->ia_addr.sin_addr; | |||
| 6633 | ||||
| 6634 | if (st->rt != PF_DUPTO && pd->dir == PF_IN) { | |||
| 6635 | if (pf_test(AF_INET, PF_OUT, ifp, &m0) != PF_PASS) | |||
| 6636 | goto bad; | |||
| 6637 | else if (m0 == NULL) | |||
| 6638 | goto done; | |||
| 6639 | if (m0->m_len < sizeof(struct ip)) { | |||
| 6640 | DPFPRINTF(LOG_ERR, | |||
| 6641 | "%s: m0->m_len < sizeof(struct ip)", __func__); | |||
| 6642 | goto bad; | |||
| 6643 | } | |||
| 6644 | ip = mtod(m0, struct ip *); | |||
| 6645 | } | |||
| 6646 | ||||
| 6647 | if (if_output_tso(ifp, &m0, sintosa(dst), rt, ifp->if_mtu) || | |||
| 6648 | m0 == NULL) | |||
| 6649 | goto done; | |||
| 6650 | ||||
| 6651 | /* | |||
| 6652 | * Too large for interface; fragment if possible. | |||
| 6653 | * Must be able to put at least 8 bytes per fragment. | |||
| 6654 | */ | |||
| 6655 | if (ip->ip_off & htons(IP_DF)) { | |||
| 6656 | ipstat_inc(ips_cantfrag); | |||
| 6657 | if (st->rt != PF_DUPTO) | |||
| 6658 | pf_send_icmp(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, | |||
| 6659 | ifp->if_mtu, pd->af, st->rule.ptr, pd->rdomain); | |||
| 6660 | goto bad; | |||
| 6661 | } | |||
| 6662 | ||||
| 6663 | if (ip_fragment(m0, &ml, ifp, ifp->if_mtu) || | |||
| 6664 | if_output_ml(ifp, &ml, sintosa(dst), rt)) | |||
| 6665 | goto done; | |||
| 6666 | ipstat_inc(ips_fragmented); | |||
| 6667 | ||||
| 6668 | done: | |||
| 6669 | if_put(ifp); | |||
| 6670 | rtfree(rt); | |||
| 6671 | return; | |||
| 6672 | ||||
| 6673 | bad: | |||
| 6674 | m_freem(m0); | |||
| 6675 | goto done; | |||
| 6676 | } | |||
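| | /* | |||
| | * Editor's sketch (assumption, not part of the pf.c source): per the | |||
| | * comment above pf_route(), a caller that keeps pointers into the packet | |||
| | * is expected to refresh its local copies afterwards, roughly: | |||
| | * | |||
| | * pf_route(pd, st); | |||
| | * if (pd->m == NULL) | |||
| | * return (action); | |||
| | * ip = mtod(pd->m, struct ip *); | |||
| | */ | |||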
| 6677 | ||||
| 6678 | #ifdef INET6 | |||
| 6679 | /* pf_route6() may change pd->m, adjust local copies after calling */ | |||
| 6680 | void | |||
| 6681 | pf_route6(struct pf_pdesc *pd, struct pf_state *st) | |||
| 6682 | { | |||
| 6683 | struct mbuf *m0; | |||
| 6684 | struct sockaddr_in6 *dst, sin6; | |||
| 6685 | struct rtentry *rt = NULL; | |||
| 6686 | struct ip6_hdr *ip6; | |||
| 6687 | struct ifnet *ifp = NULL; | |||
| 6688 | struct m_tag *mtag; | |||
| 6689 | unsigned int rtableid; | |||
| 6690 | ||||
| 6691 | if (pd->m->m_pkthdr.pf.routed++ > 3) { | |||
| 6692 | m_freem(pd->m); | |||
| 6693 | pd->m = NULL; | |||
| 6694 | return; | |||
| 6695 | } | |||
| 6696 | ||||
| 6697 | if (st->rt == PF_DUPTO) { | |||
| 6698 | if ((m0 = m_dup_pkt(pd->m, max_linkhdr, M_NOWAIT)) == NULL) | |||
| 6699 | return; | |||
| 6700 | } else { | |||
| 6701 | if ((st->rt == PF_REPLYTO) == (st->direction == pd->dir)) | |||
| 6702 | return; | |||
| 6703 | m0 = pd->m; | |||
| 6704 | pd->m = NULL; | |||
| 6705 | } | |||
| 6706 | ||||
| 6707 | if (m0->m_len < sizeof(struct ip6_hdr)) { | |||
| 6708 | DPFPRINTF(LOG_ERR, | |||
| 6709 | "%s: m0->m_len < sizeof(struct ip6_hdr)", __func__); | |||
| 6710 | goto bad; | |||
| 6711 | } | |||
| 6712 | ip6 = mtod(m0, struct ip6_hdr *); | |||
| 6713 | ||||
| 6714 | if (pd->dir == PF_IN) { | |||
| 6715 | if (ip6->ip6_hlim <= IPV6_HLIMDEC) { | |||
| 6716 | if (st->rt != PF_DUPTO) { | |||
| 6717 | pf_send_icmp(m0, ICMP6_TIME_EXCEEDED, | |||
| 6718 | ICMP6_TIME_EXCEED_TRANSIT, 0, | |||
| 6719 | pd->af, st->rule.ptr, pd->rdomain); | |||
| 6720 | } | |||
| 6721 | goto bad; | |||
| 6722 | } | |||
| 6723 | ip6->ip6_hlim -= IPV6_HLIMDEC; | |||
| 6724 | } | |||
| 6725 | ||||
| 6726 | memset(&sin6, 0, sizeof(sin6)); | |||
| 6727 | dst = &sin6; | |||
| 6728 | dst->sin6_family = AF_INET6; | |||
| 6729 | dst->sin6_len = sizeof(*dst); | |||
| 6730 | dst->sin6_addr = st->rt_addr.v6; | |||
| 6731 | rtableid = m0->m_pkthdr.ph_rtableid; | |||
| 6732 | ||||
| 6733 | rt = rtalloc_mpath(sin6tosa(dst), &ip6->ip6_src.s6_addr32[0], | |||
| 6734 | rtableid); | |||
| 6735 | if (!rtisvalid(rt)) { | |||
| 6736 | if (st->rt != PF_DUPTO) { | |||
| 6737 | pf_send_icmp(m0, ICMP6_DST_UNREACH, | |||
| 6738 | ICMP6_DST_UNREACH_NOROUTE, 0, | |||
| 6739 | pd->af, st->rule.ptr, pd->rdomain); | |||
| 6740 | } | |||
| 6741 | ip6stat_inc(ip6s_noroute); | |||
| 6742 | goto bad; | |||
| 6743 | } | |||
| 6744 | ||||
| 6745 | ifp = if_get(rt->rt_ifidx); | |||
| 6746 | if (ifp == NULL) | |||
| 6747 | goto bad; | |||
| 6748 | ||||
| 6749 | /* A locally generated packet may have invalid source address. */ | |||
| 6750 | if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src) && | |||
| 6751 | (ifp->if_flags & IFF_LOOPBACK) == 0) | |||
| 6752 | ip6->ip6_src = ifatoia6(rt->rt_ifa)->ia_addr.sin6_addr; | |||
| 6753 | ||||
| 6754 | if (st->rt != PF_DUPTO && pd->dir == PF_IN) { | |||
| 6755 | if (pf_test(AF_INET6, PF_OUT, ifp, &m0) != PF_PASS) | |||
| 6756 | goto bad; | |||
| 6757 | else if (m0 == NULL) | |||
| 6758 | goto done; | |||
| 6759 | if (m0->m_len < sizeof(struct ip6_hdr)) { | |||
| 6760 | DPFPRINTF(LOG_ERR, | |||
| 6761 | "%s: m0->m_len < sizeof(struct ip6_hdr)", __func__); | |||
| 6762 | goto bad; | |||
| 6763 | } | |||
| 6764 | } | |||
| 6765 | ||||
| 6766 | /* | |||
| 6767 | * If packet has been reassembled by PF earlier, we have to | |||
| 6768 | * use pf_refragment6() here to turn it back to fragments. | |||
| 6769 | */ | |||
| 6770 | if ((mtag = m_tag_find(m0, PACKET_TAG_PF_REASSEMBLED, NULL))) { | |||
| 6771 | (void) pf_refragment6(&m0, mtag, dst, ifp, rt); | |||
| 6772 | goto done; | |||
| 6773 | } | |||
| 6774 | ||||
| 6775 | if (if_output_tso(ifp, &m0, sin6tosa(dst), rt, ifp->if_mtu) || | |||
| 6776 | m0 == NULL) | |||
| 6777 | goto done; | |||
| 6778 | ||||
| 6779 | ip6stat_inc(ip6s_cantfrag); | |||
| 6780 | if (st->rt != PF_DUPTO) | |||
| 6781 | pf_send_icmp(m0, ICMP6_PACKET_TOO_BIG, 0, | |||
| 6782 | ifp->if_mtu, pd->af, st->rule.ptr, pd->rdomain); | |||
| 6783 | goto bad; | |||
| 6784 | ||||
| 6785 | done: | |||
| 6786 | if_put(ifp); | |||
| 6787 | rtfree(rt); | |||
| 6788 | return; | |||
| 6789 | ||||
| 6790 | bad: | |||
| 6791 | m_freem(m0); | |||
| 6792 | goto done; | |||
| 6793 | } | |||
| 6794 | #endif /* INET6 */ | |||
| 6795 | ||||
| 6796 | /* | |||
| 6797 | * check TCP checksum and set mbuf flag | |||
| 6798 | * off is the offset where the protocol header starts | |||
| 6799 | * len is the total length of protocol header plus payload | |||
| 6800 | * returns 0 when the checksum is valid, otherwise returns 1. | |||
| 6801 | * if the _OUT flag is set the checksum isn't done yet, consider these ok | |||
| 6802 | */ | |||
| 6803 | int | |||
| 6804 | pf_check_tcp_cksum(struct mbuf *m, int off, int len, sa_family_t af) | |||
| 6805 | { | |||
| 6806 | u_int16_t sum; | |||
| 6807 | ||||
| 6808 | if (m->m_pkthdr.csum_flags & | |||
| 6809 | (M_TCP_CSUM_IN_OK | M_TCP_CSUM_OUT)) { | |||
| 6810 | return (0); | |||
| 6811 | } | |||
| 6812 | if (m->m_pkthdr.csum_flags & M_TCP_CSUM_IN_BAD || | |||
| 6813 | off < sizeof(struct ip) || | |||
| 6814 | m->m_pkthdr.len < off + len) { | |||
| 6815 | return (1); | |||
| 6816 | } | |||
| 6817 | ||||
| 6818 | /* need to do it in software */ | |||
| 6819 | tcpstat_inc(tcps_inswcsum); | |||
| 6820 | ||||
| 6821 | switch (af) { | |||
| 6822 | case AF_INET: | |||
| 6823 | if (m->m_len < sizeof(struct ip)) | |||
| 6824 | return (1); | |||
| 6825 | ||||
| 6826 | sum = in4_cksum(m, IPPROTO_TCP, off, len); | |||
| 6827 | break; | |||
| 6828 | #ifdef INET6 | |||
| 6829 | case AF_INET6: | |||
| 6830 | if (m->m_len < sizeof(struct ip6_hdr)) | |||
| 6831 | return (1); | |||
| 6832 | ||||
| 6833 | sum = in6_cksum(m, IPPROTO_TCP, off, len); | |||
| 6834 | break; | |||
| 6835 | #endif /* INET6 */ | |||
| 6836 | default: | |||
| 6837 | unhandled_af(af); | |||
| 6838 | } | |||
| 6839 | if (sum) { | |||
| 6840 | tcpstat_inc(tcps_rcvbadsum); | |||
| 6841 | m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_BAD; | |||
| 6842 | return (1); | |||
| 6843 | } | |||
| 6844 | ||||
| 6845 | m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK; | |||
| 6846 | return (0); | |||
| 6847 | } | |||
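| | /* | |||
| | * Editor's sketch (assumption, not source text): following the header | |||
| | * comment, a caller would pass the offset of the TCP header and the | |||
| | * length of TCP header plus payload, e.g.: | |||
| | * | |||
| | * if (pf_check_tcp_cksum(pd->m, pd->off, pd->tot_len - pd->off, | |||
| | * pd->af)) | |||
| | * return (PF_DROP); | |||
| | */ | |||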
| 6848 | ||||
| 6849 | struct pf_divert * | |||
| 6850 | pf_find_divert(struct mbuf *m) | |||
| 6851 | { | |||
| 6852 | struct m_tag *mtag; | |||
| 6853 | ||||
| 6854 | if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) | |||
| 6855 | return (NULL); | |||
| 6856 | ||||
| 6857 | return ((struct pf_divert *)(mtag + 1)); | |||
| 6858 | } | |||
| 6859 | ||||
| 6860 | struct pf_divert * | |||
| 6861 | pf_get_divert(struct mbuf *m) | |||
| 6862 | { | |||
| 6863 | struct m_tag *mtag; | |||
| 6864 | ||||
| 6865 | if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) { | |||
| 6866 | mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert), | |||
| 6867 | M_NOWAIT); | |||
| 6868 | if (mtag == NULL) | |||
| 6869 | return (NULL); | |||
| 6870 | memset(mtag + 1, 0, sizeof(struct pf_divert)); | |||
| 6871 | m_tag_prepend(m, mtag); | |||
| 6872 | } | |||
| 6873 | ||||
| 6874 | return ((struct pf_divert *)(mtag + 1)); | |||
| 6875 | } | |||
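| | /* | |||
| | * Editor's note (sketch, field names assumed): pf_find_divert() only | |||
| | * looks up an existing divert tag, while pf_get_divert() allocates and | |||
| | * prepends a zeroed one on demand, so a divert rule can simply do: | |||
| | * | |||
| | * struct pf_divert *divert; | |||
| | * | |||
| | * if ((divert = pf_get_divert(pd->m)) == NULL) | |||
| | * return (PF_DROP); | |||
| | * divert->port = r->divert.port; | |||
| | */ | |||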
| 6876 | ||||
| 6877 | int | |||
| 6878 | pf_walk_option(struct pf_pdesc *pd, struct ip *h, int off, int end, | |||
| 6879 | u_short *reason) | |||
| 6880 | { | |||
| 6881 | uint8_t type, length, opts[15 * 4 - sizeof(struct ip)]; | |||
| 6882 | ||||
| 6883 | /* IP header in payload of ICMP packet may be too short */ | |||
| 6884 | if (pd->m->m_pkthdr.len < end) { | |||
| 6885 | DPFPRINTF(LOG_NOTICE, "IP option too short"); | |||
| 6886 | REASON_SET(reason, PFRES_SHORT); | |||
| 6887 | return (PF_DROP); | |||
| 6888 | } | |||
| 6889 | ||||
| 6890 | KASSERT(end - off <= sizeof(opts)); | |||
| 6891 | m_copydata(pd->m, off, end - off, opts); | |||
| 6892 | end -= off; | |||
| 6893 | off = 0; | |||
| 6894 | ||||
| 6895 | while (off < end) { | |||
| 6896 | type = opts[off]; | |||
| 6897 | if (type == IPOPT_EOL) | |||
| 6898 | break; | |||
| 6899 | if (type == IPOPT_NOP) { | |||
| 6900 | off++; | |||
| 6901 | continue; | |||
| 6902 | } | |||
| 6903 | if (off + 2 > end) { | |||
| 6904 | DPFPRINTF(LOG_NOTICE, "IP length opt")do { if (pf_status.debug >= (5)) { log(5, "pf: "); addlog( "IP length opt"); addlog("\n"); } } while (0); | |||
| 6905 | REASON_SET(reason, PFRES_IPOPTIONS)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (8); if (8 < 17) pf_status.counters[8]++; } } while (0); | |||
| 6906 | return (PF_DROP); | |||
| 6907 | } | |||
| 6908 | length = opts[off + 1]; | |||
| 6909 | if (length < 2) { | |||
| 6910 | DPFPRINTF(LOG_NOTICE, "IP short opt")do { if (pf_status.debug >= (5)) { log(5, "pf: "); addlog( "IP short opt"); addlog("\n"); } } while (0); | |||
| 6911 | REASON_SET(reason, PFRES_IPOPTIONS)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (8); if (8 < 17) pf_status.counters[8]++; } } while (0); | |||
| 6912 | return (PF_DROP); | |||
| 6913 | } | |||
| 6914 | if (off + length > end) { | |||
| 6915 | DPFPRINTF(LOG_NOTICE, "IP long opt")do { if (pf_status.debug >= (5)) { log(5, "pf: "); addlog( "IP long opt"); addlog("\n"); } } while (0); | |||
| 6916 | REASON_SET(reason, PFRES_IPOPTIONS)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (8); if (8 < 17) pf_status.counters[8]++; } } while (0); | |||
| 6917 | return (PF_DROP); | |||
| 6918 | } | |||
| 6919 | switch (type) { | |||
| 6920 | case IPOPT_RA: | |||
| 6921 | SET(pd->badopts, PF_OPT_ROUTER_ALERT); | |||
| 6922 | break; | |||
| 6923 | default: | |||
| 6924 | SET(pd->badopts, PF_OPT_OTHER); | |||
| 6925 | break; | |||
| 6926 | } | |||
| 6927 | off += length; | |||
| 6928 | } | |||
| 6929 | ||||
| 6930 | return (PF_PASS); | |||
| 6931 | } | |||
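| | /* | |||
| | * Editor's note (sketch): the loop above walks the options area as | |||
| | * type/length pairs where the length octet counts both the type and | |||
| | * length octets themselves, so a well-formed router alert option is | |||
| | * four bytes in total: | |||
| | * | |||
| | * uint8_t ra_opt[4] = { IPOPT_RA, 4, 0, 0 }; | |||
| | */ | |||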
| 6932 | ||||
| 6933 | int | |||
| 6934 | pf_walk_header(struct pf_pdesc *pd, struct ip *h, u_short *reason) | |||
| 6935 | { | |||
| 6936 | struct ip6_ext ext; | |||
| 6937 | u_int32_t hlen, end; | |||
| 6938 | int hdr_cnt; | |||
| 6939 | ||||
| 6940 | hlen = h->ip_hl << 2; | |||
| 6941 | if (hlen < sizeof(struct ip) || hlen > ntohs(h->ip_len)) { | |||
| 6942 | REASON_SET(reason, PFRES_SHORT); | |||
| 6943 | return (PF_DROP); | |||
| 6944 | } | |||
| 6945 | if (hlen != sizeof(struct ip)) { | |||
| 6946 | if (pf_walk_option(pd, h, pd->off + sizeof(struct ip), | |||
| 6947 | pd->off + hlen, reason) != PF_PASS) | |||
| 6948 | return (PF_DROP); | |||
| 6949 | /* header options which contain only padding is fishy */ | |||
| 6950 | if (pd->badopts == 0) | |||
| 6951 | SET(pd->badopts, PF_OPT_OTHER); | |||
| 6952 | } | |||
| 6953 | end = pd->off + ntohs(h->ip_len); | |||
| 6954 | pd->off += hlen; | |||
| 6955 | pd->proto = h->ip_p; | |||
| 6956 | /* IGMP packets have router alert options, allow them */ | |||
| 6957 | if (pd->proto == IPPROTO_IGMP) { | |||
| 6958 | /* | |||
| 6959 | * According to RFC 1112 ttl must be set to 1 in all IGMP | |||
| 6960 | * packets sent to 224.0.0.1 | |||
| 6961 | */ | |||
| 6962 | if ((h->ip_ttl != 1) && | |||
| 6963 | (h->ip_dst.s_addr == INADDR_ALLHOSTS_GROUP)) { | |||
| 6964 | DPFPRINTF(LOG_NOTICE, "Invalid IGMP"); | |||
| 6965 | REASON_SET(reason, PFRES_IPOPTIONS); | |||
| 6966 | return (PF_DROP); | |||
| 6967 | } | |||
| 6968 | CLR(pd->badopts, PF_OPT_ROUTER_ALERT); | |||
| 6969 | } | |||
| 6970 | /* stop walking over non initial fragments */ | |||
| 6971 | if ((h->ip_off & htons(IP_OFFMASK)) != 0) | |||
| 6972 | return (PF_PASS); | |||
| 6973 | ||||
| 6974 | for (hdr_cnt = 0; hdr_cnt < pf_hdr_limit; hdr_cnt++) { | |||
| 6975 | switch (pd->proto) { | |||
| 6976 | case IPPROTO_AH: | |||
| 6977 | /* fragments may be short */ | |||
| 6978 | if ((h->ip_off & htons(IP_MF | IP_OFFMASK)) != 0 && | |||
| 6979 | end < pd->off + sizeof(ext)) | |||
| 6980 | return (PF_PASS); | |||
| 6981 | if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext), | |||
| 6982 | reason, AF_INET)) { | |||
| 6983 | DPFPRINTF(LOG_NOTICE, "IP short exthdr"); | |||
| 6984 | return (PF_DROP); | |||
| 6985 | } | |||
| 6986 | pd->off += (ext.ip6e_len + 2) * 4; | |||
| 6987 | pd->proto = ext.ip6e_nxt; | |||
| 6988 | break; | |||
| 6989 | default: | |||
| 6990 | return (PF_PASS); | |||
| 6991 | } | |||
| 6992 | } | |||
| 6993 | DPFPRINTF(LOG_NOTICE, "IPv4 nested authentication header limit")do { if (pf_status.debug >= (5)) { log(5, "pf: "); addlog( "IPv4 nested authentication header limit"); addlog("\n"); } } while (0); | |||
| 6994 | REASON_SET(reason, PFRES_IPOPTIONS)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (8); if (8 < 17) pf_status.counters[8]++; } } while (0); | |||
| 6995 | return (PF_DROP); | |||
| 6996 | } | |||
| 6997 | ||||
| 6998 | #ifdef INET6 | |||
| 6999 | int | |||
| 7000 | pf_walk_option6(struct pf_pdesc *pd, struct ip6_hdr *h, int off, int end, | |||
| 7001 | u_short *reason) | |||
| 7002 | { | |||
| 7003 | struct ip6_opt opt; | |||
| 7004 | struct ip6_opt_jumbo jumbo; | |||
| 7005 | ||||
| 7006 | while (off < end) { | |||
| 7007 | if (!pf_pull_hdr(pd->m, off, &opt.ip6o_type, | |||
| 7008 | sizeof(opt.ip6o_type), reason, AF_INET6)) { | |||
| 7009 | DPFPRINTF(LOG_NOTICE, "IPv6 short opt type"); | |||
| 7010 | return (PF_DROP); | |||
| 7011 | } | |||
| 7012 | if (opt.ip6o_type == IP6OPT_PAD1) { | |||
| 7013 | off++; | |||
| 7014 | continue; | |||
| 7015 | } | |||
| 7016 | if (!pf_pull_hdr(pd->m, off, &opt, sizeof(opt), | |||
| 7017 | reason, AF_INET6)) { | |||
| 7018 | DPFPRINTF(LOG_NOTICE, "IPv6 short opt"); | |||
| 7019 | return (PF_DROP); | |||
| 7020 | } | |||
| 7021 | if (off + sizeof(opt) + opt.ip6o_len > end) { | |||
| 7022 | DPFPRINTF(LOG_NOTICE, "IPv6 long opt"); | |||
| 7023 | REASON_SET(reason, PFRES_IPOPTIONS); | |||
| 7024 | return (PF_DROP); | |||
| 7025 | } | |||
| 7026 | switch (opt.ip6o_type) { | |||
| 7027 | case IP6OPT_PADN: | |||
| 7028 | break; | |||
| 7029 | case IP6OPT_JUMBO: | |||
| 7030 | SET(pd->badopts, PF_OPT_JUMBO); | |||
| 7031 | if (pd->jumbolen != 0) { | |||
| 7032 | DPFPRINTF(LOG_NOTICE, "IPv6 multiple jumbo"); | |||
| 7033 | REASON_SET(reason, PFRES_IPOPTIONS); | |||
| 7034 | return (PF_DROP); | |||
| 7035 | } | |||
| 7036 | if (ntohs(h->ip6_plen) != 0) { | |||
| 7037 | DPFPRINTF(LOG_NOTICE, "IPv6 bad jumbo plen"); | |||
| 7038 | REASON_SET(reason, PFRES_IPOPTIONS); | |||
| 7039 | return (PF_DROP); | |||
| 7040 | } | |||
| 7041 | if (!pf_pull_hdr(pd->m, off, &jumbo, sizeof(jumbo), | |||
| 7042 | reason, AF_INET6)) { | |||
| 7043 | DPFPRINTF(LOG_NOTICE, "IPv6 short jumbo"); | |||
| 7044 | return (PF_DROP); | |||
| 7045 | } | |||
| 7046 | memcpy(&pd->jumbolen, jumbo.ip6oj_jumbo_len, | |||
| 7047 | sizeof(pd->jumbolen)); | |||
| 7048 | pd->jumbolen = ntohl(pd->jumbolen); | |||
| 7049 | if (pd->jumbolen < IPV6_MAXPACKET) { | |||
| 7050 | DPFPRINTF(LOG_NOTICE, "IPv6 short jumbolen"); | |||
| 7051 | REASON_SET(reason, PFRES_IPOPTIONS); | |||
| 7052 | return (PF_DROP); | |||
| 7053 | } | |||
| 7054 | break; | |||
| 7055 | case IP6OPT_ROUTER_ALERT: | |||
| 7056 | SET(pd->badopts, PF_OPT_ROUTER_ALERT); | |||
| 7057 | break; | |||
| 7058 | default: | |||
| 7059 | SET(pd->badopts, PF_OPT_OTHER); | |||
| 7060 | break; | |||
| 7061 | } | |||
| 7062 | off += sizeof(opt) + opt.ip6o_len; | |||
| 7063 | } | |||
| 7064 | ||||
| 7065 | return (PF_PASS); | |||
| 7066 | } | |||
| 7067 | ||||
| 7068 | int | |||
| 7069 | pf_walk_header6(struct pf_pdesc *pd, struct ip6_hdr *h, u_short *reason) | |||
| 7070 | { | |||
| 7071 | struct ip6_frag frag; | |||
| 7072 | struct ip6_ext ext; | |||
| 7073 | struct icmp6_hdr icmp6; | |||
| 7074 | struct ip6_rthdr rthdr; | |||
| 7075 | u_int32_t end; | |||
| 7076 | int hdr_cnt, fraghdr_cnt = 0, rthdr_cnt = 0; | |||
| 7077 | ||||
| 7078 | pd->off += sizeof(struct ip6_hdr); | |||
| 7079 | end = pd->off + ntohs(h->ip6_plen); | |||
| 7080 | pd->fragoff = pd->extoff = pd->jumbolen = 0; | |||
| 7081 | pd->proto = h->ip6_nxt; | |||
| 7082 | ||||
| 7083 | for (hdr_cnt = 0; hdr_cnt < pf_hdr_limit; hdr_cnt++) { | |||
| 7084 | switch (pd->proto) { | |||
| 7085 | case IPPROTO_ROUTING: | |||
| 7086 | case IPPROTO_DSTOPTS: | |||
| 7087 | SET(pd->badopts, PF_OPT_OTHER); | |||
| 7088 | break; | |||
| 7089 | case IPPROTO_HOPOPTS: | |||
| 7090 | if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext), | |||
| 7091 | reason, AF_INET6)) { | |||
| 7092 | DPFPRINTF(LOG_NOTICE, "IPv6 short exthdr"); | |||
| 7093 | return (PF_DROP); | |||
| 7094 | } | |||
| 7095 | if (pf_walk_option6(pd, h, pd->off + sizeof(ext), | |||
| 7096 | pd->off + (ext.ip6e_len + 1) * 8, reason) | |||
| 7097 | != PF_PASS) | |||
| 7098 | return (PF_DROP); | |||
| 7099 | /* option header which contains only padding is fishy */ | |||
| 7100 | if (pd->badopts == 0) | |||
| 7101 | SET(pd->badopts, PF_OPT_OTHER); | |||
| 7102 | break; | |||
| 7103 | } | |||
| 7104 | switch (pd->proto) { | |||
| 7105 | case IPPROTO_FRAGMENT: | |||
| 7106 | if (fraghdr_cnt++) { | |||
| 7107 | DPFPRINTF(LOG_NOTICE, "IPv6 multiple fragment"); | |||
| 7108 | REASON_SET(reason, PFRES_FRAG); | |||
| 7109 | return (PF_DROP); | |||
| 7110 | } | |||
| 7111 | /* jumbo payload packets cannot be fragmented */ | |||
| 7112 | if (pd->jumbolen != 0) { | |||
| 7113 | DPFPRINTF(LOG_NOTICE, "IPv6 fragmented jumbo")do { if (pf_status.debug >= (5)) { log(5, "pf: "); addlog( "IPv6 fragmented jumbo"); addlog("\n"); } } while (0); | |||
| 7114 | REASON_SET(reason, PFRES_FRAG)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (2); if (2 < 17) pf_status.counters[2]++; } } while (0); | |||
| 7115 | return (PF_DROP); | |||
| 7116 | } | |||
| 7117 | if (!pf_pull_hdr(pd->m, pd->off, &frag, sizeof(frag), | |||
| 7118 | reason, AF_INET624)) { | |||
| 7119 | DPFPRINTF(LOG_NOTICE, "IPv6 short fragment")do { if (pf_status.debug >= (5)) { log(5, "pf: "); addlog( "IPv6 short fragment"); addlog("\n"); } } while (0); | |||
| 7120 | return (PF_DROP); | |||
| 7121 | } | |||
| 7122 | /* stop walking over non initial fragments */ | |||
| 7123 | if (ntohs((frag.ip6f_offlg & IP6F_OFF_MASK)) != 0) { | |||
| 7124 | pd->fragoff = pd->off; | |||
| 7125 | return (PF_PASS); | |||
| 7126 | } | |||
| 7127 | /* RFC6946: reassemble only non atomic fragments */ | |||
| 7128 | if (frag.ip6f_offlg & IP6F_MORE_FRAG) | |||
| 7129 | pd->fragoff = pd->off; | |||
| 7130 | pd->off += sizeof(frag); | |||
| 7131 | pd->proto = frag.ip6f_nxt; | |||
| 7132 | break; | |||
| 7133 | case IPPROTO_ROUTING: | |||
| 7134 | if (rthdr_cnt++) { | |||
| 7135 | DPFPRINTF(LOG_NOTICE, "IPv6 multiple rthdr"); | |||
| 7136 | REASON_SET(reason, PFRES_IPOPTIONS); | |||
| 7137 | return (PF_DROP); | |||
| 7138 | } | |||
| 7139 | /* fragments may be short */ | |||
| 7140 | if (pd->fragoff != 0 && end < pd->off + sizeof(rthdr)) { | |||
| 7141 | pd->off = pd->fragoff; | |||
| 7142 | pd->proto = IPPROTO_FRAGMENT; | |||
| 7143 | return (PF_PASS); | |||
| 7144 | } | |||
| 7145 | if (!pf_pull_hdr(pd->m, pd->off, &rthdr, sizeof(rthdr), | |||
| 7146 | reason, AF_INET6)) { | |||
| 7147 | DPFPRINTF(LOG_NOTICE, "IPv6 short rthdr"); | |||
| 7148 | return (PF_DROP); | |||
| 7149 | } | |||
| 7150 | if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) { | |||
| 7151 | DPFPRINTF(LOG_NOTICE, "IPv6 rthdr0"); | |||
| 7152 | REASON_SET(reason, PFRES_IPOPTIONS); | |||
| 7153 | return (PF_DROP); | |||
| 7154 | } | |||
| 7155 | /* FALLTHROUGH */ | |||
| 7156 | case IPPROTO_HOPOPTS: | |||
| 7157 | /* RFC2460 4.1: Hop-by-Hop only after IPv6 header */ | |||
| 7158 | if (pd->proto == IPPROTO_HOPOPTS && hdr_cnt > 0) { | |||
| 7159 | DPFPRINTF(LOG_NOTICE, "IPv6 hopopts not first"); | |||
| 7160 | REASON_SET(reason, PFRES_IPOPTIONS); | |||
| 7161 | return (PF_DROP); | |||
| 7162 | } | |||
| 7163 | /* FALLTHROUGH */ | |||
| 7164 | case IPPROTO_AH: | |||
| 7165 | case IPPROTO_DSTOPTS: | |||
| 7166 | /* fragments may be short */ | |||
| 7167 | if (pd->fragoff != 0 && end < pd->off + sizeof(ext)) { | |||
| 7168 | pd->off = pd->fragoff; | |||
| 7169 | pd->proto = IPPROTO_FRAGMENT; | |||
| 7170 | return (PF_PASS); | |||
| 7171 | } | |||
| 7172 | if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext), | |||
| 7173 | reason, AF_INET6)) { | |||
| 7174 | DPFPRINTF(LOG_NOTICE, "IPv6 short exthdr"); | |||
| 7175 | return (PF_DROP); | |||
| 7176 | } | |||
| 7177 | /* reassembly needs the ext header before the frag */ | |||
| 7178 | if (pd->fragoff == 0) | |||
| 7179 | pd->extoff = pd->off; | |||
| 7180 | if (pd->proto == IPPROTO_HOPOPTS && pd->fragoff == 0 && | |||
| 7181 | ntohs(h->ip6_plen) == 0 && pd->jumbolen != 0) { | |||
| 7182 | DPFPRINTF(LOG_NOTICE, "IPv6 missing jumbo"); | |||
| 7183 | REASON_SET(reason, PFRES_IPOPTIONS); | |||
| 7184 | return (PF_DROP); | |||
| 7185 | } | |||
| 7186 | if (pd->proto == IPPROTO_AH) | |||
| 7187 | pd->off += (ext.ip6e_len + 2) * 4; | |||
| 7188 | else | |||
| 7189 | pd->off += (ext.ip6e_len + 1) * 8; | |||
| 7190 | pd->proto = ext.ip6e_nxt; | |||
| 7191 | break; | |||
| 7192 | case IPPROTO_ICMPV6: | |||
| 7193 | /* fragments may be short, ignore inner header then */ | |||
| 7194 | if (pd->fragoff != 0 && end < pd->off + sizeof(icmp6)) { | |||
| 7195 | pd->off = pd->fragoff; | |||
| 7196 | pd->proto = IPPROTO_FRAGMENT; | |||
| 7197 | return (PF_PASS); | |||
| 7198 | } | |||
| 7199 | if (!pf_pull_hdr(pd->m, pd->off, &icmp6, sizeof(icmp6), | |||
| 7200 | reason, AF_INET6)) { | |||
| 7201 | DPFPRINTF(LOG_NOTICE, "IPv6 short icmp6hdr"); | |||
| 7202 | return (PF_DROP); | |||
| 7203 | } | |||
| 7204 | /* ICMP multicast packets have router alert options */ | |||
| 7205 | switch (icmp6.icmp6_type) { | |||
| 7206 | case MLD_LISTENER_QUERY: | |||
| 7207 | case MLD_LISTENER_REPORT: | |||
| 7208 | case MLD_LISTENER_DONE: | |||
| 7209 | case MLDV2_LISTENER_REPORT: | |||
| 7210 | /* | |||
| 7211 | * According to RFC 2710 all MLD messages are | |||
| 7212 | * sent with hop-limit (ttl) set to 1, and link | |||
| 7213 | * local source address. If either one is | |||
| 7214 | * missing then MLD message is invalid and | |||
| 7215 | * should be discarded. | |||
| 7216 | */ | |||
| 7217 | if ((h->ip6_hlim != 1) || | |||
| 7218 | !IN6_IS_ADDR_LINKLOCAL(&h->ip6_src)) { | |||
| 7219 | DPFPRINTF(LOG_NOTICE, "Invalid MLD"); | |||
| 7220 | REASON_SET(reason, PFRES_IPOPTIONS); | |||
| 7221 | return (PF_DROP); | |||
| 7222 | } | |||
| 7223 | CLR(pd->badopts, PF_OPT_ROUTER_ALERT); | |||
| 7224 | break; | |||
| 7225 | } | |||
| 7226 | return (PF_PASS); | |||
| 7227 | case IPPROTO_TCP: | |||
| 7228 | case IPPROTO_UDP: | |||
| 7229 | /* fragments may be short, ignore inner header then */ | |||
| 7230 | if (pd->fragoff != 0 && end < pd->off + | |||
| 7231 | (pd->proto == IPPROTO_TCP ? sizeof(struct tcphdr) : | |||
| 7232 | pd->proto == IPPROTO_UDP ? sizeof(struct udphdr) : | |||
| 7233 | sizeof(struct icmp6_hdr))) { | |||
| 7234 | pd->off = pd->fragoff; | |||
| 7235 | pd->proto = IPPROTO_FRAGMENT44; | |||
| 7236 | } | |||
| 7237 | /* FALLTHROUGH */ | |||
| 7238 | default: | |||
| 7239 | return (PF_PASS); | |||
| 7240 | } | |||
| 7241 | } | |||
| 7242 | DPFPRINTF(LOG_NOTICE, "IPv6 nested extension header limit")do { if (pf_status.debug >= (5)) { log(5, "pf: "); addlog( "IPv6 nested extension header limit"); addlog("\n"); } } while (0); | |||
| 7243 | REASON_SET(reason, PFRES_IPOPTIONS)do { if ((void *)(reason) != ((void *)0)) { *(reason) = (8); if (8 < 17) pf_status.counters[8]++; } } while (0); | |||
| 7244 | return (PF_DROP); | |||
| 7245 | } | |||
| 7246 | #endif /* INET6 */ | |||
| 7247 | ||||
| 7248 | u_int16_t | |||
| 7249 | pf_pkt_hash(sa_family_t af, uint8_t proto, | |||
| 7250 | const struct pf_addr *src, const struct pf_addr *dst, | |||
| 7251 | uint16_t sport, uint16_t dport) | |||
| 7252 | { | |||
| 7253 | uint32_t hash; | |||
| 7254 | ||||
| 7255 | hash = src->addr32[0] ^ dst->addr32[0]; | |||
| 7256 | #ifdef INET6 | |||
| 7257 | if (af == AF_INET6) { | |||
| 7258 | hash ^= src->addr32[1] ^ dst->addr32[1]; | |||
| 7259 | hash ^= src->addr32[2] ^ dst->addr32[2]; | |||
| 7260 | hash ^= src->addr32[3] ^ dst->addr32[3]; | |||
| 7261 | } | |||
| 7262 | #endif | |||
| 7263 | ||||
| 7264 | switch (proto) { | |||
| 7265 | case IPPROTO_TCP: | |||
| 7266 | case IPPROTO_UDP: | |||
| 7267 | hash ^= sport ^ dport; | |||
| 7268 | break; | |||
| 7269 | } | |||
| 7270 | ||||
| 7271 | return stoeplitz_n32(hash); | |||
| 7272 | } | |||
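| | /* | |||
| | * Editor's note (sketch): addresses and ports are folded with XOR before | |||
| | * the Toeplitz step, so the hash is direction independent: | |||
| | * | |||
| | * pf_pkt_hash(af, p, src, dst, sp, dp) == pf_pkt_hash(af, p, dst, src, dp, sp) | |||
| | */ | |||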
| 7273 | ||||
| 7274 | int | |||
| 7275 | pf_setup_pdesc(struct pf_pdesc *pd, sa_family_t af, int dir, | |||
| 7276 | struct pfi_kif *kif, struct mbuf *m, u_short *reason) | |||
| 7277 | { | |||
| 7278 | memset(pd, 0, sizeof(*pd)); | |||
| 7279 | pd->dir = dir; | |||
| 7280 | pd->kif = kif; /* kif is NULL when called by pflog */ | |||
| 7281 | pd->m = m; | |||
| 7282 | pd->sidx = (dir == PF_IN) ? 0 : 1; | |||
| 7283 | pd->didx = (dir == PF_IN) ? 1 : 0; | |||
| 7284 | pd->af = pd->naf = af; | |||
| 7285 | pd->rdomain = rtable_l2(pd->m->m_pkthdr.ph_rtableid); | |||
| 7286 | ||||
| 7287 | switch (pd->af) { | |||
| 7288 | case AF_INET: { | |||
| 7289 | struct ip *h; | |||
| 7290 | ||||
| 7291 | /* Check for illegal packets */ | |||
| 7292 | if (pd->m->m_pkthdr.len < (int)sizeof(struct ip)) { | |||
| 7293 | REASON_SET(reason, PFRES_SHORT); | |||
| 7294 | return (PF_DROP); | |||
| 7295 | } | |||
| 7296 | ||||
| 7297 | h = mtod(pd->m, struct ip *); | |||
| 7298 | if (pd->m->m_pkthdr.len < ntohs(h->ip_len)) { | |||
| 7299 | REASON_SET(reason, PFRES_SHORT); | |||
| 7300 | return (PF_DROP); | |||
| 7301 | } | |||
| 7302 | ||||
| 7303 | if (pf_walk_header(pd, h, reason) != PF_PASS) | |||
| 7304 | return (PF_DROP); | |||
| 7305 | ||||
| 7306 | pd->src = (struct pf_addr *)&h->ip_src; | |||
| 7307 | pd->dst = (struct pf_addr *)&h->ip_dst; | |||
| 7308 | pd->tot_len = ntohs(h->ip_len); | |||
| 7309 | pd->tos = h->ip_tos & ~IPTOS_ECN_MASK; | |||
| 7310 | pd->ttl = h->ip_ttl; | |||
| 7311 | pd->virtual_proto = (h->ip_off & htons(IP_MF | IP_OFFMASK)) ? | |||
| 7312 | PF_VPROTO_FRAGMENT : pd->proto; | |||
| 7313 | ||||
| 7314 | break; | |||
| 7315 | } | |||
| 7316 | #ifdef INET6 | |||
| 7317 | case AF_INET6: { | |||
| 7318 | struct ip6_hdr *h; | |||
| 7319 | ||||
| 7320 | /* Check for illegal packets */ | |||
| 7321 | if (pd->m->m_pkthdr.len < (int)sizeof(struct ip6_hdr)) { | |||
| 7322 | REASON_SET(reason, PFRES_SHORT); | |||
| 7323 | return (PF_DROP); | |||
| 7324 | } | |||
| 7325 | ||||
| 7326 | h = mtod(pd->m, struct ip6_hdr *); | |||
| 7327 | if (pd->m->m_pkthdr.len < | |||
| 7328 | sizeof(struct ip6_hdr) + ntohs(h->ip6_plen)) { | |||
| 7329 | REASON_SET(reason, PFRES_SHORT); | |||
| 7330 | return (PF_DROP); | |||
| 7331 | } | |||
| 7332 | ||||
| 7333 | if (pf_walk_header6(pd, h, reason) != PF_PASS) | |||
| 7334 | return (PF_DROP); | |||
| 7335 | ||||
| 7336 | #if 1 | |||
| 7337 | /* | |||
| 7338 | * we do not support jumbogram yet. if we keep going, zero | |||
| 7339 | * ip6_plen will do something bad, so drop the packet for now. | |||
| 7340 | */ | |||
| 7341 | if (pd->jumbolen != 0) { | |||
| 7342 | REASON_SET(reason, PFRES_NORM); | |||
| 7343 | return (PF_DROP); | |||
| 7344 | } | |||
| 7345 | #endif /* 1 */ | |||
| 7346 | ||||
| 7347 | pd->src = (struct pf_addr *)&h->ip6_src; | |||
| 7348 | pd->dst = (struct pf_addr *)&h->ip6_dst; | |||
| 7349 | pd->tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr); | |||
| 7350 | pd->tos = (ntohl(h->ip6_flow) & 0x0fc00000) >> 20; | |||
| 7351 | pd->ttl = h->ip6_hlim; | |||
| 7352 | pd->virtual_proto = (pd->fragoff != 0) ? | |||
| 7353 | PF_VPROTO_FRAGMENT : pd->proto; | |||
| 7354 | ||||
| 7355 | break; | |||
| 7356 | } | |||
| 7357 | #endif /* INET6 */ | |||
| 7358 | default: | |||
| 7359 | panic("pf_setup_pdesc called with illegal af %u", pd->af); | |||
| 7360 | ||||
| 7361 | } | |||
| 7362 | ||||
| 7363 | pf_addrcpy(&pd->nsaddr, pd->src, pd->af); | |||
| 7364 | pf_addrcpy(&pd->ndaddr, pd->dst, pd->af); | |||
| 7365 | ||||
| 7366 | switch (pd->virtual_proto) { | |||
| 7367 | case IPPROTO_TCP: { | |||
| 7368 | struct tcphdr *th = &pd->hdr.tcp; | |||
| 7369 | ||||
| 7370 | if (!pf_pull_hdr(pd->m, pd->off, th, sizeof(*th), | |||
| 7371 | reason, pd->af)) | |||
| 7372 | return (PF_DROP); | |||
| 7373 | pd->hdrlen = sizeof(*th); | |||
| 7374 | if (th->th_dport == 0 || | |||
| 7375 | pd->off + (th->th_off << 2) > pd->tot_len || | |||
| 7376 | (th->th_off << 2) < sizeof(struct tcphdr)) { | |||
| 7377 | REASON_SET(reason, PFRES_SHORT); | |||
| 7378 | return (PF_DROP); | |||
| 7379 | } | |||
| 7380 | pd->p_len = pd->tot_len - pd->off - (th->th_off << 2); | |||
| 7381 | pd->sport = &th->th_sport; | |||
| 7382 | pd->dport = &th->th_dport; | |||
| 7383 | pd->pcksum = &th->th_sum; | |||
| 7384 | break; | |||
| 7385 | } | |||
| 7386 | case IPPROTO_UDP: { | |||
| 7387 | struct udphdr *uh = &pd->hdr.udp; | |||
| 7388 | ||||
| 7389 | if (!pf_pull_hdr(pd->m, pd->off, uh, sizeof(*uh), | |||
| 7390 | reason, pd->af)) | |||
| 7391 | return (PF_DROP); | |||
| 7392 | pd->hdrlen = sizeof(*uh); | |||
| 7393 | if (uh->uh_dport == 0 || | |||
| 7394 | pd->off + ntohs(uh->uh_ulen) > pd->tot_len || | |||
| 7395 | ntohs(uh->uh_ulen) < sizeof(struct udphdr)) { | |||
| 7396 | REASON_SET(reason, PFRES_SHORT); | |||
| 7397 | return (PF_DROP); | |||
| 7398 | } | |||
| 7399 | pd->sport = &uh->uh_sport; | |||
| 7400 | pd->dport = &uh->uh_dport; | |||
| 7401 | pd->pcksum = &uh->uh_sum; | |||
| 7402 | break; | |||
| 7403 | } | |||
| 7404 | case IPPROTO_ICMP: { | |||
| 7405 | if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp, ICMP_MINLEN, | |||
| 7406 | reason, pd->af)) | |||
| 7407 | return (PF_DROP); | |||
| 7408 | pd->hdrlen = ICMP_MINLEN; | |||
| 7409 | if (pd->off + pd->hdrlen > pd->tot_len) { | |||
| 7410 | REASON_SET(reason, PFRES_SHORT); | |||
| 7411 | return (PF_DROP); | |||
| 7412 | } | |||
| 7413 | pd->pcksum = &pd->hdr.icmp.icmp_cksum; | |||
| 7414 | break; | |||
| 7415 | } | |||
| 7416 | #ifdef INET6 | |||
| 7417 | case IPPROTO_ICMPV6: { | |||
| 7418 | size_t icmp_hlen = sizeof(struct icmp6_hdr); | |||
| 7419 | ||||
| 7420 | if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp6, icmp_hlen, | |||
| 7421 | reason, pd->af)) | |||
| 7422 | return (PF_DROP); | |||
| 7423 | /* ICMP headers we look further into to match state */ | |||
| 7424 | switch (pd->hdr.icmp6.icmp6_type) { | |||
| 7425 | case MLD_LISTENER_QUERY: | |||
| 7426 | case MLD_LISTENER_REPORT: | |||
| 7427 | icmp_hlen = sizeof(struct mld_hdr); | |||
| 7428 | break; | |||
| 7429 | case ND_NEIGHBOR_SOLICIT: | |||
| 7430 | case ND_NEIGHBOR_ADVERT: | |||
| 7431 | icmp_hlen = sizeof(struct nd_neighbor_solicit); | |||
| 7432 | /* FALLTHROUGH */ | |||
| 7433 | case ND_ROUTER_SOLICIT: | |||
| 7434 | case ND_ROUTER_ADVERT: | |||
| 7435 | case ND_REDIRECT: | |||
| 7436 | if (pd->ttl != 255) { | |||
| 7437 | REASON_SET(reason, PFRES_NORM); | |||
| 7438 | return (PF_DROP); | |||
| 7439 | } | |||
| 7440 | break; | |||
| 7441 | } | |||
| 7442 | if (icmp_hlen > sizeof(struct icmp6_hdr) && | |||
| 7443 | !pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp6, icmp_hlen, | |||
| 7444 | reason, pd->af)) | |||
| 7445 | return (PF_DROP); | |||
| 7446 | pd->hdrlen = icmp_hlen; | |||
| 7447 | if (pd->off + pd->hdrlen > pd->tot_len) { | |||
| 7448 | REASON_SET(reason, PFRES_SHORT); | |||
| 7449 | return (PF_DROP); | |||
| 7450 | } | |||
| 7451 | pd->pcksum = &pd->hdr.icmp6.icmp6_cksum; | |||
| 7452 | break; | |||
| 7453 | } | |||
| 7454 | #endif /* INET6 */ | |||
| 7455 | } | |||
| 7456 | ||||
| 7457 | if (pd->sport) | |||
| 7458 | pd->osport = pd->nsport = *pd->sport; | |||
| 7459 | if (pd->dport) | |||
| 7460 | pd->odport = pd->ndport = *pd->dport; | |||
| 7461 | ||||
| 7462 | pd->hash = pf_pkt_hash(pd->af, pd->proto, | |||
| 7463 | pd->src, pd->dst, pd->osport, pd->odport); | |||
| 7464 | ||||
| 7465 | return (PF_PASS); | |||
| 7466 | } | |||
| 7467 | ||||
| 7468 | void | |||
| 7469 | pf_counters_inc(int action, struct pf_pdesc *pd, struct pf_state *st, | |||
| 7470 | struct pf_rule *r, struct pf_rule *a) | |||
| 7471 | { | |||
| 7472 | int dirndx; | |||
| 7473 | pd->kif->pfik_bytes[pd->af == AF_INET6][pd->dir == PF_OUT] | |||
| 7474 | [action != PF_PASS] += pd->tot_len; | |||
| 7475 | pd->kif->pfik_packets[pd->af == AF_INET6][pd->dir == PF_OUT] | |||
| 7476 | [action != PF_PASS]++; | |||
| 7477 | ||||
| 7478 | if (action == PF_PASS || action == PF_AFRT || r->action == PF_DROP) { | |||
| 7479 | dirndx = (pd->dir == PF_OUT); | |||
| 7480 | r->packets[dirndx]++; | |||
| 7481 | r->bytes[dirndx] += pd->tot_len; | |||
| 7482 | if (a != NULL) { | |||
| 7483 | a->packets[dirndx]++; | |||
| 7484 | a->bytes[dirndx] += pd->tot_len; | |||
| 7485 | } | |||
| 7486 | if (st != NULL) { | |||
| 7487 | struct pf_rule_item *ri; | |||
| 7488 | struct pf_sn_item *sni; | |||
| 7489 | ||||
| 7490 | SLIST_FOREACH(sni, &st->src_nodes, next) { | |||
| 7491 | sni->sn->packets[dirndx]++; | |||
| 7492 | sni->sn->bytes[dirndx] += pd->tot_len; | |||
| 7493 | } | |||
| 7494 | dirndx = (pd->dir == st->direction) ? 0 : 1; | |||
| 7495 | st->packets[dirndx]++; | |||
| 7496 | st->bytes[dirndx] += pd->tot_len; | |||
| 7497 | ||||
| 7498 | SLIST_FOREACH(ri, &st->match_rules, entry) { | |||
| 7499 | ri->r->packets[dirndx]++; | |||
| 7500 | ri->r->bytes[dirndx] += pd->tot_len; | |||
| 7501 | ||||
| 7502 | if (ri->r->src.addr.type == PF_ADDR_TABLE) | |||
| 7503 | pfr_update_stats(ri->r->src.addr.p.tbl, | |||
| 7504 | &st->key[(st->direction == PF_IN)]-> | |||
| 7505 | addr[(st->direction == PF_OUT)], | |||
| 7506 | pd, ri->r->action, ri->r->src.neg); | |||
| 7507 | if (ri->r->dst.addr.type == PF_ADDR_TABLE) | |||
| 7508 | pfr_update_stats(ri->r->dst.addr.p.tbl, | |||
| 7509 | &st->key[(st->direction == PF_IN)]-> | |||
| 7510 | addr[(st->direction == PF_IN)], | |||
| 7511 | pd, ri->r->action, ri->r->dst.neg); | |||
| 7512 | } | |||
| 7513 | } | |||
| 7514 | if (r->src.addr.type == PF_ADDR_TABLE) | |||
| 7515 | pfr_update_stats(r->src.addr.p.tbl, | |||
| 7516 | (st == NULL) ? pd->src : | |||
| 7517 | &st->key[(st->direction == PF_IN)]-> | |||
| 7518 | addr[(st->direction == PF_OUT)], | |||
| 7519 | pd, r->action, r->src.neg); | |||
| 7520 | if (r->dst.addr.type == PF_ADDR_TABLE) | |||
| 7521 | pfr_update_stats(r->dst.addr.p.tbl, | |||
| 7522 | (st == NULL) ? pd->dst : | |||
| 7523 | &st->key[(st->direction == PF_IN)]-> | |||
| 7524 | addr[(st->direction == PF_IN)], | |||
| 7525 | pd, r->action, r->dst.neg); | |||
| 7526 | } | |||
| 7527 | } | |||
| 7528 | ||||
| 7529 | int | |||
| 7530 | pf_test(sa_family_t af, int fwdir, struct ifnet *ifp, struct mbuf **m0) | |||
| 7531 | { | |||
| 7532 | #if NCARP > 0 | |||
| 7533 | struct ifnet *ifp0; | |||
| 7534 | #endif | |||
| 7535 | struct pfi_kif *kif; | |||
| 7536 | u_short action, reason = 0; | |||
| 7537 | struct pf_rule *a = NULL, *r = &pf_default_rule; | |||
| 7538 | struct pf_state *st = NULL; | |||
| 7539 | struct pf_state_key_cmp key; | |||
| 7540 | struct pf_ruleset *ruleset = NULL; | |||
| 7541 | struct pf_pdesc pd; | |||
| 7542 | int dir = (fwdir == PF_FWD) ? PF_OUT : fwdir; | |||
| 7543 | u_int32_t qid, pqid = 0; | |||
| 7544 | int have_pf_lock = 0; | |||
| 7545 | ||||
| 7546 | if (!pf_status.running) | |||
| 7547 | return (PF_PASS); | |||
| 7548 | ||||
| 7549 | #if NCARP > 0 | |||
| 7550 | if (ifp->if_type == IFT_CARP && | |||
| 7551 | (ifp0 = if_get(ifp->if_carpdevidx)) != NULL) { | |||
| 7552 | kif = (struct pfi_kif *)ifp0->if_pf_kif; | |||
| 7553 | if_put(ifp0); | |||
| 7554 | } else | |||
| 7555 | #endif /* NCARP */ | |||
| 7556 | kif = (struct pfi_kif *)ifp->if_pf_kif; | |||
| 7557 | ||||
| 7558 | if (kif == NULL) { | |||
| 7559 | DPFPRINTF(LOG_ERR, | |||
| 7560 | "%s: kif == NULL, if_xname %s", __func__, ifp->if_xname); | |||
| 7561 | return (PF_DROP); | |||
| 7562 | } | |||
| 7563 | if (kif->pfik_flags & PFI_IFLAG_SKIP) | |||
| 7564 | return (PF_PASS); | |||
| 7565 | ||||
| 7566 | #ifdef DIAGNOSTIC | |||
| 7567 | if (((*m0)->m_flags & M_PKTHDR) == 0) | |||
| 7568 | panic("non-M_PKTHDR is passed to pf_test"); | |||
| 7569 | #endif /* DIAGNOSTIC */ | |||
| 7570 | ||||
| 7571 | if ((*m0)->m_pkthdr.pf.flags & PF_TAG_GENERATED) | |||
| 7572 | return (PF_PASS); | |||
| 7573 | ||||
| 7574 | if ((*m0)->m_pkthdr.pf.flags & PF_TAG_DIVERTED_PACKET) { | |||
| 7575 | (*m0)->m_pkthdr.pf.flags &= ~PF_TAG_DIVERTED_PACKET; | |||
| 7576 | return (PF_PASS); | |||
| 7577 | } | |||
| 7578 | ||||
| 7579 | if ((*m0)->m_pkthdr.pf.flags & PF_TAG_REFRAGMENTED) { | |||
| 7580 | (*m0)->m_pkthdr.pf.flags &= ~PF_TAG_REFRAGMENTED; | |||
| 7581 | return (PF_PASS); | |||
| 7582 | } | |||
| 7583 | ||||
| 7584 | action = pf_setup_pdesc(&pd, af, dir, kif, *m0, &reason); | |||
| 7585 | if (action != PF_PASS) { | |||
| 7586 | #if NPFLOG > 0 | |||
| 7587 | pd.pflog |= PF_LOG_FORCE; | |||
| 7588 | #endif /* NPFLOG > 0 */ | |||
| 7589 | goto done; | |||
| 7590 | } | |||
| 7591 | ||||
| 7592 | /* packet normalization and reassembly */ | |||
| 7593 | switch (pd.af) { | |||
| 7594 | case AF_INET: | |||
| 7595 | action = pf_normalize_ip(&pd, &reason); | |||
| 7596 | break; | |||
| 7597 | #ifdef INET6 | |||
| 7598 | case AF_INET6: | |||
| 7599 | action = pf_normalize_ip6(&pd, &reason); | |||
| 7600 | break; | |||
| 7601 | #endif /* INET6 */ | |||
| 7602 | } | |||
| 7603 | *m0 = pd.m; | |||
| 7604 | /* if packet sits in reassembly queue, return without error */ | |||
| 7605 | if (pd.m == NULL) | |||
| 7606 | return PF_PASS; | |||
| 7607 | ||||
| 7608 | if (action != PF_PASS) { | |||
| 7609 | #if NPFLOG > 0 | |||
| 7610 | pd.pflog |= PF_LOG_FORCE; | |||
| 7611 | #endif /* NPFLOG > 0 */ | |||
| 7612 | goto done; | |||
| 7613 | } | |||
| 7614 | ||||
| 7615 | /* if packet has been reassembled, update packet description */ | |||
| 7616 | if (pf_status.reass && pd.virtual_proto == PF_VPROTO_FRAGMENT) { | |||
| 7617 | action = pf_setup_pdesc(&pd, af, dir, kif, pd.m, &reason); | |||
| 7618 | if (action != PF_PASS) { | |||
| 7619 | #if NPFLOG > 0 | |||
| 7620 | pd.pflog |= PF_LOG_FORCE; | |||
| 7621 | #endif /* NPFLOG > 0 */ | |||
| 7622 | goto done; | |||
| 7623 | } | |||
| 7624 | } | |||
| 7625 | pd.m->m_pkthdr.pf.flags |= PF_TAG_PROCESSED; | |||
| 7626 | ||||
| 7627 | /* | |||
| 7628 | * Avoid pcb-lookups from the forwarding path. They should never | |||
| 7629 | * match and would cause MP locking problems. | |||
| 7630 | */ | |||
| 7631 | if (fwdir == PF_FWD) { | |||
| 7632 | pd.lookup.done = -1; | |||
| 7633 | pd.lookup.uid = -1; | |||
| 7634 | pd.lookup.gid = -1; | |||
| 7635 | pd.lookup.pid = NO_PID(99999 +1); | |||
| 7636 | } | |||
| 7637 | ||||
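	/*
	 * Dispatch on the virtual protocol: fragments that normalization
	 * could not reassemble, ICMP and ICMPv6 (which may refer to states
	 * of other protocols), and everything else through the generic
	 * state lookup below.
	 */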
	switch (pd.virtual_proto) {

	case PF_VPROTO_FRAGMENT: {
		/*
		 * handle fragments that aren't reassembled by
		 * normalization
		 */
		PF_LOCK();
		have_pf_lock = 1;
		action = pf_test_rule(&pd, &r, &st, &a, &ruleset, &reason);
		st = pf_state_ref(st);
		if (action != PF_PASS)
			REASON_SET(&reason, PFRES_FRAG);
		break;
	}

	case IPPROTO_ICMP: {
		if (pd.af != AF_INET) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_NORM);
			DPFPRINTF(LOG_NOTICE,
			    "dropping IPv6 packet with ICMPv4 payload");
			break;
		}
		PF_STATE_ENTER_READ();
		action = pf_test_state_icmp(&pd, &st, &reason);
		st = pf_state_ref(st);
		PF_STATE_EXIT_READ();
		if (action == PF_PASS || action == PF_AFRT) {
#if NPFSYNC > 0
			pfsync_update_state(st);
#endif /* NPFSYNC > 0 */
			r = st->rule.ptr;
			a = st->anchor.ptr;
#if NPFLOG > 0
			pd.pflog |= st->log;
#endif /* NPFLOG > 0 */
		} else if (st == NULL) {
			PF_LOCK();
			have_pf_lock = 1;
			action = pf_test_rule(&pd, &r, &st, &a, &ruleset,
			    &reason);
			st = pf_state_ref(st);
		}
		break;
	}

#ifdef INET6
	case IPPROTO_ICMPV6: {
		if (pd.af != AF_INET6) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_NORM);
			DPFPRINTF(LOG_NOTICE,
			    "dropping IPv4 packet with ICMPv6 payload");
			break;
		}
		PF_STATE_ENTER_READ();
		action = pf_test_state_icmp(&pd, &st, &reason);
		st = pf_state_ref(st);
		PF_STATE_EXIT_READ();
		if (action == PF_PASS || action == PF_AFRT) {
#if NPFSYNC > 0
			pfsync_update_state(st);
#endif /* NPFSYNC > 0 */
			r = st->rule.ptr;
			a = st->anchor.ptr;
#if NPFLOG > 0
			pd.pflog |= st->log;
#endif /* NPFLOG > 0 */
		} else if (st == NULL) {
			PF_LOCK();
			have_pf_lock = 1;
			action = pf_test_rule(&pd, &r, &st, &a, &ruleset,
			    &reason);
			st = pf_state_ref(st);
		}
		break;
	}
#endif /* INET6 */

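	/*
	 * All remaining protocols (TCP, UDP, ...) take the generic state
	 * table lookup below; TCP additionally gets syncookie and
	 * normalization handling first.
	 */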
	default:
		if (pd.virtual_proto == IPPROTO_TCP) {
			if (pd.dir == PF_IN && (pd.hdr.tcp.th_flags &
			    (TH_SYN|TH_ACK)) == TH_SYN &&
			    pf_synflood_check(&pd)) {
				PF_LOCK();
				have_pf_lock = 1;
				pf_syncookie_send(&pd);
				action = PF_DROP;
				break;
			}
			if ((pd.hdr.tcp.th_flags & TH_ACK) && pd.p_len == 0)
				pqid = 1;
			action = pf_normalize_tcp(&pd);
			if (action == PF_DROP)
				break;
		}

		key.af = pd.af;
		key.proto = pd.virtual_proto;
		key.rdomain = pd.rdomain;
		pf_addrcpy(&key.addr[pd.sidx], pd.src, key.af);
		pf_addrcpy(&key.addr[pd.didx], pd.dst, key.af);
		key.port[pd.sidx] = pd.osport;
		key.port[pd.didx] = pd.odport;
		key.hash = pd.hash;

		PF_STATE_ENTER_READ();
		action = pf_find_state(&pd, &key, &st);
		st = pf_state_ref(st);
		PF_STATE_EXIT_READ();

		/* check for syncookies if tcp ack and no active state */
		if (pd.dir == PF_IN && pd.virtual_proto == IPPROTO_TCP &&
		    (st == NULL || (st->src.state >= TCPS_FIN_WAIT_2 &&
		    st->dst.state >= TCPS_FIN_WAIT_2)) &&
		    (pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) == TH_ACK &&
		    pf_syncookie_validate(&pd)) {
			struct mbuf *msyn = pf_syncookie_recreate_syn(&pd);
			if (msyn) {
				action = pf_test(af, fwdir, ifp, &msyn);
				m_freem(msyn);
				if (action == PF_PASS || action == PF_AFRT) {
					PF_STATE_ENTER_READ();
					pf_state_unref(st);
					action = pf_find_state(&pd, &key, &st);
					st = pf_state_ref(st);
					PF_STATE_EXIT_READ();
					if (st == NULL)
						return (PF_DROP);
					st->src.seqhi = st->dst.seqhi =
					    ntohl(pd.hdr.tcp.th_ack) - 1;
					st->src.seqlo =
					    ntohl(pd.hdr.tcp.th_seq) - 1;
					pf_set_protostate(st, PF_PEER_SRC,
					    PF_TCPS_PROXY_DST);
				}
			} else
				action = PF_DROP;
		}

		if (action == PF_MATCH)
			action = pf_test_state(&pd, &st, &reason);

		if (action == PF_PASS || action == PF_AFRT) {
#if NPFSYNC > 0
			pfsync_update_state(st);
#endif /* NPFSYNC > 0 */
			r = st->rule.ptr;
			a = st->anchor.ptr;
#if NPFLOG > 0
			pd.pflog |= st->log;
#endif /* NPFLOG > 0 */
		} else if (st == NULL) {
			PF_LOCK();
			have_pf_lock = 1;
			action = pf_test_rule(&pd, &r, &st, &a, &ruleset,
			    &reason);
			st = pf_state_ref(st);
		}

		if (pd.virtual_proto == IPPROTO_TCP) {
			if (st) {
				if (st->max_mss)
					pf_normalize_mss(&pd, st->max_mss);
			} else if (r->max_mss)
				pf_normalize_mss(&pd, r->max_mss);
		}

		break;
	}

	if (have_pf_lock != 0)
		PF_UNLOCK();

	/*
	 * At the moment, we rely on NET_LOCK() to prevent removal of items
	 * we've collected above ('r', 'anchor' and 'ruleset').  They'll have
	 * to be refcounted when NET_LOCK() is gone.
	 */

done:
	if (action != PF_DROP) {
		if (st) {
			/* The non-state case is handled in pf_test_rule() */
			if (action == PF_PASS && pd.badopts != 0 &&
			    !(st->state_flags & PFSTATE_ALLOWOPTS)) {
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
#if NPFLOG > 0
				pd.pflog |= PF_LOG_FORCE;
#endif /* NPFLOG > 0 */
				DPFPRINTF(LOG_NOTICE, "dropping packet with "
				    "ip/ipv6 options in pf_test()");
			}

			pf_scrub(pd.m, st->state_flags, pd.af, st->min_ttl,
			    st->set_tos);
			pf_tag_packet(pd.m, st->tag, st->rtableid[pd.didx]);
			if (pqid || (pd.tos & IPTOS_LOWDELAY)) {
				qid = st->pqid;
				if (st->state_flags & PFSTATE_SETPRIO) {
					pd.m->m_pkthdr.pf.prio =
					    st->set_prio[1];
				}
			} else {
				qid = st->qid;
				if (st->state_flags & PFSTATE_SETPRIO) {
					pd.m->m_pkthdr.pf.prio =
					    st->set_prio[0];
				}
			}
			pd.m->m_pkthdr.pf.delay = st->delay;
		} else {
			pf_scrub(pd.m, r->scrub_flags, pd.af, r->min_ttl,
			    r->set_tos);
			if (pqid || (pd.tos & IPTOS_LOWDELAY)) {
				qid = r->pqid;
				if (r->scrub_flags & PFSTATE_SETPRIO)
					pd.m->m_pkthdr.pf.prio = r->set_prio[1];
			} else {
				qid = r->qid;
				if (r->scrub_flags & PFSTATE_SETPRIO)
					pd.m->m_pkthdr.pf.prio = r->set_prio[0];
			}
			pd.m->m_pkthdr.pf.delay = r->delay;
		}
	}

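	/*
	 * Attach the queue id chosen above to the mbuf so the outbound
	 * interface's queueing discipline can classify the packet; pqid
	 * selects the secondary queue a rule may name for lowdelay and
	 * pure-ACK traffic.
	 */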
	if (action == PF_PASS && qid)
		pd.m->m_pkthdr.pf.qid = qid;
	if (pd.dir == PF_IN && st && st->key[PF_SK_STACK])
		pf_mbuf_link_state_key(pd.m, st->key[PF_SK_STACK]);
	if (pd.dir == PF_OUT && st && st->key[PF_SK_STACK])
		pf_state_key_link_inpcb(st->key[PF_SK_STACK],
		    pd.m->m_pkthdr.pf.inp);

	if (st != NULL && !ISSET(pd.m->m_pkthdr.csum_flags, M_FLOWID)) {
		pd.m->m_pkthdr.ph_flowid = st->key[PF_SK_WIRE]->hash;
		SET(pd.m->m_pkthdr.csum_flags, M_FLOWID);
	}

	/*
	 * connections redirected to loopback should not match sockets
	 * bound specifically to loopback due to security implications,
	 * see in_pcblookup_listen().
	 */
	if (pd.destchg)
		if ((pd.af == AF_INET && (ntohl(pd.dst->v4.s_addr) >>
		    IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) ||
		    (pd.af == AF_INET6 && IN6_IS_ADDR_LOOPBACK(&pd.dst->v6)))
			pd.m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
	/* We need to redo the route lookup on outgoing routes. */
	if (pd.destchg && pd.dir == PF_OUT)
		pd.m->m_pkthdr.pf.flags |= PF_TAG_REROUTE;

	if (pd.dir == PF_IN && action == PF_PASS &&
	    (r->divert.type == PF_DIVERT_TO ||
	    r->divert.type == PF_DIVERT_REPLY)) {
		struct pf_divert *divert;

		if ((divert = pf_get_divert(pd.m))) {
			pd.m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
			divert->addr = r->divert.addr;
			divert->port = r->divert.port;
			divert->rdomain = pd.rdomain;
			divert->type = r->divert.type;
		}
	}

	if (action == PF_PASS && r->divert.type == PF_DIVERT_PACKET)
		action = PF_DIVERT;

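	/*
	 * Log to pflog(4) when pf itself forced logging, the matching rule
	 * logs all packets, or any of the state's match rules does.
	 */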
#if NPFLOG > 0
	if (pd.pflog) {
		struct pf_rule_item *ri;

		if (pd.pflog & PF_LOG_FORCE || r->log & PF_LOG_ALL)
			pflog_packet(&pd, reason, r, a, ruleset, NULL);
		if (st) {
			SLIST_FOREACH(ri, &st->match_rules, entry)
				if (ri->r->log & PF_LOG_ALL)
					pflog_packet(&pd, reason, ri->r, a,
					    ruleset, NULL);
		}
	}
#endif /* NPFLOG > 0 */

	pf_counters_inc(action, &pd, st, r, a);

	switch (action) {
	case PF_SYNPROXY_DROP:
		m_freem(pd.m);
		/* FALLTHROUGH */
	case PF_DEFER:
		pd.m = NULL;
		action = PF_PASS;
		break;
	case PF_DIVERT:
		switch (pd.af) {
		case AF_INET:
			divert_packet(pd.m, pd.dir, r->divert.port);
			pd.m = NULL;
			break;
#ifdef INET6
		case AF_INET6:
			divert6_packet(pd.m, pd.dir, r->divert.port);
			pd.m = NULL;
			break;
#endif /* INET6 */
		}
		action = PF_PASS;
		break;
#ifdef INET6
	case PF_AFRT:
		if (pf_translate_af(&pd)) {
			action = PF_DROP;
			break;
		}
		pd.m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
		switch (pd.naf) {
		case AF_INET:
			if (pd.dir == PF_IN) {
				if (ipforwarding == 0) {
					ipstat_inc(ips_cantforward);
					action = PF_DROP;
					break;
				}
				ip_forward(pd.m, ifp, NULL, 1);
			} else
				ip_output(pd.m, NULL, NULL, 0, NULL, NULL, 0);
			break;
		case AF_INET6:
			if (pd.dir == PF_IN) {
				if (ip6_forwarding == 0) {
					ip6stat_inc(ip6s_cantforward);
					action = PF_DROP;
					break;
				}
				ip6_forward(pd.m, NULL, 1);
			} else
				ip6_output(pd.m, NULL, NULL, 0, NULL, NULL);
			break;
		}
		if (action != PF_DROP) {
			pd.m = NULL;
			action = PF_PASS;
		}
		break;
#endif /* INET6 */
	case PF_DROP:
		m_freem(pd.m);
		pd.m = NULL;
		break;
	default:
		if (st && st->rt) {
			switch (pd.af) {
			case AF_INET:
				pf_route(&pd, st);
				break;
#ifdef INET6
			case AF_INET6:
				pf_route6(&pd, st);
				break;
#endif /* INET6 */
			}
		}
		break;
	}

#ifdef INET6
	/* if reassembled packet passed, create new fragments */
	if (pf_status.reass && action == PF_PASS && pd.m && fwdir == PF_FWD &&
	    pd.af == AF_INET6) {
		struct m_tag *mtag;

		if ((mtag = m_tag_find(pd.m, PACKET_TAG_PF_REASSEMBLED, NULL)))
			action = pf_refragment6(&pd.m, mtag, NULL, NULL, NULL);
	}
#endif /* INET6 */
	if (st && action != PF_DROP) {
		if (!st->if_index_in && dir == PF_IN)
			st->if_index_in = ifp->if_index;
		else if (!st->if_index_out && dir == PF_OUT)
			st->if_index_out = ifp->if_index;
	}

	*m0 = pd.m;

	pf_state_unref(st);

	return (action);
}

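/*
 * pf_ouraddr() lets the stack ask whether pf already knows that this packet
 * is destined to the local host: 1 if the packet was diverted or its state
 * key is linked to a local socket, -1 if pf cannot tell and the normal
 * address lookup has to decide.
 */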
int
pf_ouraddr(struct mbuf *m)
{
	struct pf_state_key *sk;

	if (m->m_pkthdr.pf.flags & PF_TAG_DIVERTED)
		return (1);

	sk = m->m_pkthdr.pf.statekey;
	if (sk != NULL) {
		if (READ_ONCE(sk->sk_inp) != NULL)
			return (1);
	}

	return (-1);
}

/*
 * must be called whenever any addressing information such as
 * address, port, protocol has changed
 */
void
pf_pkt_addr_changed(struct mbuf *m)
{
	pf_mbuf_unlink_state_key(m);
	pf_mbuf_unlink_inpcb(m);
}

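/*
 * The helpers below cache the result of socket lookups: a state key may be
 * linked to an inpcb and vice versa, so pf and the stack can skip repeated
 * in_pcblookup() work for packets belonging to the same connection.
 */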
struct inpcb *
pf_inp_lookup(struct mbuf *m)
{
	struct inpcb *inp = NULL;
	struct pf_state_key *sk = m->m_pkthdr.pf.statekey;

	if (!pf_state_key_isvalid(sk))
		pf_mbuf_unlink_state_key(m);
	else if (READ_ONCE(sk->sk_inp) != NULL) {
		mtx_enter(&pf_inp_mtx);
		inp = in_pcbref(sk->sk_inp);
		mtx_leave(&pf_inp_mtx);
	}

	return (inp);
}

void
pf_inp_link(struct mbuf *m, struct inpcb *inp)
{
	struct pf_state_key *sk = m->m_pkthdr.pf.statekey;

	if (!pf_state_key_isvalid(sk)) {
		pf_mbuf_unlink_state_key(m);
		return;
	}

	/*
	 * we don't need to grab PF-lock here. At worst case we link inp to
	 * state, which might be just being marked as deleted by another
	 * thread.
	 */
	pf_state_key_link_inpcb(sk, inp);

	/* The statekey has finished finding the inp, it is no longer needed. */
	pf_mbuf_unlink_state_key(m);
}

void
pf_inp_unlink(struct inpcb *inp)
{
	struct pf_state_key *sk;

	if (READ_ONCE(inp->inp_pf_sk) == NULL)
		return;

	mtx_enter(&pf_inp_mtx);
	sk = inp->inp_pf_sk;
	if (sk == NULL) {
		mtx_leave(&pf_inp_mtx);
		return;
	}
	KASSERT(sk->sk_inp == inp);
	sk->sk_inp = NULL;
	inp->inp_pf_sk = NULL;
	mtx_leave(&pf_inp_mtx);

	pf_state_key_unref(sk);
	in_pcbunref(inp);
}

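/*
 * The state keys for the two directions of a connection are linked to each
 * other, so a lookup that found one direction can reach the reverse key
 * without searching the state table again.
 */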
void
pf_state_key_link_reverse(struct pf_state_key *sk, struct pf_state_key *skrev)
{
	struct pf_state_key *old_reverse;

	old_reverse = atomic_cas_ptr(&sk->sk_reverse, NULL, skrev);
	if (old_reverse != NULL)
		KASSERT(old_reverse == skrev);
	else {
		pf_state_key_ref(skrev);

		/*
		 * NOTE: if sk == skrev, then KASSERT() below holds true, we
		 * still want to grab a reference in such case, because
		 * pf_state_key_unlink_reverse() does not check whether keys
		 * are identical or not.
		 */
		old_reverse = atomic_cas_ptr(&skrev->sk_reverse, NULL, sk);
		if (old_reverse != NULL)
			KASSERT(old_reverse == sk);

		pf_state_key_ref(sk);
	}
}

#if NPFLOG > 0
void
pf_log_matches(struct pf_pdesc *pd, struct pf_rule *rm, struct pf_rule *am,
    struct pf_ruleset *ruleset, struct pf_rule_slist *matchrules)
{
	struct pf_rule_item *ri;

	/* if this is the log(matches) rule, packet has been logged already */
	if (rm->log & PF_LOG_MATCHES)
		return;

	SLIST_FOREACH(ri, matchrules, entry)
		if (ri->r->log & PF_LOG_MATCHES)
			pflog_packet(pd, PFRES_MATCH, rm, am, ruleset, ri->r);
}
#endif /* NPFLOG > 0 */

struct pf_state_key *
pf_state_key_ref(struct pf_state_key *sk)
{
	if (sk != NULL)
		PF_REF_TAKE(sk->sk_refcnt);

	return (sk);
}

void
pf_state_key_unref(struct pf_state_key *sk)
{
	if (PF_REF_RELE(sk->sk_refcnt)) {
		/* state key must be removed from tree */
		KASSERT(!pf_state_key_isvalid(sk));
		/* state key must be unlinked from reverse key */
		KASSERT(sk->sk_reverse == NULL);
		/* state key must be unlinked from socket */
		KASSERT(sk->sk_inp == NULL);
		pool_put(&pf_state_key_pl, sk);
	}
}

int
pf_state_key_isvalid(struct pf_state_key *sk)
{
	return ((sk != NULL) && (sk->sk_removed == 0));
}

void
pf_mbuf_link_state_key(struct mbuf *m, struct pf_state_key *sk)
{
	KASSERT(m->m_pkthdr.pf.statekey == NULL);
	m->m_pkthdr.pf.statekey = pf_state_key_ref(sk);
}

void
pf_mbuf_unlink_state_key(struct mbuf *m)
{
	struct pf_state_key *sk = m->m_pkthdr.pf.statekey;

	if (sk != NULL) {
		m->m_pkthdr.pf.statekey = NULL;
		pf_state_key_unref(sk);
	}
}

void
pf_mbuf_link_inpcb(struct mbuf *m, struct inpcb *inp)
{
	KASSERT(m->m_pkthdr.pf.inp == NULL);
	m->m_pkthdr.pf.inp = in_pcbref(inp);
}

void
pf_mbuf_unlink_inpcb(struct mbuf *m)
{
	struct inpcb *inp = m->m_pkthdr.pf.inp;

	if (inp != NULL) {
		m->m_pkthdr.pf.inp = NULL;
		in_pcbunref(inp);
	}
}

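/*
 * Linking and unlinking of a state key and a socket is serialized by
 * pf_inp_mtx; each side holds a reference on the object it points to.
 */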
void
pf_state_key_link_inpcb(struct pf_state_key *sk, struct inpcb *inp)
{
	if (inp == NULL || READ_ONCE(sk->sk_inp) != NULL)
		return;

	mtx_enter(&pf_inp_mtx);
	if (inp->inp_pf_sk != NULL || sk->sk_inp != NULL) {
		mtx_leave(&pf_inp_mtx);
		return;
	}
	sk->sk_inp = in_pcbref(inp);
	inp->inp_pf_sk = pf_state_key_ref(sk);
	mtx_leave(&pf_inp_mtx);
}

void
pf_state_key_unlink_inpcb(struct pf_state_key *sk)
{
	struct inpcb *inp;

	if (READ_ONCE(sk->sk_inp) == NULL)
		return;

	mtx_enter(&pf_inp_mtx);
	inp = sk->sk_inp;
	if (inp == NULL) {
		mtx_leave(&pf_inp_mtx);
		return;
	}
	KASSERT(inp->inp_pf_sk == sk);
	sk->sk_inp = NULL;
	inp->inp_pf_sk = NULL;
	mtx_leave(&pf_inp_mtx);

	pf_state_key_unref(sk);
	in_pcbunref(inp);
}

void
pf_state_key_unlink_reverse(struct pf_state_key *sk)
{
	struct pf_state_key *skrev = sk->sk_reverse;

	/* Note that sk and skrev may be equal, then we unref twice. */
	if (skrev != NULL) {
		KASSERT(skrev->sk_reverse == sk);
		sk->sk_reverse = NULL;
		skrev->sk_reverse = NULL;
		pf_state_key_unref(skrev);
		pf_state_key_unref(sk);
	}
}

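/*
 * pf states are reference counted as well; both helpers tolerate a NULL
 * argument so callers can take and drop references unconditionally.
 */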
struct pf_state *
pf_state_ref(struct pf_state *st)
{
	if (st != NULL)
		PF_REF_TAKE(st->refcnt);
	return (st);
}

void
pf_state_unref(struct pf_state *st)
{
	if ((st != NULL) && PF_REF_RELE(st->refcnt)) {
		/* never inserted or removed */
#if NPFSYNC > 0
		KASSERT((TAILQ_NEXT(st, sync_list) == NULL) ||
		    ((TAILQ_NEXT(st, sync_list) == _Q_INVALID) &&
		    (st->sync_state >= PFSYNC_S_NONE)));
#endif /* NPFSYNC */
		KASSERT((TAILQ_NEXT(st, entry_list) == NULL) ||
		    (TAILQ_NEXT(st, entry_list) == _Q_INVALID));

		pf_state_key_unref(st->key[PF_SK_WIRE]);
		pf_state_key_unref(st->key[PF_SK_STACK]);

		pool_put(&pf_state_pl, st);
	}
}

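/*
 * "set delay" support: a packet is not transmitted immediately but parked
 * in a pf_pktdelay and handed back to the interface output queue by
 * pf_pktenqueue_delayed() once the per-rule delay has expired.
 */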
int
pf_delay_pkt(struct mbuf *m, u_int ifidx)
{
	struct pf_pktdelay *pdy;

	if ((pdy = pool_get(&pf_pktdelay_pl, PR_NOWAIT)) == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	pdy->ifidx = ifidx;
	pdy->m = m;
	timeout_set(&pdy->to, pf_pktenqueue_delayed, pdy);
	timeout_add_msec(&pdy->to, m->m_pkthdr.pf.delay);
	m->m_pkthdr.pf.delay = 0;
	return (0);
}

void
pf_pktenqueue_delayed(void *arg)
{
	struct pf_pktdelay *pdy = arg;
	struct ifnet *ifp;

	ifp = if_get(pdy->ifidx);
	if (ifp != NULL) {
		if_enqueue(ifp, pdy->m);
		if_put(ifp);
	} else
		m_freem(pdy->m);

	pool_put(&pf_pktdelay_pl, pdy);
}