Bug Summary

File:net/if_gre.c
Warning:line 3249, column 41
The right operand of '>>' is a garbage value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_gre.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I 
/usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/net/if_gre.c
1/* $OpenBSD: if_gre.c,v 1.178 2023/12/23 10:52:54 bluhm Exp $ */
2/* $NetBSD: if_gre.c,v 1.9 1999/10/25 19:18:11 drochner Exp $ */
3
4/*
5 * Copyright (c) 1998 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Heiko W.Rupp <hwr@pilhuhn.de>
10 *
11 * IPv6-over-GRE contributed by Gert Doering <gert@greenie.muc.de>
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/*
36 * Encapsulate L3 protocols into IP, per RFC 1701 and 1702.
37 * See gre(4) for more details.
38 * Also supported: IP in IP encapsulation (proto 55) per RFC 2004.
39 */
40
41#include "bpfilter.h"
42#include "pf.h"
43
44#include <sys/param.h>
45#include <sys/mbuf.h>
46#include <sys/socket.h>
47#include <sys/sockio.h>
48#include <sys/kernel.h>
49#include <sys/systm.h>
50#include <sys/errno.h>
51#include <sys/timeout.h>
52#include <sys/queue.h>
53#include <sys/tree.h>
54#include <sys/pool.h>
55#include <sys/rwlock.h>
56
57#include <crypto/siphash.h>
58
59#include <net/if.h>
60#include <net/if_var.h>
61#include <net/if_types.h>
62#include <net/if_media.h>
63#include <net/route.h>
64
65#include <netinet/in.h>
66#include <netinet/in_var.h>
67#include <netinet/if_ether.h>
68#include <netinet/ip.h>
69#include <netinet/ip_var.h>
70#include <netinet/ip_ecn.h>
71
72#ifdef INET61
73#include <netinet/ip6.h>
74#include <netinet6/ip6_var.h>
75#include <netinet6/in6_var.h>
76#endif
77
78#ifdef PIPEX1
79#include <net/pipex.h>
80#endif
81
82#ifdef MPLS1
83#include <netmpls/mpls.h>
84#endif /* MPLS */
85
86#if NBPFILTER1 > 0
87#include <net/bpf.h>
88#endif
89
90#if NPF1 > 0
91#include <net/pfvar.h>
92#endif
93
94#include <net/if_gre.h>
95
96#include <netinet/ip_gre.h>
97#include <sys/sysctl.h>
98
99/* for nvgre bridge shizz */
100#include <net/if_bridge.h>
101#include <net/if_etherbridge.h>
102
103/*
104 * packet formats
105 */
106struct gre_header {
107 uint16_t gre_flags;
108#define GRE_CP0x8000 0x8000 /* Checksum Present */
109#define GRE_KP0x2000 0x2000 /* Key Present */
110#define GRE_SP0x1000 0x1000 /* Sequence Present */
111
112#define GRE_VERS_MASK0x0007 0x0007
113#define GRE_VERS_00x0000 0x0000
114#define GRE_VERS_10x0001 0x0001
115
116 uint16_t gre_proto;
117} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
118
119struct gre_h_cksum {
120 uint16_t gre_cksum;
121 uint16_t gre_reserved1;
122} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
123
124struct gre_h_key {
125 uint32_t gre_key;
126} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
127
128#define GRE_EOIP0x6400 0x6400
129
130struct gre_h_key_eoip {
131 uint16_t eoip_len; /* network order */
132 uint16_t eoip_tunnel_id; /* little endian */
133} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
134
135#define NVGRE_VSID_RES_MIN0x000000 0x000000 /* reserved for future use */
136#define NVGRE_VSID_RES_MAX0x000fff 0x000fff
137#define NVGRE_VSID_NVE2NVE0xffffff 0xffffff /* vendor specific NVE-to-NVE comms */
138
139struct gre_h_seq {
140 uint32_t gre_seq;
141} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
142
143struct gre_h_wccp {
144 uint8_t wccp_flags;
145 uint8_t service_id;
146 uint8_t alt_bucket;
147 uint8_t pri_bucket;
148} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
149
150#define GRE_WCCP0x883e 0x883e
151
152#define GRE_HDRLEN(sizeof(struct ip) + sizeof(struct gre_header)) (sizeof(struct ip) + sizeof(struct gre_header))
153
154/*
155 * GRE tunnel metadata
156 */
157
158#define GRE_KA_NONE0 0
159#define GRE_KA_DOWN1 1
160#define GRE_KA_HOLD2 2
161#define GRE_KA_UP3 3
162
163union gre_addr {
164 struct in_addr in4;
165 struct in6_addr in6;
166};
167
168static inline int
169 gre_ip_cmp(int, const union gre_addr *,
170 const union gre_addr *);
171
172#define GRE_KEY_MIN0x00000000U 0x00000000U
173#define GRE_KEY_MAX0xffffffffU 0xffffffffU
174#define GRE_KEY_SHIFT0 0
175
176#define GRE_KEY_ENTROPY_MIN0x00000000U 0x00000000U
177#define GRE_KEY_ENTROPY_MAX0x00ffffffU 0x00ffffffU
178#define GRE_KEY_ENTROPY_SHIFT8 8
179
180struct gre_tunnel {
181 uint32_t t_key_mask;
182#define GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
htonl(0x00000000U)(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
183#define GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
htonl(0xffffff00U)(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
184#define GRE_KEY_MASK(__uint32_t)(__builtin_constant_p(0xffffffffU) ? (__uint32_t)
(((__uint32_t)(0xffffffffU) & 0xff) << 24 | ((__uint32_t
)(0xffffffffU) & 0xff00) << 8 | ((__uint32_t)(0xffffffffU
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffffffU) &
0xff000000) >> 24) : __swap32md(0xffffffffU))
htonl(0xffffffffU)(__uint32_t)(__builtin_constant_p(0xffffffffU) ? (__uint32_t)
(((__uint32_t)(0xffffffffU) & 0xff) << 24 | ((__uint32_t
)(0xffffffffU) & 0xff00) << 8 | ((__uint32_t)(0xffffffffU
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffffffU) &
0xff000000) >> 24) : __swap32md(0xffffffffU))
185 uint32_t t_key;
186
187 u_int t_rtableid;
188 union gre_addr t_src;
189#define t_src4t_src.in4 t_src.in4
190#define t_src6t_src.in6 t_src.in6
191 union gre_addr t_dst;
192#define t_dst4t_dst.in4 t_dst.in4
193#define t_dst6t_dst.in6 t_dst.in6
194 int t_ttl;
195 int t_txhprio;
196 int t_rxhprio;
197 int t_ecn;
198 uint16_t t_df;
199 sa_family_t t_af;
200};
201
202static int
203 gre_cmp_src(const struct gre_tunnel *,
204 const struct gre_tunnel *);
205static int
206 gre_cmp(const struct gre_tunnel *, const struct gre_tunnel *);
207
208static int gre_set_tunnel(struct gre_tunnel *, struct if_laddrreq *, int);
209static int gre_get_tunnel(struct gre_tunnel *, struct if_laddrreq *);
210static int gre_del_tunnel(struct gre_tunnel *);
211
212static int gre_set_vnetid(struct gre_tunnel *, struct ifreq *);
213static int gre_get_vnetid(struct gre_tunnel *, struct ifreq *);
214static int gre_del_vnetid(struct gre_tunnel *);
215
216static int gre_set_vnetflowid(struct gre_tunnel *, struct ifreq *);
217static int gre_get_vnetflowid(struct gre_tunnel *, struct ifreq *);
218
219static struct mbuf *
220 gre_encap_dst(const struct gre_tunnel *, const union gre_addr *,
221 struct mbuf *, uint16_t, uint8_t, uint8_t);
222#define gre_encap(_t, _m, _p, _ttl, _tos)gre_encap_dst((_t), &(_t)->t_dst, (_m), (_p), (_ttl), (
_tos))
\
223 gre_encap_dst((_t), &(_t)->t_dst, (_m), (_p), (_ttl), (_tos))
224
225static struct mbuf *
226 gre_encap_dst_ip(const struct gre_tunnel *,
227 const union gre_addr *, struct mbuf *, uint8_t, uint8_t);
228#define gre_encap_ip(_t, _m, _ttl, _tos)gre_encap_dst_ip((_t), &(_t)->t_dst, (_m), (_ttl), (_tos
))
\
229 gre_encap_dst_ip((_t), &(_t)->t_dst, (_m), (_ttl), (_tos))
230
231static int
232 gre_ip_output(const struct gre_tunnel *, struct mbuf *);
233
234static int gre_tunnel_ioctl(struct ifnet *, struct gre_tunnel *,
235 u_long, void *);
236
237static uint8_t gre_l2_tos(const struct gre_tunnel *, const struct mbuf *);
238static uint8_t gre_l3_tos(const struct gre_tunnel *,
239 const struct mbuf *, uint8_t);
240
241/*
242 * layer 3 GRE tunnels
243 */
244
245struct gre_softc {
246 struct gre_tunnel sc_tunnel; /* must be first */
247 TAILQ_ENTRY(gre_softc)struct { struct gre_softc *tqe_next; struct gre_softc **tqe_prev
; }
sc_entry;
248
249 struct ifnet sc_if;
250
251 struct timeout sc_ka_send;
252 struct timeout sc_ka_hold;
253
254 unsigned int sc_ka_state;
255 unsigned int sc_ka_timeo;
256 unsigned int sc_ka_count;
257
258 unsigned int sc_ka_holdmax;
259 unsigned int sc_ka_holdcnt;
260
261 SIPHASH_KEY sc_ka_key;
262 uint32_t sc_ka_bias;
263 int sc_ka_recvtm;
264};
265
266TAILQ_HEAD(gre_list, gre_softc)struct gre_list { struct gre_softc *tqh_first; struct gre_softc
**tqh_last; }
;
267
268struct gre_keepalive {
269 uint32_t gk_uptime;
270 uint32_t gk_random;
271 uint8_t gk_digest[SIPHASH_DIGEST_LENGTH8];
272} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
273
274static int gre_clone_create(struct if_clone *, int);
275static int gre_clone_destroy(struct ifnet *);
276
277struct if_clone gre_cloner =
278 IF_CLONE_INITIALIZER("gre", gre_clone_create, gre_clone_destroy){ .ifc_list = { ((void *)0), ((void *)0) }, .ifc_name = "gre"
, .ifc_namelen = sizeof("gre") - 1, .ifc_create = gre_clone_create
, .ifc_destroy = gre_clone_destroy, }
;
279
280/* protected by NET_LOCK */
281struct gre_list gre_list = TAILQ_HEAD_INITIALIZER(gre_list){ ((void *)0), &(gre_list).tqh_first };
282
283static int gre_output(struct ifnet *, struct mbuf *, struct sockaddr *,
284 struct rtentry *);
285static void gre_start(struct ifnet *);
286static int gre_ioctl(struct ifnet *, u_long, caddr_t);
287
288static int gre_up(struct gre_softc *);
289static int gre_down(struct gre_softc *);
290static void gre_link_state(struct ifnet *, unsigned int);
291
292static int gre_input_key(struct mbuf **, int *, int, int, uint8_t,
293 struct gre_tunnel *);
294
295static struct mbuf *
296 gre_ipv4_patch(const struct gre_tunnel *, struct mbuf *,
297 uint8_t *, uint8_t);
298#ifdef INET61
299static struct mbuf *
300 gre_ipv6_patch(const struct gre_tunnel *, struct mbuf *,
301 uint8_t *, uint8_t);
302#endif
303#ifdef MPLS1
304static struct mbuf *
305 gre_mpls_patch(const struct gre_tunnel *, struct mbuf *,
306 uint8_t *, uint8_t);
307#endif
308static void gre_keepalive_send(void *);
309static void gre_keepalive_recv(struct ifnet *ifp, struct mbuf *);
310static void gre_keepalive_hold(void *);
311
312static struct mbuf *
313 gre_l3_encap_dst(const struct gre_tunnel *, const void *,
314 struct mbuf *m, sa_family_t);
315
316#define gre_l3_encap(_t, _m, _af)gre_l3_encap_dst((_t), &(_t)->t_dst, (_m), (_af)) \
317 gre_l3_encap_dst((_t), &(_t)->t_dst, (_m), (_af))
318
319struct mgre_softc {
320 struct gre_tunnel sc_tunnel; /* must be first */
321 RBT_ENTRY(mgre_softc)struct rb_entry sc_entry;
322
323 struct ifnet sc_if;
324};
325
326RBT_HEAD(mgre_tree, mgre_softc)struct mgre_tree { struct rb_tree rbh_root; };
327
328static inline int
329 mgre_cmp(const struct mgre_softc *, const struct mgre_softc *);
330
331RBT_PROTOTYPE(mgre_tree, mgre_softc, sc_entry, mgre_cmp)extern const struct rb_type *const mgre_tree_RBT_TYPE; __attribute__
((__unused__)) static inline void mgre_tree_RBT_INIT(struct mgre_tree
*head) { _rb_init(&head->rbh_root); } __attribute__((
__unused__)) static inline struct mgre_softc * mgre_tree_RBT_INSERT
(struct mgre_tree *head, struct mgre_softc *elm) { return _rb_insert
(mgre_tree_RBT_TYPE, &head->rbh_root, elm); } __attribute__
((__unused__)) static inline struct mgre_softc * mgre_tree_RBT_REMOVE
(struct mgre_tree *head, struct mgre_softc *elm) { return _rb_remove
(mgre_tree_RBT_TYPE, &head->rbh_root, elm); } __attribute__
((__unused__)) static inline struct mgre_softc * mgre_tree_RBT_FIND
(struct mgre_tree *head, const struct mgre_softc *key) { return
_rb_find(mgre_tree_RBT_TYPE, &head->rbh_root, key); }
__attribute__((__unused__)) static inline struct mgre_softc *
mgre_tree_RBT_NFIND(struct mgre_tree *head, const struct mgre_softc
*key) { return _rb_nfind(mgre_tree_RBT_TYPE, &head->rbh_root
, key); } __attribute__((__unused__)) static inline struct mgre_softc
* mgre_tree_RBT_ROOT(struct mgre_tree *head) { return _rb_root
(mgre_tree_RBT_TYPE, &head->rbh_root); } __attribute__
((__unused__)) static inline int mgre_tree_RBT_EMPTY(struct mgre_tree
*head) { return _rb_empty(&head->rbh_root); } __attribute__
((__unused__)) static inline struct mgre_softc * mgre_tree_RBT_MIN
(struct mgre_tree *head) { return _rb_min(mgre_tree_RBT_TYPE,
&head->rbh_root); } __attribute__((__unused__)) static
inline struct mgre_softc * mgre_tree_RBT_MAX(struct mgre_tree
*head) { return _rb_max(mgre_tree_RBT_TYPE, &head->rbh_root
); } __attribute__((__unused__)) static inline struct mgre_softc
* mgre_tree_RBT_NEXT(struct mgre_softc *elm) { return _rb_next
(mgre_tree_RBT_TYPE, elm); } __attribute__((__unused__)) static
inline struct mgre_softc * mgre_tree_RBT_PREV(struct mgre_softc
*elm) { return _rb_prev(mgre_tree_RBT_TYPE, elm); } __attribute__
((__unused__)) static inline struct mgre_softc * mgre_tree_RBT_LEFT
(struct mgre_softc *elm) { return _rb_left(mgre_tree_RBT_TYPE
, elm); } __attribute__((__unused__)) static inline struct mgre_softc
* mgre_tree_RBT_RIGHT(struct mgre_softc *elm) { return _rb_right
(mgre_tree_RBT_TYPE, elm); } __attribute__((__unused__)) static
inline struct mgre_softc * mgre_tree_RBT_PARENT(struct mgre_softc
*elm) { return _rb_parent(mgre_tree_RBT_TYPE, elm); } __attribute__
((__unused__)) static inline void mgre_tree_RBT_SET_LEFT(struct
mgre_softc *elm, struct mgre_softc *left) { _rb_set_left(mgre_tree_RBT_TYPE
, elm, left); } __attribute__((__unused__)) static inline void
mgre_tree_RBT_SET_RIGHT(struct mgre_softc *elm, struct mgre_softc
*right) { _rb_set_right(mgre_tree_RBT_TYPE, elm, right); } __attribute__
((__unused__)) static inline void mgre_tree_RBT_SET_PARENT(struct
mgre_softc *elm, struct mgre_softc *parent) { _rb_set_parent
(mgre_tree_RBT_TYPE, elm, parent); } __attribute__((__unused__
)) static inline void mgre_tree_RBT_POISON(struct mgre_softc *
elm, unsigned long poison) { _rb_poison(mgre_tree_RBT_TYPE, elm
, poison); } __attribute__((__unused__)) static inline int mgre_tree_RBT_CHECK
(struct mgre_softc *elm, unsigned long poison) { return _rb_check
(mgre_tree_RBT_TYPE, elm, poison); }
;
332
333static int mgre_clone_create(struct if_clone *, int);
334static int mgre_clone_destroy(struct ifnet *);
335
336struct if_clone mgre_cloner =
337 IF_CLONE_INITIALIZER("mgre", mgre_clone_create, mgre_clone_destroy){ .ifc_list = { ((void *)0), ((void *)0) }, .ifc_name = "mgre"
, .ifc_namelen = sizeof("mgre") - 1, .ifc_create = mgre_clone_create
, .ifc_destroy = mgre_clone_destroy, }
;
338
339static void mgre_rtrequest(struct ifnet *, int, struct rtentry *);
340static int mgre_output(struct ifnet *, struct mbuf *, struct sockaddr *,
341 struct rtentry *);
342static void mgre_start(struct ifnet *);
343static int mgre_ioctl(struct ifnet *, u_long, caddr_t);
344
345static int mgre_set_tunnel(struct mgre_softc *, struct if_laddrreq *);
346static int mgre_get_tunnel(struct mgre_softc *, struct if_laddrreq *);
347static int mgre_up(struct mgre_softc *);
348static int mgre_down(struct mgre_softc *);
349
350/* protected by NET_LOCK */
351struct mgre_tree mgre_tree = RBT_INITIALIZER(){ { ((void *)0) } };
352
353/*
354 * Ethernet GRE tunnels
355 */
356
357static struct mbuf *
358 gre_ether_align(struct mbuf *, int);
359
360struct egre_softc {
361 struct gre_tunnel sc_tunnel; /* must be first */
362 RBT_ENTRY(egre_softc)struct rb_entry sc_entry;
363
364 struct arpcom sc_ac;
365 struct ifmedia sc_media;
366};
367
368RBT_HEAD(egre_tree, egre_softc)struct egre_tree { struct rb_tree rbh_root; };
369
370static inline int
371 egre_cmp(const struct egre_softc *, const struct egre_softc *);
372
373RBT_PROTOTYPE(egre_tree, egre_softc, sc_entry, egre_cmp)extern const struct rb_type *const egre_tree_RBT_TYPE; __attribute__
((__unused__)) static inline void egre_tree_RBT_INIT(struct egre_tree
*head) { _rb_init(&head->rbh_root); } __attribute__((
__unused__)) static inline struct egre_softc * egre_tree_RBT_INSERT
(struct egre_tree *head, struct egre_softc *elm) { return _rb_insert
(egre_tree_RBT_TYPE, &head->rbh_root, elm); } __attribute__
((__unused__)) static inline struct egre_softc * egre_tree_RBT_REMOVE
(struct egre_tree *head, struct egre_softc *elm) { return _rb_remove
(egre_tree_RBT_TYPE, &head->rbh_root, elm); } __attribute__
((__unused__)) static inline struct egre_softc * egre_tree_RBT_FIND
(struct egre_tree *head, const struct egre_softc *key) { return
_rb_find(egre_tree_RBT_TYPE, &head->rbh_root, key); }
__attribute__((__unused__)) static inline struct egre_softc *
egre_tree_RBT_NFIND(struct egre_tree *head, const struct egre_softc
*key) { return _rb_nfind(egre_tree_RBT_TYPE, &head->rbh_root
, key); } __attribute__((__unused__)) static inline struct egre_softc
* egre_tree_RBT_ROOT(struct egre_tree *head) { return _rb_root
(egre_tree_RBT_TYPE, &head->rbh_root); } __attribute__
((__unused__)) static inline int egre_tree_RBT_EMPTY(struct egre_tree
*head) { return _rb_empty(&head->rbh_root); } __attribute__
((__unused__)) static inline struct egre_softc * egre_tree_RBT_MIN
(struct egre_tree *head) { return _rb_min(egre_tree_RBT_TYPE,
&head->rbh_root); } __attribute__((__unused__)) static
inline struct egre_softc * egre_tree_RBT_MAX(struct egre_tree
*head) { return _rb_max(egre_tree_RBT_TYPE, &head->rbh_root
); } __attribute__((__unused__)) static inline struct egre_softc
* egre_tree_RBT_NEXT(struct egre_softc *elm) { return _rb_next
(egre_tree_RBT_TYPE, elm); } __attribute__((__unused__)) static
inline struct egre_softc * egre_tree_RBT_PREV(struct egre_softc
*elm) { return _rb_prev(egre_tree_RBT_TYPE, elm); } __attribute__
((__unused__)) static inline struct egre_softc * egre_tree_RBT_LEFT
(struct egre_softc *elm) { return _rb_left(egre_tree_RBT_TYPE
, elm); } __attribute__((__unused__)) static inline struct egre_softc
* egre_tree_RBT_RIGHT(struct egre_softc *elm) { return _rb_right
(egre_tree_RBT_TYPE, elm); } __attribute__((__unused__)) static
inline struct egre_softc * egre_tree_RBT_PARENT(struct egre_softc
*elm) { return _rb_parent(egre_tree_RBT_TYPE, elm); } __attribute__
((__unused__)) static inline void egre_tree_RBT_SET_LEFT(struct
egre_softc *elm, struct egre_softc *left) { _rb_set_left(egre_tree_RBT_TYPE
, elm, left); } __attribute__((__unused__)) static inline void
egre_tree_RBT_SET_RIGHT(struct egre_softc *elm, struct egre_softc
*right) { _rb_set_right(egre_tree_RBT_TYPE, elm, right); } __attribute__
((__unused__)) static inline void egre_tree_RBT_SET_PARENT(struct
egre_softc *elm, struct egre_softc *parent) { _rb_set_parent
(egre_tree_RBT_TYPE, elm, parent); } __attribute__((__unused__
)) static inline void egre_tree_RBT_POISON(struct egre_softc *
elm, unsigned long poison) { _rb_poison(egre_tree_RBT_TYPE, elm
, poison); } __attribute__((__unused__)) static inline int egre_tree_RBT_CHECK
(struct egre_softc *elm, unsigned long poison) { return _rb_check
(egre_tree_RBT_TYPE, elm, poison); }
;
374
375static int egre_clone_create(struct if_clone *, int);
376static int egre_clone_destroy(struct ifnet *);
377
378static void egre_start(struct ifnet *);
379static int egre_ioctl(struct ifnet *, u_long, caddr_t);
380static int egre_media_change(struct ifnet *);
381static void egre_media_status(struct ifnet *, struct ifmediareq *);
382
383static int egre_up(struct egre_softc *);
384static int egre_down(struct egre_softc *);
385
386static int egre_input(const struct gre_tunnel *, struct mbuf *, int,
387 uint8_t);
388struct if_clone egre_cloner =
389 IF_CLONE_INITIALIZER("egre", egre_clone_create, egre_clone_destroy){ .ifc_list = { ((void *)0), ((void *)0) }, .ifc_name = "egre"
, .ifc_namelen = sizeof("egre") - 1, .ifc_create = egre_clone_create
, .ifc_destroy = egre_clone_destroy, }
;
390
391/* protected by NET_LOCK */
392struct egre_tree egre_tree = RBT_INITIALIZER(){ { ((void *)0) } };
393
394/*
395 * Network Virtualisation Using Generic Routing Encapsulation (NVGRE)
396 */
397
398struct nvgre_softc {
399 struct gre_tunnel sc_tunnel; /* must be first */
400 unsigned int sc_ifp0;
401 RBT_ENTRY(nvgre_softc)struct rb_entry sc_uentry;
402 RBT_ENTRY(nvgre_softc)struct rb_entry sc_mentry;
403
404 struct arpcom sc_ac;
405 struct ifmedia sc_media;
406
407 struct mbuf_queue sc_send_list;
408 struct task sc_send_task;
409
410 void *sc_inm;
411 struct task sc_ltask;
412 struct task sc_dtask;
413
414 struct etherbridge sc_eb;
415};
416
417RBT_HEAD(nvgre_ucast_tree, nvgre_softc)struct nvgre_ucast_tree { struct rb_tree rbh_root; };
418RBT_HEAD(nvgre_mcast_tree, nvgre_softc)struct nvgre_mcast_tree { struct rb_tree rbh_root; };
419
420static inline int
421 nvgre_cmp_ucast(const struct nvgre_softc *,
422 const struct nvgre_softc *);
423static int
424 nvgre_cmp_mcast(const struct gre_tunnel *,
425 const union gre_addr *, unsigned int,
426 const struct gre_tunnel *, const union gre_addr *,
427 unsigned int);
428static inline int
429 nvgre_cmp_mcast_sc(const struct nvgre_softc *,
430 const struct nvgre_softc *);
431
432RBT_PROTOTYPE(nvgre_ucast_tree, nvgre_softc, sc_uentry, nvgre_cmp_ucast)extern const struct rb_type *const nvgre_ucast_tree_RBT_TYPE;
__attribute__((__unused__)) static inline void nvgre_ucast_tree_RBT_INIT
(struct nvgre_ucast_tree *head) { _rb_init(&head->rbh_root
); } __attribute__((__unused__)) static inline struct nvgre_softc
* nvgre_ucast_tree_RBT_INSERT(struct nvgre_ucast_tree *head,
struct nvgre_softc *elm) { return _rb_insert(nvgre_ucast_tree_RBT_TYPE
, &head->rbh_root, elm); } __attribute__((__unused__))
static inline struct nvgre_softc * nvgre_ucast_tree_RBT_REMOVE
(struct nvgre_ucast_tree *head, struct nvgre_softc *elm) { return
_rb_remove(nvgre_ucast_tree_RBT_TYPE, &head->rbh_root
, elm); } __attribute__((__unused__)) static inline struct nvgre_softc
* nvgre_ucast_tree_RBT_FIND(struct nvgre_ucast_tree *head, const
struct nvgre_softc *key) { return _rb_find(nvgre_ucast_tree_RBT_TYPE
, &head->rbh_root, key); } __attribute__((__unused__))
static inline struct nvgre_softc * nvgre_ucast_tree_RBT_NFIND
(struct nvgre_ucast_tree *head, const struct nvgre_softc *key
) { return _rb_nfind(nvgre_ucast_tree_RBT_TYPE, &head->
rbh_root, key); } __attribute__((__unused__)) static inline struct
nvgre_softc * nvgre_ucast_tree_RBT_ROOT(struct nvgre_ucast_tree
*head) { return _rb_root(nvgre_ucast_tree_RBT_TYPE, &head
->rbh_root); } __attribute__((__unused__)) static inline int
nvgre_ucast_tree_RBT_EMPTY(struct nvgre_ucast_tree *head) { return
_rb_empty(&head->rbh_root); } __attribute__((__unused__
)) static inline struct nvgre_softc * nvgre_ucast_tree_RBT_MIN
(struct nvgre_ucast_tree *head) { return _rb_min(nvgre_ucast_tree_RBT_TYPE
, &head->rbh_root); } __attribute__((__unused__)) static
inline struct nvgre_softc * nvgre_ucast_tree_RBT_MAX(struct nvgre_ucast_tree
*head) { return _rb_max(nvgre_ucast_tree_RBT_TYPE, &head
->rbh_root); } __attribute__((__unused__)) static inline struct
nvgre_softc * nvgre_ucast_tree_RBT_NEXT(struct nvgre_softc *
elm) { return _rb_next(nvgre_ucast_tree_RBT_TYPE, elm); } __attribute__
((__unused__)) static inline struct nvgre_softc * nvgre_ucast_tree_RBT_PREV
(struct nvgre_softc *elm) { return _rb_prev(nvgre_ucast_tree_RBT_TYPE
, elm); } __attribute__((__unused__)) static inline struct nvgre_softc
* nvgre_ucast_tree_RBT_LEFT(struct nvgre_softc *elm) { return
_rb_left(nvgre_ucast_tree_RBT_TYPE, elm); } __attribute__((__unused__
)) static inline struct nvgre_softc * nvgre_ucast_tree_RBT_RIGHT
(struct nvgre_softc *elm) { return _rb_right(nvgre_ucast_tree_RBT_TYPE
, elm); } __attribute__((__unused__)) static inline struct nvgre_softc
* nvgre_ucast_tree_RBT_PARENT(struct nvgre_softc *elm) { return
_rb_parent(nvgre_ucast_tree_RBT_TYPE, elm); } __attribute__(
(__unused__)) static inline void nvgre_ucast_tree_RBT_SET_LEFT
(struct nvgre_softc *elm, struct nvgre_softc *left) { _rb_set_left
(nvgre_ucast_tree_RBT_TYPE, elm, left); } __attribute__((__unused__
)) static inline void nvgre_ucast_tree_RBT_SET_RIGHT(struct nvgre_softc
*elm, struct nvgre_softc *right) { _rb_set_right(nvgre_ucast_tree_RBT_TYPE
, elm, right); } __attribute__((__unused__)) static inline void
nvgre_ucast_tree_RBT_SET_PARENT(struct nvgre_softc *elm, struct
nvgre_softc *parent) { _rb_set_parent(nvgre_ucast_tree_RBT_TYPE
, elm, parent); } __attribute__((__unused__)) static inline void
nvgre_ucast_tree_RBT_POISON(struct nvgre_softc *elm, unsigned
long poison) { _rb_poison(nvgre_ucast_tree_RBT_TYPE, elm, poison
); } __attribute__((__unused__)) static inline int nvgre_ucast_tree_RBT_CHECK
(struct nvgre_softc *elm, unsigned long poison) { return _rb_check
(nvgre_ucast_tree_RBT_TYPE, elm, poison); }
;
/*
 * Generate the inline red-black tree accessors (INSERT/REMOVE/FIND/
 * NFIND/MIN/MAX/NEXT/PREV, etc.) for the tree of nvgre interfaces
 * keyed for multicast lookup, linked through sc_mentry and ordered
 * by nvgre_cmp_mcast_sc().
 */
RBT_PROTOTYPE(nvgre_mcast_tree, nvgre_softc, sc_mentry, nvgre_cmp_mcast_sc);
434
435static int nvgre_clone_create(struct if_clone *, int);
436static int nvgre_clone_destroy(struct ifnet *);
437
438static void nvgre_start(struct ifnet *);
439static int nvgre_ioctl(struct ifnet *, u_long, caddr_t);
440
441static int nvgre_up(struct nvgre_softc *);
442static int nvgre_down(struct nvgre_softc *);
443static int nvgre_set_parent(struct nvgre_softc *, const char *);
444static void nvgre_link_change(void *);
445static void nvgre_detach(void *);
446
447static int nvgre_input(const struct gre_tunnel *, struct mbuf *, int,
448 uint8_t);
449static void nvgre_send(void *);
450
451static int nvgre_add_addr(struct nvgre_softc *, const struct ifbareq *);
452static int nvgre_del_addr(struct nvgre_softc *, const struct ifbareq *);
453
454static int nvgre_eb_port_eq(void *, void *, void *);
455static void *nvgre_eb_port_take(void *, void *);
456static void nvgre_eb_port_rele(void *, void *);
457static size_t nvgre_eb_port_ifname(void *, char *, size_t, void *);
458static void nvgre_eb_port_sa(void *, struct sockaddr_storage *, void *);
459
460static const struct etherbridge_ops nvgre_etherbridge_ops = {
461 nvgre_eb_port_eq,
462 nvgre_eb_port_take,
463 nvgre_eb_port_rele,
464 nvgre_eb_port_ifname,
465 nvgre_eb_port_sa,
466};
467
468struct if_clone nvgre_cloner =
469 IF_CLONE_INITIALIZER("nvgre", nvgre_clone_create, nvgre_clone_destroy){ .ifc_list = { ((void *)0), ((void *)0) }, .ifc_name = "nvgre"
, .ifc_namelen = sizeof("nvgre") - 1, .ifc_create = nvgre_clone_create
, .ifc_destroy = nvgre_clone_destroy, }
;
470
471struct pool nvgre_endpoint_pool;
472
473/* protected by NET_LOCK */
474struct nvgre_ucast_tree nvgre_ucast_tree = RBT_INITIALIZER(){ { ((void *)0) } };
475struct nvgre_mcast_tree nvgre_mcast_tree = RBT_INITIALIZER(){ { ((void *)0) } };
476
477/*
478 * MikroTik Ethernet over IP protocol (eoip)
479 */
480
481struct eoip_softc {
482 struct gre_tunnel sc_tunnel; /* must be first */
483 uint16_t sc_tunnel_id;
484 RBT_ENTRY(eoip_softc)struct rb_entry sc_entry;
485
486 struct arpcom sc_ac;
487 struct ifmedia sc_media;
488
489 struct timeout sc_ka_send;
490 struct timeout sc_ka_hold;
491
492 unsigned int sc_ka_state;
493 unsigned int sc_ka_timeo;
494 unsigned int sc_ka_count;
495
496 unsigned int sc_ka_holdmax;
497 unsigned int sc_ka_holdcnt;
498};
499
500RBT_HEAD(eoip_tree, eoip_softc)struct eoip_tree { struct rb_tree rbh_root; };
501
502static inline int
503 eoip_cmp(const struct eoip_softc *, const struct eoip_softc *);
504
/*
 * Generate the inline red-black tree accessors for the tree of eoip
 * interfaces, linked through sc_entry and ordered by eoip_cmp().
 */
RBT_PROTOTYPE(eoip_tree, eoip_softc, sc_entry, eoip_cmp);
506
507static int eoip_clone_create(struct if_clone *, int);
508static int eoip_clone_destroy(struct ifnet *);
509
510static void eoip_start(struct ifnet *);
511static int eoip_ioctl(struct ifnet *, u_long, caddr_t);
512
513static void eoip_keepalive_send(void *);
514static void eoip_keepalive_recv(struct eoip_softc *);
515static void eoip_keepalive_hold(void *);
516
517static int eoip_up(struct eoip_softc *);
518static int eoip_down(struct eoip_softc *);
519
520static struct mbuf *
521 eoip_encap(struct eoip_softc *, struct mbuf *, uint8_t);
522
523static struct mbuf *
524 eoip_input(struct gre_tunnel *, struct mbuf *,
525 const struct gre_header *, uint8_t, int);
526struct if_clone eoip_cloner =
527 IF_CLONE_INITIALIZER("eoip", eoip_clone_create, eoip_clone_destroy){ .ifc_list = { ((void *)0), ((void *)0) }, .ifc_name = "eoip"
, .ifc_namelen = sizeof("eoip") - 1, .ifc_create = eoip_clone_create
, .ifc_destroy = eoip_clone_destroy, }
;
528
529/* protected by NET_LOCK */
530struct eoip_tree eoip_tree = RBT_INITIALIZER(){ { ((void *)0) } };
531
532/*
533 * It is not easy to calculate the right value for a GRE MTU.
534 * We leave this task to the admin and use the same default that
535 * other vendors use.
536 */
537#define GREMTU1476 1476
538
539/*
540 * We can control the acceptance of GRE and MobileIP packets by
541 * altering the sysctl net.inet.gre.allow values
542 * respectively. Zero means drop them, all else is acceptance. We can also
543 * control acceptance of WCCPv1-style GRE packets through the
544 * net.inet.gre.wccp value, but be aware it depends upon normal GRE being
545 * allowed as well.
546 *
547 */
548int gre_allow = 0;
549int gre_wccp = 0;
550
551void
552greattach(int n)
553{
554 if_clone_attach(&gre_cloner);
555 if_clone_attach(&mgre_cloner);
556 if_clone_attach(&egre_cloner);
557 if_clone_attach(&nvgre_cloner);
558 if_clone_attach(&eoip_cloner);
559}
560
561static int
562gre_clone_create(struct if_clone *ifc, int unit)
563{
564 struct gre_softc *sc;
565 struct ifnet *ifp;
566
567 sc = malloc(sizeof(*sc), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
568 snprintf(sc->sc_if.if_xname, sizeof sc->sc_if.if_xname, "%s%d",
569 ifc->ifc_name, unit);
570
571 ifp = &sc->sc_if;
572 ifp->if_softc = sc;
573 ifp->if_typeif_data.ifi_type = IFT_TUNNEL0x83;
574 ifp->if_hdrlenif_data.ifi_hdrlen = GRE_HDRLEN(sizeof(struct ip) + sizeof(struct gre_header));
575 ifp->if_mtuif_data.ifi_mtu = GREMTU1476;
576 ifp->if_flags = IFF_POINTOPOINT0x10|IFF_MULTICAST0x8000;
577 ifp->if_xflags = IFXF_CLONED0x2;
578 ifp->if_bpf_mtap = p2p_bpf_mtap;
579 ifp->if_input = p2p_input;
580 ifp->if_output = gre_output;
581 ifp->if_start = gre_start;
582 ifp->if_ioctl = gre_ioctl;
583 ifp->if_rtrequest = p2p_rtrequest;
584
585 sc->sc_tunnel.t_ttl = ip_defttl;
586 sc->sc_tunnel.t_txhprio = IF_HDRPRIO_PAYLOAD-2;
587 sc->sc_tunnel.t_rxhprio = IF_HDRPRIO_PACKET-1;
588 sc->sc_tunnel.t_df = htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
589 sc->sc_tunnel.t_ecn = ECN_ALLOWED1;
590
591 timeout_set(&sc->sc_ka_send, gre_keepalive_send, sc);
592 timeout_set_proc(&sc->sc_ka_hold, gre_keepalive_hold, sc);
593 sc->sc_ka_state = GRE_KA_NONE0;
594
595 if_counters_alloc(ifp);
596 if_attach(ifp);
597 if_alloc_sadl(ifp);
598
599#if NBPFILTER1 > 0
600 bpfattach(&ifp->if_bpf, ifp, DLT_LOOP12, sizeof(uint32_t));
601#endif
602
603 ifp->if_llprio = IFQ_TOS2PRIO(IPTOS_PREC_INTERNETCONTROL)((0xc0) >> 5);
604
605 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
606 TAILQ_INSERT_TAIL(&gre_list, sc, sc_entry)do { (sc)->sc_entry.tqe_next = ((void *)0); (sc)->sc_entry
.tqe_prev = (&gre_list)->tqh_last; *(&gre_list)->
tqh_last = (sc); (&gre_list)->tqh_last = &(sc)->
sc_entry.tqe_next; } while (0)
;
607 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
608
609 return (0);
610}
611
612static int
613gre_clone_destroy(struct ifnet *ifp)
614{
615 struct gre_softc *sc = ifp->if_softc;
616
617 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
618 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
619 gre_down(sc);
620
621 TAILQ_REMOVE(&gre_list, sc, sc_entry)do { if (((sc)->sc_entry.tqe_next) != ((void *)0)) (sc)->
sc_entry.tqe_next->sc_entry.tqe_prev = (sc)->sc_entry.tqe_prev
; else (&gre_list)->tqh_last = (sc)->sc_entry.tqe_prev
; *(sc)->sc_entry.tqe_prev = (sc)->sc_entry.tqe_next; (
(sc)->sc_entry.tqe_prev) = ((void *)-1); ((sc)->sc_entry
.tqe_next) = ((void *)-1); } while (0)
;
622 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
623
624 if_detach(ifp);
625
626 free(sc, M_DEVBUF2, sizeof(*sc));
627
628 return (0);
629}
630
631static int
632mgre_clone_create(struct if_clone *ifc, int unit)
633{
634 struct mgre_softc *sc;
635 struct ifnet *ifp;
636
637 sc = malloc(sizeof(*sc), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
638 ifp = &sc->sc_if;
639
640 snprintf(ifp->if_xname, sizeof(ifp->if_xname),
641 "%s%d", ifc->ifc_name, unit);
642
643 ifp->if_softc = sc;
644 ifp->if_typeif_data.ifi_type = IFT_L3IPVLAN0x88;
645 ifp->if_hdrlenif_data.ifi_hdrlen = GRE_HDRLEN(sizeof(struct ip) + sizeof(struct gre_header));
646 ifp->if_mtuif_data.ifi_mtu = GREMTU1476;
647 ifp->if_flags = IFF_MULTICAST0x8000|IFF_SIMPLEX0x800;
648 ifp->if_xflags = IFXF_CLONED0x2;
649 ifp->if_bpf_mtap = p2p_bpf_mtap;
650 ifp->if_input = p2p_input;
651 ifp->if_rtrequest = mgre_rtrequest;
652 ifp->if_output = mgre_output;
653 ifp->if_start = mgre_start;
654 ifp->if_ioctl = mgre_ioctl;
655
656 sc->sc_tunnel.t_ttl = ip_defttl;
657 sc->sc_tunnel.t_txhprio = IF_HDRPRIO_PAYLOAD-2;
658 sc->sc_tunnel.t_rxhprio = IF_HDRPRIO_PACKET-1;
659 sc->sc_tunnel.t_df = htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
660 sc->sc_tunnel.t_ecn = ECN_ALLOWED1;
661
662 if_counters_alloc(ifp);
663 if_attach(ifp);
664 if_alloc_sadl(ifp);
665
666#if NBPFILTER1 > 0
667 bpfattach(&ifp->if_bpf, ifp, DLT_LOOP12, sizeof(uint32_t));
668#endif
669
670 return (0);
671}
672
673static int
674mgre_clone_destroy(struct ifnet *ifp)
675{
676 struct mgre_softc *sc = ifp->if_softc;
677
678 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
679 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
680 mgre_down(sc);
681 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
682
683 if_detach(ifp);
684
685 free(sc, M_DEVBUF2, sizeof(*sc));
686
687 return (0);
688}
689
690static int
691egre_clone_create(struct if_clone *ifc, int unit)
692{
693 struct egre_softc *sc;
694 struct ifnet *ifp;
695
696 sc = malloc(sizeof(*sc), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
697 ifp = &sc->sc_ac.ac_if;
698
699 snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d",
700 ifc->ifc_name, unit);
701
702 ifp->if_softc = sc;
703 ifp->if_hardmtu = ETHER_MAX_HARDMTU_LEN65435;
704 ifp->if_ioctl = egre_ioctl;
705 ifp->if_start = egre_start;
706 ifp->if_xflags = IFXF_CLONED0x2;
707 ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
708 ether_fakeaddr(ifp);
709
710 sc->sc_tunnel.t_ttl = ip_defttl;
711 sc->sc_tunnel.t_txhprio = 0;
712 sc->sc_tunnel.t_rxhprio = IF_HDRPRIO_PACKET-1;
713 sc->sc_tunnel.t_df = htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
714
715 ifmedia_init(&sc->sc_media, 0, egre_media_change, egre_media_status);
716 ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL, 0, NULL((void *)0));
717 ifmedia_set(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL);
718
719 if_counters_alloc(ifp);
720 if_attach(ifp);
721 ether_ifattach(ifp);
722
723 return (0);
724}
725
726static int
727egre_clone_destroy(struct ifnet *ifp)
728{
729 struct egre_softc *sc = ifp->if_softc;
730
731 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
732 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
733 egre_down(sc);
734 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
735
736 ifmedia_delete_instance(&sc->sc_media, IFM_INST_ANY((uint64_t) -1));
737 ether_ifdetach(ifp);
738 if_detach(ifp);
739
740 free(sc, M_DEVBUF2, sizeof(*sc));
741
742 return (0);
743}
744
745static int
746nvgre_clone_create(struct if_clone *ifc, int unit)
747{
748 struct nvgre_softc *sc;
749 struct ifnet *ifp;
750 struct gre_tunnel *tunnel;
751 int error;
752
753 if (nvgre_endpoint_pool.pr_size == 0) {
754 pool_init(&nvgre_endpoint_pool, sizeof(union gre_addr),
755 0, IPL_SOFTNET0x2, 0, "nvgreep", NULL((void *)0));
756 }
757
758 sc = malloc(sizeof(*sc), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
759 ifp = &sc->sc_ac.ac_if;
760
761 snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d",
762 ifc->ifc_name, unit);
763
764 error = etherbridge_init(&sc->sc_eb, ifp->if_xname,
765 &nvgre_etherbridge_ops, sc);
766 if (error != 0) {
767 free(sc, M_DEVBUF2, sizeof(*sc));
768 return (error);
769 }
770
771 ifp->if_softc = sc;
772 ifp->if_hardmtu = ETHER_MAX_HARDMTU_LEN65435;
773 ifp->if_ioctl = nvgre_ioctl;
774 ifp->if_start = nvgre_start;
775 ifp->if_xflags = IFXF_CLONED0x2;
776 ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
777 ether_fakeaddr(ifp);
778
779 tunnel = &sc->sc_tunnel;
780 tunnel->t_ttl = IP_DEFAULT_MULTICAST_TTL1;
781 tunnel->t_txhprio = 0;
782 sc->sc_tunnel.t_rxhprio = IF_HDRPRIO_PACKET-1;
783 tunnel->t_df = htons(IP_DF)(__uint16_t)(__builtin_constant_p(0x4000) ? (__uint16_t)(((__uint16_t
)(0x4000) & 0xffU) << 8 | ((__uint16_t)(0x4000) &
0xff00U) >> 8) : __swap16md(0x4000))
;
784 tunnel->t_key_mask = GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
785 tunnel->t_key = htonl((NVGRE_VSID_RES_MAX + 1) <<(__uint32_t)(__builtin_constant_p((0x000fff + 1) << 8) ?
(__uint32_t)(((__uint32_t)((0x000fff + 1) << 8) & 0xff
) << 24 | ((__uint32_t)((0x000fff + 1) << 8) &
0xff00) << 8 | ((__uint32_t)((0x000fff + 1) << 8
) & 0xff0000) >> 8 | ((__uint32_t)((0x000fff + 1) <<
8) & 0xff000000) >> 24) : __swap32md((0x000fff + 1
) << 8))
786 GRE_KEY_ENTROPY_SHIFT)(__uint32_t)(__builtin_constant_p((0x000fff + 1) << 8) ?
(__uint32_t)(((__uint32_t)((0x000fff + 1) << 8) & 0xff
) << 24 | ((__uint32_t)((0x000fff + 1) << 8) &
0xff00) << 8 | ((__uint32_t)((0x000fff + 1) << 8
) & 0xff0000) >> 8 | ((__uint32_t)((0x000fff + 1) <<
8) & 0xff000000) >> 24) : __swap32md((0x000fff + 1
) << 8))
;
787
788 mq_init(&sc->sc_send_list, IFQ_MAXLEN256 * 2, IPL_SOFTNET0x2);
789 task_set(&sc->sc_send_task, nvgre_send, sc);
790 task_set(&sc->sc_ltask, nvgre_link_change, sc);
791 task_set(&sc->sc_dtask, nvgre_detach, sc);
792
793 ifmedia_init(&sc->sc_media, 0, egre_media_change, egre_media_status);
794 ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL, 0, NULL((void *)0));
795 ifmedia_set(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL);
796
797 if_counters_alloc(ifp);
798 if_attach(ifp);
799 ether_ifattach(ifp);
800
801 return (0);
802}
803
804static int
805nvgre_clone_destroy(struct ifnet *ifp)
806{
807 struct nvgre_softc *sc = ifp->if_softc;
808
809 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
810 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
811 nvgre_down(sc);
812 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
813
814 etherbridge_destroy(&sc->sc_eb);
815
816 ifmedia_delete_instance(&sc->sc_media, IFM_INST_ANY((uint64_t) -1));
817 ether_ifdetach(ifp);
818 if_detach(ifp);
819
820 free(sc, M_DEVBUF2, sizeof(*sc));
821
822 return (0);
823}
824
825static int
826eoip_clone_create(struct if_clone *ifc, int unit)
827{
828 struct eoip_softc *sc;
829 struct ifnet *ifp;
830
831 sc = malloc(sizeof(*sc), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
832 ifp = &sc->sc_ac.ac_if;
833
834 snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d",
835 ifc->ifc_name, unit);
836
837 ifp->if_softc = sc;
838 ifp->if_hardmtu = ETHER_MAX_HARDMTU_LEN65435;
839 ifp->if_ioctl = eoip_ioctl;
840 ifp->if_start = eoip_start;
841 ifp->if_xflags = IFXF_CLONED0x2;
842 ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
843 ether_fakeaddr(ifp);
844
845 sc->sc_tunnel.t_ttl = ip_defttl;
846 sc->sc_tunnel.t_txhprio = 0;
847 sc->sc_tunnel.t_rxhprio = IF_HDRPRIO_PACKET-1;
848 sc->sc_tunnel.t_df = htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
849
850 sc->sc_ka_timeo = 10;
851 sc->sc_ka_count = 10;
852
853 timeout_set(&sc->sc_ka_send, eoip_keepalive_send, sc);
854 timeout_set_proc(&sc->sc_ka_hold, eoip_keepalive_hold, sc);
855 sc->sc_ka_state = GRE_KA_DOWN1;
856
857 ifmedia_init(&sc->sc_media, 0, egre_media_change, egre_media_status);
858 ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL, 0, NULL((void *)0));
859 ifmedia_set(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL);
860
861 if_counters_alloc(ifp);
862 if_attach(ifp);
863 ether_ifattach(ifp);
864
865 return (0);
866}
867
868static int
869eoip_clone_destroy(struct ifnet *ifp)
870{
871 struct eoip_softc *sc = ifp->if_softc;
872
873 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
874 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
875 eoip_down(sc);
876 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
877
878 ifmedia_delete_instance(&sc->sc_media, IFM_INST_ANY((uint64_t) -1));
879 ether_ifdetach(ifp);
880 if_detach(ifp);
881
882 free(sc, M_DEVBUF2, sizeof(*sc));
883
884 return (0);
885}
886
887int
888gre_input(struct mbuf **mp, int *offp, int type, int af)
889{
890 struct mbuf *m = *mp;
891 struct gre_tunnel key;
892 struct ip *ip;
893
894 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
895
896 /* XXX check if ip_src is sane for nvgre? */
897
898 key.t_af = AF_INET2;
899 key.t_src4t_src.in4 = ip->ip_dst;
900 key.t_dst4t_dst.in4 = ip->ip_src;
901
902 if (gre_input_key(mp, offp, type, af, ip->ip_tos, &key) == -1)
903 return (rip_input(mp, offp, type, af));
904
905 return (IPPROTO_DONE257);
906}
907
#ifdef INET6
/*
 * IPv6 GRE protocol input.  Mirror of gre_input(): build a swapped
 * src/dst lookup key from the outer IPv6 header, extract the traffic
 * class from the flow word (top 8 bits after the version nibble,
 * hence >> 20), and dispatch via gre_input_key().  Unclaimed packets
 * fall through to raw IPv6 input.
 */
int
gre_input6(struct mbuf **mp, int *offp, int type, int af)
{
	struct gre_tunnel key;
	struct ip6_hdr *ip6 = mtod(*mp, struct ip6_hdr *);
	uint32_t flow;

	/* XXX check if ip6_src is sane for nvgre? */

	key.t_af = AF_INET6;
	key.t_src6 = ip6->ip6_dst;
	key.t_dst6 = ip6->ip6_src;

	flow = bemtoh32(&ip6->ip6_flow);

	if (gre_input_key(mp, offp, type, af, flow >> 20, &key) == -1)
		return (rip6_input(mp, offp, type, af));

	return (IPPROTO_DONE);
}
#endif /* INET6 */
933
934static inline struct ifnet *
935gre_find(const struct gre_tunnel *key)
936{
937 struct gre_softc *sc;
938
939 TAILQ_FOREACH(sc, &gre_list, sc_entry)for((sc) = ((&gre_list)->tqh_first); (sc) != ((void *)
0); (sc) = ((sc)->sc_entry.tqe_next))
{
940 if (gre_cmp(key, &sc->sc_tunnel) != 0)
941 continue;
942
943 if (!ISSET(sc->sc_if.if_flags, IFF_RUNNING)((sc->sc_if.if_flags) & (0x40)))
944 continue;
945
946 return (&sc->sc_if);
947 }
948
949 return (NULL((void *)0));
950}
951
952static inline struct ifnet *
953mgre_find(const struct gre_tunnel *key)
954{
955 struct mgre_softc *sc;
956
957 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
958 sc = RBT_FIND(mgre_tree, &mgre_tree, (const struct mgre_softc *)key)mgre_tree_RBT_FIND(&mgre_tree, (const struct mgre_softc *
)key)
;
959 if (sc != NULL((void *)0))
960 return (&sc->sc_if);
961
962 return (NULL((void *)0));
963}
964
965static struct mbuf *
966gre_input_1(struct gre_tunnel *key, struct mbuf *m,
967 const struct gre_header *gh, uint8_t otos, int iphlen)
968{
969 switch (gh->gre_proto) {
970 case htons(ETHERTYPE_PPP)(__uint16_t)(__builtin_constant_p(0x880B) ? (__uint16_t)(((__uint16_t
)(0x880B) & 0xffU) << 8 | ((__uint16_t)(0x880B) &
0xff00U) >> 8) : __swap16md(0x880B))
:
971#ifdef PIPEX1
972 if (pipex_enable) {
973 struct pipex_session *session;
974
975 session = pipex_pptp_lookup_session(m);
976 if (session != NULL((void *)0)) {
977 struct mbuf *m0;
978
979 m0 = pipex_pptp_input(m, session);
980 pipex_rele_session(session);
981
982 if (m0 == NULL((void *)0))
983 return (NULL((void *)0));
984 }
985 }
986#endif
987 break;
988 case htons(GRE_EOIP)(__uint16_t)(__builtin_constant_p(0x6400) ? (__uint16_t)(((__uint16_t
)(0x6400) & 0xffU) << 8 | ((__uint16_t)(0x6400) &
0xff00U) >> 8) : __swap16md(0x6400))
:
989 return (eoip_input(key, m, gh, otos, iphlen));
990 break;
991 }
992
993 return (m);
994}
995
996static int
997gre_input_key(struct mbuf **mp, int *offp, int type, int af, uint8_t otos,
998 struct gre_tunnel *key)
999{
1000 struct mbuf *m = *mp;
1001 int iphlen = *offp, hlen, rxprio;
1002 struct ifnet *ifp;
1003 const struct gre_tunnel *tunnel;
1004 caddr_t buf;
1005 struct gre_header *gh;
1006 struct gre_h_key *gkh;
1007 struct mbuf *(*patch)(const struct gre_tunnel *, struct mbuf *,
1008 uint8_t *, uint8_t);
1009 int mcast = 0;
1010 uint8_t itos;
1011
1012 if (!gre_allow)
1013 goto decline;
1014
1015 key->t_rtableid = m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid;
1016
1017 hlen = iphlen + sizeof(*gh);
1018 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < hlen)
1019 goto decline;
1020
1021 m = m_pullup(m, hlen);
1022 if (m == NULL((void *)0))
1023 return (IPPROTO_DONE257);
1024
1025 buf = mtod(m, caddr_t)((caddr_t)((m)->m_hdr.mh_data));
1026 gh = (struct gre_header *)(buf + iphlen);
1027
1028 /* check the version */
1029 switch (gh->gre_flags & htons(GRE_VERS_MASK)(__uint16_t)(__builtin_constant_p(0x0007) ? (__uint16_t)(((__uint16_t
)(0x0007) & 0xffU) << 8 | ((__uint16_t)(0x0007) &
0xff00U) >> 8) : __swap16md(0x0007))
) {
1030 case htons(GRE_VERS_0)(__uint16_t)(__builtin_constant_p(0x0000) ? (__uint16_t)(((__uint16_t
)(0x0000) & 0xffU) << 8 | ((__uint16_t)(0x0000) &
0xff00U) >> 8) : __swap16md(0x0000))
:
1031 break;
1032
1033 case htons(GRE_VERS_1)(__uint16_t)(__builtin_constant_p(0x0001) ? (__uint16_t)(((__uint16_t
)(0x0001) & 0xffU) << 8 | ((__uint16_t)(0x0001) &
0xff00U) >> 8) : __swap16md(0x0001))
:
1034 m = gre_input_1(key, m, gh, otos, iphlen);
1035 if (m == NULL((void *)0))
1036 return (IPPROTO_DONE257);
1037 /* FALLTHROUGH */
1038 default:
1039 goto decline;
1040 }
1041
1042 /* the only optional bit in the header is K flag */
1043 if ((gh->gre_flags & htons(~(GRE_KP|GRE_VERS_MASK))(__uint16_t)(__builtin_constant_p(~(0x2000|0x0007)) ? (__uint16_t
)(((__uint16_t)(~(0x2000|0x0007)) & 0xffU) << 8 | (
(__uint16_t)(~(0x2000|0x0007)) & 0xff00U) >> 8) : __swap16md
(~(0x2000|0x0007)))
) != htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
)
1044 goto decline;
1045
1046 if (gh->gre_flags & htons(GRE_KP)(__uint16_t)(__builtin_constant_p(0x2000) ? (__uint16_t)(((__uint16_t
)(0x2000) & 0xffU) << 8 | ((__uint16_t)(0x2000) &
0xff00U) >> 8) : __swap16md(0x2000))
) {
1047 hlen += sizeof(*gkh);
1048 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < hlen)
1049 goto decline;
1050
1051 m = m_pullup(m, hlen);
1052 if (m == NULL((void *)0))
1053 return (IPPROTO_DONE257);
1054
1055 buf = mtod(m, caddr_t)((caddr_t)((m)->m_hdr.mh_data));
1056 gh = (struct gre_header *)(buf + iphlen);
1057 gkh = (struct gre_h_key *)(gh + 1);
1058
1059 key->t_key_mask = GRE_KEY_MASK(__uint32_t)(__builtin_constant_p(0xffffffffU) ? (__uint32_t)
(((__uint32_t)(0xffffffffU) & 0xff) << 24 | ((__uint32_t
)(0xffffffffU) & 0xff00) << 8 | ((__uint32_t)(0xffffffffU
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffffffU) &
0xff000000) >> 24) : __swap32md(0xffffffffU))
;
1060 key->t_key = gkh->gre_key;
1061 } else
1062 key->t_key_mask = GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
;
1063
1064 if (gh->gre_proto == htons(ETHERTYPE_TRANSETHER)(__uint16_t)(__builtin_constant_p(0x6558) ? (__uint16_t)(((__uint16_t
)(0x6558) & 0xffU) << 8 | ((__uint16_t)(0x6558) &
0xff00U) >> 8) : __swap16md(0x6558))
) {
1065 if (egre_input(key, m, hlen, otos) == -1 &&
1066 nvgre_input(key, m, hlen, otos) == -1)
1067 goto decline;
1068
1069 return (IPPROTO_DONE257);
1070 }
1071
1072 ifp = gre_find(key);
1073 if (ifp == NULL((void *)0)) {
1074 ifp = mgre_find(key);
1075 if (ifp == NULL((void *)0))
1076 goto decline;
1077 }
1078
1079 switch (gh->gre_proto) {
1080 case htons(GRE_WCCP)(__uint16_t)(__builtin_constant_p(0x883e) ? (__uint16_t)(((__uint16_t
)(0x883e) & 0xffU) << 8 | ((__uint16_t)(0x883e) &
0xff00U) >> 8) : __swap16md(0x883e))
: {
1081 struct mbuf *n;
1082 int off;
1083
1084 /* WCCP/GRE:
1085 * So far as I can see (and test) it seems that Cisco's WCCP
1086 * GRE tunnel is precisely a IP-in-GRE tunnel that differs
1087 * only in its protocol number. At least, it works for me.
1088 *
1089 * The Internet Drafts can be found if you look for
1090 * the following:
1091 * draft-forster-wrec-wccp-v1-00.txt
1092 * draft-wilson-wrec-wccp-v2-01.txt
1093 */
1094
1095 if (!gre_wccp && !ISSET(ifp->if_flags, IFF_LINK0)((ifp->if_flags) & (0x1000)))
1096 goto decline;
1097
1098 /*
1099 * If the first nibble of the payload does not look like
1100 * IPv4, assume it is WCCP v2.
1101 */
1102 n = m_getptr(m, hlen, &off);
1103 if (n == NULL((void *)0))
1104 goto decline;
1105 if (n->m_datam_hdr.mh_data[off] >> 4 != IPVERSION4)
1106 hlen += 4; /* four-octet Redirect header */
1107
1108 /* FALLTHROUGH */
1109 }
1110 case htons(ETHERTYPE_IP)(__uint16_t)(__builtin_constant_p(0x0800) ? (__uint16_t)(((__uint16_t
)(0x0800) & 0xffU) << 8 | ((__uint16_t)(0x0800) &
0xff00U) >> 8) : __swap16md(0x0800))
:
1111 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = AF_INET2;
1112 patch = gre_ipv4_patch;
1113 break;
1114#ifdef INET61
1115 case htons(ETHERTYPE_IPV6)(__uint16_t)(__builtin_constant_p(0x86DD) ? (__uint16_t)(((__uint16_t
)(0x86DD) & 0xffU) << 8 | ((__uint16_t)(0x86DD) &
0xff00U) >> 8) : __swap16md(0x86DD))
:
1116 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = AF_INET624;
1117 patch = gre_ipv6_patch;
1118 break;
1119#endif
1120#ifdef MPLS1
1121 case htons(ETHERTYPE_MPLS_MCAST)(__uint16_t)(__builtin_constant_p(0x8848) ? (__uint16_t)(((__uint16_t
)(0x8848) & 0xffU) << 8 | ((__uint16_t)(0x8848) &
0xff00U) >> 8) : __swap16md(0x8848))
:
1122 mcast = M_MCAST0x0200|M_BCAST0x0100;
1123 /* fallthrough */
1124 case htons(ETHERTYPE_MPLS)(__uint16_t)(__builtin_constant_p(0x8847) ? (__uint16_t)(((__uint16_t
)(0x8847) & 0xffU) << 8 | ((__uint16_t)(0x8847) &
0xff00U) >> 8) : __swap16md(0x8847))
:
1125 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = AF_MPLS33;
1126 patch = gre_mpls_patch;
1127 break;
1128#endif
1129 case htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
:
1130 if (ifp->if_typeif_data.ifi_type != IFT_TUNNEL0x83) {
1131 /* keepalives dont make sense for mgre */
1132 goto decline;
1133 }
1134
1135 m_adj(m, hlen);
1136 gre_keepalive_recv(ifp, m);
1137 return (IPPROTO_DONE257);
1138
1139 default:
1140 goto decline;
1141 }
1142
1143 /* it's ours now */
1144
1145 m_adj(m, hlen);
1146
1147 tunnel = ifp->if_softc; /* gre and mgre tunnel info is at the front */
1148
1149 m = (*patch)(tunnel, m, &itos, otos);
1150 if (m == NULL((void *)0))
1151 return (IPPROTO_DONE257);
1152
1153 if (tunnel->t_key_mask == GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
) {
1154 SET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) |= (0x4000));
1155 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_flowid =
1156 bemtoh32(&key->t_key)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&key->
t_key)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&key
->t_key)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t
*)(&key->t_key)) & 0xff00) << 8 | ((__uint32_t
)(*(__uint32_t *)(&key->t_key)) & 0xff0000) >>
8 | ((__uint32_t)(*(__uint32_t *)(&key->t_key)) &
0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&key
->t_key)))
& ~GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
1157 }
1158
1159 rxprio = tunnel->t_rxhprio;
1160 switch (rxprio) {
1161 case IF_HDRPRIO_PACKET-1:
1162 /* nop */
1163 break;
1164 case IF_HDRPRIO_OUTER-3:
1165 m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio = IFQ_TOS2PRIO(otos)((otos) >> 5);
1166 break;
1167 case IF_HDRPRIO_PAYLOAD-2:
1168 m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio = IFQ_TOS2PRIO(itos)((itos) >> 5);
1169 break;
1170 default:
1171 m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio = rxprio;
1172 break;
1173 }
1174
1175 m->m_flagsm_hdr.mh_flags &= ~(M_MCAST0x0200|M_BCAST0x0100);
1176 m->m_flagsm_hdr.mh_flags |= mcast;
1177
1178 if_vinput(ifp, m);
1179 return (IPPROTO_DONE257);
1180decline:
1181 *mp = m;
1182 return (-1);
1183}
1184
1185static struct mbuf *
1186gre_ipv4_patch(const struct gre_tunnel *tunnel, struct mbuf *m,
1187 uint8_t *itosp, uint8_t otos)
1188{
1189 struct ip *ip;
1190 uint8_t itos;
1191
1192 m = m_pullup(m, sizeof(*ip));
1193 if (m == NULL((void *)0))
1194 return (NULL((void *)0));
1195
1196 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
1197
1198 itos = ip->ip_tos;
1199 if (ip_ecn_egress(tunnel->t_ecn, &otos, &itos) == 0) {
1200 m_freem(m);
1201 return (NULL((void *)0));
1202 }
1203 if (itos != ip->ip_tos)
1204 ip_tos_patch(ip, itos);
1205
1206 *itosp = itos;
1207
1208 return (m);
1209}
1210
#ifdef INET6
/*
 * gre_ipv6_patch: fold the outer GRE/IP header's ECN bits into the
 * inner IPv6 traffic class on decapsulation.
 *
 * Returns the (possibly replaced) mbuf with *itosp set to the final
 * inner traffic class, or NULL if the mbuf was consumed.
 */
static struct mbuf *
gre_ipv6_patch(const struct gre_tunnel *tunnel, struct mbuf *m,
    uint8_t *itosp, uint8_t otos)
{
	struct ip6_hdr *ip6;
	uint32_t flow;
	uint8_t itos;

	m = m_pullup(m, sizeof(*ip6));
	if (m == NULL)
		return (NULL);

	ip6 = mtod(m, struct ip6_hdr *);

	/* the traffic class lives in bits 27..20 of the flow word */
	flow = bemtoh32(&ip6->ip6_flow);
	itos = flow >> 20;
	if (ip_ecn_egress(tunnel->t_ecn, &otos, &itos) == 0) {
		m_freem(m);
		return (NULL);
	}

	/* splice the updated traffic class back into the flow word */
	CLR(flow, 0xff << 20);
	SET(flow, itos << 20);
	htobem32(&ip6->ip6_flow, flow);

	*itosp = itos;

	return (m);
}
#endif
1242
#ifdef MPLS
/*
 * gre_mpls_patch: derive an inner "tos" from the EXP bits of the top
 * MPLS shim so the usual ECN egress check can run.  The shim itself is
 * not modified; only *itosp is produced for the caller.
 *
 * Returns the (possibly replaced) mbuf, or NULL if it was consumed.
 */
static struct mbuf *
gre_mpls_patch(const struct gre_tunnel *tunnel, struct mbuf *m,
    uint8_t *itosp, uint8_t otos)
{
	uint8_t itos;
	uint32_t shim;

	m = m_pullup(m, sizeof(shim));
	if (m == NULL)
		return (NULL);

	shim = *mtod(m, uint32_t *);
	/* scale the 3 EXP bits up into the tos precedence bits */
	itos = (ntohl(shim & MPLS_EXP_MASK) >> MPLS_EXP_OFFSET) << 5;

	if (ip_ecn_egress(tunnel->t_ecn, &otos, &itos) == 0) {
		m_freem(m);
		return (NULL);
	}

	*itosp = itos;

	return (m);
}
#endif
1268
/*
 * gre_l2_prio: set the pf priority of a decapsulated layer 2 packet
 * according to the tunnel's rxhprio setting: leave the packet alone
 * (IF_HDRPRIO_PACKET), copy it from the outer tos (IF_HDRPRIO_OUTER),
 * or force a fixed priority.
 */
#define gre_l2_prio(_t, _m, _otos) do {					\
	int rxprio = (_t)->t_rxhprio;					\
	switch (rxprio) {						\
	case IF_HDRPRIO_PACKET:						\
		/* nop */						\
		break;							\
	case IF_HDRPRIO_OUTER:						\
		(_m)->m_pkthdr.pf.prio = IFQ_TOS2PRIO((_otos));		\
		break;							\
	default:							\
		(_m)->m_pkthdr.pf.prio = rxprio;			\
		break;							\
	}								\
} while (0)
1283
1284static int
1285egre_input(const struct gre_tunnel *key, struct mbuf *m, int hlen, uint8_t otos)
1286{
1287 struct egre_softc *sc;
1288
1289 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
1290 sc = RBT_FIND(egre_tree, &egre_tree, (const struct egre_softc *)key)egre_tree_RBT_FIND(&egre_tree, (const struct egre_softc *
)key)
;
1291 if (sc == NULL((void *)0))
1292 return (-1);
1293
1294 /* it's ours now */
1295 m = gre_ether_align(m, hlen);
1296 if (m == NULL((void *)0))
1297 return (0);
1298
1299 if (sc->sc_tunnel.t_key_mask == GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
) {
1300 SET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) |= (0x4000));
1301 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_flowid =
1302 bemtoh32(&key->t_key)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&key->
t_key)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&key
->t_key)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t
*)(&key->t_key)) & 0xff00) << 8 | ((__uint32_t
)(*(__uint32_t *)(&key->t_key)) & 0xff0000) >>
8 | ((__uint32_t)(*(__uint32_t *)(&key->t_key)) &
0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&key
->t_key)))
& ~GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
1303 }
1304
1305 m->m_flagsm_hdr.mh_flags &= ~(M_MCAST0x0200|M_BCAST0x0100);
1306
1307 gre_l2_prio(&sc->sc_tunnel, m, otos)do { int rxprio = (&sc->sc_tunnel)->t_rxhprio; switch
(rxprio) { case -1: break; case -3: (m)->M_dat.MH.MH_pkthdr
.pf.prio = (((otos)) >> 5); break; default: (m)->M_dat
.MH.MH_pkthdr.pf.prio = rxprio; break; } } while (0)
;
1308
1309 if_vinput(&sc->sc_ac.ac_if, m);
1310
1311 return (0);
1312}
1313
1314static inline struct nvgre_softc *
1315nvgre_mcast_find(const struct gre_tunnel *key, unsigned int if0idx)
1316{
1317 struct nvgre_softc *sc;
1318 int rv;
1319
1320 /*
1321 * building an nvgre_softc to use with RBT_FIND is expensive, and
1322 * would need to swap the src and dst addresses in the key. so do the
1323 * find by hand.
1324 */
1325
1326 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
1327 sc = RBT_ROOT(nvgre_mcast_tree, &nvgre_mcast_tree)nvgre_mcast_tree_RBT_ROOT(&nvgre_mcast_tree);
1328 while (sc != NULL((void *)0)) {
1329 rv = nvgre_cmp_mcast(key, &key->t_src, if0idx,
1330 &sc->sc_tunnel, &sc->sc_tunnel.t_dst, sc->sc_ifp0);
1331 if (rv == 0)
1332 return (sc);
1333 if (rv < 0)
1334 sc = RBT_LEFT(nvgre_mcast_tree, sc)nvgre_mcast_tree_RBT_LEFT(sc);
1335 else
1336 sc = RBT_RIGHT(nvgre_mcast_tree, sc)nvgre_mcast_tree_RBT_RIGHT(sc);
1337 }
1338
1339 return (NULL((void *)0));
1340}
1341
1342static inline struct nvgre_softc *
1343nvgre_ucast_find(const struct gre_tunnel *key)
1344{
1345 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
1346 return (RBT_FIND(nvgre_ucast_tree, &nvgre_ucast_tree,nvgre_ucast_tree_RBT_FIND(&nvgre_ucast_tree, (struct nvgre_softc
*)key)
1347 (struct nvgre_softc *)key)nvgre_ucast_tree_RBT_FIND(&nvgre_ucast_tree, (struct nvgre_softc
*)key)
);
1348}
1349
1350static int
1351nvgre_input(const struct gre_tunnel *key, struct mbuf *m, int hlen,
1352 uint8_t otos)
1353{
1354 struct nvgre_softc *sc;
1355 struct ether_header *eh;
1356
1357 if (ISSET(m->m_flags, M_MCAST|M_BCAST)((m->m_hdr.mh_flags) & (0x0200|0x0100)))
1358 sc = nvgre_mcast_find(key, m->m_pkthdrM_dat.MH.MH_pkthdr.ph_ifidx);
1359 else
1360 sc = nvgre_ucast_find(key);
1361
1362 if (sc == NULL((void *)0))
1363 return (-1);
1364
1365 /* it's ours now */
1366 m = gre_ether_align(m, hlen);
1367 if (m == NULL((void *)0))
1368 return (0);
1369
1370 eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data));
1371 etherbridge_map_ea(&sc->sc_eb, (void *)&key->t_dst,
1372 (struct ether_addr *)eh->ether_shost);
1373
1374 SET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) |= (0x4000));
1375 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_flowid = bemtoh32(&key->t_key)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&key->
t_key)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&key
->t_key)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t
*)(&key->t_key)) & 0xff00) << 8 | ((__uint32_t
)(*(__uint32_t *)(&key->t_key)) & 0xff0000) >>
8 | ((__uint32_t)(*(__uint32_t *)(&key->t_key)) &
0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&key
->t_key)))
& ~GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
1376
1377 m->m_flagsm_hdr.mh_flags &= ~(M_MCAST0x0200|M_BCAST0x0100);
1378
1379 gre_l2_prio(&sc->sc_tunnel, m, otos)do { int rxprio = (&sc->sc_tunnel)->t_rxhprio; switch
(rxprio) { case -1: break; case -3: (m)->M_dat.MH.MH_pkthdr
.pf.prio = (((otos)) >> 5); break; default: (m)->M_dat
.MH.MH_pkthdr.pf.prio = rxprio; break; } } while (0)
;
1380
1381 if_vinput(&sc->sc_ac.ac_if, m);
1382
1383 return (0);
1384}
1385
1386static struct mbuf *
1387gre_ether_align(struct mbuf *m, int hlen)
1388{
1389 struct mbuf *n;
1390 int off;
1391
1392 m_adj(m, hlen);
1393
1394 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < sizeof(struct ether_header)) {
1395 m_freem(m);
1396 return (NULL((void *)0));
1397 }
1398
1399 m = m_pullup(m, sizeof(struct ether_header));
1400 if (m == NULL((void *)0))
1401 return (NULL((void *)0));
1402
1403 n = m_getptr(m, sizeof(struct ether_header), &off);
1404 if (n == NULL((void *)0)) {
1405 m_freem(m);
1406 return (NULL((void *)0));
1407 }
1408
1409 if (!ALIGNED_POINTER(mtod(n, caddr_t) + off, uint32_t)1) {
1410 n = m_dup_pkt(m, ETHER_ALIGN2, M_NOWAIT0x0002);
1411 m_freem(m);
1412 if (n == NULL((void *)0))
1413 return (NULL((void *)0));
1414 m = n;
1415 }
1416
1417 return (m);
1418}
1419
1420static void
1421gre_keepalive_recv(struct ifnet *ifp, struct mbuf *m)
1422{
1423 struct gre_softc *sc = ifp->if_softc;
1424 struct gre_keepalive *gk;
1425 SIPHASH_CTX ctx;
1426 uint8_t digest[SIPHASH_DIGEST_LENGTH8];
1427 int uptime, delta;
1428 int tick = ticks;
1429
1430 if (sc->sc_ka_state == GRE_KA_NONE0 ||
1431 sc->sc_tunnel.t_rtableid != sc->sc_if.if_rdomainif_data.ifi_rdomain)
1432 goto drop;
1433
1434 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < sizeof(*gk))
1435 goto drop;
1436 m = m_pullup(m, sizeof(*gk));
1437 if (m == NULL((void *)0))
1438 return;
1439
1440 gk = mtod(m, struct gre_keepalive *)((struct gre_keepalive *)((m)->m_hdr.mh_data));
1441 uptime = bemtoh32(&gk->gk_uptime)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&gk->
gk_uptime)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&
gk->gk_uptime)) & 0xff) << 24 | ((__uint32_t)(*(
__uint32_t *)(&gk->gk_uptime)) & 0xff00) << 8
| ((__uint32_t)(*(__uint32_t *)(&gk->gk_uptime)) &
0xff0000) >> 8 | ((__uint32_t)(*(__uint32_t *)(&gk
->gk_uptime)) & 0xff000000) >> 24) : __swap32md(
*(__uint32_t *)(&gk->gk_uptime)))
- sc->sc_ka_bias;
1442 delta = tick - uptime;
1443 if (delta < 0)
1444 goto drop;
1445 if (delta > hz * 10) /* magic */
1446 goto drop;
1447
1448 /* avoid too much siphash work */
1449 delta = tick - sc->sc_ka_recvtm;
1450 if (delta > 0 && delta < (hz / 10))
1451 goto drop;
1452
1453 SipHash24_Init(&ctx, &sc->sc_ka_key)SipHash_Init((&ctx), (&sc->sc_ka_key));
1454 SipHash24_Update(&ctx, &gk->gk_uptime, sizeof(gk->gk_uptime))SipHash_Update((&ctx), 2, 4, (&gk->gk_uptime), (sizeof
(gk->gk_uptime)))
;
1455 SipHash24_Update(&ctx, &gk->gk_random, sizeof(gk->gk_random))SipHash_Update((&ctx), 2, 4, (&gk->gk_random), (sizeof
(gk->gk_random)))
;
1456 SipHash24_Final(digest, &ctx)SipHash_Final((digest), (&ctx), 2, 4);
1457
1458 if (memcmp(digest, gk->gk_digest, sizeof(digest))__builtin_memcmp((digest), (gk->gk_digest), (sizeof(digest
)))
!= 0)
1459 goto drop;
1460
1461 sc->sc_ka_recvtm = tick;
1462
1463 switch (sc->sc_ka_state) {
1464 case GRE_KA_DOWN1:
1465 sc->sc_ka_state = GRE_KA_HOLD2;
1466 sc->sc_ka_holdcnt = sc->sc_ka_holdmax;
1467 sc->sc_ka_holdmax = MIN(sc->sc_ka_holdmax * 2,(((sc->sc_ka_holdmax * 2)<(16 * sc->sc_ka_count))?(sc
->sc_ka_holdmax * 2):(16 * sc->sc_ka_count))
1468 16 * sc->sc_ka_count)(((sc->sc_ka_holdmax * 2)<(16 * sc->sc_ka_count))?(sc
->sc_ka_holdmax * 2):(16 * sc->sc_ka_count))
;
1469 break;
1470 case GRE_KA_HOLD2:
1471 if (--sc->sc_ka_holdcnt > 0)
1472 break;
1473
1474 sc->sc_ka_state = GRE_KA_UP3;
1475 gre_link_state(&sc->sc_if, sc->sc_ka_state);
1476 break;
1477
1478 case GRE_KA_UP3:
1479 sc->sc_ka_holdmax--;
1480 sc->sc_ka_holdmax = MAX(sc->sc_ka_holdmax, sc->sc_ka_count)(((sc->sc_ka_holdmax)>(sc->sc_ka_count))?(sc->sc_ka_holdmax
):(sc->sc_ka_count))
;
1481 break;
1482 }
1483
1484 timeout_add_sec(&sc->sc_ka_hold, sc->sc_ka_timeo * sc->sc_ka_count);
1485
1486drop:
1487 m_freem(m);
1488}
1489
1490static int
1491gre_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
1492 struct rtentry *rt)
1493{
1494 struct m_tag *mtag;
1495 int error = 0;
1496
1497 if (!gre_allow) {
1498 error = EACCES13;
1499 goto drop;
1500 }
1501
1502 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
1503 error = ENETDOWN50;
1504 goto drop;
1505 }
1506
1507 switch (dst->sa_family) {
1508 case AF_INET2:
1509#ifdef INET61
1510 case AF_INET624:
1511#endif
1512#ifdef MPLS1
1513 case AF_MPLS33:
1514#endif
1515 break;
1516 default:
1517 error = EAFNOSUPPORT47;
1518 goto drop;
1519 }
1520
1521 /* Try to limit infinite recursion through misconfiguration. */
1522 for (mtag = m_tag_find(m, PACKET_TAG_GRE0x0080, NULL((void *)0)); mtag;
1523 mtag = m_tag_find(m, PACKET_TAG_GRE0x0080, mtag)) {
1524 if (memcmp((caddr_t)(mtag + 1), &ifp->if_index,__builtin_memcmp(((caddr_t)(mtag + 1)), (&ifp->if_index
), (sizeof(ifp->if_index)))
1525 sizeof(ifp->if_index))__builtin_memcmp(((caddr_t)(mtag + 1)), (&ifp->if_index
), (sizeof(ifp->if_index)))
== 0) {
1526 m_freem(m);
1527 error = EIO5;
1528 goto end;
1529 }
1530 }
1531
1532 mtag = m_tag_get(PACKET_TAG_GRE0x0080, sizeof(ifp->if_index), M_NOWAIT0x0002);
1533 if (mtag == NULL((void *)0)) {
1534 m_freem(m);
1535 error = ENOBUFS55;
1536 goto end;
1537 }
1538 memcpy((caddr_t)(mtag + 1), &ifp->if_index, sizeof(ifp->if_index))__builtin_memcpy(((caddr_t)(mtag + 1)), (&ifp->if_index
), (sizeof(ifp->if_index)))
;
1539 m_tag_prepend(m, mtag);
1540
1541 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = dst->sa_family;
1542
1543 error = if_enqueue(ifp, m);
1544end:
1545 if (error)
1546 ifp->if_oerrorsif_data.ifi_oerrors++;
1547 return (error);
1548
1549drop:
1550 m_freem(m);
1551 return (error);
1552}
1553
1554void
1555gre_start(struct ifnet *ifp)
1556{
1557 struct gre_softc *sc = ifp->if_softc;
1558 struct mbuf *m;
1559 int af;
1560#if NBPFILTER1 > 0
1561 caddr_t if_bpf;
1562#endif
1563
1564 while ((m = ifq_dequeue(&ifp->if_snd)) != NULL((void *)0)) {
1565 af = m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family;
1566
1567#if NBPFILTER1 > 0
1568 if_bpf = ifp->if_bpf;
1569 if (if_bpf)
1570 bpf_mtap_af(if_bpf, af, m, BPF_DIRECTION_OUT(1 << 1));
1571#endif
1572
1573 m = gre_l3_encap(&sc->sc_tunnel, m, af)gre_l3_encap_dst((&sc->sc_tunnel), &(&sc->sc_tunnel
)->t_dst, (m), (af))
;
1574 if (m == NULL((void *)0) || gre_ip_output(&sc->sc_tunnel, m) != 0) {
1575 ifp->if_oerrorsif_data.ifi_oerrors++;
1576 continue;
1577 }
1578 }
1579}
1580
1581void
1582mgre_rtrequest(struct ifnet *ifp, int req, struct rtentry *rt)
1583{
1584 struct ifnet *lo0ifp;
1585 struct ifaddr *ifa, *lo0ifa;
1586
1587 switch (req) {
1588 case RTM_ADD0x1:
1589 if (!ISSET(rt->rt_flags, RTF_LOCAL)((rt->rt_flags) & (0x200000)))
1590 break;
1591
1592 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa)
!= ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next))
{
1593 if (memcmp(rt_key(rt), ifa->ifa_addr,__builtin_memcmp((((rt)->rt_dest)), (ifa->ifa_addr), ((
(rt)->rt_dest)->sa_len))
1594 rt_key(rt)->sa_len)__builtin_memcmp((((rt)->rt_dest)), (ifa->ifa_addr), ((
(rt)->rt_dest)->sa_len))
== 0)
1595 break;
1596 }
1597
1598 if (ifa == NULL((void *)0))
1599 break;
1600
1601 KASSERT(ifa == rt->rt_ifa)((ifa == rt->rt_ifa) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if_gre.c"
, 1601, "ifa == rt->rt_ifa"))
;
1602
1603 lo0ifp = if_get(rtable_loindex(ifp->if_rdomainif_data.ifi_rdomain));
1604 KASSERT(lo0ifp != NULL)((lo0ifp != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if_gre.c"
, 1604, "lo0ifp != NULL"))
;
1605 TAILQ_FOREACH(lo0ifa, &lo0ifp->if_addrlist, ifa_list)for((lo0ifa) = ((&lo0ifp->if_addrlist)->tqh_first);
(lo0ifa) != ((void *)0); (lo0ifa) = ((lo0ifa)->ifa_list.tqe_next
))
{
1606 if (lo0ifa->ifa_addr->sa_family ==
1607 ifa->ifa_addr->sa_family)
1608 break;
1609 }
1610 if_put(lo0ifp);
1611
1612 if (lo0ifa == NULL((void *)0))
1613 break;
1614
1615 rt->rt_flags &= ~RTF_LLINFO0x400;
1616 break;
1617 case RTM_DELETE0x2:
1618 case RTM_RESOLVE0xb:
1619 default:
1620 break;
1621 }
1622}
1623
1624static int
1625mgre_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dest,
1626 struct rtentry *rt0)
1627{
1628 struct mgre_softc *sc = ifp->if_softc;
1629 struct sockaddr *gate;
1630 struct rtentry *rt;
1631 struct m_tag *mtag;
1632 int error = 0;
1633 sa_family_t af;
1634 const void *addr;
1635
1636 if (!gre_allow) {
1637 error = EACCES13;
1638 goto drop;
1639 }
1640
1641 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
1642 error = ENETDOWN50;
1643 goto drop;
1644 }
1645
1646 switch (dest->sa_family) {
1647 case AF_INET2:
1648#ifdef INET61
1649 case AF_INET624:
1650#endif
1651#ifdef MPLS1
1652 case AF_MPLS33:
1653#endif
1654 break;
1655 default:
1656 error = EAFNOSUPPORT47;
1657 goto drop;
1658 }
1659
1660 if (ISSET(m->m_flags, M_MCAST|M_BCAST)((m->m_hdr.mh_flags) & (0x0200|0x0100))) {
1661 error = ENETUNREACH51;
1662 goto drop;
1663 }
1664
1665 rt = rt_getll(rt0);
1666
1667 /* check rt_expire? */
1668 if (ISSET(rt->rt_flags, RTF_REJECT)((rt->rt_flags) & (0x8))) {
1669 error = (rt == rt0) ? EHOSTDOWN64 : EHOSTUNREACH65;
1670 goto drop;
1671 }
1672 if (!ISSET(rt->rt_flags, RTF_HOST)((rt->rt_flags) & (0x4))) {
1673 error = EHOSTUNREACH65;
1674 goto drop;
1675 }
1676 if (ISSET(rt->rt_flags, RTF_GATEWAY)((rt->rt_flags) & (0x2))) {
1677 error = EINVAL22;
1678 goto drop;
1679 }
1680
1681 gate = rt->rt_gateway;
1682 af = gate->sa_family;
1683 if (af != sc->sc_tunnel.t_af) {
1684 error = EAGAIN35;
1685 goto drop;
1686 }
1687
1688 /* Try to limit infinite recursion through misconfiguration. */
1689 for (mtag = m_tag_find(m, PACKET_TAG_GRE0x0080, NULL((void *)0)); mtag;
1690 mtag = m_tag_find(m, PACKET_TAG_GRE0x0080, mtag)) {
1691 if (memcmp((caddr_t)(mtag + 1), &ifp->if_index,__builtin_memcmp(((caddr_t)(mtag + 1)), (&ifp->if_index
), (sizeof(ifp->if_index)))
1692 sizeof(ifp->if_index))__builtin_memcmp(((caddr_t)(mtag + 1)), (&ifp->if_index
), (sizeof(ifp->if_index)))
== 0) {
1693 error = EIO5;
1694 goto drop;
1695 }
1696 }
1697
1698 mtag = m_tag_get(PACKET_TAG_GRE0x0080, sizeof(ifp->if_index), M_NOWAIT0x0002);
1699 if (mtag == NULL((void *)0)) {
1700 error = ENOBUFS55;
1701 goto drop;
1702 }
1703 memcpy((caddr_t)(mtag + 1), &ifp->if_index, sizeof(ifp->if_index))__builtin_memcpy(((caddr_t)(mtag + 1)), (&ifp->if_index
), (sizeof(ifp->if_index)))
;
1704 m_tag_prepend(m, mtag);
1705
1706 switch (af) {
1707 case AF_INET2: {
1708 struct sockaddr_in *sin = (struct sockaddr_in *)gate;
1709 addr = &sin->sin_addr;
1710 break;
1711 }
1712#ifdef INET61
1713 case AF_INET624: {
1714 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)gate;
1715 addr = &sin6->sin6_addr;
1716 break;
1717 }
1718 #endif
1719 default:
1720 unhandled_af(af);
1721 /* NOTREACHED */
1722 }
1723
1724 m = gre_l3_encap_dst(&sc->sc_tunnel, addr, m, dest->sa_family);
1725 if (m == NULL((void *)0)) {
1726 ifp->if_oerrorsif_data.ifi_oerrors++;
1727 return (ENOBUFS55);
1728 }
1729
1730 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = dest->sa_family;
1731
1732 error = if_enqueue(ifp, m);
1733 if (error)
1734 ifp->if_oerrorsif_data.ifi_oerrors++;
1735 return (error);
1736
1737drop:
1738 m_freem(m);
1739 return (error);
1740}
1741
1742static void
1743mgre_start(struct ifnet *ifp)
1744{
1745 struct mgre_softc *sc = ifp->if_softc;
1746 struct mbuf *m;
1747#if NBPFILTER1 > 0
1748 caddr_t if_bpf;
1749#endif
1750
1751 while ((m = ifq_dequeue(&ifp->if_snd)) != NULL((void *)0)) {
1752#if NBPFILTER1 > 0
1753 if_bpf = ifp->if_bpf;
1754 if (if_bpf) {
1755 struct m_hdr mh;
1756 struct mbuf *n;
1757 int off;
1758
1759 n = m_getptr(m, ifp->if_hdrlenif_data.ifi_hdrlen, &off);
1760 KASSERT(n != NULL)((n != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if_gre.c"
, 1760, "n != NULL"))
;
1761
1762 mh.mh_flags = 0;
1763 mh.mh_next = n->m_nextm_hdr.mh_next;
1764 mh.mh_len = n->m_lenm_hdr.mh_len - off;
1765 mh.mh_data = n->m_datam_hdr.mh_data + off;
1766
1767 bpf_mtap_af(if_bpf, m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family,
1768 (struct mbuf *)&mh, BPF_DIRECTION_OUT(1 << 1));
1769 }
1770#endif
1771
1772 if (gre_ip_output(&sc->sc_tunnel, m) != 0) {
1773 ifp->if_oerrorsif_data.ifi_oerrors++;
1774 continue;
1775 }
1776 }
1777}
1778
1779static void
1780egre_start(struct ifnet *ifp)
1781{
1782 struct egre_softc *sc = ifp->if_softc;
1783 struct mbuf *m0, *m;
1784#if NBPFILTER1 > 0
1785 caddr_t if_bpf;
1786#endif
1787
1788 if (!gre_allow) {
1789 ifq_purge(&ifp->if_snd);
1790 return;
1791 }
1792
1793 while ((m0 = ifq_dequeue(&ifp->if_snd)) != NULL((void *)0)) {
1794#if NBPFILTER1 > 0
1795 if_bpf = ifp->if_bpf;
1796 if (if_bpf)
1797 bpf_mtap_ether(if_bpf, m0, BPF_DIRECTION_OUT(1 << 1));
1798#endif
1799
1800 /* force prepend mbuf because of alignment problems */
1801 m = m_get(M_DONTWAIT0x0002, m0->m_typem_hdr.mh_type);
1802 if (m == NULL((void *)0)) {
1803 m_freem(m0);
1804 continue;
1805 }
1806
1807 M_MOVE_PKTHDR(m, m0)do { (m)->m_hdr.mh_flags = ((m)->m_hdr.mh_flags & (
0x0001 | 0x0008)); (m)->m_hdr.mh_flags |= (m0)->m_hdr.mh_flags
& (0x0002|0x0004|0x0010|0x0100|0x0200|0x0400|0x4000| 0x0800
|0x0040|0x1000|0x8000|0x0020|0x2000); do { ((m))->M_dat.MH
.MH_pkthdr = ((m0))->M_dat.MH.MH_pkthdr; ((m0))->m_hdr.
mh_flags &= ~0x0002; { ((&((m0))->M_dat.MH.MH_pkthdr
.ph_tags)->slh_first) = ((void *)0); }; ((m0))->M_dat.MH
.MH_pkthdr.pf.statekey = ((void *)0); } while ( 0); if (((m)->
m_hdr.mh_flags & 0x0001) == 0) (m)->m_hdr.mh_data = (m
)->M_dat.MH.MH_dat.MH_databuf; } while ( 0)
;
1808 m->m_nextm_hdr.mh_next = m0;
1809
1810 m_align(m, 0);
1811 m->m_lenm_hdr.mh_len = 0;
1812
1813 m = gre_encap(&sc->sc_tunnel, m, htons(ETHERTYPE_TRANSETHER),gre_encap_dst((&sc->sc_tunnel), &(&sc->sc_tunnel
)->t_dst, (m), ((__uint16_t)(__builtin_constant_p(0x6558) ?
(__uint16_t)(((__uint16_t)(0x6558) & 0xffU) << 8 |
((__uint16_t)(0x6558) & 0xff00U) >> 8) : __swap16md
(0x6558))), (sc->sc_tunnel.t_ttl), (gre_l2_tos(&sc->
sc_tunnel, m)))
1814 sc->sc_tunnel.t_ttl, gre_l2_tos(&sc->sc_tunnel, m))gre_encap_dst((&sc->sc_tunnel), &(&sc->sc_tunnel
)->t_dst, (m), ((__uint16_t)(__builtin_constant_p(0x6558) ?
(__uint16_t)(((__uint16_t)(0x6558) & 0xffU) << 8 |
((__uint16_t)(0x6558) & 0xff00U) >> 8) : __swap16md
(0x6558))), (sc->sc_tunnel.t_ttl), (gre_l2_tos(&sc->
sc_tunnel, m)))
;
1815 if (m == NULL((void *)0) || gre_ip_output(&sc->sc_tunnel, m) != 0) {
1816 ifp->if_oerrorsif_data.ifi_oerrors++;
1817 continue;
1818 }
1819 }
1820}
1821
1822static struct mbuf *
1823gre_l3_encap_dst(const struct gre_tunnel *tunnel, const void *dst,
1824 struct mbuf *m, sa_family_t af)
1825{
1826 uint16_t proto;
1827 uint8_t ttl, itos, otos;
1828 int tttl = tunnel->t_ttl;
1829 int ttloff;
1830
1831 switch (af) {
1832 case AF_INET2: {
1833 struct ip *ip;
1834
1835 m = m_pullup(m, sizeof(*ip));
1836 if (m == NULL((void *)0))
1837 return (NULL((void *)0));
1838
1839 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
1840 itos = ip->ip_tos;
1841
1842 ttloff = offsetof(struct ip, ip_ttl)__builtin_offsetof(struct ip, ip_ttl);
1843 proto = htons(ETHERTYPE_IP)(__uint16_t)(__builtin_constant_p(0x0800) ? (__uint16_t)(((__uint16_t
)(0x0800) & 0xffU) << 8 | ((__uint16_t)(0x0800) &
0xff00U) >> 8) : __swap16md(0x0800))
;
1844 break;
1845 }
1846#ifdef INET61
1847 case AF_INET624: {
1848 struct ip6_hdr *ip6;
1849
1850 m = m_pullup(m, sizeof(*ip6));
1851 if (m == NULL((void *)0))
1852 return (NULL((void *)0));
1853
1854 ip6 = mtod(m, struct ip6_hdr *)((struct ip6_hdr *)((m)->m_hdr.mh_data));
1855 itos = (ntohl(ip6->ip6_flow)(__uint32_t)(__builtin_constant_p(ip6->ip6_ctlun.ip6_un1.ip6_un1_flow
) ? (__uint32_t)(((__uint32_t)(ip6->ip6_ctlun.ip6_un1.ip6_un1_flow
) & 0xff) << 24 | ((__uint32_t)(ip6->ip6_ctlun.ip6_un1
.ip6_un1_flow) & 0xff00) << 8 | ((__uint32_t)(ip6->
ip6_ctlun.ip6_un1.ip6_un1_flow) & 0xff0000) >> 8 | (
(__uint32_t)(ip6->ip6_ctlun.ip6_un1.ip6_un1_flow) & 0xff000000
) >> 24) : __swap32md(ip6->ip6_ctlun.ip6_un1.ip6_un1_flow
))
& 0x0ff00000) >> 20;
1856
1857 ttloff = offsetof(struct ip6_hdr, ip6_hlim)__builtin_offsetof(struct ip6_hdr, ip6_ctlun.ip6_un1.ip6_un1_hlim
)
;
1858 proto = htons(ETHERTYPE_IPV6)(__uint16_t)(__builtin_constant_p(0x86DD) ? (__uint16_t)(((__uint16_t
)(0x86DD) & 0xffU) << 8 | ((__uint16_t)(0x86DD) &
0xff00U) >> 8) : __swap16md(0x86DD))
;
1859 break;
1860 }
1861 #endif
1862#ifdef MPLS1
1863 case AF_MPLS33: {
1864 uint32_t shim;
1865
1866 m = m_pullup(m, sizeof(shim));
1867 if (m == NULL((void *)0))
1868 return (NULL((void *)0));
1869
1870 shim = bemtoh32(mtod(m, uint32_t *))(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(((uint32_t *
)((m)->m_hdr.mh_data)))) ? (__uint32_t)(((__uint32_t)(*(__uint32_t
*)(((uint32_t *)((m)->m_hdr.mh_data)))) & 0xff) <<
24 | ((__uint32_t)(*(__uint32_t *)(((uint32_t *)((m)->m_hdr
.mh_data)))) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t
*)(((uint32_t *)((m)->m_hdr.mh_data)))) & 0xff0000) >>
8 | ((__uint32_t)(*(__uint32_t *)(((uint32_t *)((m)->m_hdr
.mh_data)))) & 0xff000000) >> 24) : __swap32md(*(__uint32_t
*)(((uint32_t *)((m)->m_hdr.mh_data)))))
& MPLS_EXP_MASK((u_int32_t)(__uint32_t)(__builtin_constant_p((u_int32_t)(0x00000e00U
)) ? (__uint32_t)(((__uint32_t)((u_int32_t)(0x00000e00U)) &
0xff) << 24 | ((__uint32_t)((u_int32_t)(0x00000e00U)) &
0xff00) << 8 | ((__uint32_t)((u_int32_t)(0x00000e00U))
& 0xff0000) >> 8 | ((__uint32_t)((u_int32_t)(0x00000e00U
)) & 0xff000000) >> 24) : __swap32md((u_int32_t)(0x00000e00U
))))
;
1871 itos = (shim >> MPLS_EXP_OFFSET9) << 5;
1872
1873 ttloff = 3;
1874
1875 if (m->m_flagsm_hdr.mh_flags & (M_BCAST0x0100 | M_MCAST0x0200))
1876 proto = htons(ETHERTYPE_MPLS_MCAST)(__uint16_t)(__builtin_constant_p(0x8848) ? (__uint16_t)(((__uint16_t
)(0x8848) & 0xffU) << 8 | ((__uint16_t)(0x8848) &
0xff00U) >> 8) : __swap16md(0x8848))
;
1877 else
1878 proto = htons(ETHERTYPE_MPLS)(__uint16_t)(__builtin_constant_p(0x8847) ? (__uint16_t)(((__uint16_t
)(0x8847) & 0xffU) << 8 | ((__uint16_t)(0x8847) &
0xff00U) >> 8) : __swap16md(0x8847))
;
1879 break;
1880 }
1881#endif
1882 default:
1883 unhandled_af(af);
1884 }
1885
1886 if (tttl == -1) {
1887 KASSERT(m->m_len > ttloff)((m->m_hdr.mh_len > ttloff) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/net/if_gre.c", 1887, "m->m_len > ttloff"
))
; /* m_pullup has happened */
1888
1889 ttl = *(m->m_datam_hdr.mh_data + ttloff);
1890 } else
1891 ttl = tttl;
1892
1893 itos = gre_l3_tos(tunnel, m, itos);
1894 ip_ecn_ingress(tunnel->t_ecn, &otos, &itos);
1895
1896 return (gre_encap_dst(tunnel, dst, m, proto, ttl, otos));
1897}
1898
1899static struct mbuf *
1900gre_encap_dst(const struct gre_tunnel *tunnel, const union gre_addr *dst,
1901 struct mbuf *m, uint16_t proto, uint8_t ttl, uint8_t tos)
1902{
1903 struct gre_header *gh;
1904 struct gre_h_key *gkh;
1905 int hlen;
1906
1907 hlen = sizeof(*gh);
1908 if (tunnel->t_key_mask != GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
)
1909 hlen += sizeof(*gkh);
1910
1911 m = m_prepend(m, hlen, M_DONTWAIT0x0002);
1912 if (m == NULL((void *)0))
1913 return (NULL((void *)0));
1914
1915 gh = mtod(m, struct gre_header *)((struct gre_header *)((m)->m_hdr.mh_data));
1916 gh->gre_flags = GRE_VERS_00x0000;
1917 gh->gre_proto = proto;
1918 if (tunnel->t_key_mask != GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
) {
1919 gh->gre_flags |= htons(GRE_KP)(__uint16_t)(__builtin_constant_p(0x2000) ? (__uint16_t)(((__uint16_t
)(0x2000) & 0xffU) << 8 | ((__uint16_t)(0x2000) &
0xff00U) >> 8) : __swap16md(0x2000))
;
1920
1921 gkh = (struct gre_h_key *)(gh + 1);
1922 gkh->gre_key = tunnel->t_key;
1923
1924 if (tunnel->t_key_mask == GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
&&
1925 ISSET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) & (0x4000))) {
1926 gkh->gre_key |= htonl(~GRE_KEY_ENTROPY &(__uint32_t)(__builtin_constant_p(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) ? (__uint32_t)(((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff) << 24 | ((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff00) << 8 | ((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff0000) >> 8 | ((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff000000) >> 24) : __swap32md(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
))
1927 m->m_pkthdr.ph_flowid)(__uint32_t)(__builtin_constant_p(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) ? (__uint32_t)(((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff) << 24 | ((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff00) << 8 | ((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff0000) >> 8 | ((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff000000) >> 24) : __swap32md(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
))
;
1928 }
1929 }
1930
1931 return (gre_encap_dst_ip(tunnel, dst, m, ttl, tos));
1932}
1933
1934static struct mbuf *
1935gre_encap_dst_ip(const struct gre_tunnel *tunnel, const union gre_addr *dst,
1936 struct mbuf *m, uint8_t ttl, uint8_t tos)
1937{
1938 switch (tunnel->t_af) {
1939 case AF_UNSPEC0:
1940 /* packets may arrive before tunnel is set up */
1941 m_freem(m);
1942 return (NULL((void *)0));
1943 case AF_INET2: {
1944 struct ip *ip;
1945
1946 m = m_prepend(m, sizeof(*ip), M_DONTWAIT0x0002);
1947 if (m == NULL((void *)0))
1948 return (NULL((void *)0));
1949
1950 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
1951 ip->ip_v = IPVERSION4;
1952 ip->ip_hl = sizeof(*ip) >> 2;
1953 ip->ip_off = tunnel->t_df;
1954 ip->ip_tos = tos;
1955 ip->ip_len = htons(m->m_pkthdr.len)(__uint16_t)(__builtin_constant_p(m->M_dat.MH.MH_pkthdr.len
) ? (__uint16_t)(((__uint16_t)(m->M_dat.MH.MH_pkthdr.len) &
0xffU) << 8 | ((__uint16_t)(m->M_dat.MH.MH_pkthdr.len
) & 0xff00U) >> 8) : __swap16md(m->M_dat.MH.MH_pkthdr
.len))
;
1956 ip->ip_ttl = ttl;
1957 ip->ip_p = IPPROTO_GRE47;
1958 ip->ip_src = tunnel->t_src4t_src.in4;
1959 ip->ip_dst = dst->in4;
1960 break;
1961 }
1962#ifdef INET61
1963 case AF_INET624: {
1964 struct ip6_hdr *ip6;
1965 int len = m->m_pkthdrM_dat.MH.MH_pkthdr.len;
1966
1967 m = m_prepend(m, sizeof(*ip6), M_DONTWAIT0x0002);
1968 if (m == NULL((void *)0))
1969 return (NULL((void *)0));
1970
1971 ip6 = mtod(m, struct ip6_hdr *)((struct ip6_hdr *)((m)->m_hdr.mh_data));
1972 ip6->ip6_flowip6_ctlun.ip6_un1.ip6_un1_flow = ISSET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) & (0x4000)) ?
1973 htonl(m->m_pkthdr.ph_flowid)(__uint32_t)(__builtin_constant_p(m->M_dat.MH.MH_pkthdr.ph_flowid
) ? (__uint32_t)(((__uint32_t)(m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff) << 24 | ((__uint32_t)(m->M_dat.MH.MH_pkthdr
.ph_flowid) & 0xff00) << 8 | ((__uint32_t)(m->M_dat
.MH.MH_pkthdr.ph_flowid) & 0xff0000) >> 8 | ((__uint32_t
)(m->M_dat.MH.MH_pkthdr.ph_flowid) & 0xff000000) >>
24) : __swap32md(m->M_dat.MH.MH_pkthdr.ph_flowid))
: 0;
1974 ip6->ip6_vfcip6_ctlun.ip6_un2_vfc |= IPV6_VERSION0x60;
1975 ip6->ip6_flowip6_ctlun.ip6_un1.ip6_un1_flow |= htonl((uint32_t)tos << 20)(__uint32_t)(__builtin_constant_p((uint32_t)tos << 20) ?
(__uint32_t)(((__uint32_t)((uint32_t)tos << 20) & 0xff
) << 24 | ((__uint32_t)((uint32_t)tos << 20) &
0xff00) << 8 | ((__uint32_t)((uint32_t)tos << 20
) & 0xff0000) >> 8 | ((__uint32_t)((uint32_t)tos <<
20) & 0xff000000) >> 24) : __swap32md((uint32_t)tos
<< 20))
;
1976 ip6->ip6_plenip6_ctlun.ip6_un1.ip6_un1_plen = htons(len)(__uint16_t)(__builtin_constant_p(len) ? (__uint16_t)(((__uint16_t
)(len) & 0xffU) << 8 | ((__uint16_t)(len) & 0xff00U
) >> 8) : __swap16md(len))
;
1977 ip6->ip6_nxtip6_ctlun.ip6_un1.ip6_un1_nxt = IPPROTO_GRE47;
1978 ip6->ip6_hlimip6_ctlun.ip6_un1.ip6_un1_hlim = ttl;
1979 ip6->ip6_src = tunnel->t_src6t_src.in6;
1980 ip6->ip6_dst = dst->in6;
1981
1982 if (tunnel->t_df)
1983 SET(m->m_pkthdr.csum_flags, M_IPV6_DF_OUT)((m->M_dat.MH.MH_pkthdr.csum_flags) |= (0x1000));
1984
1985 break;
1986 }
1987#endif /* INET6 */
1988 default:
1989 unhandled_af(tunnel->t_af);
1990 }
1991
1992 return (m);
1993}
1994
1995static int
1996gre_ip_output(const struct gre_tunnel *tunnel, struct mbuf *m)
1997{
1998 m->m_flagsm_hdr.mh_flags &= ~(M_BCAST0x0100|M_MCAST0x0200);
1999 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = tunnel->t_rtableid;
2000
2001#if NPF1 > 0
2002 pf_pkt_addr_changed(m);
2003#endif
2004
2005 switch (tunnel->t_af) {
2006 case AF_INET2:
2007 ip_send(m);
2008 break;
2009#ifdef INET61
2010 case AF_INET624:
2011 ip6_send(m);
2012 break;
2013#endif
2014 default:
2015 unhandled_af(tunnel->t_af);
2016 }
2017
2018 return (0);
2019}
2020
2021static int
2022gre_tunnel_ioctl(struct ifnet *ifp, struct gre_tunnel *tunnel,
2023 u_long cmd, void *data)
2024{
2025 struct ifreq *ifr = (struct ifreq *)data;
2026 int error = 0;
2027
2028 switch(cmd) {
2029 case SIOCSIFMTU((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((127)))
:
2030 if (ifr->ifr_mtuifr_ifru.ifru_metric < 576) {
2031 error = EINVAL22;
2032 break;
2033 }
2034 ifp->if_mtuif_data.ifi_mtu = ifr->ifr_mtuifr_ifru.ifru_metric;
2035 break;
2036 case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((49)))
:
2037 case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((50)))
:
2038 break;
2039
2040 case SIOCSVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((166)))
:
2041 error = gre_set_vnetid(tunnel, ifr);
2042 break;
2043
2044 case SIOCGVNETID(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((167)))
:
2045 error = gre_get_vnetid(tunnel, ifr);
2046 break;
2047 case SIOCDVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((175)))
:
2048 error = gre_del_vnetid(tunnel);
2049 break;
2050
2051 case SIOCSVNETFLOWID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((195)))
:
2052 error = gre_set_vnetflowid(tunnel, ifr);
2053 break;
2054
2055 case SIOCGVNETFLOWID(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((196)))
:
2056 error = gre_get_vnetflowid(tunnel, ifr);
2057 break;
2058
2059 case SIOCSLIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) &
0x1fff) << 16) | ((('i')) << 8) | ((74)))
:
2060 error = gre_set_tunnel(tunnel, (struct if_laddrreq *)data, 1);
2061 break;
2062 case SIOCGLIFPHYADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_laddrreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((75)))
:
2063 error = gre_get_tunnel(tunnel, (struct if_laddrreq *)data);
2064 break;
2065 case SIOCDIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((73)))
:
2066 error = gre_del_tunnel(tunnel);
2067 break;
2068
2069 case SIOCSLIFPHYRTABLE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((161)))
:
2070 if (ifr->ifr_rdomainidifr_ifru.ifru_metric < 0 ||
2071 ifr->ifr_rdomainidifr_ifru.ifru_metric > RT_TABLEID_MAX255 ||
2072 !rtable_exists(ifr->ifr_rdomainidifr_ifru.ifru_metric)) {
2073 error = EINVAL22;
2074 break;
2075 }
2076 tunnel->t_rtableid = ifr->ifr_rdomainidifr_ifru.ifru_metric;
2077 break;
2078 case SIOCGLIFPHYRTABLE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((162)))
:
2079 ifr->ifr_rdomainidifr_ifru.ifru_metric = tunnel->t_rtableid;
2080 break;
2081
2082 case SIOCSLIFPHYDF((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((193)))
:
2083 /* commit */
2084 tunnel->t_df = ifr->ifr_dfifr_ifru.ifru_metric ? htons(IP_DF)(__uint16_t)(__builtin_constant_p(0x4000) ? (__uint16_t)(((__uint16_t
)(0x4000) & 0xffU) << 8 | ((__uint16_t)(0x4000) &
0xff00U) >> 8) : __swap16md(0x4000))
: htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
2085 break;
2086 case SIOCGLIFPHYDF(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((194)))
:
2087 ifr->ifr_dfifr_ifru.ifru_metric = tunnel->t_df ? 1 : 0;
2088 break;
2089
2090 default:
2091 error = ENOTTY25;
2092 break;
2093 }
2094
2095 return (error);
2096}
2097
2098static uint8_t
2099gre_l2_tos(const struct gre_tunnel *t, const struct mbuf *m)
2100{
2101 uint8_t prio;
2102
2103 switch (t->t_txhprio) {
2104 case IF_HDRPRIO_PACKET-1:
2105 prio = m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio;
2106 break;
2107 default:
2108 prio = t->t_txhprio;
2109 break;
2110 }
2111
2112 return (IFQ_PRIO2TOS(prio)((prio) << 5));
2113}
2114
2115static uint8_t
2116gre_l3_tos(const struct gre_tunnel *t, const struct mbuf *m, uint8_t tos)
2117{
2118 uint8_t prio;
2119
2120 switch (t->t_txhprio) {
2121 case IF_HDRPRIO_PAYLOAD-2:
2122 return (tos);
2123 case IF_HDRPRIO_PACKET-1:
2124 prio = m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio;
2125 break;
2126 default:
2127 prio = t->t_txhprio;
2128 break;
2129 }
2130
2131 return (IFQ_PRIO2TOS(prio)((prio) << 5) | (tos & IPTOS_ECN_MASK0x03));
2132}
2133
2134static int
2135gre_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2136{
2137 struct gre_softc *sc = ifp->if_softc;
2138 struct ifreq *ifr = (struct ifreq *)data;
2139 struct ifkalivereq *ikar = (struct ifkalivereq *)data;
2140 int error = 0;
2141
2142 switch(cmd) {
2143 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
2144 ifp->if_flags |= IFF_UP0x1;
2145 /* FALLTHROUGH */
2146 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
2147 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) {
2148 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2149 error = gre_up(sc);
2150 else
2151 error = 0;
2152 } else {
2153 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2154 error = gre_down(sc);
2155 }
2156 break;
2157 case SIOCSIFRDOMAIN((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((159)))
:
2158 /* let if_rdomain do its thing */
2159 error = ENOTTY25;
2160 break;
2161
2162 case SIOCSETKALIVE((unsigned long)0x80000000 | ((sizeof(struct ifkalivereq) &
0x1fff) << 16) | ((('i')) << 8) | ((163)))
:
2163 if (ikar->ikar_timeo < 0 || ikar->ikar_timeo > 86400 ||
2164 ikar->ikar_cnt < 0 || ikar->ikar_cnt > 256 ||
2165 (ikar->ikar_timeo == 0) != (ikar->ikar_cnt == 0))
2166 return (EINVAL22);
2167
2168 if (ikar->ikar_timeo == 0 || ikar->ikar_cnt == 0) {
2169 sc->sc_ka_count = 0;
2170 sc->sc_ka_timeo = 0;
2171 sc->sc_ka_state = GRE_KA_NONE0;
2172 } else {
2173 sc->sc_ka_count = ikar->ikar_cnt;
2174 sc->sc_ka_timeo = ikar->ikar_timeo;
2175 sc->sc_ka_state = GRE_KA_DOWN1;
2176
2177 arc4random_buf(&sc->sc_ka_key, sizeof(sc->sc_ka_key));
2178 sc->sc_ka_bias = arc4random();
2179 sc->sc_ka_holdmax = sc->sc_ka_count;
2180
2181 sc->sc_ka_recvtm = ticks - hz;
2182 timeout_add(&sc->sc_ka_send, 1);
2183 timeout_add_sec(&sc->sc_ka_hold,
2184 sc->sc_ka_timeo * sc->sc_ka_count);
2185 }
2186 break;
2187
2188 case SIOCGETKALIVE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifkalivereq) & 0x1fff) << 16) | ((('i')) <<
8) | ((164)))
:
2189 ikar->ikar_cnt = sc->sc_ka_count;
2190 ikar->ikar_timeo = sc->sc_ka_timeo;
2191 break;
2192
2193 case SIOCSLIFPHYTTL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((168)))
:
2194 if (ifr->ifr_ttlifr_ifru.ifru_metric != -1 &&
2195 (ifr->ifr_ttlifr_ifru.ifru_metric < 1 || ifr->ifr_ttlifr_ifru.ifru_metric > 0xff)) {
2196 error = EINVAL22;
2197 break;
2198 }
2199
2200 /* commit */
2201 sc->sc_tunnel.t_ttl = ifr->ifr_ttlifr_ifru.ifru_metric;
2202 break;
2203
2204 case SIOCGLIFPHYTTL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((169)))
:
2205 ifr->ifr_ttlifr_ifru.ifru_metric = sc->sc_tunnel.t_ttl;
2206 break;
2207
2208 case SIOCSLIFPHYECN((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((199)))
:
2209 sc->sc_tunnel.t_ecn =
2210 ifr->ifr_metricifr_ifru.ifru_metric ? ECN_ALLOWED1 : ECN_FORBIDDEN0;
2211 break;
2212 case SIOCGLIFPHYECN(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((200)))
:
2213 ifr->ifr_metricifr_ifru.ifru_metric = (sc->sc_tunnel.t_ecn == ECN_ALLOWED1);
2214 break;
2215
2216 case SIOCSTXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((197)))
:
2217 error = if_txhprio_l3_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2218 if (error != 0)
2219 break;
2220
2221 sc->sc_tunnel.t_txhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2222 break;
2223 case SIOCGTXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((198)))
:
2224 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_txhprio;
2225 break;
2226
2227 case SIOCSRXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((219)))
:
2228 error = if_rxhprio_l3_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2229 if (error != 0)
2230 break;
2231
2232 sc->sc_tunnel.t_rxhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2233 break;
2234 case SIOCGRXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((219)))
:
2235 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_rxhprio;
2236 break;
2237
2238 default:
2239 error = gre_tunnel_ioctl(ifp, &sc->sc_tunnel, cmd, data);
2240 break;
2241 }
2242
2243 return (error);
2244}
2245
2246static int
2247mgre_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2248{
2249 struct mgre_softc *sc = ifp->if_softc;
2250 struct ifreq *ifr = (struct ifreq *)data;
2251 int error = 0;
2252
2253 switch(cmd) {
2254 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
2255 break;
2256 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
2257 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) {
2258 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2259 error = mgre_up(sc);
2260 else
2261 error = 0;
2262 } else {
2263 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2264 error = mgre_down(sc);
2265 }
2266 break;
2267
2268 case SIOCSLIFPHYTTL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((168)))
:
2269 if (ifr->ifr_ttlifr_ifru.ifru_metric != -1 &&
2270 (ifr->ifr_ttlifr_ifru.ifru_metric < 1 || ifr->ifr_ttlifr_ifru.ifru_metric > 0xff)) {
2271 error = EINVAL22;
2272 break;
2273 }
2274
2275 /* commit */
2276 sc->sc_tunnel.t_ttl = ifr->ifr_ttlifr_ifru.ifru_metric;
2277 break;
2278
2279 case SIOCGLIFPHYTTL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((169)))
:
2280 ifr->ifr_ttlifr_ifru.ifru_metric = sc->sc_tunnel.t_ttl;
2281 break;
2282
2283 case SIOCSLIFPHYECN((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((199)))
:
2284 sc->sc_tunnel.t_ecn =
2285 ifr->ifr_metricifr_ifru.ifru_metric ? ECN_ALLOWED1 : ECN_FORBIDDEN0;
2286 break;
2287 case SIOCGLIFPHYECN(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((200)))
:
2288 ifr->ifr_metricifr_ifru.ifru_metric = (sc->sc_tunnel.t_ecn == ECN_ALLOWED1);
2289 break;
2290
2291 case SIOCSLIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) &
0x1fff) << 16) | ((('i')) << 8) | ((74)))
:
2292 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2293 error = EBUSY16;
2294 break;
2295 }
2296 error = mgre_set_tunnel(sc, (struct if_laddrreq *)data);
2297 break;
2298 case SIOCGLIFPHYADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_laddrreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((75)))
:
2299 error = mgre_get_tunnel(sc, (struct if_laddrreq *)data);
2300 break;
2301
2302 case SIOCSTXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((197)))
:
2303 error = if_txhprio_l3_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2304 if (error != 0)
2305 break;
2306
2307 sc->sc_tunnel.t_txhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2308 break;
2309 case SIOCGTXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((198)))
:
2310 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_txhprio;
2311 break;
2312
2313 case SIOCSRXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((219)))
:
2314 error = if_rxhprio_l3_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2315 if (error != 0)
2316 break;
2317
2318 sc->sc_tunnel.t_rxhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2319 break;
2320 case SIOCGRXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((219)))
:
2321 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_rxhprio;
2322 break;
2323
2324 case SIOCSVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((166)))
:
2325 case SIOCDVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((175)))
:
2326 case SIOCDIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((73)))
:
2327 case SIOCSLIFPHYRTABLE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((161)))
:
2328 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2329 error = EBUSY16;
2330 break;
2331 }
2332
2333 /* FALLTHROUGH */
2334 default:
2335 error = gre_tunnel_ioctl(ifp, &sc->sc_tunnel, cmd, data);
2336 break;
2337 }
2338
2339 return (error);
2340}
2341
2342static int
2343mgre_set_tunnel(struct mgre_softc *sc, struct if_laddrreq *req)
2344{
2345 struct gre_tunnel *tunnel = &sc->sc_tunnel;
2346 struct sockaddr *addr = (struct sockaddr *)&req->addr;
2347 struct sockaddr *dstaddr = (struct sockaddr *)&req->dstaddr;
2348 struct sockaddr_in *addr4;
2349#ifdef INET61
2350 struct sockaddr_in6 *addr6;
2351 int error;
2352#endif
2353
2354 if (dstaddr->sa_family != AF_UNSPEC0)
2355 return (EINVAL22);
2356
2357 /* validate */
2358 switch (addr->sa_family) {
2359 case AF_INET2:
2360 if (addr->sa_len != sizeof(*addr4))
2361 return (EINVAL22);
2362
2363 addr4 = (struct sockaddr_in *)addr;
2364 if (in_nullhost(addr4->sin_addr)((addr4->sin_addr).s_addr == ((u_int32_t) (__uint32_t)(__builtin_constant_p
((u_int32_t)(0x00000000)) ? (__uint32_t)(((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff) << 24 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff00) << 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff0000) >> 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff000000) >> 24) : __swap32md((u_int32_t
)(0x00000000)))))
||
2365 IN_MULTICAST(addr4->sin_addr.s_addr)(((u_int32_t)(addr4->sin_addr.s_addr) & ((u_int32_t) (
__uint32_t)(__builtin_constant_p((u_int32_t)(0xf0000000)) ? (
__uint32_t)(((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff
) << 24 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff00
) << 8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff000000
) >> 24) : __swap32md((u_int32_t)(0xf0000000))))) == ((
u_int32_t) (__uint32_t)(__builtin_constant_p((u_int32_t)(0xe0000000
)) ? (__uint32_t)(((__uint32_t)((u_int32_t)(0xe0000000)) &
0xff) << 24 | ((__uint32_t)((u_int32_t)(0xe0000000)) &
0xff00) << 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) &
0xff0000) >> 8 | ((__uint32_t)((u_int32_t)(0xe0000000)
) & 0xff000000) >> 24) : __swap32md((u_int32_t)(0xe0000000
)))))
)
2366 return (EINVAL22);
2367
2368 tunnel->t_src4t_src.in4 = addr4->sin_addr;
2369 tunnel->t_dst4t_dst.in4.s_addr = INADDR_ANY((u_int32_t) (__uint32_t)(__builtin_constant_p((u_int32_t)(0x00000000
)) ? (__uint32_t)(((__uint32_t)((u_int32_t)(0x00000000)) &
0xff) << 24 | ((__uint32_t)((u_int32_t)(0x00000000)) &
0xff00) << 8 | ((__uint32_t)((u_int32_t)(0x00000000)) &
0xff0000) >> 8 | ((__uint32_t)((u_int32_t)(0x00000000)
) & 0xff000000) >> 24) : __swap32md((u_int32_t)(0x00000000
))))
;
2370
2371 break;
2372#ifdef INET61
2373 case AF_INET624:
2374 if (addr->sa_len != sizeof(*addr6))
2375 return (EINVAL22);
2376
2377 addr6 = (struct sockaddr_in6 *)addr;
2378 if (IN6_IS_ADDR_UNSPECIFIED(&addr6->sin6_addr)((*(const u_int32_t *)(const void *)(&(&addr6->sin6_addr
)->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t
*)(const void *)(&(&addr6->sin6_addr)->__u6_addr
.__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void
*)(&(&addr6->sin6_addr)->__u6_addr.__u6_addr8[
8]) == 0) && (*(const u_int32_t *)(const void *)(&
(&addr6->sin6_addr)->__u6_addr.__u6_addr8[12]) == 0
))
||
2379 IN6_IS_ADDR_MULTICAST(&addr6->sin6_addr)((&addr6->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff
)
)
2380 return (EINVAL22);
2381
2382 error = in6_embedscope(&tunnel->t_src6t_src.in6, addr6, NULL((void *)0), NULL((void *)0));
2383 if (error != 0)
2384 return (error);
2385
2386 memset(&tunnel->t_dst6, 0, sizeof(tunnel->t_dst6))__builtin_memset((&tunnel->t_dst.in6), (0), (sizeof(tunnel
->t_dst.in6)))
;
2387
2388 break;
2389#endif
2390 default:
2391 return (EAFNOSUPPORT47);
2392 }
2393
2394 /* commit */
2395 tunnel->t_af = addr->sa_family;
2396
2397 return (0);
2398}
2399
2400static int
2401mgre_get_tunnel(struct mgre_softc *sc, struct if_laddrreq *req)
2402{
2403 struct gre_tunnel *tunnel = &sc->sc_tunnel;
2404 struct sockaddr *dstaddr = (struct sockaddr *)&req->dstaddr;
2405 struct sockaddr_in *sin;
2406#ifdef INET61
2407 struct sockaddr_in6 *sin6;
2408#endif
2409
2410 switch (tunnel->t_af) {
2411 case AF_UNSPEC0:
2412 return (EADDRNOTAVAIL49);
2413 case AF_INET2:
2414 sin = (struct sockaddr_in *)&req->addr;
2415 memset(sin, 0, sizeof(*sin))__builtin_memset((sin), (0), (sizeof(*sin)));
2416 sin->sin_family = AF_INET2;
2417 sin->sin_len = sizeof(*sin);
2418 sin->sin_addr = tunnel->t_src4t_src.in4;
2419 break;
2420
2421#ifdef INET61
2422 case AF_INET624:
2423 sin6 = (struct sockaddr_in6 *)&req->addr;
2424 memset(sin6, 0, sizeof(*sin6))__builtin_memset((sin6), (0), (sizeof(*sin6)));
2425 sin6->sin6_family = AF_INET624;
2426 sin6->sin6_len = sizeof(*sin6);
2427 in6_recoverscope(sin6, &tunnel->t_src6t_src.in6);
2428 break;
2429#endif
2430 default:
2431 unhandled_af(tunnel->t_af);
2432 }
2433
2434 dstaddr->sa_len = 2;
2435 dstaddr->sa_family = AF_UNSPEC0;
2436
2437 return (0);
2438}
2439
2440static int
2441egre_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2442{
2443 struct egre_softc *sc = ifp->if_softc;
2444 struct ifreq *ifr = (struct ifreq *)data;
2445 int error = 0;
2446
2447 switch(cmd) {
2448 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
2449 break;
2450 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
2451 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) {
2452 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2453 error = egre_up(sc);
2454 else
2455 error = 0;
2456 } else {
2457 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2458 error = egre_down(sc);
2459 }
2460 break;
2461
2462 case SIOCSLIFPHYTTL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((168)))
:
2463 if (ifr->ifr_ttlifr_ifru.ifru_metric < 1 || ifr->ifr_ttlifr_ifru.ifru_metric > 0xff) {
2464 error = EINVAL22;
2465 break;
2466 }
2467
2468 /* commit */
2469 sc->sc_tunnel.t_ttl = (uint8_t)ifr->ifr_ttlifr_ifru.ifru_metric;
2470 break;
2471
2472 case SIOCGLIFPHYTTL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((169)))
:
2473 ifr->ifr_ttlifr_ifru.ifru_metric = (int)sc->sc_tunnel.t_ttl;
2474 break;
2475
2476 case SIOCSTXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((197)))
:
2477 error = if_txhprio_l2_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2478 if (error != 0)
2479 break;
2480
2481 sc->sc_tunnel.t_txhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2482 break;
2483 case SIOCGTXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((198)))
:
2484 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_txhprio;
2485 break;
2486
2487 case SIOCSRXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((219)))
:
2488 error = if_rxhprio_l2_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2489 if (error != 0)
2490 break;
2491
2492 sc->sc_tunnel.t_rxhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2493 break;
2494 case SIOCGRXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((219)))
:
2495 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_rxhprio;
2496 break;
2497
2498 case SIOCSVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((166)))
:
2499 case SIOCDVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((175)))
:
2500 case SIOCSVNETFLOWID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((195)))
:
2501 case SIOCSLIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) &
0x1fff) << 16) | ((('i')) << 8) | ((74)))
:
2502 case SIOCDIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((73)))
:
2503 case SIOCSLIFPHYRTABLE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((161)))
:
2504 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2505 error = EBUSY16;
2506 break;
2507 }
2508
2509 /* FALLTHROUGH */
2510 default:
2511 error = gre_tunnel_ioctl(ifp, &sc->sc_tunnel, cmd, data);
2512 if (error == ENOTTY25)
2513 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
2514 break;
2515 }
2516
2517 if (error == ENETRESET52) {
2518 /* no hardware to program */
2519 error = 0;
2520 }
2521
2522 return (error);
2523}
2524
2525static int
2526nvgre_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2527{
2528 struct nvgre_softc *sc = ifp->if_softc;
2529 struct gre_tunnel *tunnel = &sc->sc_tunnel;
2530
2531 struct ifreq *ifr = (struct ifreq *)data;
2532 struct if_parent *parent = (struct if_parent *)data;
2533 struct ifbrparam *bparam = (struct ifbrparam *)data;
2534 struct ifnet *ifp0;
2535
2536 int error = 0;
2537
2538 switch (cmd) {
1
Control jumps to 'case 3223349671:' at line 2619
2539 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
2540 break;
2541 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
2542 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) {
2543 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2544 error = nvgre_up(sc);
2545 else
2546 error = ENETRESET52;
2547 } else {
2548 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2549 error = nvgre_down(sc);
2550 }
2551 break;
2552
2553 case SIOCSLIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) &
0x1fff) << 16) | ((('i')) << 8) | ((74)))
:
2554 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2555 error = EBUSY16;
2556 break;
2557 }
2558 error = gre_set_tunnel(tunnel, (struct if_laddrreq *)data, 0);
2559 if (error == 0)
2560 etherbridge_flush(&sc->sc_eb, IFBF_FLUSHALL0x1);
2561 break;
2562 case SIOCGLIFPHYADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_laddrreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((75)))
:
2563 error = gre_get_tunnel(tunnel, (struct if_laddrreq *)data);
2564 break;
2565 case SIOCDIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((73)))
:
2566 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2567 error = EBUSY16;
2568 break;
2569 }
2570 error = gre_del_tunnel(tunnel);
2571 if (error == 0)
2572 etherbridge_flush(&sc->sc_eb, IFBF_FLUSHALL0x1);
2573 break;
2574
2575 case SIOCSIFPARENT((unsigned long)0x80000000 | ((sizeof(struct if_parent) &
0x1fff) << 16) | ((('i')) << 8) | ((178)))
:
2576 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2577 error = EBUSY16;
2578 break;
2579 }
2580 error = nvgre_set_parent(sc, parent->ifp_parent);
2581 if (error == 0)
2582 etherbridge_flush(&sc->sc_eb, IFBF_FLUSHALL0x1);
2583 break;
2584 case SIOCGIFPARENT(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_parent) & 0x1fff) << 16) | ((('i')) <<
8) | ((179)))
:
2585 ifp0 = if_get(sc->sc_ifp0);
2586 if (ifp0 == NULL((void *)0))
2587 error = EADDRNOTAVAIL49;
2588 else {
2589 memcpy(parent->ifp_parent, ifp0->if_xname,__builtin_memcpy((parent->ifp_parent), (ifp0->if_xname)
, (sizeof(parent->ifp_parent)))
2590 sizeof(parent->ifp_parent))__builtin_memcpy((parent->ifp_parent), (ifp0->if_xname)
, (sizeof(parent->ifp_parent)))
;
2591 }
2592 if_put(ifp0);
2593 break;
2594 case SIOCDIFPARENT((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((180)))
:
2595 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2596 error = EBUSY16;
2597 break;
2598 }
2599 /* commit */
2600 sc->sc_ifp0 = 0;
2601 etherbridge_flush(&sc->sc_eb, IFBF_FLUSHALL0x1);
2602 break;
2603
2604 case SIOCSVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((166)))
:
2605 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2606 error = EBUSY16;
2607 break;
2608 }
2609 if (ifr->ifr_vnetidifr_ifru.ifru_vnetid < GRE_KEY_ENTROPY_MIN0x00000000U ||
2610 ifr->ifr_vnetidifr_ifru.ifru_vnetid > GRE_KEY_ENTROPY_MAX0x00ffffffU) {
2611 error = EINVAL22;
2612 break;
2613 }
2614
2615 /* commit */
2616 tunnel->t_key = htonl(ifr->ifr_vnetid << GRE_KEY_ENTROPY_SHIFT)(__uint32_t)(__builtin_constant_p(ifr->ifr_ifru.ifru_vnetid
<< 8) ? (__uint32_t)(((__uint32_t)(ifr->ifr_ifru.ifru_vnetid
<< 8) & 0xff) << 24 | ((__uint32_t)(ifr->
ifr_ifru.ifru_vnetid << 8) & 0xff00) << 8 | (
(__uint32_t)(ifr->ifr_ifru.ifru_vnetid << 8) & 0xff0000
) >> 8 | ((__uint32_t)(ifr->ifr_ifru.ifru_vnetid <<
8) & 0xff000000) >> 24) : __swap32md(ifr->ifr_ifru
.ifru_vnetid << 8))
;
2617 etherbridge_flush(&sc->sc_eb, IFBF_FLUSHALL0x1);
2618 break;
2619 case SIOCGVNETID(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((167)))
:
2620 error = gre_get_vnetid(tunnel, ifr);
2
Calling 'gre_get_vnetid'
2621 break;
2622
2623 case SIOCSLIFPHYRTABLE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((161)))
:
2624 if (ifr->ifr_rdomainidifr_ifru.ifru_metric < 0 ||
2625 ifr->ifr_rdomainidifr_ifru.ifru_metric > RT_TABLEID_MAX255 ||
2626 !rtable_exists(ifr->ifr_rdomainidifr_ifru.ifru_metric)) {
2627 error = EINVAL22;
2628 break;
2629 }
2630 tunnel->t_rtableid = ifr->ifr_rdomainidifr_ifru.ifru_metric;
2631 etherbridge_flush(&sc->sc_eb, IFBF_FLUSHALL0x1);
2632 break;
2633 case SIOCGLIFPHYRTABLE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((162)))
:
2634 ifr->ifr_rdomainidifr_ifru.ifru_metric = tunnel->t_rtableid;
2635 break;
2636
2637 case SIOCSLIFPHYDF((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((193)))
:
2638 /* commit */
2639 tunnel->t_df = ifr->ifr_dfifr_ifru.ifru_metric ? htons(IP_DF)(__uint16_t)(__builtin_constant_p(0x4000) ? (__uint16_t)(((__uint16_t
)(0x4000) & 0xffU) << 8 | ((__uint16_t)(0x4000) &
0xff00U) >> 8) : __swap16md(0x4000))
: htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
2640 break;
2641 case SIOCGLIFPHYDF(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((194)))
:
2642 ifr->ifr_dfifr_ifru.ifru_metric = tunnel->t_df ? 1 : 0;
2643 break;
2644
2645 case SIOCSLIFPHYTTL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((168)))
:
2646 if (ifr->ifr_ttlifr_ifru.ifru_metric < 1 || ifr->ifr_ttlifr_ifru.ifru_metric > 0xff) {
2647 error = EINVAL22;
2648 break;
2649 }
2650
2651 /* commit */
2652 tunnel->t_ttl = ifr->ifr_ttlifr_ifru.ifru_metric;
2653 break;
2654
2655 case SIOCGLIFPHYTTL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((169)))
:
2656 ifr->ifr_ttlifr_ifru.ifru_metric = tunnel->t_ttl;
2657 break;
2658
2659 case SIOCSTXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((197)))
:
2660 error = if_txhprio_l2_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2661 if (error != 0)
2662 break;
2663
2664 sc->sc_tunnel.t_txhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2665 break;
2666 case SIOCGTXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((198)))
:
2667 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_txhprio;
2668 break;
2669
2670 case SIOCSRXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((219)))
:
2671 error = if_rxhprio_l2_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2672 if (error != 0)
2673 break;
2674
2675 sc->sc_tunnel.t_rxhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2676 break;
2677 case SIOCGRXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((219)))
:
2678 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_rxhprio;
2679 break;
2680
2681 case SIOCBRDGSCACHE((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((64)))
:
2682 error = etherbridge_set_max(&sc->sc_eb, bparam);
2683 break;
2684 case SIOCBRDGGCACHE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbrparam) & 0x1fff) << 16) | ((('i')) <<
8) | ((65)))
:
2685 error = etherbridge_get_max(&sc->sc_eb, bparam);
2686 break;
2687
2688 case SIOCBRDGSTO((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((69)))
:
2689 error = etherbridge_set_tmo(&sc->sc_eb, bparam);
2690 break;
2691 case SIOCBRDGGTO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbrparam) & 0x1fff) << 16) | ((('i')) <<
8) | ((70)))
:
2692 error = etherbridge_get_tmo(&sc->sc_eb, bparam);
2693 break;
2694
2695 case SIOCBRDGRTS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbaconf) & 0x1fff) << 16) | ((('i')) <<
8) | ((67)))
:
2696 error = etherbridge_rtfind(&sc->sc_eb,
2697 (struct ifbaconf *)data);
2698 break;
2699 case SIOCBRDGFLUSH((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((72)))
:
2700 etherbridge_flush(&sc->sc_eb,
2701 ((struct ifbreq *)data)->ifbr_ifsflags);
2702 break;
2703 case SIOCBRDGSADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbareq) & 0x1fff) << 16) | ((('i')) <<
8) | ((68)))
:
2704 error = nvgre_add_addr(sc, (struct ifbareq *)data);
2705 break;
2706 case SIOCBRDGDADDR((unsigned long)0x80000000 | ((sizeof(struct ifbareq) & 0x1fff
) << 16) | ((('i')) << 8) | ((71)))
:
2707 error = nvgre_del_addr(sc, (struct ifbareq *)data);
2708 break;
2709
2710 case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((49)))
:
2711 case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((50)))
:
2712 break;
2713
2714 default:
2715 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
2716 break;
2717 }
2718
2719 if (error == ENETRESET52) {
2720 /* no hardware to program */
2721 error = 0;
2722 }
2723
2724 return (error);
2725}
2726
2727static int
2728eoip_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2729{
2730 struct eoip_softc *sc = ifp->if_softc;
2731 struct ifreq *ifr = (struct ifreq *)data;
2732 struct ifkalivereq *ikar = (struct ifkalivereq *)data;
2733 int error = 0;
2734
2735 switch(cmd) {
2736 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
2737 break;
2738 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
2739 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) {
2740 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2741 error = eoip_up(sc);
2742 else
2743 error = 0;
2744 } else {
2745 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2746 error = eoip_down(sc);
2747 }
2748 break;
2749
2750 case SIOCSETKALIVE((unsigned long)0x80000000 | ((sizeof(struct ifkalivereq) &
0x1fff) << 16) | ((('i')) << 8) | ((163)))
:
2751 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2752 error = EBUSY16;
2753 break;
2754 }
2755
2756 if (ikar->ikar_timeo < 0 || ikar->ikar_timeo > 86400 ||
2757 ikar->ikar_cnt < 0 || ikar->ikar_cnt > 256)
2758 return (EINVAL22);
2759
2760 if (ikar->ikar_timeo == 0 || ikar->ikar_cnt == 0) {
2761 sc->sc_ka_count = 0;
2762 sc->sc_ka_timeo = 0;
2763 sc->sc_ka_state = GRE_KA_NONE0;
2764 } else {
2765 sc->sc_ka_count = ikar->ikar_cnt;
2766 sc->sc_ka_timeo = ikar->ikar_timeo;
2767 sc->sc_ka_state = GRE_KA_DOWN1;
2768 }
2769 break;
2770
2771 case SIOCGETKALIVE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifkalivereq) & 0x1fff) << 16) | ((('i')) <<
8) | ((164)))
:
2772 ikar->ikar_cnt = sc->sc_ka_count;
2773 ikar->ikar_timeo = sc->sc_ka_timeo;
2774 break;
2775
2776 case SIOCSVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((166)))
:
2777 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2778 error = EBUSY16;
2779 break;
2780 }
2781 if (ifr->ifr_vnetidifr_ifru.ifru_vnetid < 0 || ifr->ifr_vnetidifr_ifru.ifru_vnetid > 0xffff)
2782 return (EINVAL22);
2783
2784 sc->sc_tunnel.t_key = htole16(ifr->ifr_vnetid)((__uint16_t)(ifr->ifr_ifru.ifru_vnetid)); /* for cmp */
2785 sc->sc_tunnel_id = htole16(ifr->ifr_vnetid)((__uint16_t)(ifr->ifr_ifru.ifru_vnetid));
2786 break;
2787
2788 case SIOCGVNETID(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((167)))
:
2789 ifr->ifr_vnetidifr_ifru.ifru_vnetid = letoh16(sc->sc_tunnel_id)((__uint16_t)(sc->sc_tunnel_id));
2790 break;
2791
2792 case SIOCSLIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) &
0x1fff) << 16) | ((('i')) << 8) | ((74)))
:
2793 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2794 error = EBUSY16;
2795 break;
2796 }
2797
2798 error = gre_set_tunnel(&sc->sc_tunnel,
2799 (struct if_laddrreq *)data, 1);
2800 break;
2801 case SIOCGLIFPHYADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_laddrreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((75)))
:
2802 error = gre_get_tunnel(&sc->sc_tunnel,
2803 (struct if_laddrreq *)data);
2804 break;
2805 case SIOCDIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((73)))
:
2806 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2807 error = EBUSY16;
2808 break;
2809 }
2810
2811 error = gre_del_tunnel(&sc->sc_tunnel);
2812 break;
2813
2814 case SIOCSLIFPHYRTABLE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((161)))
:
2815 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2816 error = EBUSY16;
2817 break;
2818 }
2819
2820 if (ifr->ifr_rdomainidifr_ifru.ifru_metric < 0 ||
2821 ifr->ifr_rdomainidifr_ifru.ifru_metric > RT_TABLEID_MAX255 ||
2822 !rtable_exists(ifr->ifr_rdomainidifr_ifru.ifru_metric)) {
2823 error = EINVAL22;
2824 break;
2825 }
2826 sc->sc_tunnel.t_rtableid = ifr->ifr_rdomainidifr_ifru.ifru_metric;
2827 break;
2828 case SIOCGLIFPHYRTABLE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((162)))
:
2829 ifr->ifr_rdomainidifr_ifru.ifru_metric = sc->sc_tunnel.t_rtableid;
2830 break;
2831
2832 case SIOCSLIFPHYTTL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((168)))
:
2833 if (ifr->ifr_ttlifr_ifru.ifru_metric < 1 || ifr->ifr_ttlifr_ifru.ifru_metric > 0xff) {
2834 error = EINVAL22;
2835 break;
2836 }
2837
2838 /* commit */
2839 sc->sc_tunnel.t_ttl = (uint8_t)ifr->ifr_ttlifr_ifru.ifru_metric;
2840 break;
2841 case SIOCGLIFPHYTTL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((169)))
:
2842 ifr->ifr_ttlifr_ifru.ifru_metric = (int)sc->sc_tunnel.t_ttl;
2843 break;
2844
2845 case SIOCSLIFPHYDF((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((193)))
:
2846 /* commit */
2847 sc->sc_tunnel.t_df = ifr->ifr_dfifr_ifru.ifru_metric ? htons(IP_DF)(__uint16_t)(__builtin_constant_p(0x4000) ? (__uint16_t)(((__uint16_t
)(0x4000) & 0xffU) << 8 | ((__uint16_t)(0x4000) &
0xff00U) >> 8) : __swap16md(0x4000))
: htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
2848 break;
2849 case SIOCGLIFPHYDF(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((194)))
:
2850 ifr->ifr_dfifr_ifru.ifru_metric = sc->sc_tunnel.t_df ? 1 : 0;
2851 break;
2852
2853 case SIOCSTXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((197)))
:
2854 error = if_txhprio_l2_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2855 if (error != 0)
2856 break;
2857
2858 sc->sc_tunnel.t_txhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2859 break;
2860 case SIOCGTXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((198)))
:
2861 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_txhprio;
2862 break;
2863
2864 case SIOCSRXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((219)))
:
2865 error = if_rxhprio_l2_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2866 if (error != 0)
2867 break;
2868
2869 sc->sc_tunnel.t_rxhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2870 break;
2871 case SIOCGRXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((219)))
:
2872 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_rxhprio;
2873 break;
2874
2875 case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((49)))
:
2876 case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((50)))
:
2877 break;
2878
2879 default:
2880 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
2881 break;
2882 }
2883
2884 if (error == ENETRESET52) {
2885 /* no hardware to program */
2886 error = 0;
2887 }
2888
2889 return (error);
2890}
2891
2892static int
2893gre_up(struct gre_softc *sc)
2894{
2895 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
2896 SET(sc->sc_if.if_flags, IFF_RUNNING)((sc->sc_if.if_flags) |= (0x40));
2897
2898 if (sc->sc_ka_state != GRE_KA_NONE0)
2899 gre_keepalive_send(sc);
2900
2901 return (0);
2902}
2903
2904static int
2905gre_down(struct gre_softc *sc)
2906{
2907 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
2908 CLR(sc->sc_if.if_flags, IFF_RUNNING)((sc->sc_if.if_flags) &= ~(0x40));
2909
2910 if (sc->sc_ka_state != GRE_KA_NONE0) {
2911 timeout_del_barrier(&sc->sc_ka_hold);
2912 timeout_del_barrier(&sc->sc_ka_send);
2913
2914 sc->sc_ka_state = GRE_KA_DOWN1;
2915 gre_link_state(&sc->sc_if, sc->sc_ka_state);
2916 }
2917
2918 return (0);
2919}
2920
2921static void
2922gre_link_state(struct ifnet *ifp, unsigned int state)
2923{
2924 int link_state = LINK_STATE_UNKNOWN0;
2925
2926 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2927 switch (state) {
2928 case GRE_KA_NONE0:
2929 /* maybe up? or down? it's unknown, really */
2930 break;
2931 case GRE_KA_UP3:
2932 link_state = LINK_STATE_UP4;
2933 break;
2934 default:
2935 link_state = LINK_STATE_KALIVE_DOWN3;
2936 break;
2937 }
2938 }
2939
2940 if (ifp->if_link_stateif_data.ifi_link_state != link_state) {
2941 ifp->if_link_stateif_data.ifi_link_state = link_state;
2942 if_link_state_change(ifp);
2943 }
2944}
2945
2946static void
2947gre_keepalive_send(void *arg)
2948{
2949 struct gre_tunnel t;
2950 struct gre_softc *sc = arg;
2951 struct mbuf *m;
2952 struct gre_keepalive *gk;
2953 SIPHASH_CTX ctx;
2954 int linkhdr, len;
2955 uint16_t proto;
2956 uint8_t ttl;
2957 uint8_t tos;
2958
2959 /*
2960 * re-schedule immediately, so we deal with incomplete configuration
2961 * or temporary errors.
2962 */
2963 if (sc->sc_ka_timeo)
2964 timeout_add_sec(&sc->sc_ka_send, sc->sc_ka_timeo);
2965
2966 if (!ISSET(sc->sc_if.if_flags, IFF_RUNNING)((sc->sc_if.if_flags) & (0x40)) ||
2967 sc->sc_ka_state == GRE_KA_NONE0 ||
2968 sc->sc_tunnel.t_af == AF_UNSPEC0 ||
2969 sc->sc_tunnel.t_rtableid != sc->sc_if.if_rdomainif_data.ifi_rdomain)
2970 return;
2971
2972 /* this is really conservative */
2973#ifdef INET61
2974 linkhdr = max_linkhdr + MAX(sizeof(struct ip), sizeof(struct ip6_hdr))(((sizeof(struct ip))>(sizeof(struct ip6_hdr)))?(sizeof(struct
ip)):(sizeof(struct ip6_hdr)))
+
2975 sizeof(struct gre_header) + sizeof(struct gre_h_key);
2976#else
2977 linkhdr = max_linkhdr + sizeof(struct ip) +
2978 sizeof(struct gre_header) + sizeof(struct gre_h_key);
2979#endif
2980 len = linkhdr + sizeof(*gk);
2981
2982 MGETHDR(m, M_DONTWAIT, MT_DATA)m = m_gethdr((0x0002), (1));
2983 if (m == NULL((void *)0))
2984 return;
2985
2986 if (len > MHLEN((256 - sizeof(struct m_hdr)) - sizeof(struct pkthdr))) {
2987 MCLGETL(m, M_DONTWAIT, len)m_clget((m), (0x0002), (len));
2988 if (!ISSET(m->m_flags, M_EXT)((m->m_hdr.mh_flags) & (0x0001))) {
2989 m_freem(m);
2990 return;
2991 }
2992 }
2993
2994 m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = len;
2995 m_adj(m, linkhdr);
2996
2997 /*
2998 * build the inside packet
2999 */
3000 gk = mtod(m, struct gre_keepalive *)((struct gre_keepalive *)((m)->m_hdr.mh_data));
3001 htobem32(&gk->gk_uptime, sc->sc_ka_bias + ticks)(*(__uint32_t *)(&gk->gk_uptime) = (__uint32_t)(__builtin_constant_p
(sc->sc_ka_bias + ticks) ? (__uint32_t)(((__uint32_t)(sc->
sc_ka_bias + ticks) & 0xff) << 24 | ((__uint32_t)(sc
->sc_ka_bias + ticks) & 0xff00) << 8 | ((__uint32_t
)(sc->sc_ka_bias + ticks) & 0xff0000) >> 8 | ((__uint32_t
)(sc->sc_ka_bias + ticks) & 0xff000000) >> 24) :
__swap32md(sc->sc_ka_bias + ticks)))
;
3002 htobem32(&gk->gk_random, arc4random())(*(__uint32_t *)(&gk->gk_random) = (__uint32_t)(__builtin_constant_p
(arc4random()) ? (__uint32_t)(((__uint32_t)(arc4random()) &
0xff) << 24 | ((__uint32_t)(arc4random()) & 0xff00
) << 8 | ((__uint32_t)(arc4random()) & 0xff0000) >>
8 | ((__uint32_t)(arc4random()) & 0xff000000) >> 24
) : __swap32md(arc4random())))
;
3003
3004 SipHash24_Init(&ctx, &sc->sc_ka_key)SipHash_Init((&ctx), (&sc->sc_ka_key));
3005 SipHash24_Update(&ctx, &gk->gk_uptime, sizeof(gk->gk_uptime))SipHash_Update((&ctx), 2, 4, (&gk->gk_uptime), (sizeof
(gk->gk_uptime)))
;
3006 SipHash24_Update(&ctx, &gk->gk_random, sizeof(gk->gk_random))SipHash_Update((&ctx), 2, 4, (&gk->gk_random), (sizeof
(gk->gk_random)))
;
3007 SipHash24_Final(gk->gk_digest, &ctx)SipHash_Final((gk->gk_digest), (&ctx), 2, 4);
3008
3009 ttl = sc->sc_tunnel.t_ttl == -1 ? ip_defttl : sc->sc_tunnel.t_ttl;
3010
3011 m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio = sc->sc_if.if_llprio;
3012 tos = gre_l3_tos(&sc->sc_tunnel, m, IFQ_PRIO2TOS(m->m_pkthdr.pf.prio)((m->M_dat.MH.MH_pkthdr.pf.prio) << 5));
3013
3014 t.t_af = sc->sc_tunnel.t_af;
3015 t.t_df = sc->sc_tunnel.t_df;
3016 t.t_src = sc->sc_tunnel.t_dst;
3017 t.t_dst = sc->sc_tunnel.t_src;
3018 t.t_key = sc->sc_tunnel.t_key;
3019 t.t_key_mask = sc->sc_tunnel.t_key_mask;
3020
3021 m = gre_encap(&t, m, htons(0), ttl, tos)gre_encap_dst((&t), &(&t)->t_dst, (m), ((__uint16_t
)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t)(0) &
0xffU) << 8 | ((__uint16_t)(0) & 0xff00U) >>
8) : __swap16md(0))), (ttl), (tos))
;
3022 if (m == NULL((void *)0))
3023 return;
3024
3025 switch (sc->sc_tunnel.t_af) {
3026 case AF_INET2: {
3027 struct ip *ip;
3028
3029 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
3030 ip->ip_id = htons(ip_randomid())(__uint16_t)(__builtin_constant_p(ip_randomid()) ? (__uint16_t
)(((__uint16_t)(ip_randomid()) & 0xffU) << 8 | ((__uint16_t
)(ip_randomid()) & 0xff00U) >> 8) : __swap16md(ip_randomid
()))
;
3031 in_hdr_cksum_out(m, NULL((void *)0));
3032
3033 proto = htons(ETHERTYPE_IP)(__uint16_t)(__builtin_constant_p(0x0800) ? (__uint16_t)(((__uint16_t
)(0x0800) & 0xffU) << 8 | ((__uint16_t)(0x0800) &
0xff00U) >> 8) : __swap16md(0x0800))
;
3034 break;
3035 }
3036#ifdef INET61
3037 case AF_INET624:
3038 proto = htons(ETHERTYPE_IPV6)(__uint16_t)(__builtin_constant_p(0x86DD) ? (__uint16_t)(((__uint16_t
)(0x86DD) & 0xffU) << 8 | ((__uint16_t)(0x86DD) &
0xff00U) >> 8) : __swap16md(0x86DD))
;
3039 break;
3040#endif
3041 default:
3042 m_freem(m);
3043 return;
3044 }
3045
3046 /*
3047 * put it in the tunnel
3048 */
3049 m = gre_encap(&sc->sc_tunnel, m, proto, ttl, tos)gre_encap_dst((&sc->sc_tunnel), &(&sc->sc_tunnel
)->t_dst, (m), (proto), (ttl), (tos))
;
3050 if (m == NULL((void *)0))
3051 return;
3052
3053 gre_ip_output(&sc->sc_tunnel, m);
3054}
3055
3056static void
3057gre_keepalive_hold(void *arg)
3058{
3059 struct gre_softc *sc = arg;
3060 struct ifnet *ifp = &sc->sc_if;
3061
3062 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)) ||
3063 sc->sc_ka_state == GRE_KA_NONE0)
3064 return;
3065
3066 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
3067 sc->sc_ka_state = GRE_KA_DOWN1;
3068 gre_link_state(ifp, sc->sc_ka_state);
3069 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
3070}
3071
3072static int
3073gre_set_tunnel(struct gre_tunnel *tunnel, struct if_laddrreq *req, int ucast)
3074{
3075 struct sockaddr *src = (struct sockaddr *)&req->addr;
3076 struct sockaddr *dst = (struct sockaddr *)&req->dstaddr;
3077 struct sockaddr_in *src4, *dst4;
3078#ifdef INET61
3079 struct sockaddr_in6 *src6, *dst6;
3080 int error;
3081#endif
3082
3083 /* sa_family and sa_len must be equal */
3084 if (src->sa_family != dst->sa_family || src->sa_len != dst->sa_len)
3085 return (EINVAL22);
3086
3087 /* validate */
3088 switch (dst->sa_family) {
3089 case AF_INET2:
3090 if (dst->sa_len != sizeof(*dst4))
3091 return (EINVAL22);
3092
3093 src4 = (struct sockaddr_in *)src;
3094 if (in_nullhost(src4->sin_addr)((src4->sin_addr).s_addr == ((u_int32_t) (__uint32_t)(__builtin_constant_p
((u_int32_t)(0x00000000)) ? (__uint32_t)(((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff) << 24 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff00) << 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff0000) >> 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff000000) >> 24) : __swap32md((u_int32_t
)(0x00000000)))))
||
3095 IN_MULTICAST(src4->sin_addr.s_addr)(((u_int32_t)(src4->sin_addr.s_addr) & ((u_int32_t) (__uint32_t
)(__builtin_constant_p((u_int32_t)(0xf0000000)) ? (__uint32_t
)(((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff) <<
24 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff00) <<
8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff0000) >>
8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff000000)
>> 24) : __swap32md((u_int32_t)(0xf0000000))))) == ((u_int32_t
) (__uint32_t)(__builtin_constant_p((u_int32_t)(0xe0000000)) ?
(__uint32_t)(((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff
) << 24 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff00
) << 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff000000
) >> 24) : __swap32md((u_int32_t)(0xe0000000)))))
)
3096 return (EINVAL22);
3097
3098 dst4 = (struct sockaddr_in *)dst;
3099 if (in_nullhost(dst4->sin_addr)((dst4->sin_addr).s_addr == ((u_int32_t) (__uint32_t)(__builtin_constant_p
((u_int32_t)(0x00000000)) ? (__uint32_t)(((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff) << 24 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff00) << 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff0000) >> 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff000000) >> 24) : __swap32md((u_int32_t
)(0x00000000)))))
||
3100 (IN_MULTICAST(dst4->sin_addr.s_addr)(((u_int32_t)(dst4->sin_addr.s_addr) & ((u_int32_t) (__uint32_t
)(__builtin_constant_p((u_int32_t)(0xf0000000)) ? (__uint32_t
)(((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff) <<
24 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff00) <<
8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff0000) >>
8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff000000)
>> 24) : __swap32md((u_int32_t)(0xf0000000))))) == ((u_int32_t
) (__uint32_t)(__builtin_constant_p((u_int32_t)(0xe0000000)) ?
(__uint32_t)(((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff
) << 24 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff00
) << 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff000000
) >> 24) : __swap32md((u_int32_t)(0xe0000000)))))
!= !ucast))
3101 return (EINVAL22);
3102
3103 tunnel->t_src4t_src.in4 = src4->sin_addr;
3104 tunnel->t_dst4t_dst.in4 = dst4->sin_addr;
3105
3106 break;
3107#ifdef INET61
3108 case AF_INET624:
3109 if (dst->sa_len != sizeof(*dst6))
3110 return (EINVAL22);
3111
3112 src6 = (struct sockaddr_in6 *)src;
3113 if (IN6_IS_ADDR_UNSPECIFIED(&src6->sin6_addr)((*(const u_int32_t *)(const void *)(&(&src6->sin6_addr
)->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t
*)(const void *)(&(&src6->sin6_addr)->__u6_addr
.__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void
*)(&(&src6->sin6_addr)->__u6_addr.__u6_addr8[8
]) == 0) && (*(const u_int32_t *)(const void *)(&
(&src6->sin6_addr)->__u6_addr.__u6_addr8[12]) == 0)
)
||
3114 IN6_IS_ADDR_MULTICAST(&src6->sin6_addr)((&src6->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff
)
)
3115 return (EINVAL22);
3116
3117 dst6 = (struct sockaddr_in6 *)dst;
3118 if (IN6_IS_ADDR_UNSPECIFIED(&dst6->sin6_addr)((*(const u_int32_t *)(const void *)(&(&dst6->sin6_addr
)->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t
*)(const void *)(&(&dst6->sin6_addr)->__u6_addr
.__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void
*)(&(&dst6->sin6_addr)->__u6_addr.__u6_addr8[8
]) == 0) && (*(const u_int32_t *)(const void *)(&
(&dst6->sin6_addr)->__u6_addr.__u6_addr8[12]) == 0)
)
||
3119 IN6_IS_ADDR_MULTICAST(&dst6->sin6_addr)((&dst6->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff
)
!= !ucast)
3120 return (EINVAL22);
3121
3122 if (src6->sin6_scope_id != dst6->sin6_scope_id)
3123 return (EINVAL22);
3124
3125 error = in6_embedscope(&tunnel->t_src6t_src.in6, src6, NULL((void *)0), NULL((void *)0));
3126 if (error != 0)
3127 return (error);
3128
3129 error = in6_embedscope(&tunnel->t_dst6t_dst.in6, dst6, NULL((void *)0), NULL((void *)0));
3130 if (error != 0)
3131 return (error);
3132
3133 break;
3134#endif
3135 default:
3136 return (EAFNOSUPPORT47);
3137 }
3138
3139 /* commit */
3140 tunnel->t_af = dst->sa_family;
3141
3142 return (0);
3143}
3144
3145static int
3146gre_get_tunnel(struct gre_tunnel *tunnel, struct if_laddrreq *req)
3147{
3148 struct sockaddr *src = (struct sockaddr *)&req->addr;
3149 struct sockaddr *dst = (struct sockaddr *)&req->dstaddr;
3150 struct sockaddr_in *sin;
3151#ifdef INET61 /* ifconfig already embeds the scopeid */
3152 struct sockaddr_in6 *sin6;
3153#endif
3154
3155 switch (tunnel->t_af) {
3156 case AF_UNSPEC0:
3157 return (EADDRNOTAVAIL49);
3158 case AF_INET2:
3159 sin = (struct sockaddr_in *)src;
3160 memset(sin, 0, sizeof(*sin))__builtin_memset((sin), (0), (sizeof(*sin)));
3161 sin->sin_family = AF_INET2;
3162 sin->sin_len = sizeof(*sin);
3163 sin->sin_addr = tunnel->t_src4t_src.in4;
3164
3165 sin = (struct sockaddr_in *)dst;
3166 memset(sin, 0, sizeof(*sin))__builtin_memset((sin), (0), (sizeof(*sin)));
3167 sin->sin_family = AF_INET2;
3168 sin->sin_len = sizeof(*sin);
3169 sin->sin_addr = tunnel->t_dst4t_dst.in4;
3170
3171 break;
3172
3173#ifdef INET61
3174 case AF_INET624:
3175 sin6 = (struct sockaddr_in6 *)src;
3176 memset(sin6, 0, sizeof(*sin6))__builtin_memset((sin6), (0), (sizeof(*sin6)));
3177 sin6->sin6_family = AF_INET624;
3178 sin6->sin6_len = sizeof(*sin6);
3179 in6_recoverscope(sin6, &tunnel->t_src6t_src.in6);
3180
3181 sin6 = (struct sockaddr_in6 *)dst;
3182 memset(sin6, 0, sizeof(*sin6))__builtin_memset((sin6), (0), (sizeof(*sin6)));
3183 sin6->sin6_family = AF_INET624;
3184 sin6->sin6_len = sizeof(*sin6);
3185 in6_recoverscope(sin6, &tunnel->t_dst6t_dst.in6);
3186
3187 break;
3188#endif
3189 default:
3190 return (EAFNOSUPPORT47);
3191 }
3192
3193 return (0);
3194}
3195
3196static int
3197gre_del_tunnel(struct gre_tunnel *tunnel)
3198{
3199 /* commit */
3200 tunnel->t_af = AF_UNSPEC0;
3201
3202 return (0);
3203}
3204
3205static int
3206gre_set_vnetid(struct gre_tunnel *tunnel, struct ifreq *ifr)
3207{
3208 uint32_t key;
3209 uint32_t min = GRE_KEY_MIN0x00000000U;
3210 uint32_t max = GRE_KEY_MAX0xffffffffU;
3211 unsigned int shift = GRE_KEY_SHIFT0;
3212 uint32_t mask = GRE_KEY_MASK(__uint32_t)(__builtin_constant_p(0xffffffffU) ? (__uint32_t)
(((__uint32_t)(0xffffffffU) & 0xff) << 24 | ((__uint32_t
)(0xffffffffU) & 0xff00) << 8 | ((__uint32_t)(0xffffffffU
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffffffU) &
0xff000000) >> 24) : __swap32md(0xffffffffU))
;
3213
3214 if (tunnel->t_key_mask == GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
) {
3215 min = GRE_KEY_ENTROPY_MIN0x00000000U;
3216 max = GRE_KEY_ENTROPY_MAX0x00ffffffU;
3217 shift = GRE_KEY_ENTROPY_SHIFT8;
3218 mask = GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
3219 }
3220
3221 if (ifr->ifr_vnetidifr_ifru.ifru_vnetid < min || ifr->ifr_vnetidifr_ifru.ifru_vnetid > max)
3222 return (EINVAL22);
3223
3224 key = htonl(ifr->ifr_vnetid << shift)(__uint32_t)(__builtin_constant_p(ifr->ifr_ifru.ifru_vnetid
<< shift) ? (__uint32_t)(((__uint32_t)(ifr->ifr_ifru
.ifru_vnetid << shift) & 0xff) << 24 | ((__uint32_t
)(ifr->ifr_ifru.ifru_vnetid << shift) & 0xff00) <<
8 | ((__uint32_t)(ifr->ifr_ifru.ifru_vnetid << shift
) & 0xff0000) >> 8 | ((__uint32_t)(ifr->ifr_ifru
.ifru_vnetid << shift) & 0xff000000) >> 24) :
__swap32md(ifr->ifr_ifru.ifru_vnetid << shift))
;
3225
3226 /* commit */
3227 tunnel->t_key_mask = mask;
3228 tunnel->t_key = key;
3229
3230 return (0);
3231}
3232
3233static int
3234gre_get_vnetid(struct gre_tunnel *tunnel, struct ifreq *ifr)
3235{
3236 int shift;
3
'shift' declared without an initial value
3237
3238 switch (tunnel->t_key_mask) {
3239 case GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
:
3240 return (EADDRNOTAVAIL49);
3241 case GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
:
3242 shift = GRE_KEY_ENTROPY_SHIFT8;
3243 break;
3244 case GRE_KEY_MASK(__uint32_t)(__builtin_constant_p(0xffffffffU) ? (__uint32_t)
(((__uint32_t)(0xffffffffU) & 0xff) << 24 | ((__uint32_t
)(0xffffffffU) & 0xff00) << 8 | ((__uint32_t)(0xffffffffU
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffffffU) &
0xff000000) >> 24) : __swap32md(0xffffffffU))
:
3245 shift = GRE_KEY_SHIFT0;
3246 break;
3247 }
3248
3249 ifr->ifr_vnetidifr_ifru.ifru_vnetid = ntohl(tunnel->t_key)(__uint32_t)(__builtin_constant_p(tunnel->t_key) ? (__uint32_t
)(((__uint32_t)(tunnel->t_key) & 0xff) << 24 | (
(__uint32_t)(tunnel->t_key) & 0xff00) << 8 | ((__uint32_t
)(tunnel->t_key) & 0xff0000) >> 8 | ((__uint32_t
)(tunnel->t_key) & 0xff000000) >> 24) : __swap32md
(tunnel->t_key))
>> shift;
4
'Default' branch taken. Execution continues on line 3249
5
'?' condition is false
6
The right operand of '>>' is a garbage value
3250
3251 return (0);
3252}
3253
3254static int
3255gre_del_vnetid(struct gre_tunnel *tunnel)
3256{
3257 tunnel->t_key_mask = GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
;
3258
3259 return (0);
3260}
3261
3262static int
3263gre_set_vnetflowid(struct gre_tunnel *tunnel, struct ifreq *ifr)
3264{
3265 uint32_t mask, key;
3266
3267 if (tunnel->t_key_mask == GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
)
3268 return (EADDRNOTAVAIL49);
3269
3270 mask = ifr->ifr_vnetidifr_ifru.ifru_vnetid ? GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
: GRE_KEY_MASK(__uint32_t)(__builtin_constant_p(0xffffffffU) ? (__uint32_t)
(((__uint32_t)(0xffffffffU) & 0xff) << 24 | ((__uint32_t
)(0xffffffffU) & 0xff00) << 8 | ((__uint32_t)(0xffffffffU
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffffffU) &
0xff000000) >> 24) : __swap32md(0xffffffffU))
;
3271 if (tunnel->t_key_mask == mask) {
3272 /* nop */
3273 return (0);
3274 }
3275
3276 key = ntohl(tunnel->t_key)(__uint32_t)(__builtin_constant_p(tunnel->t_key) ? (__uint32_t
)(((__uint32_t)(tunnel->t_key) & 0xff) << 24 | (
(__uint32_t)(tunnel->t_key) & 0xff00) << 8 | ((__uint32_t
)(tunnel->t_key) & 0xff0000) >> 8 | ((__uint32_t
)(tunnel->t_key) & 0xff000000) >> 24) : __swap32md
(tunnel->t_key))
;
3277 if (mask == GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
) {
3278 if (key > GRE_KEY_ENTROPY_MAX0x00ffffffU)
3279 return (ERANGE34);
3280
3281 key = htonl(key << GRE_KEY_ENTROPY_SHIFT)(__uint32_t)(__builtin_constant_p(key << 8) ? (__uint32_t
)(((__uint32_t)(key << 8) & 0xff) << 24 | ((__uint32_t
)(key << 8) & 0xff00) << 8 | ((__uint32_t)(key
<< 8) & 0xff0000) >> 8 | ((__uint32_t)(key <<
8) & 0xff000000) >> 24) : __swap32md(key << 8
))
;
3282 } else
3283 key = htonl(key >> GRE_KEY_ENTROPY_SHIFT)(__uint32_t)(__builtin_constant_p(key >> 8) ? (__uint32_t
)(((__uint32_t)(key >> 8) & 0xff) << 24 | ((__uint32_t
)(key >> 8) & 0xff00) << 8 | ((__uint32_t)(key
>> 8) & 0xff0000) >> 8 | ((__uint32_t)(key >>
8) & 0xff000000) >> 24) : __swap32md(key >> 8
))
;
3284
3285 /* commit */
3286 tunnel->t_key_mask = mask;
3287 tunnel->t_key = key;
3288
3289 return (0);
3290}
3291
3292static int
3293gre_get_vnetflowid(struct gre_tunnel *tunnel, struct ifreq *ifr)
3294{
3295 if (tunnel->t_key_mask == GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
)
3296 return (EADDRNOTAVAIL49);
3297
3298 ifr->ifr_vnetidifr_ifru.ifru_vnetid = tunnel->t_key_mask == GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
3299
3300 return (0);
3301}
3302
3303static int
3304mgre_up(struct mgre_softc *sc)
3305{
3306 unsigned int hlen;
3307
3308 switch (sc->sc_tunnel.t_af) {
3309 case AF_UNSPEC0:
3310 return (EDESTADDRREQ39);
3311 case AF_INET2:
3312 hlen = sizeof(struct ip);
3313 break;
3314#ifdef INET61
3315 case AF_INET624:
3316 hlen = sizeof(struct ip6_hdr);
3317 break;
3318#endif /* INET6 */
3319 default:
3320 unhandled_af(sc->sc_tunnel.t_af);
3321 }
3322
3323 hlen += sizeof(struct gre_header);
3324 if (sc->sc_tunnel.t_key_mask != GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
)
3325 hlen += sizeof(struct gre_h_key);
3326
3327 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3328
3329 if (RBT_INSERT(mgre_tree, &mgre_tree, sc)mgre_tree_RBT_INSERT(&mgre_tree, sc) != NULL((void *)0))
3330 return (EADDRINUSE48);
3331
3332 sc->sc_if.if_hdrlenif_data.ifi_hdrlen = hlen;
3333 SET(sc->sc_if.if_flags, IFF_RUNNING)((sc->sc_if.if_flags) |= (0x40));
3334
3335 return (0);
3336}
3337
3338static int
3339mgre_down(struct mgre_softc *sc)
3340{
3341 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3342
3343 CLR(sc->sc_if.if_flags, IFF_RUNNING)((sc->sc_if.if_flags) &= ~(0x40));
3344 sc->sc_if.if_hdrlenif_data.ifi_hdrlen = GRE_HDRLEN(sizeof(struct ip) + sizeof(struct gre_header)); /* symmetry */
3345
3346 RBT_REMOVE(mgre_tree, &mgre_tree, sc)mgre_tree_RBT_REMOVE(&mgre_tree, sc);
3347
3348 /* barrier? */
3349
3350 return (0);
3351}
3352
3353static int
3354egre_up(struct egre_softc *sc)
3355{
3356 if (sc->sc_tunnel.t_af == AF_UNSPEC0)
3357 return (EDESTADDRREQ39);
3358
3359 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3360
3361 if (RBT_INSERT(egre_tree, &egre_tree, sc)egre_tree_RBT_INSERT(&egre_tree, sc) != NULL((void *)0))
3362 return (EADDRINUSE48);
3363
3364 SET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING)((sc->sc_ac.ac_if.if_flags) |= (0x40));
3365
3366 return (0);
3367}
3368
3369static int
3370egre_down(struct egre_softc *sc)
3371{
3372 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3373
3374 CLR(sc->sc_ac.ac_if.if_flags, IFF_RUNNING)((sc->sc_ac.ac_if.if_flags) &= ~(0x40));
3375
3376 RBT_REMOVE(egre_tree, &egre_tree, sc)egre_tree_RBT_REMOVE(&egre_tree, sc);
3377
3378 /* barrier? */
3379
3380 return (0);
3381}
3382
3383static int
3384egre_media_change(struct ifnet *ifp)
3385{
3386 return (ENOTTY25);
3387}
3388
3389static void
3390egre_media_status(struct ifnet *ifp, struct ifmediareq *imr)
3391{
3392 imr->ifm_active = IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL;
3393 imr->ifm_status = IFM_AVALID0x0000000000000001ULL | IFM_ACTIVE0x0000000000000002ULL;
3394}
3395
3396static int
3397nvgre_up(struct nvgre_softc *sc)
3398{
3399 struct gre_tunnel *tunnel = &sc->sc_tunnel;
3400 struct ifnet *ifp0;
3401 void *inm;
3402 int error;
3403
3404 if (tunnel->t_af == AF_UNSPEC0)
3405 return (EDESTADDRREQ39);
3406
3407 ifp0 = if_get(sc->sc_ifp0);
3408 if (ifp0 == NULL((void *)0))
3409 return (ENXIO6);
3410 if (!ISSET(ifp0->if_flags, IFF_MULTICAST)((ifp0->if_flags) & (0x8000))) {
3411 error = ENODEV19;
3412 goto put;
3413 }
3414
3415 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3416
3417 if (RBT_INSERT(nvgre_mcast_tree, &nvgre_mcast_tree, sc)nvgre_mcast_tree_RBT_INSERT(&nvgre_mcast_tree, sc) != NULL((void *)0)) {
3418 error = EADDRINUSE48;
3419 goto put;
3420 }
3421 if (RBT_INSERT(nvgre_ucast_tree, &nvgre_ucast_tree, sc)nvgre_ucast_tree_RBT_INSERT(&nvgre_ucast_tree, sc) != NULL((void *)0)) {
3422 error = EADDRINUSE48;
3423 goto remove_mcast;
3424 }
3425
3426 switch (tunnel->t_af) {
3427 case AF_INET2:
3428 inm = in_addmulti(&tunnel->t_dst4t_dst.in4, ifp0);
3429 if (inm == NULL((void *)0)) {
3430 error = ECONNABORTED53;
3431 goto remove_ucast;
3432 }
3433 break;
3434#ifdef INET61
3435 case AF_INET624:
3436 inm = in6_addmulti(&tunnel->t_dst6t_dst.in6, ifp0, &error);
3437 if (inm == NULL((void *)0)) {
3438 /* error is already set */
3439 goto remove_ucast;
3440 }
3441 break;
3442#endif /* INET6 */
3443 default:
3444 unhandled_af(tunnel->t_af);
3445 }
3446
3447 if_linkstatehook_add(ifp0, &sc->sc_ltask);
3448 if_detachhook_add(ifp0, &sc->sc_dtask);
3449
3450 if_put(ifp0);
3451
3452 sc->sc_inm = inm;
3453 SET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING)((sc->sc_ac.ac_if.if_flags) |= (0x40));
3454
3455 return (0);
3456
3457remove_ucast:
3458 RBT_REMOVE(nvgre_ucast_tree, &nvgre_ucast_tree, sc)nvgre_ucast_tree_RBT_REMOVE(&nvgre_ucast_tree, sc);
3459remove_mcast:
3460 RBT_REMOVE(nvgre_mcast_tree, &nvgre_mcast_tree, sc)nvgre_mcast_tree_RBT_REMOVE(&nvgre_mcast_tree, sc);
3461put:
3462 if_put(ifp0);
3463 return (error);
3464}
3465
3466static int
3467nvgre_down(struct nvgre_softc *sc)
3468{
3469 struct gre_tunnel *tunnel = &sc->sc_tunnel;
3470 struct ifnet *ifp = &sc->sc_ac.ac_if;
3471 struct taskq *softnet = net_tq(ifp->if_index);
3472 struct ifnet *ifp0;
3473
3474 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3475
3476 CLR(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) &= ~(0x40));
3477
3478 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
3479 ifq_barrier(&ifp->if_snd);
3480 if (!task_del(softnet, &sc->sc_send_task))
3481 taskq_barrier(softnet);
3482 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
3483
3484 mq_purge(&sc->sc_send_list);
3485
3486 ifp0 = if_get(sc->sc_ifp0);
3487 if (ifp0 != NULL((void *)0)) {
3488 if_detachhook_del(ifp0, &sc->sc_dtask);
3489 if_linkstatehook_del(ifp0, &sc->sc_ltask);
3490 }
3491 if_put(ifp0);
3492
3493 switch (tunnel->t_af) {
3494 case AF_INET2:
3495 in_delmulti(sc->sc_inm);
3496 break;
3497
3498#ifdef INET61
3499 case AF_INET624:
3500 in6_delmulti(sc->sc_inm);
3501 break;
3502#endif
3503 default:
3504 unhandled_af(tunnel->t_af);
3505 }
3506
3507 RBT_REMOVE(nvgre_ucast_tree, &nvgre_ucast_tree, sc)nvgre_ucast_tree_RBT_REMOVE(&nvgre_ucast_tree, sc);
3508 RBT_REMOVE(nvgre_mcast_tree, &nvgre_mcast_tree, sc)nvgre_mcast_tree_RBT_REMOVE(&nvgre_mcast_tree, sc);
3509
3510 return (0);
3511}
3512
/*
 * Parent interface link-state hook; nothing to do for nvgre(4).
 */
static void
nvgre_link_change(void *arg)
{
	/* nop */
}
3518
3519static void
3520nvgre_detach(void *arg)
3521{
3522 struct nvgre_softc *sc = arg;
3523 struct ifnet *ifp = &sc->sc_ac.ac_if;
3524
3525 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
3526 nvgre_down(sc);
3527 if_down(ifp);
3528 }
3529
3530 sc->sc_ifp0 = 0;
3531}
3532
3533static int
3534nvgre_set_parent(struct nvgre_softc *sc, const char *parent)
3535{
3536 struct ifnet *ifp0;
3537
3538 ifp0 = if_unit(parent);
3539 if (ifp0 == NULL((void *)0))
3540 return (EINVAL22);
3541
3542 if (!ISSET(ifp0->if_flags, IFF_MULTICAST)((ifp0->if_flags) & (0x8000))) {
3543 if_put(ifp0);
3544 return (EPROTONOSUPPORT43);
3545 }
3546
3547 ifsetlro(ifp0, 0);
3548
3549 /* commit */
3550 sc->sc_ifp0 = ifp0->if_index;
3551 if_put(ifp0);
3552
3553 return (0);
3554}
3555
3556static int
3557nvgre_add_addr(struct nvgre_softc *sc, const struct ifbareq *ifba)
3558{
3559 struct sockaddr_in *sin;
3560#ifdef INET61
3561 struct sockaddr_in6 *sin6;
3562 struct sockaddr_in6 src6 = {
3563 .sin6_len = sizeof(src6),
3564 .sin6_family = AF_UNSPEC0,
3565 };
3566 int error;
3567#endif
3568 union gre_addr endpoint;
3569 unsigned int type;
3570
3571 /* ignore ifba_ifsname */
3572
3573 if (ISSET(ifba->ifba_flags, ~IFBAF_TYPEMASK)((ifba->ifba_flags) & (~0x03)))
3574 return (EINVAL22);
3575 switch (ifba->ifba_flags & IFBAF_TYPEMASK0x03) {
3576 case IFBAF_DYNAMIC0x00:
3577 type = EBE_DYNAMIC0x0;
3578 break;
3579 case IFBAF_STATIC0x01:
3580 type = EBE_STATIC0x1;
3581 break;
3582 default:
3583 return (EINVAL22);
3584 }
3585
3586 memset(&endpoint, 0, sizeof(endpoint))__builtin_memset((&endpoint), (0), (sizeof(endpoint)));
3587
3588 if (ifba->ifba_dstsa.ss_family != sc->sc_tunnel.t_af)
3589 return (EAFNOSUPPORT47);
3590 switch (ifba->ifba_dstsa.ss_family) {
3591 case AF_INET2:
3592 sin = (struct sockaddr_in *)&ifba->ifba_dstsa;
3593 if (in_nullhost(sin->sin_addr)((sin->sin_addr).s_addr == ((u_int32_t) (__uint32_t)(__builtin_constant_p
((u_int32_t)(0x00000000)) ? (__uint32_t)(((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff) << 24 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff00) << 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff0000) >> 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff000000) >> 24) : __swap32md((u_int32_t
)(0x00000000)))))
||
3594 IN_MULTICAST(sin->sin_addr.s_addr)(((u_int32_t)(sin->sin_addr.s_addr) & ((u_int32_t) (__uint32_t
)(__builtin_constant_p((u_int32_t)(0xf0000000)) ? (__uint32_t
)(((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff) <<
24 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff00) <<
8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff0000) >>
8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff000000)
>> 24) : __swap32md((u_int32_t)(0xf0000000))))) == ((u_int32_t
) (__uint32_t)(__builtin_constant_p((u_int32_t)(0xe0000000)) ?
(__uint32_t)(((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff
) << 24 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff00
) << 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff000000
) >> 24) : __swap32md((u_int32_t)(0xe0000000)))))
)
3595 return (EADDRNOTAVAIL49);
3596
3597 endpoint.in4 = sin->sin_addr;
3598 break;
3599
3600#ifdef INET61
3601 case AF_INET624:
3602 sin6 = (struct sockaddr_in6 *)&ifba->ifba_dstsa;
3603 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)((*(const u_int32_t *)(const void *)(&(&sin6->sin6_addr
)->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t
*)(const void *)(&(&sin6->sin6_addr)->__u6_addr
.__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void
*)(&(&sin6->sin6_addr)->__u6_addr.__u6_addr8[8
]) == 0) && (*(const u_int32_t *)(const void *)(&
(&sin6->sin6_addr)->__u6_addr.__u6_addr8[12]) == 0)
)
||
3604 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)((&sin6->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff
)
)
3605 return (EADDRNOTAVAIL49);
3606
3607 in6_recoverscope(&src6, &sc->sc_tunnel.t_src6t_src.in6);
3608
3609 if (src6.sin6_scope_id != sin6->sin6_scope_id)
3610 return (EADDRNOTAVAIL49);
3611
3612 error = in6_embedscope(&endpoint.in6, sin6, NULL((void *)0), NULL((void *)0));
3613 if (error != 0)
3614 return (error);
3615
3616 break;
3617#endif
3618 default: /* AF_UNSPEC */
3619 return (EADDRNOTAVAIL49);
3620 }
3621
3622 return (etherbridge_add_addr(&sc->sc_eb, &endpoint,
3623 &ifba->ifba_dst, type));
3624}
3625
3626static int
3627nvgre_del_addr(struct nvgre_softc *sc, const struct ifbareq *ifba)
3628{
3629 return (etherbridge_del_addr(&sc->sc_eb, &ifba->ifba_dst));
3630}
3631
3632static void
3633nvgre_start(struct ifnet *ifp)
3634{
3635 struct nvgre_softc *sc = ifp->if_softc;
3636 const struct gre_tunnel *tunnel = &sc->sc_tunnel;
3637 union gre_addr gateway;
3638 struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 };
3639 struct ether_header *eh;
3640 struct mbuf *m, *m0;
3641#if NBPFILTER1 > 0
3642 caddr_t if_bpf;
3643#endif
3644
3645 if (!gre_allow) {
3646 ifq_purge(&ifp->if_snd);
3647 return;
3648 }
3649
3650 while ((m0 = ifq_dequeue(&ifp->if_snd)) != NULL((void *)0)) {
3651#if NBPFILTER1 > 0
3652 if_bpf = ifp->if_bpf;
3653 if (if_bpf)
3654 bpf_mtap_ether(if_bpf, m0, BPF_DIRECTION_OUT(1 << 1));
3655#endif
3656
3657 eh = mtod(m0, struct ether_header *)((struct ether_header *)((m0)->m_hdr.mh_data));
3658 if (ETHER_IS_BROADCAST(eh->ether_dhost)(((eh->ether_dhost)[0] & (eh->ether_dhost)[1] &
(eh->ether_dhost)[2] & (eh->ether_dhost)[3] & (
eh->ether_dhost)[4] & (eh->ether_dhost)[5]) == 0xff
)
)
3659 gateway = tunnel->t_dst;
3660 else {
3661 const union gre_addr *endpoint;
3662
3663 smr_read_enter();
3664 endpoint = etherbridge_resolve_ea(&sc->sc_eb,
3665 (struct ether_addr *)eh->ether_dhost);
3666 if (endpoint == NULL((void *)0)) {
3667 /* "flood" to unknown hosts */
3668 endpoint = &tunnel->t_dst;
3669 }
3670 gateway = *endpoint;
3671 smr_read_leave();
3672 }
3673
3674 /* force prepend mbuf because of alignment problems */
3675 m = m_get(M_DONTWAIT0x0002, m0->m_typem_hdr.mh_type);
3676 if (m == NULL((void *)0)) {
3677 m_freem(m0);
3678 continue;
3679 }
3680
3681 M_MOVE_PKTHDR(m, m0)do { (m)->m_hdr.mh_flags = ((m)->m_hdr.mh_flags & (
0x0001 | 0x0008)); (m)->m_hdr.mh_flags |= (m0)->m_hdr.mh_flags
& (0x0002|0x0004|0x0010|0x0100|0x0200|0x0400|0x4000| 0x0800
|0x0040|0x1000|0x8000|0x0020|0x2000); do { ((m))->M_dat.MH
.MH_pkthdr = ((m0))->M_dat.MH.MH_pkthdr; ((m0))->m_hdr.
mh_flags &= ~0x0002; { ((&((m0))->M_dat.MH.MH_pkthdr
.ph_tags)->slh_first) = ((void *)0); }; ((m0))->M_dat.MH
.MH_pkthdr.pf.statekey = ((void *)0); } while ( 0); if (((m)->
m_hdr.mh_flags & 0x0001) == 0) (m)->m_hdr.mh_data = (m
)->M_dat.MH.MH_dat.MH_databuf; } while ( 0)
;
3682 m->m_nextm_hdr.mh_next = m0;
3683
3684 m_align(m, 0);
3685 m->m_lenm_hdr.mh_len = 0;
3686
3687 m = gre_encap_dst(tunnel, &gateway, m,
3688 htons(ETHERTYPE_TRANSETHER)(__uint16_t)(__builtin_constant_p(0x6558) ? (__uint16_t)(((__uint16_t
)(0x6558) & 0xffU) << 8 | ((__uint16_t)(0x6558) &
0xff00U) >> 8) : __swap16md(0x6558))
,
3689 tunnel->t_ttl, gre_l2_tos(tunnel, m));
3690 if (m == NULL((void *)0))
3691 continue;
3692
3693 m->m_flagsm_hdr.mh_flags &= ~(M_BCAST0x0100|M_MCAST0x0200);
3694 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = tunnel->t_rtableid;
3695
3696#if NPF1 > 0
3697 pf_pkt_addr_changed(m);
3698#endif
3699
3700 ml_enqueue(&ml, m);
3701 }
3702
3703 if (!ml_empty(&ml)((&ml)->ml_len == 0)) {
3704 if (mq_enlist(&sc->sc_send_list, &ml) == 0)
3705 task_add(net_tq(ifp->if_index), &sc->sc_send_task);
3706 /* else set OACTIVE? */
3707 }
3708}
3709
3710static uint64_t
3711nvgre_send4(struct nvgre_softc *sc, struct mbuf_list *ml)
3712{
3713 struct ip_moptions imo;
3714 struct mbuf *m;
3715 uint64_t oerrors = 0;
3716
3717 imo.imo_ifidx = sc->sc_ifp0;
3718 imo.imo_ttl = sc->sc_tunnel.t_ttl;
3719 imo.imo_loop = 0;
3720
3721 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
3722 while ((m = ml_dequeue(ml)) != NULL((void *)0)) {
3723 if (ip_output(m, NULL((void *)0), NULL((void *)0), IP_RAWOUTPUT0x2, &imo, NULL((void *)0), 0) != 0)
3724 oerrors++;
3725 }
3726 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
3727
3728 return (oerrors);
3729}
3730
#ifdef INET6
/*
 * IPv6 counterpart of nvgre_send4(): transmit a list of encapsulated
 * packets via ip6_output() with multicast options bound to the parent
 * interface.  Returns the count of failed transmissions.
 */
static uint64_t
nvgre_send6(struct nvgre_softc *sc, struct mbuf_list *ml)
{
	struct ip6_moptions im6o;
	struct mbuf *m;
	uint64_t oerrors = 0;

	im6o.im6o_ifidx = sc->sc_ifp0;
	im6o.im6o_hlim = sc->sc_tunnel.t_ttl;
	im6o.im6o_loop = 0;

	NET_LOCK();
	while ((m = ml_dequeue(ml)) != NULL) {
		if (ip6_output(m, NULL, NULL, 0, &im6o, NULL) != 0)
			oerrors++;
	}
	NET_UNLOCK();

	return (oerrors);
}
#endif /* INET6 */
3753
3754static void
3755nvgre_send(void *arg)
3756{
3757 struct nvgre_softc *sc = arg;
3758 struct ifnet *ifp = &sc->sc_ac.ac_if;
3759 sa_family_t af = sc->sc_tunnel.t_af;
3760 struct mbuf_list ml;
3761 uint64_t oerrors;
3762
3763 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
3764 return;
3765
3766 mq_delist(&sc->sc_send_list, &ml);
3767 if (ml_empty(&ml)((&ml)->ml_len == 0))
3768 return;
3769
3770 switch (af) {
3771 case AF_INET2:
3772 oerrors = nvgre_send4(sc, &ml);
3773 break;
3774#ifdef INET61
3775 case AF_INET624:
3776 oerrors = nvgre_send6(sc, &ml);
3777 break;
3778#endif
3779 default:
3780 unhandled_af(af);
3781 /* NOTREACHED */
3782 }
3783
3784 ifp->if_oerrorsif_data.ifi_oerrors += oerrors; /* XXX should be ifq_oerrors */
3785}
3786
3787static int
3788eoip_up(struct eoip_softc *sc)
3789{
3790 if (sc->sc_tunnel.t_af == AF_UNSPEC0)
3791 return (EDESTADDRREQ39);
3792
3793 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3794
3795 if (RBT_INSERT(eoip_tree, &eoip_tree, sc)eoip_tree_RBT_INSERT(&eoip_tree, sc) != NULL((void *)0))
3796 return (EADDRINUSE48);
3797
3798 SET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING)((sc->sc_ac.ac_if.if_flags) |= (0x40));
3799
3800 if (sc->sc_ka_state != GRE_KA_NONE0) {
3801 sc->sc_ka_holdmax = sc->sc_ka_count;
3802 eoip_keepalive_send(sc);
3803 }
3804
3805 return (0);
3806}
3807
3808static int
3809eoip_down(struct eoip_softc *sc)
3810{
3811 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3812 CLR(sc->sc_ac.ac_if.if_flags, IFF_RUNNING)((sc->sc_ac.ac_if.if_flags) &= ~(0x40));
3813
3814 if (sc->sc_ka_state != GRE_KA_NONE0) {
3815 timeout_del_barrier(&sc->sc_ka_hold);
3816 timeout_del_barrier(&sc->sc_ka_send);
3817
3818 sc->sc_ka_state = GRE_KA_DOWN1;
3819 gre_link_state(&sc->sc_ac.ac_if, sc->sc_ka_state);
3820 }
3821
3822 RBT_REMOVE(eoip_tree, &eoip_tree, sc)eoip_tree_RBT_REMOVE(&eoip_tree, sc);
3823
3824 return (0);
3825}
3826
3827static void
3828eoip_start(struct ifnet *ifp)
3829{
3830 struct eoip_softc *sc = ifp->if_softc;
3831 struct mbuf *m0, *m;
3832#if NBPFILTER1 > 0
3833 caddr_t if_bpf;
3834#endif
3835
3836 if (!gre_allow) {
3837 ifq_purge(&ifp->if_snd);
3838 return;
3839 }
3840
3841 while ((m0 = ifq_dequeue(&ifp->if_snd)) != NULL((void *)0)) {
3842#if NBPFILTER1 > 0
3843 if_bpf = ifp->if_bpf;
3844 if (if_bpf)
3845 bpf_mtap_ether(if_bpf, m0, BPF_DIRECTION_OUT(1 << 1));
3846#endif
3847
3848 /* force prepend mbuf because of alignment problems */
3849 m = m_get(M_DONTWAIT0x0002, m0->m_typem_hdr.mh_type);
3850 if (m == NULL((void *)0)) {
3851 m_freem(m0);
3852 continue;
3853 }
3854
3855 M_MOVE_PKTHDR(m, m0)do { (m)->m_hdr.mh_flags = ((m)->m_hdr.mh_flags & (
0x0001 | 0x0008)); (m)->m_hdr.mh_flags |= (m0)->m_hdr.mh_flags
& (0x0002|0x0004|0x0010|0x0100|0x0200|0x0400|0x4000| 0x0800
|0x0040|0x1000|0x8000|0x0020|0x2000); do { ((m))->M_dat.MH
.MH_pkthdr = ((m0))->M_dat.MH.MH_pkthdr; ((m0))->m_hdr.
mh_flags &= ~0x0002; { ((&((m0))->M_dat.MH.MH_pkthdr
.ph_tags)->slh_first) = ((void *)0); }; ((m0))->M_dat.MH
.MH_pkthdr.pf.statekey = ((void *)0); } while ( 0); if (((m)->
m_hdr.mh_flags & 0x0001) == 0) (m)->m_hdr.mh_data = (m
)->M_dat.MH.MH_dat.MH_databuf; } while ( 0)
;
3856 m->m_nextm_hdr.mh_next = m0;
3857
3858 m_align(m, 0);
3859 m->m_lenm_hdr.mh_len = 0;
3860
3861 m = eoip_encap(sc, m, gre_l2_tos(&sc->sc_tunnel, m));
3862 if (m == NULL((void *)0) || gre_ip_output(&sc->sc_tunnel, m) != 0) {
3863 ifp->if_oerrorsif_data.ifi_oerrors++;
3864 continue;
3865 }
3866 }
3867}
3868
3869static struct mbuf *
3870eoip_encap(struct eoip_softc *sc, struct mbuf *m, uint8_t tos)
3871{
3872 struct gre_header *gh;
3873 struct gre_h_key_eoip *eoiph;
3874 int len = m->m_pkthdrM_dat.MH.MH_pkthdr.len;
3875
3876 m = m_prepend(m, sizeof(*gh) + sizeof(*eoiph), M_DONTWAIT0x0002);
3877 if (m == NULL((void *)0))
3878 return (NULL((void *)0));
3879
3880 gh = mtod(m, struct gre_header *)((struct gre_header *)((m)->m_hdr.mh_data));
3881 gh->gre_flags = htons(GRE_VERS_1 | GRE_KP)(__uint16_t)(__builtin_constant_p(0x0001 | 0x2000) ? (__uint16_t
)(((__uint16_t)(0x0001 | 0x2000) & 0xffU) << 8 | ((
__uint16_t)(0x0001 | 0x2000) & 0xff00U) >> 8) : __swap16md
(0x0001 | 0x2000))
;
3882 gh->gre_proto = htons(GRE_EOIP)(__uint16_t)(__builtin_constant_p(0x6400) ? (__uint16_t)(((__uint16_t
)(0x6400) & 0xffU) << 8 | ((__uint16_t)(0x6400) &
0xff00U) >> 8) : __swap16md(0x6400))
;
3883
3884 eoiph = (struct gre_h_key_eoip *)(gh + 1);
3885 htobem16(&eoiph->eoip_len, len)(*(__uint16_t *)(&eoiph->eoip_len) = (__uint16_t)(__builtin_constant_p
(len) ? (__uint16_t)(((__uint16_t)(len) & 0xffU) <<
8 | ((__uint16_t)(len) & 0xff00U) >> 8) : __swap16md
(len)))
;
3886 eoiph->eoip_tunnel_id = sc->sc_tunnel_id;
3887
3888 return (gre_encap_ip(&sc->sc_tunnel, m, sc->sc_tunnel.t_ttl, tos)gre_encap_dst_ip((&sc->sc_tunnel), &(&sc->sc_tunnel
)->t_dst, (m), (sc->sc_tunnel.t_ttl), (tos))
);
3889}
3890
3891static void
3892eoip_keepalive_send(void *arg)
3893{
3894 struct eoip_softc *sc = arg;
3895 struct ifnet *ifp = &sc->sc_ac.ac_if;
3896 struct mbuf *m;
3897 int linkhdr;
3898
3899 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
3900 return;
3901
3902 /* this is really conservative */
3903#ifdef INET61
3904 linkhdr = max_linkhdr + MAX(sizeof(struct ip), sizeof(struct ip6_hdr))(((sizeof(struct ip))>(sizeof(struct ip6_hdr)))?(sizeof(struct
ip)):(sizeof(struct ip6_hdr)))
+
3905 sizeof(struct gre_header) + sizeof(struct gre_h_key_eoip);
3906#else
3907 linkhdr = max_linkhdr + sizeof(struct ip) +
3908 sizeof(struct gre_header) + sizeof(struct gre_h_key_eoip);
3909#endif
3910 MGETHDR(m, M_DONTWAIT, MT_DATA)m = m_gethdr((0x0002), (1));
3911 if (m == NULL((void *)0))
3912 return;
3913
3914 if (linkhdr > MHLEN((256 - sizeof(struct m_hdr)) - sizeof(struct pkthdr))) {
3915 MCLGETL(m, M_DONTWAIT, linkhdr)m_clget((m), (0x0002), (linkhdr));
3916 if (!ISSET(m->m_flags, M_EXT)((m->m_hdr.mh_flags) & (0x0001))) {
3917 m_freem(m);
3918 return;
3919 }
3920 }
3921
3922 m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio = ifp->if_llprio;
3923 m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = linkhdr;
3924 m_adj(m, linkhdr);
3925
3926 m = eoip_encap(sc, m, gre_l2_tos(&sc->sc_tunnel, m));
3927 if (m == NULL((void *)0))
3928 return;
3929
3930 gre_ip_output(&sc->sc_tunnel, m);
3931
3932 timeout_add_sec(&sc->sc_ka_send, sc->sc_ka_timeo);
3933}
3934
3935static void
3936eoip_keepalive_hold(void *arg)
3937{
3938 struct eoip_softc *sc = arg;
3939 struct ifnet *ifp = &sc->sc_ac.ac_if;
3940
3941 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
3942 return;
3943
3944 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
3945 sc->sc_ka_state = GRE_KA_DOWN1;
3946 gre_link_state(ifp, sc->sc_ka_state);
3947 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
3948}
3949
3950static void
3951eoip_keepalive_recv(struct eoip_softc *sc)
3952{
3953 switch (sc->sc_ka_state) {
3954 case GRE_KA_NONE0:
3955 return;
3956 case GRE_KA_DOWN1:
3957 sc->sc_ka_state = GRE_KA_HOLD2;
3958 sc->sc_ka_holdcnt = sc->sc_ka_holdmax;
3959 sc->sc_ka_holdmax = MIN(sc->sc_ka_holdmax * 2,(((sc->sc_ka_holdmax * 2)<(16 * sc->sc_ka_count))?(sc
->sc_ka_holdmax * 2):(16 * sc->sc_ka_count))
3960 16 * sc->sc_ka_count)(((sc->sc_ka_holdmax * 2)<(16 * sc->sc_ka_count))?(sc
->sc_ka_holdmax * 2):(16 * sc->sc_ka_count))
;
3961 break;
3962 case GRE_KA_HOLD2:
3963 if (--sc->sc_ka_holdcnt > 0)
3964 break;
3965
3966 sc->sc_ka_state = GRE_KA_UP3;
3967 gre_link_state(&sc->sc_ac.ac_if, sc->sc_ka_state);
3968 break;
3969
3970 case GRE_KA_UP3:
3971 sc->sc_ka_holdmax--;
3972 sc->sc_ka_holdmax = MAX(sc->sc_ka_holdmax, sc->sc_ka_count)(((sc->sc_ka_holdmax)>(sc->sc_ka_count))?(sc->sc_ka_holdmax
):(sc->sc_ka_count))
;
3973 break;
3974 }
3975
3976 timeout_add_sec(&sc->sc_ka_hold, sc->sc_ka_timeo * sc->sc_ka_count);
3977}
3978
3979static struct mbuf *
3980eoip_input(struct gre_tunnel *key, struct mbuf *m,
3981 const struct gre_header *gh, uint8_t otos, int iphlen)
3982{
3983 struct eoip_softc *sc;
3984 struct gre_h_key_eoip *eoiph;
3985 int hlen, len;
3986 caddr_t buf;
3987
3988 if (gh->gre_flags != htons(GRE_KP | GRE_VERS_1)(__uint16_t)(__builtin_constant_p(0x2000 | 0x0001) ? (__uint16_t
)(((__uint16_t)(0x2000 | 0x0001) & 0xffU) << 8 | ((
__uint16_t)(0x2000 | 0x0001) & 0xff00U) >> 8) : __swap16md
(0x2000 | 0x0001))
)
3989 goto decline;
3990
3991 hlen = iphlen + sizeof(*gh) + sizeof(*eoiph);
3992 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < hlen)
3993 goto decline;
3994
3995 m = m_pullup(m, hlen);
3996 if (m == NULL((void *)0))
3997 return (NULL((void *)0));
3998
3999 buf = mtod(m, caddr_t)((caddr_t)((m)->m_hdr.mh_data));
4000 gh = (struct gre_header *)(buf + iphlen);
4001 eoiph = (struct gre_h_key_eoip *)(gh + 1);
4002
4003 key->t_key = eoiph->eoip_tunnel_id;
4004
4005 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
4006 sc = RBT_FIND(eoip_tree, &eoip_tree, (const struct eoip_softc *)key)eoip_tree_RBT_FIND(&eoip_tree, (const struct eoip_softc *
)key)
;
4007 if (sc == NULL((void *)0))
4008 goto decline;
4009
4010 /* it's ours now */
4011 len = bemtoh16(&eoiph->eoip_len)(__uint16_t)(__builtin_constant_p(*(__uint16_t *)(&eoiph->
eoip_len)) ? (__uint16_t)(((__uint16_t)(*(__uint16_t *)(&
eoiph->eoip_len)) & 0xffU) << 8 | ((__uint16_t)(
*(__uint16_t *)(&eoiph->eoip_len)) & 0xff00U) >>
8) : __swap16md(*(__uint16_t *)(&eoiph->eoip_len)))
;
4012 if (len == 0) {
4013 eoip_keepalive_recv(sc);
4014 goto drop;
4015 }
4016
4017 m = gre_ether_align(m, hlen);
4018 if (m == NULL((void *)0))
4019 return (NULL((void *)0));
4020
4021 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < len)
4022 goto drop;
4023 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len != len)
4024 m_adj(m, len - m->m_pkthdrM_dat.MH.MH_pkthdr.len);
4025
4026 m->m_flagsm_hdr.mh_flags &= ~(M_MCAST0x0200|M_BCAST0x0100);
4027
4028 gre_l2_prio(&sc->sc_tunnel, m, otos)do { int rxprio = (&sc->sc_tunnel)->t_rxhprio; switch
(rxprio) { case -1: break; case -3: (m)->M_dat.MH.MH_pkthdr
.pf.prio = (((otos)) >> 5); break; default: (m)->M_dat
.MH.MH_pkthdr.pf.prio = rxprio; break; } } while (0)
;
4029
4030 if_vinput(&sc->sc_ac.ac_if, m);
4031
4032 return (NULL((void *)0));
4033
4034decline:
4035 return (m);
4036drop:
4037 m_freem(m);
4038 return (NULL((void *)0));
4039}
4040
4041const struct sysctl_bounded_args gre_vars[] = {
4042 { GRECTL_ALLOW1, &gre_allow, 0, 1 },
4043 { GRECTL_WCCP2, &gre_wccp, 0, 1 },
4044};
4045
4046int
4047gre_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
4048 size_t newlen)
4049{
4050 int error;
4051
4052 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
4053 error = sysctl_bounded_arr(gre_vars, nitems(gre_vars)(sizeof((gre_vars)) / sizeof((gre_vars)[0])), name,
4054 namelen, oldp, oldlenp, newp, newlen);
4055 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
4056 return error;
4057}
4058
4059static inline int
4060gre_ip_cmp(int af, const union gre_addr *a, const union gre_addr *b)
4061{
4062 switch (af) {
4063#ifdef INET61
4064 case AF_INET624:
4065 return (memcmp(&a->in6, &b->in6, sizeof(a->in6))__builtin_memcmp((&a->in6), (&b->in6), (sizeof(
a->in6)))
);
4066#endif /* INET6 */
4067 case AF_INET2:
4068 return (memcmp(&a->in4, &b->in4, sizeof(a->in4))__builtin_memcmp((&a->in4), (&b->in4), (sizeof(
a->in4)))
);
4069 default:
4070 unhandled_af(af);
4071 }
4072
4073 return (0);
4074}
4075
4076static int
4077gre_cmp_src(const struct gre_tunnel *a, const struct gre_tunnel *b)
4078{
4079 uint32_t ka, kb;
4080 uint32_t mask;
4081 int rv;
4082
4083 /* is K set at all? */
4084 ka = a->t_key_mask & GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
4085 kb = b->t_key_mask & GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
4086
4087 /* sort by whether K is set */
4088 if (ka > kb)
4089 return (1);
4090 if (ka < kb)
4091 return (-1);
4092
4093 /* is K set on both? */
4094 if (ka != GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
) {
4095 /* get common prefix */
4096 mask = a->t_key_mask & b->t_key_mask;
4097
4098 ka = a->t_key & mask;
4099 kb = b->t_key & mask;
4100
4101 /* sort by common prefix */
4102 if (ka > kb)
4103 return (1);
4104 if (ka < kb)
4105 return (-1);
4106 }
4107
4108 /* sort by routing table */
4109 if (a->t_rtableid > b->t_rtableid)
4110 return (1);
4111 if (a->t_rtableid < b->t_rtableid)
4112 return (-1);
4113
4114 /* sort by address */
4115 if (a->t_af > b->t_af)
4116 return (1);
4117 if (a->t_af < b->t_af)
4118 return (-1);
4119
4120 rv = gre_ip_cmp(a->t_af, &a->t_src, &b->t_src);
4121 if (rv != 0)
4122 return (rv);
4123
4124 return (0);
4125}
4126
4127static int
4128gre_cmp(const struct gre_tunnel *a, const struct gre_tunnel *b)
4129{
4130 int rv;
4131
4132 rv = gre_cmp_src(a, b);
4133 if (rv != 0)
4134 return (rv);
4135
4136 return (gre_ip_cmp(a->t_af, &a->t_dst, &b->t_dst));
4137}
4138
4139static inline int
4140mgre_cmp(const struct mgre_softc *a, const struct mgre_softc *b)
4141{
4142 return (gre_cmp_src(&a->sc_tunnel, &b->sc_tunnel));
4143}
4144
4145RBT_GENERATE(mgre_tree, mgre_softc, sc_entry, mgre_cmp)static int mgre_tree_RBT_COMPARE(const void *lptr, const void
*rptr) { const struct mgre_softc *l = lptr, *r = rptr; return
mgre_cmp(l, r); } static const struct rb_type mgre_tree_RBT_INFO
= { mgre_tree_RBT_COMPARE, ((void *)0), __builtin_offsetof(struct
mgre_softc, sc_entry), }; const struct rb_type *const mgre_tree_RBT_TYPE
= &mgre_tree_RBT_INFO
;
4146
4147static inline int
4148egre_cmp(const struct egre_softc *a, const struct egre_softc *b)
4149{
4150 return (gre_cmp(&a->sc_tunnel, &b->sc_tunnel));
4151}
4152
4153RBT_GENERATE(egre_tree, egre_softc, sc_entry, egre_cmp)static int egre_tree_RBT_COMPARE(const void *lptr, const void
*rptr) { const struct egre_softc *l = lptr, *r = rptr; return
egre_cmp(l, r); } static const struct rb_type egre_tree_RBT_INFO
= { egre_tree_RBT_COMPARE, ((void *)0), __builtin_offsetof(struct
egre_softc, sc_entry), }; const struct rb_type *const egre_tree_RBT_TYPE
= &egre_tree_RBT_INFO
;
4154
4155static int
4156nvgre_cmp_tunnel(const struct gre_tunnel *a, const struct gre_tunnel *b)
4157{
4158 uint32_t ka, kb;
4159
4160 ka = a->t_key & GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
4161 kb = b->t_key & GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
4162
4163 /* sort by common prefix */
4164 if (ka > kb)
4165 return (1);
4166 if (ka < kb)
4167 return (-1);
4168
4169 /* sort by routing table */
4170 if (a->t_rtableid > b->t_rtableid)
4171 return (1);
4172 if (a->t_rtableid < b->t_rtableid)
4173 return (-1);
4174
4175 /* sort by address */
4176 if (a->t_af > b->t_af)
4177 return (1);
4178 if (a->t_af < b->t_af)
4179 return (-1);
4180
4181 return (0);
4182}
4183
4184static inline int
4185nvgre_cmp_ucast(const struct nvgre_softc *na, const struct nvgre_softc *nb)
4186{
4187 const struct gre_tunnel *a = &na->sc_tunnel;
4188 const struct gre_tunnel *b = &nb->sc_tunnel;
4189 int rv;
4190
4191 rv = nvgre_cmp_tunnel(a, b);
4192 if (rv != 0)
4193 return (rv);
4194
4195 rv = gre_ip_cmp(a->t_af, &a->t_src, &b->t_src);
4196 if (rv != 0)
4197 return (rv);
4198
4199 return (0);
4200}
4201
4202static int
4203nvgre_cmp_mcast(const struct gre_tunnel *a, const union gre_addr *aa,
4204 unsigned int if0idxa, const struct gre_tunnel *b,
4205 const union gre_addr *ab,unsigned int if0idxb)
4206{
4207 int rv;
4208
4209 rv = nvgre_cmp_tunnel(a, b);
4210 if (rv != 0)
4211 return (rv);
4212
4213 rv = gre_ip_cmp(a->t_af, aa, ab);
4214 if (rv != 0)
4215 return (rv);
4216
4217 if (if0idxa > if0idxb)
4218 return (1);
4219 if (if0idxa < if0idxb)
4220 return (-1);
4221
4222 return (0);
4223}
4224
4225static inline int
4226nvgre_cmp_mcast_sc(const struct nvgre_softc *na, const struct nvgre_softc *nb)
4227{
4228 const struct gre_tunnel *a = &na->sc_tunnel;
4229 const struct gre_tunnel *b = &nb->sc_tunnel;
4230
4231 return (nvgre_cmp_mcast(a, &a->t_dst, na->sc_ifp0,
4232 b, &b->t_dst, nb->sc_ifp0));
4233}
4234
4235RBT_GENERATE(nvgre_ucast_tree, nvgre_softc, sc_uentry, nvgre_cmp_ucast)static int nvgre_ucast_tree_RBT_COMPARE(const void *lptr, const
void *rptr) { const struct nvgre_softc *l = lptr, *r = rptr;
return nvgre_cmp_ucast(l, r); } static const struct rb_type nvgre_ucast_tree_RBT_INFO
= { nvgre_ucast_tree_RBT_COMPARE, ((void *)0), __builtin_offsetof
(struct nvgre_softc, sc_uentry), }; const struct rb_type *const
nvgre_ucast_tree_RBT_TYPE = &nvgre_ucast_tree_RBT_INFO
;
4236RBT_GENERATE(nvgre_mcast_tree, nvgre_softc, sc_mentry, nvgre_cmp_mcast_sc)static int nvgre_mcast_tree_RBT_COMPARE(const void *lptr, const
void *rptr) { const struct nvgre_softc *l = lptr, *r = rptr;
return nvgre_cmp_mcast_sc(l, r); } static const struct rb_type
nvgre_mcast_tree_RBT_INFO = { nvgre_mcast_tree_RBT_COMPARE, (
(void *)0), __builtin_offsetof(struct nvgre_softc, sc_mentry)
, }; const struct rb_type *const nvgre_mcast_tree_RBT_TYPE = &
nvgre_mcast_tree_RBT_INFO
;
4237
4238static inline int
4239eoip_cmp(const struct eoip_softc *ea, const struct eoip_softc *eb)
4240{
4241 const struct gre_tunnel *a = &ea->sc_tunnel;
4242 const struct gre_tunnel *b = &eb->sc_tunnel;
4243 int rv;
4244
4245 if (a->t_key > b->t_key)
4246 return (1);
4247 if (a->t_key < b->t_key)
4248 return (-1);
4249
4250 /* sort by routing table */
4251 if (a->t_rtableid > b->t_rtableid)
4252 return (1);
4253 if (a->t_rtableid < b->t_rtableid)
4254 return (-1);
4255
4256 /* sort by address */
4257 if (a->t_af > b->t_af)
4258 return (1);
4259 if (a->t_af < b->t_af)
4260 return (-1);
4261
4262 rv = gre_ip_cmp(a->t_af, &a->t_src, &b->t_src);
4263 if (rv != 0)
4264 return (rv);
4265
4266 rv = gre_ip_cmp(a->t_af, &a->t_dst, &b->t_dst);
4267 if (rv != 0)
4268 return (rv);
4269
4270 return (0);
4271}
4272
4273RBT_GENERATE(eoip_tree, eoip_softc, sc_entry, eoip_cmp)static int eoip_tree_RBT_COMPARE(const void *lptr, const void
*rptr) { const struct eoip_softc *l = lptr, *r = rptr; return
eoip_cmp(l, r); } static const struct rb_type eoip_tree_RBT_INFO
= { eoip_tree_RBT_COMPARE, ((void *)0), __builtin_offsetof(struct
eoip_softc, sc_entry), }; const struct rb_type *const eoip_tree_RBT_TYPE
= &eoip_tree_RBT_INFO
;
4274
4275static int
4276nvgre_eb_port_eq(void *arg, void *a, void *b)
4277{
4278 struct nvgre_softc *sc = arg;
4279
4280 return (gre_ip_cmp(sc->sc_tunnel.t_af, a, b) == 0);
4281}
4282
4283static void *
4284nvgre_eb_port_take(void *arg, void *port)
4285{
4286 union gre_addr *ea = port;
4287 union gre_addr *endpoint;
4288
4289 endpoint = pool_get(&nvgre_endpoint_pool, PR_NOWAIT0x0002);
4290 if (endpoint == NULL((void *)0))
4291 return (NULL((void *)0));
4292
4293 *endpoint = *ea;
4294
4295 return (endpoint);
4296}
4297
4298static void
4299nvgre_eb_port_rele(void *arg, void *port)
4300{
4301 union gre_addr *endpoint = port;
4302
4303 pool_put(&nvgre_endpoint_pool, endpoint);
4304}
4305
4306static size_t
4307nvgre_eb_port_ifname(void *arg, char *dst, size_t len, void *port)
4308{
4309 struct nvgre_softc *sc = arg;
4310
4311 return (strlcpy(dst, sc->sc_ac.ac_if.if_xname, len));
4312}
4313
4314static void
4315nvgre_eb_port_sa(void *arg, struct sockaddr_storage *ss, void *port)
4316{
4317 struct nvgre_softc *sc = arg;
4318 union gre_addr *endpoint = port;
4319
4320 switch (sc->sc_tunnel.t_af) {
4321 case AF_INET2: {
4322 struct sockaddr_in *sin = (struct sockaddr_in *)ss;
4323
4324 sin->sin_len = sizeof(*sin);
4325 sin->sin_family = AF_INET2;
4326 sin->sin_addr = endpoint->in4;
4327 break;
4328 }
4329#ifdef INET61
4330 case AF_INET624: {
4331 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)ss;
4332
4333 sin6->sin6_len = sizeof(*sin6);
4334 sin6->sin6_family = AF_INET624;
4335 in6_recoverscope(sin6, &endpoint->in6);
4336
4337 break;
4338 }
4339#endif /* INET6 */
4340 default:
4341 unhandled_af(sc->sc_tunnel.t_af);
4342 }
4343}