Bug Summary

File:net/if_gre.c
Warning:line 1151, column 7
1st function call argument is an uninitialized value

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name if_gre.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/net/if_gre.c
1/* $OpenBSD: if_gre.c,v 1.171 2021/03/10 10:21:47 jsg Exp $ */
2/* $NetBSD: if_gre.c,v 1.9 1999/10/25 19:18:11 drochner Exp $ */
3
4/*
5 * Copyright (c) 1998 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Heiko W.Rupp <hwr@pilhuhn.de>
10 *
11 * IPv6-over-GRE contributed by Gert Doering <gert@greenie.muc.de>
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/*
36 * Encapsulate L3 protocols into IP, per RFC 1701 and 1702.
37 * See gre(4) for more details.
38 * Also supported: IP in IP encapsulation (proto 55) per RFC 2004.
39 */
40
41#include "bpfilter.h"
42#include "pf.h"
43
44#include <sys/param.h>
45#include <sys/mbuf.h>
46#include <sys/socket.h>
47#include <sys/sockio.h>
48#include <sys/kernel.h>
49#include <sys/systm.h>
50#include <sys/errno.h>
51#include <sys/timeout.h>
52#include <sys/queue.h>
53#include <sys/tree.h>
54#include <sys/pool.h>
55#include <sys/rwlock.h>
56
57#include <crypto/siphash.h>
58
59#include <net/if.h>
60#include <net/if_var.h>
61#include <net/if_types.h>
62#include <net/if_media.h>
63#include <net/route.h>
64
65#include <netinet/in.h>
66#include <netinet/in_var.h>
67#include <netinet/if_ether.h>
68#include <netinet/ip.h>
69#include <netinet/ip_var.h>
70#include <netinet/ip_ecn.h>
71
72#ifdef INET61
73#include <netinet/ip6.h>
74#include <netinet6/ip6_var.h>
75#include <netinet6/in6_var.h>
76#endif
77
78#ifdef PIPEX1
79#include <net/pipex.h>
80#endif
81
82#ifdef MPLS1
83#include <netmpls/mpls.h>
84#endif /* MPLS */
85
86#if NBPFILTER1 > 0
87#include <net/bpf.h>
88#endif
89
90#if NPF1 > 0
91#include <net/pfvar.h>
92#endif
93
94#include <net/if_gre.h>
95
96#include <netinet/ip_gre.h>
97#include <sys/sysctl.h>
98
99/* for nvgre bridge shizz */
100#include <sys/socket.h>
101#include <net/if_bridge.h>
102#include <net/if_etherbridge.h>
103
104/*
105 * packet formats
106 */
107struct gre_header {
108 uint16_t gre_flags;
109#define GRE_CP0x8000 0x8000 /* Checksum Present */
110#define GRE_KP0x2000 0x2000 /* Key Present */
111#define GRE_SP0x1000 0x1000 /* Sequence Present */
112
113#define GRE_VERS_MASK0x0007 0x0007
114#define GRE_VERS_00x0000 0x0000
115#define GRE_VERS_10x0001 0x0001
116
117 uint16_t gre_proto;
118} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
119
120struct gre_h_cksum {
121 uint16_t gre_cksum;
122 uint16_t gre_reserved1;
123} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
124
125struct gre_h_key {
126 uint32_t gre_key;
127} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
128
129#define GRE_EOIP0x6400 0x6400
130
131struct gre_h_key_eoip {
132 uint16_t eoip_len; /* network order */
133 uint16_t eoip_tunnel_id; /* little endian */
134} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
135
136#define NVGRE_VSID_RES_MIN0x000000 0x000000 /* reserved for future use */
137#define NVGRE_VSID_RES_MAX0x000fff 0x000fff
138#define NVGRE_VSID_NVE2NVE0xffffff 0xffffff /* vendor specific NVE-to-NVE comms */
139
140struct gre_h_seq {
141 uint32_t gre_seq;
142} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
143
144struct gre_h_wccp {
145 uint8_t wccp_flags;
146 uint8_t service_id;
147 uint8_t alt_bucket;
148 uint8_t pri_bucket;
149} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
150
151#define GRE_WCCP0x883e 0x883e
152
153#define GRE_HDRLEN(sizeof(struct ip) + sizeof(struct gre_header)) (sizeof(struct ip) + sizeof(struct gre_header))
154
155/*
156 * GRE tunnel metadata
157 */
158
159#define GRE_KA_NONE0 0
160#define GRE_KA_DOWN1 1
161#define GRE_KA_HOLD2 2
162#define GRE_KA_UP3 3
163
164union gre_addr {
165 struct in_addr in4;
166 struct in6_addr in6;
167};
168
169static inline int
170 gre_ip_cmp(int, const union gre_addr *,
171 const union gre_addr *);
172
173#define GRE_KEY_MIN0x00000000U 0x00000000U
174#define GRE_KEY_MAX0xffffffffU 0xffffffffU
175#define GRE_KEY_SHIFT0 0
176
177#define GRE_KEY_ENTROPY_MIN0x00000000U 0x00000000U
178#define GRE_KEY_ENTROPY_MAX0x00ffffffU 0x00ffffffU
179#define GRE_KEY_ENTROPY_SHIFT8 8
180
181struct gre_tunnel {
182 uint32_t t_key_mask;
183#define GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
htonl(0x00000000U)(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
184#define GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
htonl(0xffffff00U)(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
185#define GRE_KEY_MASK(__uint32_t)(__builtin_constant_p(0xffffffffU) ? (__uint32_t)
(((__uint32_t)(0xffffffffU) & 0xff) << 24 | ((__uint32_t
)(0xffffffffU) & 0xff00) << 8 | ((__uint32_t)(0xffffffffU
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffffffU) &
0xff000000) >> 24) : __swap32md(0xffffffffU))
htonl(0xffffffffU)(__uint32_t)(__builtin_constant_p(0xffffffffU) ? (__uint32_t)
(((__uint32_t)(0xffffffffU) & 0xff) << 24 | ((__uint32_t
)(0xffffffffU) & 0xff00) << 8 | ((__uint32_t)(0xffffffffU
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffffffU) &
0xff000000) >> 24) : __swap32md(0xffffffffU))
186 uint32_t t_key;
187
188 u_int t_rtableid;
189 union gre_addr t_src;
190#define t_src4t_src.in4 t_src.in4
191#define t_src6t_src.in6 t_src.in6
192 union gre_addr t_dst;
193#define t_dst4t_dst.in4 t_dst.in4
194#define t_dst6t_dst.in6 t_dst.in6
195 int t_ttl;
196 int t_txhprio;
197 int t_rxhprio;
198 int t_ecn;
199 uint16_t t_df;
200 sa_family_t t_af;
201};
202
203static int
204 gre_cmp_src(const struct gre_tunnel *,
205 const struct gre_tunnel *);
206static int
207 gre_cmp(const struct gre_tunnel *, const struct gre_tunnel *);
208
209static int gre_set_tunnel(struct gre_tunnel *, struct if_laddrreq *, int);
210static int gre_get_tunnel(struct gre_tunnel *, struct if_laddrreq *);
211static int gre_del_tunnel(struct gre_tunnel *);
212
213static int gre_set_vnetid(struct gre_tunnel *, struct ifreq *);
214static int gre_get_vnetid(struct gre_tunnel *, struct ifreq *);
215static int gre_del_vnetid(struct gre_tunnel *);
216
217static int gre_set_vnetflowid(struct gre_tunnel *, struct ifreq *);
218static int gre_get_vnetflowid(struct gre_tunnel *, struct ifreq *);
219
220static struct mbuf *
221 gre_encap_dst(const struct gre_tunnel *, const union gre_addr *,
222 struct mbuf *, uint16_t, uint8_t, uint8_t);
223#define gre_encap(_t, _m, _p, _ttl, _tos)gre_encap_dst((_t), &(_t)->t_dst, (_m), (_p), (_ttl), (
_tos))
\
224 gre_encap_dst((_t), &(_t)->t_dst, (_m), (_p), (_ttl), (_tos))
225
226static struct mbuf *
227 gre_encap_dst_ip(const struct gre_tunnel *,
228 const union gre_addr *, struct mbuf *, uint8_t, uint8_t);
229#define gre_encap_ip(_t, _m, _ttl, _tos)gre_encap_dst_ip((_t), &(_t)->t_dst, (_m), (_ttl), (_tos
))
\
230 gre_encap_dst_ip((_t), &(_t)->t_dst, (_m), (_ttl), (_tos))
231
232static int
233 gre_ip_output(const struct gre_tunnel *, struct mbuf *);
234
235static int gre_tunnel_ioctl(struct ifnet *, struct gre_tunnel *,
236 u_long, void *);
237
238static uint8_t gre_l2_tos(const struct gre_tunnel *, const struct mbuf *);
239static uint8_t gre_l3_tos(const struct gre_tunnel *,
240 const struct mbuf *, uint8_t);
241
242/*
243 * layer 3 GRE tunnels
244 */
245
246struct gre_softc {
247 struct gre_tunnel sc_tunnel; /* must be first */
248 TAILQ_ENTRY(gre_softc)struct { struct gre_softc *tqe_next; struct gre_softc **tqe_prev
; }
sc_entry;
249
250 struct ifnet sc_if;
251
252 struct timeout sc_ka_send;
253 struct timeout sc_ka_hold;
254
255 unsigned int sc_ka_state;
256 unsigned int sc_ka_timeo;
257 unsigned int sc_ka_count;
258
259 unsigned int sc_ka_holdmax;
260 unsigned int sc_ka_holdcnt;
261
262 SIPHASH_KEY sc_ka_key;
263 uint32_t sc_ka_bias;
264 int sc_ka_recvtm;
265};
266
267TAILQ_HEAD(gre_list, gre_softc)struct gre_list { struct gre_softc *tqh_first; struct gre_softc
**tqh_last; }
;
268
269struct gre_keepalive {
270 uint32_t gk_uptime;
271 uint32_t gk_random;
272 uint8_t gk_digest[SIPHASH_DIGEST_LENGTH8];
273} __packed__attribute__((__packed__)) __aligned(4)__attribute__((__aligned__(4)));
274
275static int gre_clone_create(struct if_clone *, int);
276static int gre_clone_destroy(struct ifnet *);
277
278struct if_clone gre_cloner =
279 IF_CLONE_INITIALIZER("gre", gre_clone_create, gre_clone_destroy){ .ifc_list = { ((void *)0), ((void *)0) }, .ifc_name = "gre"
, .ifc_namelen = sizeof("gre") - 1, .ifc_create = gre_clone_create
, .ifc_destroy = gre_clone_destroy, }
;
280
281/* protected by NET_LOCK */
282struct gre_list gre_list = TAILQ_HEAD_INITIALIZER(gre_list){ ((void *)0), &(gre_list).tqh_first };
283
284static int gre_output(struct ifnet *, struct mbuf *, struct sockaddr *,
285 struct rtentry *);
286static void gre_start(struct ifnet *);
287static int gre_ioctl(struct ifnet *, u_long, caddr_t);
288
289static int gre_up(struct gre_softc *);
290static int gre_down(struct gre_softc *);
291static void gre_link_state(struct ifnet *, unsigned int);
292
293static int gre_input_key(struct mbuf **, int *, int, int, uint8_t,
294 struct gre_tunnel *);
295
296static struct mbuf *
297 gre_ipv4_patch(const struct gre_tunnel *, struct mbuf *,
298 uint8_t *, uint8_t);
299#ifdef INET61
300static struct mbuf *
301 gre_ipv6_patch(const struct gre_tunnel *, struct mbuf *,
302 uint8_t *, uint8_t);
303#endif
304#ifdef MPLS1
305static struct mbuf *
306 gre_mpls_patch(const struct gre_tunnel *, struct mbuf *,
307 uint8_t *, uint8_t);
308#endif
309static void gre_keepalive_send(void *);
310static void gre_keepalive_recv(struct ifnet *ifp, struct mbuf *);
311static void gre_keepalive_hold(void *);
312
313static struct mbuf *
314 gre_l3_encap_dst(const struct gre_tunnel *, const void *,
315 struct mbuf *m, sa_family_t);
316
317#define gre_l3_encap(_t, _m, _af)gre_l3_encap_dst((_t), &(_t)->t_dst, (_m), (_af)) \
318 gre_l3_encap_dst((_t), &(_t)->t_dst, (_m), (_af))
319
320struct mgre_softc {
321 struct gre_tunnel sc_tunnel; /* must be first */
322 RBT_ENTRY(mgre_softc)struct rb_entry sc_entry;
323
324 struct ifnet sc_if;
325};
326
327RBT_HEAD(mgre_tree, mgre_softc)struct mgre_tree { struct rb_tree rbh_root; };
328
329static inline int
330 mgre_cmp(const struct mgre_softc *, const struct mgre_softc *);
331
332RBT_PROTOTYPE(mgre_tree, mgre_softc, sc_entry, mgre_cmp)extern const struct rb_type *const mgre_tree_RBT_TYPE; __attribute__
((__unused__)) static inline void mgre_tree_RBT_INIT(struct mgre_tree
*head) { _rb_init(&head->rbh_root); } __attribute__((
__unused__)) static inline struct mgre_softc * mgre_tree_RBT_INSERT
(struct mgre_tree *head, struct mgre_softc *elm) { return _rb_insert
(mgre_tree_RBT_TYPE, &head->rbh_root, elm); } __attribute__
((__unused__)) static inline struct mgre_softc * mgre_tree_RBT_REMOVE
(struct mgre_tree *head, struct mgre_softc *elm) { return _rb_remove
(mgre_tree_RBT_TYPE, &head->rbh_root, elm); } __attribute__
((__unused__)) static inline struct mgre_softc * mgre_tree_RBT_FIND
(struct mgre_tree *head, const struct mgre_softc *key) { return
_rb_find(mgre_tree_RBT_TYPE, &head->rbh_root, key); }
__attribute__((__unused__)) static inline struct mgre_softc *
mgre_tree_RBT_NFIND(struct mgre_tree *head, const struct mgre_softc
*key) { return _rb_nfind(mgre_tree_RBT_TYPE, &head->rbh_root
, key); } __attribute__((__unused__)) static inline struct mgre_softc
* mgre_tree_RBT_ROOT(struct mgre_tree *head) { return _rb_root
(mgre_tree_RBT_TYPE, &head->rbh_root); } __attribute__
((__unused__)) static inline int mgre_tree_RBT_EMPTY(struct mgre_tree
*head) { return _rb_empty(&head->rbh_root); } __attribute__
((__unused__)) static inline struct mgre_softc * mgre_tree_RBT_MIN
(struct mgre_tree *head) { return _rb_min(mgre_tree_RBT_TYPE,
&head->rbh_root); } __attribute__((__unused__)) static
inline struct mgre_softc * mgre_tree_RBT_MAX(struct mgre_tree
*head) { return _rb_max(mgre_tree_RBT_TYPE, &head->rbh_root
); } __attribute__((__unused__)) static inline struct mgre_softc
* mgre_tree_RBT_NEXT(struct mgre_softc *elm) { return _rb_next
(mgre_tree_RBT_TYPE, elm); } __attribute__((__unused__)) static
inline struct mgre_softc * mgre_tree_RBT_PREV(struct mgre_softc
*elm) { return _rb_prev(mgre_tree_RBT_TYPE, elm); } __attribute__
((__unused__)) static inline struct mgre_softc * mgre_tree_RBT_LEFT
(struct mgre_softc *elm) { return _rb_left(mgre_tree_RBT_TYPE
, elm); } __attribute__((__unused__)) static inline struct mgre_softc
* mgre_tree_RBT_RIGHT(struct mgre_softc *elm) { return _rb_right
(mgre_tree_RBT_TYPE, elm); } __attribute__((__unused__)) static
inline struct mgre_softc * mgre_tree_RBT_PARENT(struct mgre_softc
*elm) { return _rb_parent(mgre_tree_RBT_TYPE, elm); } __attribute__
((__unused__)) static inline void mgre_tree_RBT_SET_LEFT(struct
mgre_softc *elm, struct mgre_softc *left) { _rb_set_left(mgre_tree_RBT_TYPE
, elm, left); } __attribute__((__unused__)) static inline void
mgre_tree_RBT_SET_RIGHT(struct mgre_softc *elm, struct mgre_softc
*right) { _rb_set_right(mgre_tree_RBT_TYPE, elm, right); } __attribute__
((__unused__)) static inline void mgre_tree_RBT_SET_PARENT(struct
mgre_softc *elm, struct mgre_softc *parent) { _rb_set_parent
(mgre_tree_RBT_TYPE, elm, parent); } __attribute__((__unused__
)) static inline void mgre_tree_RBT_POISON(struct mgre_softc *
elm, unsigned long poison) { _rb_poison(mgre_tree_RBT_TYPE, elm
, poison); } __attribute__((__unused__)) static inline int mgre_tree_RBT_CHECK
(struct mgre_softc *elm, unsigned long poison) { return _rb_check
(mgre_tree_RBT_TYPE, elm, poison); }
;
333
334static int mgre_clone_create(struct if_clone *, int);
335static int mgre_clone_destroy(struct ifnet *);
336
337struct if_clone mgre_cloner =
338 IF_CLONE_INITIALIZER("mgre", mgre_clone_create, mgre_clone_destroy){ .ifc_list = { ((void *)0), ((void *)0) }, .ifc_name = "mgre"
, .ifc_namelen = sizeof("mgre") - 1, .ifc_create = mgre_clone_create
, .ifc_destroy = mgre_clone_destroy, }
;
339
340static void mgre_rtrequest(struct ifnet *, int, struct rtentry *);
341static int mgre_output(struct ifnet *, struct mbuf *, struct sockaddr *,
342 struct rtentry *);
343static void mgre_start(struct ifnet *);
344static int mgre_ioctl(struct ifnet *, u_long, caddr_t);
345
346static int mgre_set_tunnel(struct mgre_softc *, struct if_laddrreq *);
347static int mgre_get_tunnel(struct mgre_softc *, struct if_laddrreq *);
348static int mgre_up(struct mgre_softc *);
349static int mgre_down(struct mgre_softc *);
350
351/* protected by NET_LOCK */
352struct mgre_tree mgre_tree = RBT_INITIALIZER(){ { ((void *)0) } };
353
354/*
355 * Ethernet GRE tunnels
356 */
357
358static struct mbuf *
359 gre_ether_align(struct mbuf *, int);
360
361struct egre_softc {
362 struct gre_tunnel sc_tunnel; /* must be first */
363 RBT_ENTRY(egre_softc)struct rb_entry sc_entry;
364
365 struct arpcom sc_ac;
366 struct ifmedia sc_media;
367};
368
369RBT_HEAD(egre_tree, egre_softc)struct egre_tree { struct rb_tree rbh_root; };
370
371static inline int
372 egre_cmp(const struct egre_softc *, const struct egre_softc *);
373
374RBT_PROTOTYPE(egre_tree, egre_softc, sc_entry, egre_cmp)extern const struct rb_type *const egre_tree_RBT_TYPE; __attribute__
((__unused__)) static inline void egre_tree_RBT_INIT(struct egre_tree
*head) { _rb_init(&head->rbh_root); } __attribute__((
__unused__)) static inline struct egre_softc * egre_tree_RBT_INSERT
(struct egre_tree *head, struct egre_softc *elm) { return _rb_insert
(egre_tree_RBT_TYPE, &head->rbh_root, elm); } __attribute__
((__unused__)) static inline struct egre_softc * egre_tree_RBT_REMOVE
(struct egre_tree *head, struct egre_softc *elm) { return _rb_remove
(egre_tree_RBT_TYPE, &head->rbh_root, elm); } __attribute__
((__unused__)) static inline struct egre_softc * egre_tree_RBT_FIND
(struct egre_tree *head, const struct egre_softc *key) { return
_rb_find(egre_tree_RBT_TYPE, &head->rbh_root, key); }
__attribute__((__unused__)) static inline struct egre_softc *
egre_tree_RBT_NFIND(struct egre_tree *head, const struct egre_softc
*key) { return _rb_nfind(egre_tree_RBT_TYPE, &head->rbh_root
, key); } __attribute__((__unused__)) static inline struct egre_softc
* egre_tree_RBT_ROOT(struct egre_tree *head) { return _rb_root
(egre_tree_RBT_TYPE, &head->rbh_root); } __attribute__
((__unused__)) static inline int egre_tree_RBT_EMPTY(struct egre_tree
*head) { return _rb_empty(&head->rbh_root); } __attribute__
((__unused__)) static inline struct egre_softc * egre_tree_RBT_MIN
(struct egre_tree *head) { return _rb_min(egre_tree_RBT_TYPE,
&head->rbh_root); } __attribute__((__unused__)) static
inline struct egre_softc * egre_tree_RBT_MAX(struct egre_tree
*head) { return _rb_max(egre_tree_RBT_TYPE, &head->rbh_root
); } __attribute__((__unused__)) static inline struct egre_softc
* egre_tree_RBT_NEXT(struct egre_softc *elm) { return _rb_next
(egre_tree_RBT_TYPE, elm); } __attribute__((__unused__)) static
inline struct egre_softc * egre_tree_RBT_PREV(struct egre_softc
*elm) { return _rb_prev(egre_tree_RBT_TYPE, elm); } __attribute__
((__unused__)) static inline struct egre_softc * egre_tree_RBT_LEFT
(struct egre_softc *elm) { return _rb_left(egre_tree_RBT_TYPE
, elm); } __attribute__((__unused__)) static inline struct egre_softc
* egre_tree_RBT_RIGHT(struct egre_softc *elm) { return _rb_right
(egre_tree_RBT_TYPE, elm); } __attribute__((__unused__)) static
inline struct egre_softc * egre_tree_RBT_PARENT(struct egre_softc
*elm) { return _rb_parent(egre_tree_RBT_TYPE, elm); } __attribute__
((__unused__)) static inline void egre_tree_RBT_SET_LEFT(struct
egre_softc *elm, struct egre_softc *left) { _rb_set_left(egre_tree_RBT_TYPE
, elm, left); } __attribute__((__unused__)) static inline void
egre_tree_RBT_SET_RIGHT(struct egre_softc *elm, struct egre_softc
*right) { _rb_set_right(egre_tree_RBT_TYPE, elm, right); } __attribute__
((__unused__)) static inline void egre_tree_RBT_SET_PARENT(struct
egre_softc *elm, struct egre_softc *parent) { _rb_set_parent
(egre_tree_RBT_TYPE, elm, parent); } __attribute__((__unused__
)) static inline void egre_tree_RBT_POISON(struct egre_softc *
elm, unsigned long poison) { _rb_poison(egre_tree_RBT_TYPE, elm
, poison); } __attribute__((__unused__)) static inline int egre_tree_RBT_CHECK
(struct egre_softc *elm, unsigned long poison) { return _rb_check
(egre_tree_RBT_TYPE, elm, poison); }
;
375
376static int egre_clone_create(struct if_clone *, int);
377static int egre_clone_destroy(struct ifnet *);
378
379static void egre_start(struct ifnet *);
380static int egre_ioctl(struct ifnet *, u_long, caddr_t);
381static int egre_media_change(struct ifnet *);
382static void egre_media_status(struct ifnet *, struct ifmediareq *);
383
384static int egre_up(struct egre_softc *);
385static int egre_down(struct egre_softc *);
386
387static int egre_input(const struct gre_tunnel *, struct mbuf *, int,
388 uint8_t);
389struct if_clone egre_cloner =
390 IF_CLONE_INITIALIZER("egre", egre_clone_create, egre_clone_destroy){ .ifc_list = { ((void *)0), ((void *)0) }, .ifc_name = "egre"
, .ifc_namelen = sizeof("egre") - 1, .ifc_create = egre_clone_create
, .ifc_destroy = egre_clone_destroy, }
;
391
392/* protected by NET_LOCK */
393struct egre_tree egre_tree = RBT_INITIALIZER(){ { ((void *)0) } };
394
395/*
396 * Network Virtualisation Using Generic Routing Encapsulation (NVGRE)
397 */
398
399struct nvgre_softc {
400 struct gre_tunnel sc_tunnel; /* must be first */
401 unsigned int sc_ifp0;
402 RBT_ENTRY(nvgre_softc)struct rb_entry sc_uentry;
403 RBT_ENTRY(nvgre_softc)struct rb_entry sc_mentry;
404
405 struct arpcom sc_ac;
406 struct ifmedia sc_media;
407
408 struct mbuf_queue sc_send_list;
409 struct task sc_send_task;
410
411 void *sc_inm;
412 struct task sc_ltask;
413 struct task sc_dtask;
414
415 struct etherbridge sc_eb;
416};
417
418RBT_HEAD(nvgre_ucast_tree, nvgre_softc)struct nvgre_ucast_tree { struct rb_tree rbh_root; };
419RBT_HEAD(nvgre_mcast_tree, nvgre_softc)struct nvgre_mcast_tree { struct rb_tree rbh_root; };
420
421static inline int
422 nvgre_cmp_ucast(const struct nvgre_softc *,
423 const struct nvgre_softc *);
424static int
425 nvgre_cmp_mcast(const struct gre_tunnel *,
426 const union gre_addr *, unsigned int,
427 const struct gre_tunnel *, const union gre_addr *,
428 unsigned int);
429static inline int
430 nvgre_cmp_mcast_sc(const struct nvgre_softc *,
431 const struct nvgre_softc *);
432
433RBT_PROTOTYPE(nvgre_ucast_tree, nvgre_softc, sc_uentry, nvgre_cmp_ucast)extern const struct rb_type *const nvgre_ucast_tree_RBT_TYPE;
__attribute__((__unused__)) static inline void nvgre_ucast_tree_RBT_INIT
(struct nvgre_ucast_tree *head) { _rb_init(&head->rbh_root
); } __attribute__((__unused__)) static inline struct nvgre_softc
* nvgre_ucast_tree_RBT_INSERT(struct nvgre_ucast_tree *head,
struct nvgre_softc *elm) { return _rb_insert(nvgre_ucast_tree_RBT_TYPE
, &head->rbh_root, elm); } __attribute__((__unused__))
static inline struct nvgre_softc * nvgre_ucast_tree_RBT_REMOVE
(struct nvgre_ucast_tree *head, struct nvgre_softc *elm) { return
_rb_remove(nvgre_ucast_tree_RBT_TYPE, &head->rbh_root
, elm); } __attribute__((__unused__)) static inline struct nvgre_softc
* nvgre_ucast_tree_RBT_FIND(struct nvgre_ucast_tree *head, const
struct nvgre_softc *key) { return _rb_find(nvgre_ucast_tree_RBT_TYPE
, &head->rbh_root, key); } __attribute__((__unused__))
static inline struct nvgre_softc * nvgre_ucast_tree_RBT_NFIND
(struct nvgre_ucast_tree *head, const struct nvgre_softc *key
) { return _rb_nfind(nvgre_ucast_tree_RBT_TYPE, &head->
rbh_root, key); } __attribute__((__unused__)) static inline struct
nvgre_softc * nvgre_ucast_tree_RBT_ROOT(struct nvgre_ucast_tree
*head) { return _rb_root(nvgre_ucast_tree_RBT_TYPE, &head
->rbh_root); } __attribute__((__unused__)) static inline int
nvgre_ucast_tree_RBT_EMPTY(struct nvgre_ucast_tree *head) { return
_rb_empty(&head->rbh_root); } __attribute__((__unused__
)) static inline struct nvgre_softc * nvgre_ucast_tree_RBT_MIN
(struct nvgre_ucast_tree *head) { return _rb_min(nvgre_ucast_tree_RBT_TYPE
, &head->rbh_root); } __attribute__((__unused__)) static
inline struct nvgre_softc * nvgre_ucast_tree_RBT_MAX(struct nvgre_ucast_tree
*head) { return _rb_max(nvgre_ucast_tree_RBT_TYPE, &head
->rbh_root); } __attribute__((__unused__)) static inline struct
nvgre_softc * nvgre_ucast_tree_RBT_NEXT(struct nvgre_softc *
elm) { return _rb_next(nvgre_ucast_tree_RBT_TYPE, elm); } __attribute__
((__unused__)) static inline struct nvgre_softc * nvgre_ucast_tree_RBT_PREV
(struct nvgre_softc *elm) { return _rb_prev(nvgre_ucast_tree_RBT_TYPE
, elm); } __attribute__((__unused__)) static inline struct nvgre_softc
* nvgre_ucast_tree_RBT_LEFT(struct nvgre_softc *elm) { return
_rb_left(nvgre_ucast_tree_RBT_TYPE, elm); } __attribute__((__unused__
)) static inline struct nvgre_softc * nvgre_ucast_tree_RBT_RIGHT
(struct nvgre_softc *elm) { return _rb_right(nvgre_ucast_tree_RBT_TYPE
, elm); } __attribute__((__unused__)) static inline struct nvgre_softc
* nvgre_ucast_tree_RBT_PARENT(struct nvgre_softc *elm) { return
_rb_parent(nvgre_ucast_tree_RBT_TYPE, elm); } __attribute__(
(__unused__)) static inline void nvgre_ucast_tree_RBT_SET_LEFT
(struct nvgre_softc *elm, struct nvgre_softc *left) { _rb_set_left
(nvgre_ucast_tree_RBT_TYPE, elm, left); } __attribute__((__unused__
)) static inline void nvgre_ucast_tree_RBT_SET_RIGHT(struct nvgre_softc
*elm, struct nvgre_softc *right) { _rb_set_right(nvgre_ucast_tree_RBT_TYPE
, elm, right); } __attribute__((__unused__)) static inline void
nvgre_ucast_tree_RBT_SET_PARENT(struct nvgre_softc *elm, struct
nvgre_softc *parent) { _rb_set_parent(nvgre_ucast_tree_RBT_TYPE
, elm, parent); } __attribute__((__unused__)) static inline void
nvgre_ucast_tree_RBT_POISON(struct nvgre_softc *elm, unsigned
long poison) { _rb_poison(nvgre_ucast_tree_RBT_TYPE, elm, poison
); } __attribute__((__unused__)) static inline int nvgre_ucast_tree_RBT_CHECK
(struct nvgre_softc *elm, unsigned long poison) { return _rb_check
(nvgre_ucast_tree_RBT_TYPE, elm, poison); }
;
/*
 * Generate the static inline red-black tree operations
 * (INSERT/REMOVE/FIND/NFIND/MIN/MAX/NEXT/PREV, ...) for the tree of
 * nvgre(4) interfaces keyed by their multicast endpoint.  The entries
 * hang off sc_mentry and nvgre_cmp_mcast_sc() supplies the ordering.
 * See RBT_PROTOTYPE(3) in <sys/tree.h>.
 */
RBT_PROTOTYPE(nvgre_mcast_tree, nvgre_softc, sc_mentry, nvgre_cmp_mcast_sc);
435
436static int nvgre_clone_create(struct if_clone *, int);
437static int nvgre_clone_destroy(struct ifnet *);
438
439static void nvgre_start(struct ifnet *);
440static int nvgre_ioctl(struct ifnet *, u_long, caddr_t);
441
442static int nvgre_up(struct nvgre_softc *);
443static int nvgre_down(struct nvgre_softc *);
444static int nvgre_set_parent(struct nvgre_softc *, const char *);
445static void nvgre_link_change(void *);
446static void nvgre_detach(void *);
447
448static int nvgre_input(const struct gre_tunnel *, struct mbuf *, int,
449 uint8_t);
450static void nvgre_send(void *);
451
452static int nvgre_add_addr(struct nvgre_softc *, const struct ifbareq *);
453static int nvgre_del_addr(struct nvgre_softc *, const struct ifbareq *);
454
455static int nvgre_eb_port_eq(void *, void *, void *);
456static void *nvgre_eb_port_take(void *, void *);
457static void nvgre_eb_port_rele(void *, void *);
458static size_t nvgre_eb_port_ifname(void *, char *, size_t, void *);
459static void nvgre_eb_port_sa(void *, struct sockaddr_storage *, void *);
460
461static const struct etherbridge_ops nvgre_etherbridge_ops = {
462 nvgre_eb_port_eq,
463 nvgre_eb_port_take,
464 nvgre_eb_port_rele,
465 nvgre_eb_port_ifname,
466 nvgre_eb_port_sa,
467};
468
469struct if_clone nvgre_cloner =
470 IF_CLONE_INITIALIZER("nvgre", nvgre_clone_create, nvgre_clone_destroy){ .ifc_list = { ((void *)0), ((void *)0) }, .ifc_name = "nvgre"
, .ifc_namelen = sizeof("nvgre") - 1, .ifc_create = nvgre_clone_create
, .ifc_destroy = nvgre_clone_destroy, }
;
471
472struct pool nvgre_endpoint_pool;
473
474/* protected by NET_LOCK */
475struct nvgre_ucast_tree nvgre_ucast_tree = RBT_INITIALIZER(){ { ((void *)0) } };
476struct nvgre_mcast_tree nvgre_mcast_tree = RBT_INITIALIZER(){ { ((void *)0) } };
477
478/*
479 * MikroTik Ethernet over IP protocol (eoip)
480 */
481
482struct eoip_softc {
483 struct gre_tunnel sc_tunnel; /* must be first */
484 uint16_t sc_tunnel_id;
485 RBT_ENTRY(eoip_softc)struct rb_entry sc_entry;
486
487 struct arpcom sc_ac;
488 struct ifmedia sc_media;
489
490 struct timeout sc_ka_send;
491 struct timeout sc_ka_hold;
492
493 unsigned int sc_ka_state;
494 unsigned int sc_ka_timeo;
495 unsigned int sc_ka_count;
496
497 unsigned int sc_ka_holdmax;
498 unsigned int sc_ka_holdcnt;
499};
500
501RBT_HEAD(eoip_tree, eoip_softc)struct eoip_tree { struct rb_tree rbh_root; };
502
503static inline int
504 eoip_cmp(const struct eoip_softc *, const struct eoip_softc *);
505
/*
 * Generate the static inline red-black tree operations for the tree
 * of eoip(4) interfaces; entries hang off sc_entry and eoip_cmp()
 * supplies the ordering.  See RBT_PROTOTYPE(3) in <sys/tree.h>.
 */
RBT_PROTOTYPE(eoip_tree, eoip_softc, sc_entry, eoip_cmp);
507
508static int eoip_clone_create(struct if_clone *, int);
509static int eoip_clone_destroy(struct ifnet *);
510
511static void eoip_start(struct ifnet *);
512static int eoip_ioctl(struct ifnet *, u_long, caddr_t);
513
514static void eoip_keepalive_send(void *);
515static void eoip_keepalive_recv(struct eoip_softc *);
516static void eoip_keepalive_hold(void *);
517
518static int eoip_up(struct eoip_softc *);
519static int eoip_down(struct eoip_softc *);
520
521static struct mbuf *
522 eoip_encap(struct eoip_softc *, struct mbuf *, uint8_t);
523
524static struct mbuf *
525 eoip_input(struct gre_tunnel *, struct mbuf *,
526 const struct gre_header *, uint8_t, int);
527struct if_clone eoip_cloner =
528 IF_CLONE_INITIALIZER("eoip", eoip_clone_create, eoip_clone_destroy){ .ifc_list = { ((void *)0), ((void *)0) }, .ifc_name = "eoip"
, .ifc_namelen = sizeof("eoip") - 1, .ifc_create = eoip_clone_create
, .ifc_destroy = eoip_clone_destroy, }
;
529
530/* protected by NET_LOCK */
531struct eoip_tree eoip_tree = RBT_INITIALIZER(){ { ((void *)0) } };
532
/*
 * It is not easy to calculate the right value for a GRE MTU.
 * We leave this task to the admin and use the same default that
 * other vendors use.
 */
#define GREMTU		1476

/*
 * We can control the acceptance of GRE and MobileIP packets by
 * altering the sysctl net.inet.gre.allow values
 * respectively. Zero means drop them, all else is acceptance. We can also
 * control acceptance of WCCPv1-style GRE packets through the
 * net.inet.gre.wccp value, but be aware it depends upon normal GRE being
 * allowed as well.
 */
int gre_allow = 0;
int gre_wccp = 0;
551
552void
553greattach(int n)
554{
555 if_clone_attach(&gre_cloner);
556 if_clone_attach(&mgre_cloner);
557 if_clone_attach(&egre_cloner);
558 if_clone_attach(&nvgre_cloner);
559 if_clone_attach(&eoip_cloner);
560}
561
562static int
563gre_clone_create(struct if_clone *ifc, int unit)
564{
565 struct gre_softc *sc;
566 struct ifnet *ifp;
567
568 sc = malloc(sizeof(*sc), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
569 snprintf(sc->sc_if.if_xname, sizeof sc->sc_if.if_xname, "%s%d",
570 ifc->ifc_name, unit);
571
572 ifp = &sc->sc_if;
573 ifp->if_softc = sc;
574 ifp->if_typeif_data.ifi_type = IFT_TUNNEL0x83;
575 ifp->if_hdrlenif_data.ifi_hdrlen = GRE_HDRLEN(sizeof(struct ip) + sizeof(struct gre_header));
576 ifp->if_mtuif_data.ifi_mtu = GREMTU1476;
577 ifp->if_flags = IFF_POINTOPOINT0x10|IFF_MULTICAST0x8000;
578 ifp->if_xflags = IFXF_CLONED0x2;
579 ifp->if_bpf_mtap = p2p_bpf_mtap;
580 ifp->if_input = p2p_input;
581 ifp->if_output = gre_output;
582 ifp->if_start = gre_start;
583 ifp->if_ioctl = gre_ioctl;
584 ifp->if_rtrequest = p2p_rtrequest;
585
586 sc->sc_tunnel.t_ttl = ip_defttl;
587 sc->sc_tunnel.t_txhprio = IF_HDRPRIO_PAYLOAD-2;
588 sc->sc_tunnel.t_rxhprio = IF_HDRPRIO_PACKET-1;
589 sc->sc_tunnel.t_df = htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
590 sc->sc_tunnel.t_ecn = ECN_ALLOWED1;
591
592 timeout_set(&sc->sc_ka_send, gre_keepalive_send, sc);
593 timeout_set_proc(&sc->sc_ka_hold, gre_keepalive_hold, sc);
594 sc->sc_ka_state = GRE_KA_NONE0;
595
596 if_counters_alloc(ifp);
597 if_attach(ifp);
598 if_alloc_sadl(ifp);
599
600#if NBPFILTER1 > 0
601 bpfattach(&ifp->if_bpf, ifp, DLT_LOOP12, sizeof(uint32_t));
602#endif
603
604 ifp->if_llprio = IFQ_TOS2PRIO(IPTOS_PREC_INTERNETCONTROL)((0xc0) >> 5);
605
606 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
607 TAILQ_INSERT_TAIL(&gre_list, sc, sc_entry)do { (sc)->sc_entry.tqe_next = ((void *)0); (sc)->sc_entry
.tqe_prev = (&gre_list)->tqh_last; *(&gre_list)->
tqh_last = (sc); (&gre_list)->tqh_last = &(sc)->
sc_entry.tqe_next; } while (0)
;
608 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
609
610 return (0);
611}
612
613static int
614gre_clone_destroy(struct ifnet *ifp)
615{
616 struct gre_softc *sc = ifp->if_softc;
617
618 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
619 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
620 gre_down(sc);
621
622 TAILQ_REMOVE(&gre_list, sc, sc_entry)do { if (((sc)->sc_entry.tqe_next) != ((void *)0)) (sc)->
sc_entry.tqe_next->sc_entry.tqe_prev = (sc)->sc_entry.tqe_prev
; else (&gre_list)->tqh_last = (sc)->sc_entry.tqe_prev
; *(sc)->sc_entry.tqe_prev = (sc)->sc_entry.tqe_next; (
(sc)->sc_entry.tqe_prev) = ((void *)-1); ((sc)->sc_entry
.tqe_next) = ((void *)-1); } while (0)
;
623 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
624
625 if_detach(ifp);
626
627 free(sc, M_DEVBUF2, sizeof(*sc));
628
629 return (0);
630}
631
632static int
633mgre_clone_create(struct if_clone *ifc, int unit)
634{
635 struct mgre_softc *sc;
636 struct ifnet *ifp;
637
638 sc = malloc(sizeof(*sc), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
639 ifp = &sc->sc_if;
640
641 snprintf(ifp->if_xname, sizeof(ifp->if_xname),
642 "%s%d", ifc->ifc_name, unit);
643
644 ifp->if_softc = sc;
645 ifp->if_typeif_data.ifi_type = IFT_L3IPVLAN0x88;
646 ifp->if_hdrlenif_data.ifi_hdrlen = GRE_HDRLEN(sizeof(struct ip) + sizeof(struct gre_header));
647 ifp->if_mtuif_data.ifi_mtu = GREMTU1476;
648 ifp->if_flags = IFF_MULTICAST0x8000|IFF_SIMPLEX0x800;
649 ifp->if_xflags = IFXF_CLONED0x2;
650 ifp->if_bpf_mtap = p2p_bpf_mtap;
651 ifp->if_input = p2p_input;
652 ifp->if_rtrequest = mgre_rtrequest;
653 ifp->if_output = mgre_output;
654 ifp->if_start = mgre_start;
655 ifp->if_ioctl = mgre_ioctl;
656
657 sc->sc_tunnel.t_ttl = ip_defttl;
658 sc->sc_tunnel.t_txhprio = IF_HDRPRIO_PAYLOAD-2;
659 sc->sc_tunnel.t_rxhprio = IF_HDRPRIO_PACKET-1;
660 sc->sc_tunnel.t_df = htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
661 sc->sc_tunnel.t_ecn = ECN_ALLOWED1;
662
663 if_counters_alloc(ifp);
664 if_attach(ifp);
665 if_alloc_sadl(ifp);
666
667#if NBPFILTER1 > 0
668 bpfattach(&ifp->if_bpf, ifp, DLT_LOOP12, sizeof(uint32_t));
669#endif
670
671 return (0);
672}
673
674static int
675mgre_clone_destroy(struct ifnet *ifp)
676{
677 struct mgre_softc *sc = ifp->if_softc;
678
679 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
680 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
681 mgre_down(sc);
682 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
683
684 if_detach(ifp);
685
686 free(sc, M_DEVBUF2, sizeof(*sc));
687
688 return (0);
689}
690
691static int
692egre_clone_create(struct if_clone *ifc, int unit)
693{
694 struct egre_softc *sc;
695 struct ifnet *ifp;
696
697 sc = malloc(sizeof(*sc), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
698 ifp = &sc->sc_ac.ac_if;
699
700 snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d",
701 ifc->ifc_name, unit);
702
703 ifp->if_softc = sc;
704 ifp->if_hardmtu = ETHER_MAX_HARDMTU_LEN65435;
705 ifp->if_ioctl = egre_ioctl;
706 ifp->if_start = egre_start;
707 ifp->if_xflags = IFXF_CLONED0x2;
708 ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
709 ether_fakeaddr(ifp);
710
711 sc->sc_tunnel.t_ttl = ip_defttl;
712 sc->sc_tunnel.t_txhprio = 0;
713 sc->sc_tunnel.t_rxhprio = IF_HDRPRIO_PACKET-1;
714 sc->sc_tunnel.t_df = htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
715
716 ifmedia_init(&sc->sc_media, 0, egre_media_change, egre_media_status);
717 ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL, 0, NULL((void *)0));
718 ifmedia_set(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL);
719
720 if_counters_alloc(ifp);
721 if_attach(ifp);
722 ether_ifattach(ifp);
723
724 return (0);
725}
726
727static int
728egre_clone_destroy(struct ifnet *ifp)
729{
730 struct egre_softc *sc = ifp->if_softc;
731
732 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
733 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
734 egre_down(sc);
735 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
736
737 ifmedia_delete_instance(&sc->sc_media, IFM_INST_ANY((uint64_t) -1));
738 ether_ifdetach(ifp);
739 if_detach(ifp);
740
741 free(sc, M_DEVBUF2, sizeof(*sc));
742
743 return (0);
744}
745
746static int
747nvgre_clone_create(struct if_clone *ifc, int unit)
748{
749 struct nvgre_softc *sc;
750 struct ifnet *ifp;
751 struct gre_tunnel *tunnel;
752 int error;
753
754 if (nvgre_endpoint_pool.pr_size == 0) {
755 pool_init(&nvgre_endpoint_pool, sizeof(union gre_addr),
756 0, IPL_SOFTNET0x5, 0, "nvgreep", NULL((void *)0));
757 }
758
759 sc = malloc(sizeof(*sc), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
760 ifp = &sc->sc_ac.ac_if;
761
762 snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d",
763 ifc->ifc_name, unit);
764
765 error = etherbridge_init(&sc->sc_eb, ifp->if_xname,
766 &nvgre_etherbridge_ops, sc);
767 if (error != 0) {
768 free(sc, M_DEVBUF2, sizeof(*sc));
769 return (error);
770 }
771
772 ifp->if_softc = sc;
773 ifp->if_hardmtu = ETHER_MAX_HARDMTU_LEN65435;
774 ifp->if_ioctl = nvgre_ioctl;
775 ifp->if_start = nvgre_start;
776 ifp->if_xflags = IFXF_CLONED0x2;
777 ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
778 ether_fakeaddr(ifp);
779
780 tunnel = &sc->sc_tunnel;
781 tunnel->t_ttl = IP_DEFAULT_MULTICAST_TTL1;
782 tunnel->t_txhprio = 0;
783 sc->sc_tunnel.t_rxhprio = IF_HDRPRIO_PACKET-1;
784 tunnel->t_df = htons(IP_DF)(__uint16_t)(__builtin_constant_p(0x4000) ? (__uint16_t)(((__uint16_t
)(0x4000) & 0xffU) << 8 | ((__uint16_t)(0x4000) &
0xff00U) >> 8) : __swap16md(0x4000))
;
785 tunnel->t_key_mask = GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
786 tunnel->t_key = htonl((NVGRE_VSID_RES_MAX + 1) <<(__uint32_t)(__builtin_constant_p((0x000fff + 1) << 8) ?
(__uint32_t)(((__uint32_t)((0x000fff + 1) << 8) & 0xff
) << 24 | ((__uint32_t)((0x000fff + 1) << 8) &
0xff00) << 8 | ((__uint32_t)((0x000fff + 1) << 8
) & 0xff0000) >> 8 | ((__uint32_t)((0x000fff + 1) <<
8) & 0xff000000) >> 24) : __swap32md((0x000fff + 1
) << 8))
787 GRE_KEY_ENTROPY_SHIFT)(__uint32_t)(__builtin_constant_p((0x000fff + 1) << 8) ?
(__uint32_t)(((__uint32_t)((0x000fff + 1) << 8) & 0xff
) << 24 | ((__uint32_t)((0x000fff + 1) << 8) &
0xff00) << 8 | ((__uint32_t)((0x000fff + 1) << 8
) & 0xff0000) >> 8 | ((__uint32_t)((0x000fff + 1) <<
8) & 0xff000000) >> 24) : __swap32md((0x000fff + 1
) << 8))
;
788
789 mq_init(&sc->sc_send_list, IFQ_MAXLEN256 * 2, IPL_SOFTNET0x5);
790 task_set(&sc->sc_send_task, nvgre_send, sc);
791 task_set(&sc->sc_ltask, nvgre_link_change, sc);
792 task_set(&sc->sc_dtask, nvgre_detach, sc);
793
794 ifmedia_init(&sc->sc_media, 0, egre_media_change, egre_media_status);
795 ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL, 0, NULL((void *)0));
796 ifmedia_set(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL);
797
798 if_counters_alloc(ifp);
799 if_attach(ifp);
800 ether_ifattach(ifp);
801
802 return (0);
803}
804
805static int
806nvgre_clone_destroy(struct ifnet *ifp)
807{
808 struct nvgre_softc *sc = ifp->if_softc;
809
810 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
811 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
812 nvgre_down(sc);
813 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
814
815 etherbridge_destroy(&sc->sc_eb);
816
817 ifmedia_delete_instance(&sc->sc_media, IFM_INST_ANY((uint64_t) -1));
818 ether_ifdetach(ifp);
819 if_detach(ifp);
820
821 free(sc, M_DEVBUF2, sizeof(*sc));
822
823 return (0);
824}
825
826static int
827eoip_clone_create(struct if_clone *ifc, int unit)
828{
829 struct eoip_softc *sc;
830 struct ifnet *ifp;
831
832 sc = malloc(sizeof(*sc), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
833 ifp = &sc->sc_ac.ac_if;
834
835 snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d",
836 ifc->ifc_name, unit);
837
838 ifp->if_softc = sc;
839 ifp->if_hardmtu = ETHER_MAX_HARDMTU_LEN65435;
840 ifp->if_ioctl = eoip_ioctl;
841 ifp->if_start = eoip_start;
842 ifp->if_xflags = IFXF_CLONED0x2;
843 ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
844 ether_fakeaddr(ifp);
845
846 sc->sc_tunnel.t_ttl = ip_defttl;
847 sc->sc_tunnel.t_txhprio = 0;
848 sc->sc_tunnel.t_rxhprio = IF_HDRPRIO_PACKET-1;
849 sc->sc_tunnel.t_df = htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
850
851 sc->sc_ka_timeo = 10;
852 sc->sc_ka_count = 10;
853
854 timeout_set(&sc->sc_ka_send, eoip_keepalive_send, sc);
855 timeout_set_proc(&sc->sc_ka_hold, eoip_keepalive_hold, sc);
856 sc->sc_ka_state = GRE_KA_DOWN1;
857
858 ifmedia_init(&sc->sc_media, 0, egre_media_change, egre_media_status);
859 ifmedia_add(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL, 0, NULL((void *)0));
860 ifmedia_set(&sc->sc_media, IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL);
861
862 if_counters_alloc(ifp);
863 if_attach(ifp);
864 ether_ifattach(ifp);
865
866 return (0);
867}
868
869static int
870eoip_clone_destroy(struct ifnet *ifp)
871{
872 struct eoip_softc *sc = ifp->if_softc;
873
874 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
875 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
876 eoip_down(sc);
877 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
878
879 ifmedia_delete_instance(&sc->sc_media, IFM_INST_ANY((uint64_t) -1));
880 ether_ifdetach(ifp);
881 if_detach(ifp);
882
883 free(sc, M_DEVBUF2, sizeof(*sc));
884
885 return (0);
886}
887
888int
889gre_input(struct mbuf **mp, int *offp, int type, int af)
890{
891 struct mbuf *m = *mp;
892 struct gre_tunnel key;
893 struct ip *ip;
894
895 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
896
897 /* XXX check if ip_src is sane for nvgre? */
898
899 key.t_af = AF_INET2;
900 key.t_src4t_src.in4 = ip->ip_dst;
901 key.t_dst4t_dst.in4 = ip->ip_src;
902
903 if (gre_input_key(mp, offp, type, af, ip->ip_tos, &key) == -1)
1
Calling 'gre_input_key'
904 return (rip_input(mp, offp, type, af));
905
906 return (IPPROTO_DONE257);
907}
908
#ifdef INET6
/*
 * IPv6 GRE input.  Mirror of gre_input(): key on the swapped outer
 * addresses and pass the traffic class (top 8 bits of the flow
 * label field) as the outer TOS.  Unclaimed packets fall back to
 * raw IPv6 delivery.
 */
int
gre_input6(struct mbuf **mp, int *offp, int type, int af)
{
	struct mbuf *m = *mp;
	struct gre_tunnel key;
	struct ip6_hdr *ip6;
	uint32_t flow;

	ip6 = mtod(m, struct ip6_hdr *);

	/* XXX check if ip6_src is sane for nvgre? */

	key.t_af = AF_INET6;
	key.t_src6 = ip6->ip6_dst;
	key.t_dst6 = ip6->ip6_src;

	flow = bemtoh32(&ip6->ip6_flow);

	if (gre_input_key(mp, offp, type, af, flow >> 20, &key) == -1)
		return (rip6_input(mp, offp, type, af));

	return (IPPROTO_DONE);
}
#endif /* INET6 */
934
935static inline struct ifnet *
936gre_find(const struct gre_tunnel *key)
937{
938 struct gre_softc *sc;
939
940 TAILQ_FOREACH(sc, &gre_list, sc_entry)for((sc) = ((&gre_list)->tqh_first); (sc) != ((void *)
0); (sc) = ((sc)->sc_entry.tqe_next))
{
941 if (gre_cmp(key, &sc->sc_tunnel) != 0)
942 continue;
943
944 if (!ISSET(sc->sc_if.if_flags, IFF_RUNNING)((sc->sc_if.if_flags) & (0x40)))
945 continue;
946
947 return (&sc->sc_if);
948 }
949
950 return (NULL((void *)0));
951}
952
953static inline struct ifnet *
954mgre_find(const struct gre_tunnel *key)
955{
956 struct mgre_softc *sc;
957
958 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
959 sc = RBT_FIND(mgre_tree, &mgre_tree, (const struct mgre_softc *)key)mgre_tree_RBT_FIND(&mgre_tree, (const struct mgre_softc *
)key)
;
960 if (sc != NULL((void *)0))
961 return (&sc->sc_if);
962
963 return (NULL((void *)0));
964}
965
966static struct mbuf *
967gre_input_1(struct gre_tunnel *key, struct mbuf *m,
968 const struct gre_header *gh, uint8_t otos, int iphlen)
969{
970 switch (gh->gre_proto) {
971 case htons(ETHERTYPE_PPP)(__uint16_t)(__builtin_constant_p(0x880B) ? (__uint16_t)(((__uint16_t
)(0x880B) & 0xffU) << 8 | ((__uint16_t)(0x880B) &
0xff00U) >> 8) : __swap16md(0x880B))
:
972#ifdef PIPEX1
973 if (pipex_enable) {
974 struct pipex_session *session;
975
976 session = pipex_pptp_lookup_session(m);
977 if (session != NULL((void *)0) &&
978 pipex_pptp_input(m, session) == NULL((void *)0))
979 return (NULL((void *)0));
980 }
981#endif
982 break;
983 case htons(GRE_EOIP)(__uint16_t)(__builtin_constant_p(0x6400) ? (__uint16_t)(((__uint16_t
)(0x6400) & 0xffU) << 8 | ((__uint16_t)(0x6400) &
0xff00U) >> 8) : __swap16md(0x6400))
:
984 return (eoip_input(key, m, gh, otos, iphlen));
985 break;
986 }
987
988 return (m);
989}
990
991static int
992gre_input_key(struct mbuf **mp, int *offp, int type, int af, uint8_t otos,
993 struct gre_tunnel *key)
994{
995 struct mbuf *m = *mp;
996 int iphlen = *offp, hlen, rxprio;
997 struct ifnet *ifp;
998 const struct gre_tunnel *tunnel;
999 caddr_t buf;
1000 struct gre_header *gh;
1001 struct gre_h_key *gkh;
1002 struct mbuf *(*patch)(const struct gre_tunnel *, struct mbuf *,
1003 uint8_t *, uint8_t);
1004 int mcast = 0;
1005 uint8_t itos;
1006
1007 if (!gre_allow)
2
Assuming 'gre_allow' is not equal to 0
3
Taking false branch
1008 goto decline;
1009
1010 key->t_rtableid = m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid;
1011
1012 hlen = iphlen + sizeof(*gh);
1013 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < hlen)
4
Assuming 'hlen' is <= field 'len'
5
Taking false branch
1014 goto decline;
1015
1016 m = m_pullup(m, hlen);
1017 if (m == NULL((void *)0))
6
Assuming 'm' is not equal to NULL
7
Taking false branch
1018 return (IPPROTO_DONE257);
1019
1020 buf = mtod(m, caddr_t)((caddr_t)((m)->m_hdr.mh_data));
1021 gh = (struct gre_header *)(buf + iphlen);
1022
1023 /* check the version */
1024 switch (gh->gre_flags & htons(GRE_VERS_MASK)(__uint16_t)(__builtin_constant_p(0x0007) ? (__uint16_t)(((__uint16_t
)(0x0007) & 0xffU) << 8 | ((__uint16_t)(0x0007) &
0xff00U) >> 8) : __swap16md(0x0007))
) {
8
'?' condition is true
9
Control jumps to 'case 0:' at line 1025
1025 case htons(GRE_VERS_0)(__uint16_t)(__builtin_constant_p(0x0000) ? (__uint16_t)(((__uint16_t
)(0x0000) & 0xffU) << 8 | ((__uint16_t)(0x0000) &
0xff00U) >> 8) : __swap16md(0x0000))
:
1026 break;
10
Execution continues on line 1038
1027
1028 case htons(GRE_VERS_1)(__uint16_t)(__builtin_constant_p(0x0001) ? (__uint16_t)(((__uint16_t
)(0x0001) & 0xffU) << 8 | ((__uint16_t)(0x0001) &
0xff00U) >> 8) : __swap16md(0x0001))
:
1029 m = gre_input_1(key, m, gh, otos, iphlen);
1030 if (m == NULL((void *)0))
1031 return (IPPROTO_DONE257);
1032 /* FALLTHROUGH */
1033 default:
1034 goto decline;
1035 }
1036
1037 /* the only optional bit in the header is K flag */
1038 if ((gh->gre_flags & htons(~(GRE_KP|GRE_VERS_MASK))(__uint16_t)(__builtin_constant_p(~(0x2000|0x0007)) ? (__uint16_t
)(((__uint16_t)(~(0x2000|0x0007)) & 0xffU) << 8 | (
(__uint16_t)(~(0x2000|0x0007)) & 0xff00U) >> 8) : __swap16md
(~(0x2000|0x0007)))
) != htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
)
11
'?' condition is true
12
'?' condition is true
13
Assuming the condition is false
14
Taking false branch
1039 goto decline;
1040
1041 if (gh->gre_flags & htons(GRE_KP)(__uint16_t)(__builtin_constant_p(0x2000) ? (__uint16_t)(((__uint16_t
)(0x2000) & 0xffU) << 8 | ((__uint16_t)(0x2000) &
0xff00U) >> 8) : __swap16md(0x2000))
) {
15
'?' condition is true
16
Assuming the condition is false
17
Taking false branch
1042 hlen += sizeof(*gkh);
1043 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < hlen)
1044 goto decline;
1045
1046 m = m_pullup(m, hlen);
1047 if (m == NULL((void *)0))
1048 return (IPPROTO_DONE257);
1049
1050 buf = mtod(m, caddr_t)((caddr_t)((m)->m_hdr.mh_data));
1051 gh = (struct gre_header *)(buf + iphlen);
1052 gkh = (struct gre_h_key *)(gh + 1);
1053
1054 key->t_key_mask = GRE_KEY_MASK(__uint32_t)(__builtin_constant_p(0xffffffffU) ? (__uint32_t)
(((__uint32_t)(0xffffffffU) & 0xff) << 24 | ((__uint32_t
)(0xffffffffU) & 0xff00) << 8 | ((__uint32_t)(0xffffffffU
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffffffU) &
0xff000000) >> 24) : __swap32md(0xffffffffU))
;
1055 key->t_key = gkh->gre_key;
1056 } else
1057 key->t_key_mask = GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
;
18
'?' condition is true
1058
1059 if (gh->gre_proto == htons(ETHERTYPE_TRANSETHER)(__uint16_t)(__builtin_constant_p(0x6558) ? (__uint16_t)(((__uint16_t
)(0x6558) & 0xffU) << 8 | ((__uint16_t)(0x6558) &
0xff00U) >> 8) : __swap16md(0x6558))
) {
19
'?' condition is true
20
Assuming the condition is false
21
Taking false branch
1060 if (egre_input(key, m, hlen, otos) == -1 &&
1061 nvgre_input(key, m, hlen, otos) == -1)
1062 goto decline;
1063
1064 return (IPPROTO_DONE257);
1065 }
1066
1067 ifp = gre_find(key);
1068 if (ifp == NULL((void *)0)) {
22
Assuming 'ifp' is not equal to NULL
23
Taking false branch
1069 ifp = mgre_find(key);
1070 if (ifp == NULL((void *)0))
1071 goto decline;
1072 }
1073
1074 switch (gh->gre_proto) {
24
Control jumps to 'case 18568:' at line 1116
1075 case htons(GRE_WCCP)(__uint16_t)(__builtin_constant_p(0x883e) ? (__uint16_t)(((__uint16_t
)(0x883e) & 0xffU) << 8 | ((__uint16_t)(0x883e) &
0xff00U) >> 8) : __swap16md(0x883e))
: {
1076 struct mbuf *n;
1077 int off;
1078
1079 /* WCCP/GRE:
1080 * So far as I can see (and test) it seems that Cisco's WCCP
1081 * GRE tunnel is precisely a IP-in-GRE tunnel that differs
1082 * only in its protocol number. At least, it works for me.
1083 *
1084 * The Internet Drafts can be found if you look for
1085 * the following:
1086 * draft-forster-wrec-wccp-v1-00.txt
1087 * draft-wilson-wrec-wccp-v2-01.txt
1088 */
1089
1090 if (!gre_wccp && !ISSET(ifp->if_flags, IFF_LINK0)((ifp->if_flags) & (0x1000)))
1091 goto decline;
1092
1093 /*
1094 * If the first nibble of the payload does not look like
1095 * IPv4, assume it is WCCP v2.
1096 */
1097 n = m_getptr(m, hlen, &off);
1098 if (n == NULL((void *)0))
1099 goto decline;
1100 if (n->m_datam_hdr.mh_data[off] >> 4 != IPVERSION4)
1101 hlen += 4; /* four-octet Redirect header */
1102
1103 /* FALLTHROUGH */
1104 }
1105 case htons(ETHERTYPE_IP)(__uint16_t)(__builtin_constant_p(0x0800) ? (__uint16_t)(((__uint16_t
)(0x0800) & 0xffU) << 8 | ((__uint16_t)(0x0800) &
0xff00U) >> 8) : __swap16md(0x0800))
:
1106 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = AF_INET2;
1107 patch = gre_ipv4_patch;
1108 break;
1109#ifdef INET61
1110 case htons(ETHERTYPE_IPV6)(__uint16_t)(__builtin_constant_p(0x86DD) ? (__uint16_t)(((__uint16_t
)(0x86DD) & 0xffU) << 8 | ((__uint16_t)(0x86DD) &
0xff00U) >> 8) : __swap16md(0x86DD))
:
1111 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = AF_INET624;
1112 patch = gre_ipv6_patch;
1113 break;
1114#endif
1115#ifdef MPLS1
1116 case htons(ETHERTYPE_MPLS_MCAST)(__uint16_t)(__builtin_constant_p(0x8848) ? (__uint16_t)(((__uint16_t
)(0x8848) & 0xffU) << 8 | ((__uint16_t)(0x8848) &
0xff00U) >> 8) : __swap16md(0x8848))
:
1117 mcast = M_MCAST0x0200|M_BCAST0x0100;
1118 /* fallthrough */
1119 case htons(ETHERTYPE_MPLS)(__uint16_t)(__builtin_constant_p(0x8847) ? (__uint16_t)(((__uint16_t
)(0x8847) & 0xffU) << 8 | ((__uint16_t)(0x8847) &
0xff00U) >> 8) : __swap16md(0x8847))
:
1120 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = AF_MPLS33;
1121 patch = gre_mpls_patch;
1122 break;
25
Execution continues on line 1140
1123#endif
1124 case htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
:
1125 if (ifp->if_typeif_data.ifi_type != IFT_TUNNEL0x83) {
1126 /* keepalives dont make sense for mgre */
1127 goto decline;
1128 }
1129
1130 m_adj(m, hlen);
1131 gre_keepalive_recv(ifp, m);
1132 return (IPPROTO_DONE257);
1133
1134 default:
1135 goto decline;
1136 }
1137
1138 /* it's ours now */
1139
1140 m_adj(m, hlen);
1141
1142 tunnel = ifp->if_softc; /* gre and mgre tunnel info is at the front */
1143
1144 m = (*patch)(tunnel, m, &itos, otos);
1145 if (m == NULL((void *)0))
26
Assuming 'm' is not equal to NULL
27
Taking false branch
1146 return (IPPROTO_DONE257);
1147
1148 if (tunnel->t_key_mask == GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
) {
28
'?' condition is true
29
Assuming the condition is true
30
Taking true branch
1149 SET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) |= (0x4000));
1150 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_flowid =
1151 bemtoh32(&key->t_key)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&key->
t_key)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&key
->t_key)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t
*)(&key->t_key)) & 0xff00) << 8 | ((__uint32_t
)(*(__uint32_t *)(&key->t_key)) & 0xff0000) >>
8 | ((__uint32_t)(*(__uint32_t *)(&key->t_key)) &
0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&key
->t_key)))
& ~GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
31
1st function call argument is an uninitialized value
1152 }
1153
1154 rxprio = tunnel->t_rxhprio;
1155 switch (rxprio) {
1156 case IF_HDRPRIO_PACKET-1:
1157 /* nop */
1158 break;
1159 case IF_HDRPRIO_OUTER-3:
1160 m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio = IFQ_TOS2PRIO(otos)((otos) >> 5);
1161 break;
1162 case IF_HDRPRIO_PAYLOAD-2:
1163 m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio = IFQ_TOS2PRIO(itos)((itos) >> 5);
1164 break;
1165 default:
1166 m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio = rxprio;
1167 break;
1168 }
1169
1170 m->m_flagsm_hdr.mh_flags &= ~(M_MCAST0x0200|M_BCAST0x0100);
1171 m->m_flagsm_hdr.mh_flags |= mcast;
1172
1173 if_vinput(ifp, m);
1174 return (IPPROTO_DONE257);
1175decline:
1176 *mp = m;
1177 return (-1);
1178}
1179
1180static struct mbuf *
1181gre_ipv4_patch(const struct gre_tunnel *tunnel, struct mbuf *m,
1182 uint8_t *itosp, uint8_t otos)
1183{
1184 struct ip *ip;
1185 uint8_t itos;
1186
1187 m = m_pullup(m, sizeof(*ip));
1188 if (m == NULL((void *)0))
1189 return (NULL((void *)0));
1190
1191 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
1192
1193 itos = ip->ip_tos;
1194 if (ip_ecn_egress(tunnel->t_ecn, &otos, &itos) == 0) {
1195 m_freem(m);
1196 return (NULL((void *)0));
1197 }
1198 if (itos != ip->ip_tos)
1199 ip_tos_patch(ip, itos);
1200
1201 *itosp = itos;
1202
1203 return (m);
1204}
1205
#ifdef INET6
static struct mbuf *
gre_ipv6_patch(const struct gre_tunnel *tunnel, struct mbuf *m,
    uint8_t *itosp, uint8_t otos)
{
	struct ip6_hdr *ip6;
	uint32_t flow;
	uint8_t tclass;

	/* Make the inner IPv6 header contiguous so it can be patched. */
	m = m_pullup(m, sizeof(*ip6));
	if (m == NULL)
		return (NULL);

	ip6 = mtod(m, struct ip6_hdr *);

	/* The traffic class lives in bits 20..27 of the flow word. */
	flow = bemtoh32(&ip6->ip6_flow);
	tclass = flow >> 20;

	/* Fold the outer ECN bits into the inner class; drop on bad combos. */
	if (ip_ecn_egress(tunnel->t_ecn, &otos, &tclass) == 0) {
		m_freem(m);
		return (NULL);
	}

	/* Write the (possibly updated) traffic class back in place. */
	CLR(flow, 0xff << 20);
	SET(flow, tclass << 20);
	htobem32(&ip6->ip6_flow, flow);

	*itosp = tclass;

	return (m);
}
#endif
1237
#ifdef MPLS
static struct mbuf *
gre_mpls_patch(const struct gre_tunnel *tunnel, struct mbuf *m,
    uint8_t *itosp, uint8_t otos)
{
	uint32_t shim;
	uint8_t tos;

	/* Need the first shim word to read the EXP (traffic class) bits. */
	m = m_pullup(m, sizeof(shim));
	if (m == NULL)
		return (NULL);

	shim = *mtod(m, uint32_t *);
	/* Scale the 3 EXP bits up into the precedence part of a TOS byte. */
	tos = (ntohl(shim & MPLS_EXP_MASK) >> MPLS_EXP_OFFSET) << 5;

	/* Fold the outer ECN bits into the inner TOS; drop on bad combos. */
	if (ip_ecn_egress(tunnel->t_ecn, &otos, &tos) == 0) {
		m_freem(m);
		return (NULL);
	}

	*itosp = tos;

	return (m);
}
#endif
1263
/*
 * Set the pf priority on a layer 2 payload according to the tunnel's
 * configured rx header priority: keep the packet's own priority,
 * derive it from the outer TOS, or force a fixed value.
 */
#define gre_l2_prio(_t, _m, _otos) do {					\
	int _rxprio = (_t)->t_rxhprio;					\
	switch (_rxprio) {						\
	case IF_HDRPRIO_PACKET:						\
		/* keep the priority the packet arrived with */		\
		break;							\
	case IF_HDRPRIO_OUTER:						\
		(_m)->m_pkthdr.pf.prio = IFQ_TOS2PRIO((_otos));		\
		break;							\
	default:							\
		(_m)->m_pkthdr.pf.prio = _rxprio;			\
		break;							\
	}								\
} while (0)
1278
1279static int
1280egre_input(const struct gre_tunnel *key, struct mbuf *m, int hlen, uint8_t otos)
1281{
1282 struct egre_softc *sc;
1283
1284 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
1285 sc = RBT_FIND(egre_tree, &egre_tree, (const struct egre_softc *)key)egre_tree_RBT_FIND(&egre_tree, (const struct egre_softc *
)key)
;
1286 if (sc == NULL((void *)0))
1287 return (-1);
1288
1289 /* it's ours now */
1290 m = gre_ether_align(m, hlen);
1291 if (m == NULL((void *)0))
1292 return (0);
1293
1294 if (sc->sc_tunnel.t_key_mask == GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
) {
1295 SET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) |= (0x4000));
1296 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_flowid =
1297 bemtoh32(&key->t_key)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&key->
t_key)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&key
->t_key)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t
*)(&key->t_key)) & 0xff00) << 8 | ((__uint32_t
)(*(__uint32_t *)(&key->t_key)) & 0xff0000) >>
8 | ((__uint32_t)(*(__uint32_t *)(&key->t_key)) &
0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&key
->t_key)))
& ~GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
1298 }
1299
1300 m->m_flagsm_hdr.mh_flags &= ~(M_MCAST0x0200|M_BCAST0x0100);
1301
1302 gre_l2_prio(&sc->sc_tunnel, m, otos)do { int rxprio = (&sc->sc_tunnel)->t_rxhprio; switch
(rxprio) { case -1: break; case -3: (m)->M_dat.MH.MH_pkthdr
.pf.prio = (((otos)) >> 5); break; default: (m)->M_dat
.MH.MH_pkthdr.pf.prio = rxprio; break; } } while (0)
;
1303
1304 if_vinput(&sc->sc_ac.ac_if, m);
1305
1306 return (0);
1307}
1308
1309static inline struct nvgre_softc *
1310nvgre_mcast_find(const struct gre_tunnel *key, unsigned int if0idx)
1311{
1312 struct nvgre_softc *sc;
1313 int rv;
1314
1315 /*
1316 * building an nvgre_softc to use with RBT_FIND is expensive, and
1317 * would need to swap the src and dst addresses in the key. so do the
1318 * find by hand.
1319 */
1320
1321 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
1322 sc = RBT_ROOT(nvgre_mcast_tree, &nvgre_mcast_tree)nvgre_mcast_tree_RBT_ROOT(&nvgre_mcast_tree);
1323 while (sc != NULL((void *)0)) {
1324 rv = nvgre_cmp_mcast(key, &key->t_src, if0idx,
1325 &sc->sc_tunnel, &sc->sc_tunnel.t_dst, sc->sc_ifp0);
1326 if (rv == 0)
1327 return (sc);
1328 if (rv < 0)
1329 sc = RBT_LEFT(nvgre_mcast_tree, sc)nvgre_mcast_tree_RBT_LEFT(sc);
1330 else
1331 sc = RBT_RIGHT(nvgre_mcast_tree, sc)nvgre_mcast_tree_RBT_RIGHT(sc);
1332 }
1333
1334 return (NULL((void *)0));
1335}
1336
1337static inline struct nvgre_softc *
1338nvgre_ucast_find(const struct gre_tunnel *key)
1339{
1340 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
1341 return (RBT_FIND(nvgre_ucast_tree, &nvgre_ucast_tree,nvgre_ucast_tree_RBT_FIND(&nvgre_ucast_tree, (struct nvgre_softc
*)key)
1342 (struct nvgre_softc *)key)nvgre_ucast_tree_RBT_FIND(&nvgre_ucast_tree, (struct nvgre_softc
*)key)
);
1343}
1344
1345static int
1346nvgre_input(const struct gre_tunnel *key, struct mbuf *m, int hlen,
1347 uint8_t otos)
1348{
1349 struct nvgre_softc *sc;
1350 struct ether_header *eh;
1351
1352 if (ISSET(m->m_flags, M_MCAST|M_BCAST)((m->m_hdr.mh_flags) & (0x0200|0x0100)))
1353 sc = nvgre_mcast_find(key, m->m_pkthdrM_dat.MH.MH_pkthdr.ph_ifidx);
1354 else
1355 sc = nvgre_ucast_find(key);
1356
1357 if (sc == NULL((void *)0))
1358 return (-1);
1359
1360 /* it's ours now */
1361 m = gre_ether_align(m, hlen);
1362 if (m == NULL((void *)0))
1363 return (0);
1364
1365 eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data));
1366 etherbridge_map_ea(&sc->sc_eb, (void *)&key->t_dst,
1367 (struct ether_addr *)eh->ether_shost);
1368
1369 SET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) |= (0x4000));
1370 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_flowid = bemtoh32(&key->t_key)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&key->
t_key)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&key
->t_key)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t
*)(&key->t_key)) & 0xff00) << 8 | ((__uint32_t
)(*(__uint32_t *)(&key->t_key)) & 0xff0000) >>
8 | ((__uint32_t)(*(__uint32_t *)(&key->t_key)) &
0xff000000) >> 24) : __swap32md(*(__uint32_t *)(&key
->t_key)))
& ~GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
1371
1372 m->m_flagsm_hdr.mh_flags &= ~(M_MCAST0x0200|M_BCAST0x0100);
1373
1374 gre_l2_prio(&sc->sc_tunnel, m, otos)do { int rxprio = (&sc->sc_tunnel)->t_rxhprio; switch
(rxprio) { case -1: break; case -3: (m)->M_dat.MH.MH_pkthdr
.pf.prio = (((otos)) >> 5); break; default: (m)->M_dat
.MH.MH_pkthdr.pf.prio = rxprio; break; } } while (0)
;
1375
1376 if_vinput(&sc->sc_ac.ac_if, m);
1377
1378 return (0);
1379}
1380
1381static struct mbuf *
1382gre_ether_align(struct mbuf *m, int hlen)
1383{
1384 struct mbuf *n;
1385 int off;
1386
1387 m_adj(m, hlen);
1388
1389 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < sizeof(struct ether_header)) {
1390 m_freem(m);
1391 return (NULL((void *)0));
1392 }
1393
1394 m = m_pullup(m, sizeof(struct ether_header));
1395 if (m == NULL((void *)0))
1396 return (NULL((void *)0));
1397
1398 n = m_getptr(m, sizeof(struct ether_header), &off);
1399 if (n == NULL((void *)0)) {
1400 m_freem(m);
1401 return (NULL((void *)0));
1402 }
1403
1404 if (!ALIGNED_POINTER(mtod(n, caddr_t) + off, uint32_t)1) {
1405 n = m_dup_pkt(m, ETHER_ALIGN2, M_NOWAIT0x0002);
1406 m_freem(m);
1407 if (n == NULL((void *)0))
1408 return (NULL((void *)0));
1409 m = n;
1410 }
1411
1412 return (m);
1413}
1414
1415static void
1416gre_keepalive_recv(struct ifnet *ifp, struct mbuf *m)
1417{
1418 struct gre_softc *sc = ifp->if_softc;
1419 struct gre_keepalive *gk;
1420 SIPHASH_CTX ctx;
1421 uint8_t digest[SIPHASH_DIGEST_LENGTH8];
1422 int uptime, delta;
1423 int tick = ticks;
1424
1425 if (sc->sc_ka_state == GRE_KA_NONE0 ||
1426 sc->sc_tunnel.t_rtableid != sc->sc_if.if_rdomainif_data.ifi_rdomain)
1427 goto drop;
1428
1429 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < sizeof(*gk))
1430 goto drop;
1431 m = m_pullup(m, sizeof(*gk));
1432 if (m == NULL((void *)0))
1433 return;
1434
1435 gk = mtod(m, struct gre_keepalive *)((struct gre_keepalive *)((m)->m_hdr.mh_data));
1436 uptime = bemtoh32(&gk->gk_uptime)(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(&gk->
gk_uptime)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t *)(&
gk->gk_uptime)) & 0xff) << 24 | ((__uint32_t)(*(
__uint32_t *)(&gk->gk_uptime)) & 0xff00) << 8
| ((__uint32_t)(*(__uint32_t *)(&gk->gk_uptime)) &
0xff0000) >> 8 | ((__uint32_t)(*(__uint32_t *)(&gk
->gk_uptime)) & 0xff000000) >> 24) : __swap32md(
*(__uint32_t *)(&gk->gk_uptime)))
- sc->sc_ka_bias;
1437 delta = tick - uptime;
1438 if (delta < 0)
1439 goto drop;
1440 if (delta > hz * 10) /* magic */
1441 goto drop;
1442
1443 /* avoid too much siphash work */
1444 delta = tick - sc->sc_ka_recvtm;
1445 if (delta > 0 && delta < (hz / 10))
1446 goto drop;
1447
1448 SipHash24_Init(&ctx, &sc->sc_ka_key)SipHash_Init((&ctx), (&sc->sc_ka_key));
1449 SipHash24_Update(&ctx, &gk->gk_uptime, sizeof(gk->gk_uptime))SipHash_Update((&ctx), 2, 4, (&gk->gk_uptime), (sizeof
(gk->gk_uptime)))
;
1450 SipHash24_Update(&ctx, &gk->gk_random, sizeof(gk->gk_random))SipHash_Update((&ctx), 2, 4, (&gk->gk_random), (sizeof
(gk->gk_random)))
;
1451 SipHash24_Final(digest, &ctx)SipHash_Final((digest), (&ctx), 2, 4);
1452
1453 if (memcmp(digest, gk->gk_digest, sizeof(digest))__builtin_memcmp((digest), (gk->gk_digest), (sizeof(digest
)))
!= 0)
1454 goto drop;
1455
1456 sc->sc_ka_recvtm = tick;
1457
1458 switch (sc->sc_ka_state) {
1459 case GRE_KA_DOWN1:
1460 sc->sc_ka_state = GRE_KA_HOLD2;
1461 sc->sc_ka_holdcnt = sc->sc_ka_holdmax;
1462 sc->sc_ka_holdmax = MIN(sc->sc_ka_holdmax * 2,(((sc->sc_ka_holdmax * 2)<(16 * sc->sc_ka_count))?(sc
->sc_ka_holdmax * 2):(16 * sc->sc_ka_count))
1463 16 * sc->sc_ka_count)(((sc->sc_ka_holdmax * 2)<(16 * sc->sc_ka_count))?(sc
->sc_ka_holdmax * 2):(16 * sc->sc_ka_count))
;
1464 break;
1465 case GRE_KA_HOLD2:
1466 if (--sc->sc_ka_holdcnt > 0)
1467 break;
1468
1469 sc->sc_ka_state = GRE_KA_UP3;
1470 gre_link_state(&sc->sc_if, sc->sc_ka_state);
1471 break;
1472
1473 case GRE_KA_UP3:
1474 sc->sc_ka_holdmax--;
1475 sc->sc_ka_holdmax = MAX(sc->sc_ka_holdmax, sc->sc_ka_count)(((sc->sc_ka_holdmax)>(sc->sc_ka_count))?(sc->sc_ka_holdmax
):(sc->sc_ka_count))
;
1476 break;
1477 }
1478
1479 timeout_add_sec(&sc->sc_ka_hold, sc->sc_ka_timeo * sc->sc_ka_count);
1480
1481drop:
1482 m_freem(m);
1483}
1484
1485static int
1486gre_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
1487 struct rtentry *rt)
1488{
1489 struct m_tag *mtag;
1490 int error = 0;
1491
1492 if (!gre_allow) {
1493 error = EACCES13;
1494 goto drop;
1495 }
1496
1497 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
1498 error = ENETDOWN50;
1499 goto drop;
1500 }
1501
1502 switch (dst->sa_family) {
1503 case AF_INET2:
1504#ifdef INET61
1505 case AF_INET624:
1506#endif
1507#ifdef MPLS1
1508 case AF_MPLS33:
1509#endif
1510 break;
1511 default:
1512 error = EAFNOSUPPORT47;
1513 goto drop;
1514 }
1515
1516 /* Try to limit infinite recursion through misconfiguration. */
1517 for (mtag = m_tag_find(m, PACKET_TAG_GRE0x0080, NULL((void *)0)); mtag;
1518 mtag = m_tag_find(m, PACKET_TAG_GRE0x0080, mtag)) {
1519 if (memcmp((caddr_t)(mtag + 1), &ifp->if_index,__builtin_memcmp(((caddr_t)(mtag + 1)), (&ifp->if_index
), (sizeof(ifp->if_index)))
1520 sizeof(ifp->if_index))__builtin_memcmp(((caddr_t)(mtag + 1)), (&ifp->if_index
), (sizeof(ifp->if_index)))
== 0) {
1521 m_freem(m);
1522 error = EIO5;
1523 goto end;
1524 }
1525 }
1526
1527 mtag = m_tag_get(PACKET_TAG_GRE0x0080, sizeof(ifp->if_index), M_NOWAIT0x0002);
1528 if (mtag == NULL((void *)0)) {
1529 m_freem(m);
1530 error = ENOBUFS55;
1531 goto end;
1532 }
1533 memcpy((caddr_t)(mtag + 1), &ifp->if_index, sizeof(ifp->if_index))__builtin_memcpy(((caddr_t)(mtag + 1)), (&ifp->if_index
), (sizeof(ifp->if_index)))
;
1534 m_tag_prepend(m, mtag);
1535
1536 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = dst->sa_family;
1537
1538 error = if_enqueue(ifp, m);
1539end:
1540 if (error)
1541 ifp->if_oerrorsif_data.ifi_oerrors++;
1542 return (error);
1543
1544drop:
1545 m_freem(m);
1546 return (error);
1547}
1548
1549void
1550gre_start(struct ifnet *ifp)
1551{
1552 struct gre_softc *sc = ifp->if_softc;
1553 struct mbuf *m;
1554 int af;
1555#if NBPFILTER1 > 0
1556 caddr_t if_bpf;
1557#endif
1558
1559 while ((m = ifq_dequeue(&ifp->if_snd)) != NULL((void *)0)) {
1560 af = m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family;
1561
1562#if NBPFILTER1 > 0
1563 if_bpf = ifp->if_bpf;
1564 if (if_bpf)
1565 bpf_mtap_af(if_bpf, af, m, BPF_DIRECTION_OUT(1 << 1));
1566#endif
1567
1568 m = gre_l3_encap(&sc->sc_tunnel, m, af)gre_l3_encap_dst((&sc->sc_tunnel), &(&sc->sc_tunnel
)->t_dst, (m), (af))
;
1569 if (m == NULL((void *)0) || gre_ip_output(&sc->sc_tunnel, m) != 0) {
1570 ifp->if_oerrorsif_data.ifi_oerrors++;
1571 continue;
1572 }
1573 }
1574}
1575
1576void
1577mgre_rtrequest(struct ifnet *ifp, int req, struct rtentry *rt)
1578{
1579 struct ifnet *lo0ifp;
1580 struct ifaddr *ifa, *lo0ifa;
1581
1582 switch (req) {
1583 case RTM_ADD0x1:
1584 if (!ISSET(rt->rt_flags, RTF_LOCAL)((rt->rt_flags) & (0x200000)))
1585 break;
1586
1587 TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list)for((ifa) = ((&ifp->if_addrlist)->tqh_first); (ifa)
!= ((void *)0); (ifa) = ((ifa)->ifa_list.tqe_next))
{
1588 if (memcmp(rt_key(rt), ifa->ifa_addr,__builtin_memcmp((((rt)->rt_dest)), (ifa->ifa_addr), ((
(rt)->rt_dest)->sa_len))
1589 rt_key(rt)->sa_len)__builtin_memcmp((((rt)->rt_dest)), (ifa->ifa_addr), ((
(rt)->rt_dest)->sa_len))
== 0)
1590 break;
1591 }
1592
1593 if (ifa == NULL((void *)0))
1594 break;
1595
1596 KASSERT(ifa == rt->rt_ifa)((ifa == rt->rt_ifa) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if_gre.c"
, 1596, "ifa == rt->rt_ifa"))
;
1597
1598 lo0ifp = if_get(rtable_loindex(ifp->if_rdomainif_data.ifi_rdomain));
1599 KASSERT(lo0ifp != NULL)((lo0ifp != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if_gre.c"
, 1599, "lo0ifp != NULL"))
;
1600 TAILQ_FOREACH(lo0ifa, &lo0ifp->if_addrlist, ifa_list)for((lo0ifa) = ((&lo0ifp->if_addrlist)->tqh_first);
(lo0ifa) != ((void *)0); (lo0ifa) = ((lo0ifa)->ifa_list.tqe_next
))
{
1601 if (lo0ifa->ifa_addr->sa_family ==
1602 ifa->ifa_addr->sa_family)
1603 break;
1604 }
1605 if_put(lo0ifp);
1606
1607 if (lo0ifa == NULL((void *)0))
1608 break;
1609
1610 rt->rt_flags &= ~RTF_LLINFO0x400;
1611 break;
1612 case RTM_DELETE0x2:
1613 case RTM_RESOLVE0xb:
1614 default:
1615 break;
1616 }
1617}
1618
1619static int
1620mgre_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dest,
1621 struct rtentry *rt0)
1622{
1623 struct mgre_softc *sc = ifp->if_softc;
1624 struct sockaddr *gate;
1625 struct rtentry *rt;
1626 struct m_tag *mtag;
1627 int error = 0;
1628 sa_family_t af;
1629 const void *addr;
1630
1631 if (!gre_allow) {
1632 error = EACCES13;
1633 goto drop;
1634 }
1635
1636 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
1637 error = ENETDOWN50;
1638 goto drop;
1639 }
1640
1641 switch (dest->sa_family) {
1642 case AF_INET2:
1643#ifdef INET61
1644 case AF_INET624:
1645#endif
1646#ifdef MPLS1
1647 case AF_MPLS33:
1648#endif
1649 break;
1650 default:
1651 error = EAFNOSUPPORT47;
1652 goto drop;
1653 }
1654
1655 if (ISSET(m->m_flags, M_MCAST|M_BCAST)((m->m_hdr.mh_flags) & (0x0200|0x0100))) {
1656 error = ENETUNREACH51;
1657 goto drop;
1658 }
1659
1660 rt = rt_getll(rt0);
1661
1662 /* check rt_expire? */
1663 if (ISSET(rt->rt_flags, RTF_REJECT)((rt->rt_flags) & (0x8))) {
1664 error = (rt == rt0) ? EHOSTDOWN64 : EHOSTUNREACH65;
1665 goto drop;
1666 }
1667 if (!ISSET(rt->rt_flags, RTF_HOST)((rt->rt_flags) & (0x4))) {
1668 error = EHOSTUNREACH65;
1669 goto drop;
1670 }
1671 if (ISSET(rt->rt_flags, RTF_GATEWAY)((rt->rt_flags) & (0x2))) {
1672 error = EINVAL22;
1673 goto drop;
1674 }
1675
1676 gate = rt->rt_gateway;
1677 af = gate->sa_family;
1678 if (af != sc->sc_tunnel.t_af) {
1679 error = EAGAIN35;
1680 goto drop;
1681 }
1682
1683 /* Try to limit infinite recursion through misconfiguration. */
1684 for (mtag = m_tag_find(m, PACKET_TAG_GRE0x0080, NULL((void *)0)); mtag;
1685 mtag = m_tag_find(m, PACKET_TAG_GRE0x0080, mtag)) {
1686 if (memcmp((caddr_t)(mtag + 1), &ifp->if_index,__builtin_memcmp(((caddr_t)(mtag + 1)), (&ifp->if_index
), (sizeof(ifp->if_index)))
1687 sizeof(ifp->if_index))__builtin_memcmp(((caddr_t)(mtag + 1)), (&ifp->if_index
), (sizeof(ifp->if_index)))
== 0) {
1688 error = EIO5;
1689 goto drop;
1690 }
1691 }
1692
1693 mtag = m_tag_get(PACKET_TAG_GRE0x0080, sizeof(ifp->if_index), M_NOWAIT0x0002);
1694 if (mtag == NULL((void *)0)) {
1695 error = ENOBUFS55;
1696 goto drop;
1697 }
1698 memcpy((caddr_t)(mtag + 1), &ifp->if_index, sizeof(ifp->if_index))__builtin_memcpy(((caddr_t)(mtag + 1)), (&ifp->if_index
), (sizeof(ifp->if_index)))
;
1699 m_tag_prepend(m, mtag);
1700
1701 switch (af) {
1702 case AF_INET2: {
1703 struct sockaddr_in *sin = (struct sockaddr_in *)gate;
1704 addr = &sin->sin_addr;
1705 break;
1706 }
1707#ifdef INET61
1708 case AF_INET624: {
1709 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)gate;
1710 addr = &sin6->sin6_addr;
1711 break;
1712 }
1713 #endif
1714 default:
1715 unhandled_af(af);
1716 /* NOTREACHED */
1717 }
1718
1719 m = gre_l3_encap_dst(&sc->sc_tunnel, addr, m, dest->sa_family);
1720 if (m == NULL((void *)0)) {
1721 ifp->if_oerrorsif_data.ifi_oerrors++;
1722 return (ENOBUFS55);
1723 }
1724
1725 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family = dest->sa_family;
1726
1727 error = if_enqueue(ifp, m);
1728 if (error)
1729 ifp->if_oerrorsif_data.ifi_oerrors++;
1730 return (error);
1731
1732drop:
1733 m_freem(m);
1734 return (error);
1735}
1736
1737static void
1738mgre_start(struct ifnet *ifp)
1739{
1740 struct mgre_softc *sc = ifp->if_softc;
1741 struct mbuf *m;
1742#if NBPFILTER1 > 0
1743 caddr_t if_bpf;
1744#endif
1745
1746 while ((m = ifq_dequeue(&ifp->if_snd)) != NULL((void *)0)) {
1747#if NBPFILTER1 > 0
1748 if_bpf = ifp->if_bpf;
1749 if (if_bpf) {
1750 struct m_hdr mh;
1751 struct mbuf *n;
1752 int off;
1753
1754 n = m_getptr(m, ifp->if_hdrlenif_data.ifi_hdrlen, &off);
1755 KASSERT(n != NULL)((n != ((void *)0)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if_gre.c"
, 1755, "n != NULL"))
;
1756
1757 mh.mh_flags = 0;
1758 mh.mh_next = n->m_nextm_hdr.mh_next;
1759 mh.mh_len = n->m_lenm_hdr.mh_len - off;
1760 mh.mh_data = n->m_datam_hdr.mh_data + off;
1761
1762 bpf_mtap_af(if_bpf, m->m_pkthdrM_dat.MH.MH_pkthdr.ph_family,
1763 (struct mbuf *)&mh, BPF_DIRECTION_OUT(1 << 1));
1764 }
1765#endif
1766
1767 if (gre_ip_output(&sc->sc_tunnel, m) != 0) {
1768 ifp->if_oerrorsif_data.ifi_oerrors++;
1769 continue;
1770 }
1771 }
1772}
1773
1774static void
1775egre_start(struct ifnet *ifp)
1776{
1777 struct egre_softc *sc = ifp->if_softc;
1778 struct mbuf *m0, *m;
1779#if NBPFILTER1 > 0
1780 caddr_t if_bpf;
1781#endif
1782
1783 if (!gre_allow) {
1784 ifq_purge(&ifp->if_snd);
1785 return;
1786 }
1787
1788 while ((m0 = ifq_dequeue(&ifp->if_snd)) != NULL((void *)0)) {
1789#if NBPFILTER1 > 0
1790 if_bpf = ifp->if_bpf;
1791 if (if_bpf)
1792 bpf_mtap_ether(if_bpf, m0, BPF_DIRECTION_OUT(1 << 1));
1793#endif
1794
1795 /* force prepend mbuf because of alignment problems */
1796 m = m_get(M_DONTWAIT0x0002, m0->m_typem_hdr.mh_type);
1797 if (m == NULL((void *)0)) {
1798 m_freem(m0);
1799 continue;
1800 }
1801
1802 M_MOVE_PKTHDR(m, m0)do { (m)->m_hdr.mh_flags = ((m)->m_hdr.mh_flags & (
0x0001 | 0x0008)); (m)->m_hdr.mh_flags |= (m0)->m_hdr.mh_flags
& (0x0002|0x0004|0x0010|0x0100|0x0200|0x0400|0x4000| 0x0800
|0x0040|0x1000|0x8000|0x0020|0x2000); do { ((m))->M_dat.MH
.MH_pkthdr = ((m0))->M_dat.MH.MH_pkthdr; ((m0))->m_hdr.
mh_flags &= ~0x0002; { ((&((m0))->M_dat.MH.MH_pkthdr
.ph_tags)->slh_first) = ((void *)0); }; ((m0))->M_dat.MH
.MH_pkthdr.pf.statekey = ((void *)0); } while ( 0); if (((m)->
m_hdr.mh_flags & 0x0001) == 0) (m)->m_hdr.mh_data = (m
)->M_dat.MH.MH_dat.MH_databuf; } while ( 0)
;
1803 m->m_nextm_hdr.mh_next = m0;
1804
1805 m_align(m, 0);
1806 m->m_lenm_hdr.mh_len = 0;
1807
1808 m = gre_encap(&sc->sc_tunnel, m, htons(ETHERTYPE_TRANSETHER),gre_encap_dst((&sc->sc_tunnel), &(&sc->sc_tunnel
)->t_dst, (m), ((__uint16_t)(__builtin_constant_p(0x6558) ?
(__uint16_t)(((__uint16_t)(0x6558) & 0xffU) << 8 |
((__uint16_t)(0x6558) & 0xff00U) >> 8) : __swap16md
(0x6558))), (sc->sc_tunnel.t_ttl), (gre_l2_tos(&sc->
sc_tunnel, m)))
1809 sc->sc_tunnel.t_ttl, gre_l2_tos(&sc->sc_tunnel, m))gre_encap_dst((&sc->sc_tunnel), &(&sc->sc_tunnel
)->t_dst, (m), ((__uint16_t)(__builtin_constant_p(0x6558) ?
(__uint16_t)(((__uint16_t)(0x6558) & 0xffU) << 8 |
((__uint16_t)(0x6558) & 0xff00U) >> 8) : __swap16md
(0x6558))), (sc->sc_tunnel.t_ttl), (gre_l2_tos(&sc->
sc_tunnel, m)))
;
1810 if (m == NULL((void *)0) || gre_ip_output(&sc->sc_tunnel, m) != 0) {
1811 ifp->if_oerrorsif_data.ifi_oerrors++;
1812 continue;
1813 }
1814 }
1815}
1816
1817static struct mbuf *
1818gre_l3_encap_dst(const struct gre_tunnel *tunnel, const void *dst,
1819 struct mbuf *m, sa_family_t af)
1820{
1821 uint16_t proto;
1822 uint8_t ttl, itos, otos;
1823 int tttl = tunnel->t_ttl;
1824 int ttloff;
1825
1826 switch (af) {
1827 case AF_INET2: {
1828 struct ip *ip;
1829
1830 m = m_pullup(m, sizeof(*ip));
1831 if (m == NULL((void *)0))
1832 return (NULL((void *)0));
1833
1834 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
1835 itos = ip->ip_tos;
1836
1837 ttloff = offsetof(struct ip, ip_ttl)__builtin_offsetof(struct ip, ip_ttl);
1838 proto = htons(ETHERTYPE_IP)(__uint16_t)(__builtin_constant_p(0x0800) ? (__uint16_t)(((__uint16_t
)(0x0800) & 0xffU) << 8 | ((__uint16_t)(0x0800) &
0xff00U) >> 8) : __swap16md(0x0800))
;
1839 break;
1840 }
1841#ifdef INET61
1842 case AF_INET624: {
1843 struct ip6_hdr *ip6;
1844
1845 m = m_pullup(m, sizeof(*ip6));
1846 if (m == NULL((void *)0))
1847 return (NULL((void *)0));
1848
1849 ip6 = mtod(m, struct ip6_hdr *)((struct ip6_hdr *)((m)->m_hdr.mh_data));
1850 itos = (ntohl(ip6->ip6_flow)(__uint32_t)(__builtin_constant_p(ip6->ip6_ctlun.ip6_un1.ip6_un1_flow
) ? (__uint32_t)(((__uint32_t)(ip6->ip6_ctlun.ip6_un1.ip6_un1_flow
) & 0xff) << 24 | ((__uint32_t)(ip6->ip6_ctlun.ip6_un1
.ip6_un1_flow) & 0xff00) << 8 | ((__uint32_t)(ip6->
ip6_ctlun.ip6_un1.ip6_un1_flow) & 0xff0000) >> 8 | (
(__uint32_t)(ip6->ip6_ctlun.ip6_un1.ip6_un1_flow) & 0xff000000
) >> 24) : __swap32md(ip6->ip6_ctlun.ip6_un1.ip6_un1_flow
))
& 0x0ff00000) >> 20;
1851
1852 ttloff = offsetof(struct ip6_hdr, ip6_hlim)__builtin_offsetof(struct ip6_hdr, ip6_ctlun.ip6_un1.ip6_un1_hlim
)
;
1853 proto = htons(ETHERTYPE_IPV6)(__uint16_t)(__builtin_constant_p(0x86DD) ? (__uint16_t)(((__uint16_t
)(0x86DD) & 0xffU) << 8 | ((__uint16_t)(0x86DD) &
0xff00U) >> 8) : __swap16md(0x86DD))
;
1854 break;
1855 }
1856 #endif
1857#ifdef MPLS1
1858 case AF_MPLS33: {
1859 uint32_t shim;
1860
1861 m = m_pullup(m, sizeof(shim));
1862 if (m == NULL((void *)0))
1863 return (NULL((void *)0));
1864
1865 shim = bemtoh32(mtod(m, uint32_t *))(__uint32_t)(__builtin_constant_p(*(__uint32_t *)(((uint32_t *
)((m)->m_hdr.mh_data)))) ? (__uint32_t)(((__uint32_t)(*(__uint32_t
*)(((uint32_t *)((m)->m_hdr.mh_data)))) & 0xff) <<
24 | ((__uint32_t)(*(__uint32_t *)(((uint32_t *)((m)->m_hdr
.mh_data)))) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t
*)(((uint32_t *)((m)->m_hdr.mh_data)))) & 0xff0000) >>
8 | ((__uint32_t)(*(__uint32_t *)(((uint32_t *)((m)->m_hdr
.mh_data)))) & 0xff000000) >> 24) : __swap32md(*(__uint32_t
*)(((uint32_t *)((m)->m_hdr.mh_data)))))
& MPLS_EXP_MASK((u_int32_t)(__uint32_t)(__builtin_constant_p((u_int32_t)(0x00000e00U
)) ? (__uint32_t)(((__uint32_t)((u_int32_t)(0x00000e00U)) &
0xff) << 24 | ((__uint32_t)((u_int32_t)(0x00000e00U)) &
0xff00) << 8 | ((__uint32_t)((u_int32_t)(0x00000e00U))
& 0xff0000) >> 8 | ((__uint32_t)((u_int32_t)(0x00000e00U
)) & 0xff000000) >> 24) : __swap32md((u_int32_t)(0x00000e00U
))))
;
1866 itos = (shim >> MPLS_EXP_OFFSET9) << 5;
1867
1868 ttloff = 3;
1869
1870 if (m->m_flagsm_hdr.mh_flags & (M_BCAST0x0100 | M_MCAST0x0200))
1871 proto = htons(ETHERTYPE_MPLS_MCAST)(__uint16_t)(__builtin_constant_p(0x8848) ? (__uint16_t)(((__uint16_t
)(0x8848) & 0xffU) << 8 | ((__uint16_t)(0x8848) &
0xff00U) >> 8) : __swap16md(0x8848))
;
1872 else
1873 proto = htons(ETHERTYPE_MPLS)(__uint16_t)(__builtin_constant_p(0x8847) ? (__uint16_t)(((__uint16_t
)(0x8847) & 0xffU) << 8 | ((__uint16_t)(0x8847) &
0xff00U) >> 8) : __swap16md(0x8847))
;
1874 break;
1875 }
1876#endif
1877 default:
1878 unhandled_af(af);
1879 }
1880
1881 if (tttl == -1) {
1882 KASSERT(m->m_len > ttloff)((m->m_hdr.mh_len > ttloff) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/net/if_gre.c", 1882, "m->m_len > ttloff"
))
; /* m_pullup has happened */
1883
1884 ttl = *(m->m_datam_hdr.mh_data + ttloff);
1885 } else
1886 ttl = tttl;
1887
1888 itos = gre_l3_tos(tunnel, m, itos);
1889 ip_ecn_ingress(tunnel->t_ecn, &otos, &itos);
1890
1891 return (gre_encap_dst(tunnel, dst, m, proto, ttl, otos));
1892}
1893
1894static struct mbuf *
1895gre_encap_dst(const struct gre_tunnel *tunnel, const union gre_addr *dst,
1896 struct mbuf *m, uint16_t proto, uint8_t ttl, uint8_t tos)
1897{
1898 struct gre_header *gh;
1899 struct gre_h_key *gkh;
1900 int hlen;
1901
1902 hlen = sizeof(*gh);
1903 if (tunnel->t_key_mask != GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
)
1904 hlen += sizeof(*gkh);
1905
1906 m = m_prepend(m, hlen, M_DONTWAIT0x0002);
1907 if (m == NULL((void *)0))
1908 return (NULL((void *)0));
1909
1910 gh = mtod(m, struct gre_header *)((struct gre_header *)((m)->m_hdr.mh_data));
1911 gh->gre_flags = GRE_VERS_00x0000;
1912 gh->gre_proto = proto;
1913 if (tunnel->t_key_mask != GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
) {
1914 gh->gre_flags |= htons(GRE_KP)(__uint16_t)(__builtin_constant_p(0x2000) ? (__uint16_t)(((__uint16_t
)(0x2000) & 0xffU) << 8 | ((__uint16_t)(0x2000) &
0xff00U) >> 8) : __swap16md(0x2000))
;
1915
1916 gkh = (struct gre_h_key *)(gh + 1);
1917 gkh->gre_key = tunnel->t_key;
1918
1919 if (tunnel->t_key_mask == GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
&&
1920 ISSET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) & (0x4000))) {
1921 gkh->gre_key |= htonl(~GRE_KEY_ENTROPY &(__uint32_t)(__builtin_constant_p(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) ? (__uint32_t)(((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff) << 24 | ((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff00) << 8 | ((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff0000) >> 8 | ((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff000000) >> 24) : __swap32md(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
))
1922 m->m_pkthdr.ph_flowid)(__uint32_t)(__builtin_constant_p(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) ? (__uint32_t)(((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff) << 24 | ((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff00) << 8 | ((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff0000) >> 8 | ((__uint32_t)(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff000000) >> 24) : __swap32md(~(__uint32_t)(__builtin_constant_p
(0xffffff00U) ? (__uint32_t)(((__uint32_t)(0xffffff00U) &
0xff) << 24 | ((__uint32_t)(0xffffff00U) & 0xff00)
<< 8 | ((__uint32_t)(0xffffff00U) & 0xff0000) >>
8 | ((__uint32_t)(0xffffff00U) & 0xff000000) >> 24
) : __swap32md(0xffffff00U)) & m->M_dat.MH.MH_pkthdr.ph_flowid
))
;
1923 }
1924 }
1925
1926 return (gre_encap_dst_ip(tunnel, dst, m, ttl, tos));
1927}
1928
1929static struct mbuf *
1930gre_encap_dst_ip(const struct gre_tunnel *tunnel, const union gre_addr *dst,
1931 struct mbuf *m, uint8_t ttl, uint8_t tos)
1932{
1933 switch (tunnel->t_af) {
1934 case AF_UNSPEC0:
1935 /* packets may arrive before tunnel is set up */
1936 m_freem(m);
1937 return (NULL((void *)0));
1938 case AF_INET2: {
1939 struct ip *ip;
1940
1941 m = m_prepend(m, sizeof(*ip), M_DONTWAIT0x0002);
1942 if (m == NULL((void *)0))
1943 return (NULL((void *)0));
1944
1945 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
1946 ip->ip_v = IPVERSION4;
1947 ip->ip_hl = sizeof(*ip) >> 2;
1948 ip->ip_off = tunnel->t_df;
1949 ip->ip_tos = tos;
1950 ip->ip_len = htons(m->m_pkthdr.len)(__uint16_t)(__builtin_constant_p(m->M_dat.MH.MH_pkthdr.len
) ? (__uint16_t)(((__uint16_t)(m->M_dat.MH.MH_pkthdr.len) &
0xffU) << 8 | ((__uint16_t)(m->M_dat.MH.MH_pkthdr.len
) & 0xff00U) >> 8) : __swap16md(m->M_dat.MH.MH_pkthdr
.len))
;
1951 ip->ip_ttl = ttl;
1952 ip->ip_p = IPPROTO_GRE47;
1953 ip->ip_src = tunnel->t_src4t_src.in4;
1954 ip->ip_dst = dst->in4;
1955 break;
1956 }
1957#ifdef INET61
1958 case AF_INET624: {
1959 struct ip6_hdr *ip6;
1960 int len = m->m_pkthdrM_dat.MH.MH_pkthdr.len;
1961
1962 m = m_prepend(m, sizeof(*ip6), M_DONTWAIT0x0002);
1963 if (m == NULL((void *)0))
1964 return (NULL((void *)0));
1965
1966 ip6 = mtod(m, struct ip6_hdr *)((struct ip6_hdr *)((m)->m_hdr.mh_data));
1967 ip6->ip6_flowip6_ctlun.ip6_un1.ip6_un1_flow = ISSET(m->m_pkthdr.csum_flags, M_FLOWID)((m->M_dat.MH.MH_pkthdr.csum_flags) & (0x4000)) ?
1968 htonl(m->m_pkthdr.ph_flowid)(__uint32_t)(__builtin_constant_p(m->M_dat.MH.MH_pkthdr.ph_flowid
) ? (__uint32_t)(((__uint32_t)(m->M_dat.MH.MH_pkthdr.ph_flowid
) & 0xff) << 24 | ((__uint32_t)(m->M_dat.MH.MH_pkthdr
.ph_flowid) & 0xff00) << 8 | ((__uint32_t)(m->M_dat
.MH.MH_pkthdr.ph_flowid) & 0xff0000) >> 8 | ((__uint32_t
)(m->M_dat.MH.MH_pkthdr.ph_flowid) & 0xff000000) >>
24) : __swap32md(m->M_dat.MH.MH_pkthdr.ph_flowid))
: 0;
1969 ip6->ip6_vfcip6_ctlun.ip6_un2_vfc |= IPV6_VERSION0x60;
1970 ip6->ip6_flowip6_ctlun.ip6_un1.ip6_un1_flow |= htonl((uint32_t)tos << 20)(__uint32_t)(__builtin_constant_p((uint32_t)tos << 20) ?
(__uint32_t)(((__uint32_t)((uint32_t)tos << 20) & 0xff
) << 24 | ((__uint32_t)((uint32_t)tos << 20) &
0xff00) << 8 | ((__uint32_t)((uint32_t)tos << 20
) & 0xff0000) >> 8 | ((__uint32_t)((uint32_t)tos <<
20) & 0xff000000) >> 24) : __swap32md((uint32_t)tos
<< 20))
;
1971 ip6->ip6_plenip6_ctlun.ip6_un1.ip6_un1_plen = htons(len)(__uint16_t)(__builtin_constant_p(len) ? (__uint16_t)(((__uint16_t
)(len) & 0xffU) << 8 | ((__uint16_t)(len) & 0xff00U
) >> 8) : __swap16md(len))
;
1972 ip6->ip6_nxtip6_ctlun.ip6_un1.ip6_un1_nxt = IPPROTO_GRE47;
1973 ip6->ip6_hlimip6_ctlun.ip6_un1.ip6_un1_hlim = ttl;
1974 ip6->ip6_src = tunnel->t_src6t_src.in6;
1975 ip6->ip6_dst = dst->in6;
1976
1977 if (tunnel->t_df)
1978 SET(m->m_pkthdr.csum_flags, M_IPV6_DF_OUT)((m->M_dat.MH.MH_pkthdr.csum_flags) |= (0x1000));
1979
1980 break;
1981 }
1982#endif /* INET6 */
1983 default:
1984 unhandled_af(tunnel->t_af);
1985 }
1986
1987 return (m);
1988}
1989
1990static int
1991gre_ip_output(const struct gre_tunnel *tunnel, struct mbuf *m)
1992{
1993 m->m_flagsm_hdr.mh_flags &= ~(M_BCAST0x0100|M_MCAST0x0200);
1994 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = tunnel->t_rtableid;
1995
1996#if NPF1 > 0
1997 pf_pkt_addr_changed(m);
1998#endif
1999
2000 switch (tunnel->t_af) {
2001 case AF_INET2:
2002 ip_send(m);
2003 break;
2004#ifdef INET61
2005 case AF_INET624:
2006 ip6_send(m);
2007 break;
2008#endif
2009 default:
2010 unhandled_af(tunnel->t_af);
2011 }
2012
2013 return (0);
2014}
2015
2016static int
2017gre_tunnel_ioctl(struct ifnet *ifp, struct gre_tunnel *tunnel,
2018 u_long cmd, void *data)
2019{
2020 struct ifreq *ifr = (struct ifreq *)data;
2021 int error = 0;
2022
2023 switch(cmd) {
2024 case SIOCSIFMTU((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((127)))
:
2025 if (ifr->ifr_mtuifr_ifru.ifru_metric < 576) {
2026 error = EINVAL22;
2027 break;
2028 }
2029 ifp->if_mtuif_data.ifi_mtu = ifr->ifr_mtuifr_ifru.ifru_metric;
2030 break;
2031 case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((49)))
:
2032 case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((50)))
:
2033 break;
2034
2035 case SIOCSVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((166)))
:
2036 error = gre_set_vnetid(tunnel, ifr);
2037 break;
2038
2039 case SIOCGVNETID(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((167)))
:
2040 error = gre_get_vnetid(tunnel, ifr);
2041 break;
2042 case SIOCDVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((175)))
:
2043 error = gre_del_vnetid(tunnel);
2044 break;
2045
2046 case SIOCSVNETFLOWID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((195)))
:
2047 error = gre_set_vnetflowid(tunnel, ifr);
2048 break;
2049
2050 case SIOCGVNETFLOWID(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((196)))
:
2051 error = gre_get_vnetflowid(tunnel, ifr);
2052 break;
2053
2054 case SIOCSLIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) &
0x1fff) << 16) | ((('i')) << 8) | ((74)))
:
2055 error = gre_set_tunnel(tunnel, (struct if_laddrreq *)data, 1);
2056 break;
2057 case SIOCGLIFPHYADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_laddrreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((75)))
:
2058 error = gre_get_tunnel(tunnel, (struct if_laddrreq *)data);
2059 break;
2060 case SIOCDIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((73)))
:
2061 error = gre_del_tunnel(tunnel);
2062 break;
2063
2064 case SIOCSLIFPHYRTABLE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((161)))
:
2065 if (ifr->ifr_rdomainidifr_ifru.ifru_metric < 0 ||
2066 ifr->ifr_rdomainidifr_ifru.ifru_metric > RT_TABLEID_MAX255 ||
2067 !rtable_exists(ifr->ifr_rdomainidifr_ifru.ifru_metric)) {
2068 error = EINVAL22;
2069 break;
2070 }
2071 tunnel->t_rtableid = ifr->ifr_rdomainidifr_ifru.ifru_metric;
2072 break;
2073 case SIOCGLIFPHYRTABLE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((162)))
:
2074 ifr->ifr_rdomainidifr_ifru.ifru_metric = tunnel->t_rtableid;
2075 break;
2076
2077 case SIOCSLIFPHYDF((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((193)))
:
2078 /* commit */
2079 tunnel->t_df = ifr->ifr_dfifr_ifru.ifru_metric ? htons(IP_DF)(__uint16_t)(__builtin_constant_p(0x4000) ? (__uint16_t)(((__uint16_t
)(0x4000) & 0xffU) << 8 | ((__uint16_t)(0x4000) &
0xff00U) >> 8) : __swap16md(0x4000))
: htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
2080 break;
2081 case SIOCGLIFPHYDF(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((194)))
:
2082 ifr->ifr_dfifr_ifru.ifru_metric = tunnel->t_df ? 1 : 0;
2083 break;
2084
2085 default:
2086 error = ENOTTY25;
2087 break;
2088 }
2089
2090 return (error);
2091}
2092
2093static uint8_t
2094gre_l2_tos(const struct gre_tunnel *t, const struct mbuf *m)
2095{
2096 uint8_t prio;
2097
2098 switch (t->t_txhprio) {
2099 case IF_HDRPRIO_PACKET-1:
2100 prio = m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio;
2101 break;
2102 default:
2103 prio = t->t_txhprio;
2104 break;
2105 }
2106
2107 return (IFQ_PRIO2TOS(prio)((prio) << 5));
2108}
2109
2110static uint8_t
2111gre_l3_tos(const struct gre_tunnel *t, const struct mbuf *m, uint8_t tos)
2112{
2113 uint8_t prio;
2114
2115 switch (t->t_txhprio) {
2116 case IF_HDRPRIO_PAYLOAD-2:
2117 return (tos);
2118 case IF_HDRPRIO_PACKET-1:
2119 prio = m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio;
2120 break;
2121 default:
2122 prio = t->t_txhprio;
2123 break;
2124 }
2125
2126 return (IFQ_PRIO2TOS(prio)((prio) << 5) | (tos & IPTOS_ECN_MASK0x03));
2127}
2128
2129static int
2130gre_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2131{
2132 struct gre_softc *sc = ifp->if_softc;
2133 struct ifreq *ifr = (struct ifreq *)data;
2134 struct ifkalivereq *ikar = (struct ifkalivereq *)data;
2135 int error = 0;
2136
2137 switch(cmd) {
2138 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
2139 ifp->if_flags |= IFF_UP0x1;
2140 /* FALLTHROUGH */
2141 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
2142 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) {
2143 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2144 error = gre_up(sc);
2145 else
2146 error = 0;
2147 } else {
2148 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2149 error = gre_down(sc);
2150 }
2151 break;
2152 case SIOCSIFRDOMAIN((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((159)))
:
2153 /* let if_rdomain do its thing */
2154 error = ENOTTY25;
2155 break;
2156
2157 case SIOCSETKALIVE((unsigned long)0x80000000 | ((sizeof(struct ifkalivereq) &
0x1fff) << 16) | ((('i')) << 8) | ((163)))
:
2158 if (ikar->ikar_timeo < 0 || ikar->ikar_timeo > 86400 ||
2159 ikar->ikar_cnt < 0 || ikar->ikar_cnt > 256 ||
2160 (ikar->ikar_timeo == 0) != (ikar->ikar_cnt == 0))
2161 return (EINVAL22);
2162
2163 if (ikar->ikar_timeo == 0 || ikar->ikar_cnt == 0) {
2164 sc->sc_ka_count = 0;
2165 sc->sc_ka_timeo = 0;
2166 sc->sc_ka_state = GRE_KA_NONE0;
2167 } else {
2168 sc->sc_ka_count = ikar->ikar_cnt;
2169 sc->sc_ka_timeo = ikar->ikar_timeo;
2170 sc->sc_ka_state = GRE_KA_DOWN1;
2171
2172 arc4random_buf(&sc->sc_ka_key, sizeof(sc->sc_ka_key));
2173 sc->sc_ka_bias = arc4random();
2174 sc->sc_ka_holdmax = sc->sc_ka_count;
2175
2176 sc->sc_ka_recvtm = ticks - hz;
2177 timeout_add(&sc->sc_ka_send, 1);
2178 timeout_add_sec(&sc->sc_ka_hold,
2179 sc->sc_ka_timeo * sc->sc_ka_count);
2180 }
2181 break;
2182
2183 case SIOCGETKALIVE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifkalivereq) & 0x1fff) << 16) | ((('i')) <<
8) | ((164)))
:
2184 ikar->ikar_cnt = sc->sc_ka_count;
2185 ikar->ikar_timeo = sc->sc_ka_timeo;
2186 break;
2187
2188 case SIOCSLIFPHYTTL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((168)))
:
2189 if (ifr->ifr_ttlifr_ifru.ifru_metric != -1 &&
2190 (ifr->ifr_ttlifr_ifru.ifru_metric < 1 || ifr->ifr_ttlifr_ifru.ifru_metric > 0xff)) {
2191 error = EINVAL22;
2192 break;
2193 }
2194
2195 /* commit */
2196 sc->sc_tunnel.t_ttl = ifr->ifr_ttlifr_ifru.ifru_metric;
2197 break;
2198
2199 case SIOCGLIFPHYTTL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((169)))
:
2200 ifr->ifr_ttlifr_ifru.ifru_metric = sc->sc_tunnel.t_ttl;
2201 break;
2202
2203 case SIOCSLIFPHYECN((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((199)))
:
2204 sc->sc_tunnel.t_ecn =
2205 ifr->ifr_metricifr_ifru.ifru_metric ? ECN_ALLOWED1 : ECN_FORBIDDEN0;
2206 break;
2207 case SIOCGLIFPHYECN(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((200)))
:
2208 ifr->ifr_metricifr_ifru.ifru_metric = (sc->sc_tunnel.t_ecn == ECN_ALLOWED1);
2209 break;
2210
2211 case SIOCSTXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((197)))
:
2212 error = if_txhprio_l3_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2213 if (error != 0)
2214 break;
2215
2216 sc->sc_tunnel.t_txhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2217 break;
2218 case SIOCGTXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((198)))
:
2219 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_txhprio;
2220 break;
2221
2222 case SIOCSRXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((219)))
:
2223 error = if_rxhprio_l3_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2224 if (error != 0)
2225 break;
2226
2227 sc->sc_tunnel.t_rxhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2228 break;
2229 case SIOCGRXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((219)))
:
2230 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_rxhprio;
2231 break;
2232
2233 default:
2234 error = gre_tunnel_ioctl(ifp, &sc->sc_tunnel, cmd, data);
2235 break;
2236 }
2237
2238 return (error);
2239}
2240
2241static int
2242mgre_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2243{
2244 struct mgre_softc *sc = ifp->if_softc;
2245 struct ifreq *ifr = (struct ifreq *)data;
2246 int error = 0;
2247
2248 switch(cmd) {
2249 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
2250 break;
2251 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
2252 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) {
2253 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2254 error = mgre_up(sc);
2255 else
2256 error = 0;
2257 } else {
2258 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2259 error = mgre_down(sc);
2260 }
2261 break;
2262
2263 case SIOCSLIFPHYTTL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((168)))
:
2264 if (ifr->ifr_ttlifr_ifru.ifru_metric != -1 &&
2265 (ifr->ifr_ttlifr_ifru.ifru_metric < 1 || ifr->ifr_ttlifr_ifru.ifru_metric > 0xff)) {
2266 error = EINVAL22;
2267 break;
2268 }
2269
2270 /* commit */
2271 sc->sc_tunnel.t_ttl = ifr->ifr_ttlifr_ifru.ifru_metric;
2272 break;
2273
2274 case SIOCGLIFPHYTTL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((169)))
:
2275 ifr->ifr_ttlifr_ifru.ifru_metric = sc->sc_tunnel.t_ttl;
2276 break;
2277
2278 case SIOCSLIFPHYECN((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((199)))
:
2279 sc->sc_tunnel.t_ecn =
2280 ifr->ifr_metricifr_ifru.ifru_metric ? ECN_ALLOWED1 : ECN_FORBIDDEN0;
2281 break;
2282 case SIOCGLIFPHYECN(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((200)))
:
2283 ifr->ifr_metricifr_ifru.ifru_metric = (sc->sc_tunnel.t_ecn == ECN_ALLOWED1);
2284 break;
2285
2286 case SIOCSLIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) &
0x1fff) << 16) | ((('i')) << 8) | ((74)))
:
2287 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2288 error = EBUSY16;
2289 break;
2290 }
2291 error = mgre_set_tunnel(sc, (struct if_laddrreq *)data);
2292 break;
2293 case SIOCGLIFPHYADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_laddrreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((75)))
:
2294 error = mgre_get_tunnel(sc, (struct if_laddrreq *)data);
2295 break;
2296
2297 case SIOCSTXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((197)))
:
2298 error = if_txhprio_l3_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2299 if (error != 0)
2300 break;
2301
2302 sc->sc_tunnel.t_txhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2303 break;
2304 case SIOCGTXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((198)))
:
2305 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_txhprio;
2306 break;
2307
2308 case SIOCSRXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((219)))
:
2309 error = if_rxhprio_l3_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2310 if (error != 0)
2311 break;
2312
2313 sc->sc_tunnel.t_rxhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2314 break;
2315 case SIOCGRXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((219)))
:
2316 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_rxhprio;
2317 break;
2318
2319 case SIOCSVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((166)))
:
2320 case SIOCDVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((175)))
:
2321 case SIOCDIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((73)))
:
2322 case SIOCSLIFPHYRTABLE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((161)))
:
2323 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2324 error = EBUSY16;
2325 break;
2326 }
2327
2328 /* FALLTHROUGH */
2329 default:
2330 error = gre_tunnel_ioctl(ifp, &sc->sc_tunnel, cmd, data);
2331 break;
2332 }
2333
2334 return (error);
2335}
2336
2337static int
2338mgre_set_tunnel(struct mgre_softc *sc, struct if_laddrreq *req)
2339{
2340 struct gre_tunnel *tunnel = &sc->sc_tunnel;
2341 struct sockaddr *addr = (struct sockaddr *)&req->addr;
2342 struct sockaddr *dstaddr = (struct sockaddr *)&req->dstaddr;
2343 struct sockaddr_in *addr4;
2344#ifdef INET61
2345 struct sockaddr_in6 *addr6;
2346 int error;
2347#endif
2348
2349 if (dstaddr->sa_family != AF_UNSPEC0)
2350 return (EINVAL22);
2351
2352 /* validate */
2353 switch (addr->sa_family) {
2354 case AF_INET2:
2355 if (addr->sa_len != sizeof(*addr4))
2356 return (EINVAL22);
2357
2358 addr4 = (struct sockaddr_in *)addr;
2359 if (in_nullhost(addr4->sin_addr)((addr4->sin_addr).s_addr == ((u_int32_t) (__uint32_t)(__builtin_constant_p
((u_int32_t)(0x00000000)) ? (__uint32_t)(((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff) << 24 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff00) << 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff0000) >> 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff000000) >> 24) : __swap32md((u_int32_t
)(0x00000000)))))
||
2360 IN_MULTICAST(addr4->sin_addr.s_addr)(((u_int32_t)(addr4->sin_addr.s_addr) & ((u_int32_t) (
__uint32_t)(__builtin_constant_p((u_int32_t)(0xf0000000)) ? (
__uint32_t)(((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff
) << 24 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff00
) << 8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff000000
) >> 24) : __swap32md((u_int32_t)(0xf0000000))))) == ((
u_int32_t) (__uint32_t)(__builtin_constant_p((u_int32_t)(0xe0000000
)) ? (__uint32_t)(((__uint32_t)((u_int32_t)(0xe0000000)) &
0xff) << 24 | ((__uint32_t)((u_int32_t)(0xe0000000)) &
0xff00) << 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) &
0xff0000) >> 8 | ((__uint32_t)((u_int32_t)(0xe0000000)
) & 0xff000000) >> 24) : __swap32md((u_int32_t)(0xe0000000
)))))
)
2361 return (EINVAL22);
2362
2363 tunnel->t_src4t_src.in4 = addr4->sin_addr;
2364 tunnel->t_dst4t_dst.in4.s_addr = INADDR_ANY((u_int32_t) (__uint32_t)(__builtin_constant_p((u_int32_t)(0x00000000
)) ? (__uint32_t)(((__uint32_t)((u_int32_t)(0x00000000)) &
0xff) << 24 | ((__uint32_t)((u_int32_t)(0x00000000)) &
0xff00) << 8 | ((__uint32_t)((u_int32_t)(0x00000000)) &
0xff0000) >> 8 | ((__uint32_t)((u_int32_t)(0x00000000)
) & 0xff000000) >> 24) : __swap32md((u_int32_t)(0x00000000
))))
;
2365
2366 break;
2367#ifdef INET61
2368 case AF_INET624:
2369 if (addr->sa_len != sizeof(*addr6))
2370 return (EINVAL22);
2371
2372 addr6 = (struct sockaddr_in6 *)addr;
2373 if (IN6_IS_ADDR_UNSPECIFIED(&addr6->sin6_addr)((*(const u_int32_t *)(const void *)(&(&addr6->sin6_addr
)->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t
*)(const void *)(&(&addr6->sin6_addr)->__u6_addr
.__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void
*)(&(&addr6->sin6_addr)->__u6_addr.__u6_addr8[
8]) == 0) && (*(const u_int32_t *)(const void *)(&
(&addr6->sin6_addr)->__u6_addr.__u6_addr8[12]) == 0
))
||
2374 IN6_IS_ADDR_MULTICAST(&addr6->sin6_addr)((&addr6->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff
)
)
2375 return (EINVAL22);
2376
2377 error = in6_embedscope(&tunnel->t_src6t_src.in6, addr6, NULL((void *)0));
2378 if (error != 0)
2379 return (error);
2380
2381 memset(&tunnel->t_dst6, 0, sizeof(tunnel->t_dst6))__builtin_memset((&tunnel->t_dst.in6), (0), (sizeof(tunnel
->t_dst.in6)))
;
2382
2383 break;
2384#endif
2385 default:
2386 return (EAFNOSUPPORT47);
2387 }
2388
2389 /* commit */
2390 tunnel->t_af = addr->sa_family;
2391
2392 return (0);
2393}
2394
2395static int
2396mgre_get_tunnel(struct mgre_softc *sc, struct if_laddrreq *req)
2397{
2398 struct gre_tunnel *tunnel = &sc->sc_tunnel;
2399 struct sockaddr *dstaddr = (struct sockaddr *)&req->dstaddr;
2400 struct sockaddr_in *sin;
2401#ifdef INET61
2402 struct sockaddr_in6 *sin6;
2403#endif
2404
2405 switch (tunnel->t_af) {
2406 case AF_UNSPEC0:
2407 return (EADDRNOTAVAIL49);
2408 case AF_INET2:
2409 sin = (struct sockaddr_in *)&req->addr;
2410 memset(sin, 0, sizeof(*sin))__builtin_memset((sin), (0), (sizeof(*sin)));
2411 sin->sin_family = AF_INET2;
2412 sin->sin_len = sizeof(*sin);
2413 sin->sin_addr = tunnel->t_src4t_src.in4;
2414 break;
2415
2416#ifdef INET61
2417 case AF_INET624:
2418 sin6 = (struct sockaddr_in6 *)&req->addr;
2419 memset(sin6, 0, sizeof(*sin6))__builtin_memset((sin6), (0), (sizeof(*sin6)));
2420 sin6->sin6_family = AF_INET624;
2421 sin6->sin6_len = sizeof(*sin6);
2422 in6_recoverscope(sin6, &tunnel->t_src6t_src.in6);
2423 break;
2424#endif
2425 default:
2426 unhandled_af(tunnel->t_af);
2427 }
2428
2429 dstaddr->sa_len = 2;
2430 dstaddr->sa_family = AF_UNSPEC0;
2431
2432 return (0);
2433}
2434
2435static int
2436egre_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2437{
2438 struct egre_softc *sc = ifp->if_softc;
2439 struct ifreq *ifr = (struct ifreq *)data;
2440 int error = 0;
2441
2442 switch(cmd) {
2443 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
2444 break;
2445 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
2446 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) {
2447 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2448 error = egre_up(sc);
2449 else
2450 error = 0;
2451 } else {
2452 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2453 error = egre_down(sc);
2454 }
2455 break;
2456
2457 case SIOCSLIFPHYTTL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((168)))
:
2458 if (ifr->ifr_ttlifr_ifru.ifru_metric < 1 || ifr->ifr_ttlifr_ifru.ifru_metric > 0xff) {
2459 error = EINVAL22;
2460 break;
2461 }
2462
2463 /* commit */
2464 sc->sc_tunnel.t_ttl = (uint8_t)ifr->ifr_ttlifr_ifru.ifru_metric;
2465 break;
2466
2467 case SIOCGLIFPHYTTL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((169)))
:
2468 ifr->ifr_ttlifr_ifru.ifru_metric = (int)sc->sc_tunnel.t_ttl;
2469 break;
2470
2471 case SIOCSTXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((197)))
:
2472 error = if_txhprio_l2_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2473 if (error != 0)
2474 break;
2475
2476 sc->sc_tunnel.t_txhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2477 break;
2478 case SIOCGTXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((198)))
:
2479 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_txhprio;
2480 break;
2481
2482 case SIOCSRXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((219)))
:
2483 error = if_rxhprio_l2_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2484 if (error != 0)
2485 break;
2486
2487 sc->sc_tunnel.t_rxhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2488 break;
2489 case SIOCGRXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((219)))
:
2490 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_rxhprio;
2491 break;
2492
2493 case SIOCSVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((166)))
:
2494 case SIOCDVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((175)))
:
2495 case SIOCSVNETFLOWID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((195)))
:
2496 case SIOCSLIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) &
0x1fff) << 16) | ((('i')) << 8) | ((74)))
:
2497 case SIOCDIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((73)))
:
2498 case SIOCSLIFPHYRTABLE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((161)))
:
2499 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2500 error = EBUSY16;
2501 break;
2502 }
2503
2504 /* FALLTHROUGH */
2505 default:
2506 error = gre_tunnel_ioctl(ifp, &sc->sc_tunnel, cmd, data);
2507 if (error == ENOTTY25)
2508 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
2509 break;
2510 }
2511
2512 if (error == ENETRESET52) {
2513 /* no hardware to program */
2514 error = 0;
2515 }
2516
2517 return (error);
2518}
2519
2520static int
2521nvgre_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2522{
2523 struct nvgre_softc *sc = ifp->if_softc;
2524 struct gre_tunnel *tunnel = &sc->sc_tunnel;
2525
2526 struct ifreq *ifr = (struct ifreq *)data;
2527 struct if_parent *parent = (struct if_parent *)data;
2528 struct ifbrparam *bparam = (struct ifbrparam *)data;
2529 struct ifnet *ifp0;
2530
2531 int error = 0;
2532
2533 switch (cmd) {
2534 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
2535 break;
2536 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
2537 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) {
2538 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2539 error = nvgre_up(sc);
2540 else
2541 error = ENETRESET52;
2542 } else {
2543 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2544 error = nvgre_down(sc);
2545 }
2546 break;
2547
2548 case SIOCSLIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) &
0x1fff) << 16) | ((('i')) << 8) | ((74)))
:
2549 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2550 error = EBUSY16;
2551 break;
2552 }
2553 error = gre_set_tunnel(tunnel, (struct if_laddrreq *)data, 0);
2554 if (error == 0)
2555 etherbridge_flush(&sc->sc_eb, IFBF_FLUSHALL0x1);
2556 break;
2557 case SIOCGLIFPHYADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_laddrreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((75)))
:
2558 error = gre_get_tunnel(tunnel, (struct if_laddrreq *)data);
2559 break;
2560 case SIOCDIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((73)))
:
2561 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2562 error = EBUSY16;
2563 break;
2564 }
2565 error = gre_del_tunnel(tunnel);
2566 if (error == 0)
2567 etherbridge_flush(&sc->sc_eb, IFBF_FLUSHALL0x1);
2568 break;
2569
2570 case SIOCSIFPARENT((unsigned long)0x80000000 | ((sizeof(struct if_parent) &
0x1fff) << 16) | ((('i')) << 8) | ((178)))
:
2571 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2572 error = EBUSY16;
2573 break;
2574 }
2575 error = nvgre_set_parent(sc, parent->ifp_parent);
2576 if (error == 0)
2577 etherbridge_flush(&sc->sc_eb, IFBF_FLUSHALL0x1);
2578 break;
2579 case SIOCGIFPARENT(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_parent) & 0x1fff) << 16) | ((('i')) <<
8) | ((179)))
:
2580 ifp0 = if_get(sc->sc_ifp0);
2581 if (ifp0 == NULL((void *)0))
2582 error = EADDRNOTAVAIL49;
2583 else {
2584 memcpy(parent->ifp_parent, ifp0->if_xname,__builtin_memcpy((parent->ifp_parent), (ifp0->if_xname)
, (sizeof(parent->ifp_parent)))
2585 sizeof(parent->ifp_parent))__builtin_memcpy((parent->ifp_parent), (ifp0->if_xname)
, (sizeof(parent->ifp_parent)))
;
2586 }
2587 if_put(ifp0);
2588 break;
2589 case SIOCDIFPARENT((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((180)))
:
2590 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2591 error = EBUSY16;
2592 break;
2593 }
2594 /* commit */
2595 sc->sc_ifp0 = 0;
2596 etherbridge_flush(&sc->sc_eb, IFBF_FLUSHALL0x1);
2597 break;
2598
2599 case SIOCSVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((166)))
:
2600 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2601 error = EBUSY16;
2602 break;
2603 }
2604 if (ifr->ifr_vnetidifr_ifru.ifru_vnetid < GRE_KEY_ENTROPY_MIN0x00000000U ||
2605 ifr->ifr_vnetidifr_ifru.ifru_vnetid > GRE_KEY_ENTROPY_MAX0x00ffffffU) {
2606 error = EINVAL22;
2607 break;
2608 }
2609
2610 /* commit */
2611 tunnel->t_key = htonl(ifr->ifr_vnetid << GRE_KEY_ENTROPY_SHIFT)(__uint32_t)(__builtin_constant_p(ifr->ifr_ifru.ifru_vnetid
<< 8) ? (__uint32_t)(((__uint32_t)(ifr->ifr_ifru.ifru_vnetid
<< 8) & 0xff) << 24 | ((__uint32_t)(ifr->
ifr_ifru.ifru_vnetid << 8) & 0xff00) << 8 | (
(__uint32_t)(ifr->ifr_ifru.ifru_vnetid << 8) & 0xff0000
) >> 8 | ((__uint32_t)(ifr->ifr_ifru.ifru_vnetid <<
8) & 0xff000000) >> 24) : __swap32md(ifr->ifr_ifru
.ifru_vnetid << 8))
;
2612 etherbridge_flush(&sc->sc_eb, IFBF_FLUSHALL0x1);
2613 break;
2614 case SIOCGVNETID(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((167)))
:
2615 error = gre_get_vnetid(tunnel, ifr);
2616 break;
2617
2618 case SIOCSLIFPHYRTABLE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((161)))
:
2619 if (ifr->ifr_rdomainidifr_ifru.ifru_metric < 0 ||
2620 ifr->ifr_rdomainidifr_ifru.ifru_metric > RT_TABLEID_MAX255 ||
2621 !rtable_exists(ifr->ifr_rdomainidifr_ifru.ifru_metric)) {
2622 error = EINVAL22;
2623 break;
2624 }
2625 tunnel->t_rtableid = ifr->ifr_rdomainidifr_ifru.ifru_metric;
2626 etherbridge_flush(&sc->sc_eb, IFBF_FLUSHALL0x1);
2627 break;
2628 case SIOCGLIFPHYRTABLE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((162)))
:
2629 ifr->ifr_rdomainidifr_ifru.ifru_metric = tunnel->t_rtableid;
2630 break;
2631
2632 case SIOCSLIFPHYDF((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((193)))
:
2633 /* commit */
2634 tunnel->t_df = ifr->ifr_dfifr_ifru.ifru_metric ? htons(IP_DF)(__uint16_t)(__builtin_constant_p(0x4000) ? (__uint16_t)(((__uint16_t
)(0x4000) & 0xffU) << 8 | ((__uint16_t)(0x4000) &
0xff00U) >> 8) : __swap16md(0x4000))
: htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
2635 break;
2636 case SIOCGLIFPHYDF(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((194)))
:
2637 ifr->ifr_dfifr_ifru.ifru_metric = tunnel->t_df ? 1 : 0;
2638 break;
2639
2640 case SIOCSLIFPHYTTL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((168)))
:
2641 if (ifr->ifr_ttlifr_ifru.ifru_metric < 1 || ifr->ifr_ttlifr_ifru.ifru_metric > 0xff) {
2642 error = EINVAL22;
2643 break;
2644 }
2645
2646 /* commit */
2647 tunnel->t_ttl = ifr->ifr_ttlifr_ifru.ifru_metric;
2648 break;
2649
2650 case SIOCGLIFPHYTTL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((169)))
:
2651 ifr->ifr_ttlifr_ifru.ifru_metric = tunnel->t_ttl;
2652 break;
2653
2654 case SIOCSTXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((197)))
:
2655 error = if_txhprio_l2_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2656 if (error != 0)
2657 break;
2658
2659 sc->sc_tunnel.t_txhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2660 break;
2661 case SIOCGTXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((198)))
:
2662 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_txhprio;
2663 break;
2664
2665 case SIOCSRXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((219)))
:
2666 error = if_rxhprio_l2_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2667 if (error != 0)
2668 break;
2669
2670 sc->sc_tunnel.t_rxhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2671 break;
2672 case SIOCGRXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((219)))
:
2673 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_rxhprio;
2674 break;
2675
2676 case SIOCBRDGSCACHE((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((64)))
:
2677 error = etherbridge_set_max(&sc->sc_eb, bparam);
2678 break;
2679 case SIOCBRDGGCACHE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbrparam) & 0x1fff) << 16) | ((('i')) <<
8) | ((65)))
:
2680 error = etherbridge_get_max(&sc->sc_eb, bparam);
2681 break;
2682
2683 case SIOCBRDGSTO((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((69)))
:
2684 error = etherbridge_set_tmo(&sc->sc_eb, bparam);
2685 break;
2686 case SIOCBRDGGTO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbrparam) & 0x1fff) << 16) | ((('i')) <<
8) | ((70)))
:
2687 error = etherbridge_get_tmo(&sc->sc_eb, bparam);
2688 break;
2689
2690 case SIOCBRDGRTS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbaconf) & 0x1fff) << 16) | ((('i')) <<
8) | ((67)))
:
2691 error = etherbridge_rtfind(&sc->sc_eb,
2692 (struct ifbaconf *)data);
2693 break;
2694 case SIOCBRDGFLUSH((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((72)))
:
2695 etherbridge_flush(&sc->sc_eb,
2696 ((struct ifbreq *)data)->ifbr_ifsflags);
2697 break;
2698 case SIOCBRDGSADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbareq) & 0x1fff) << 16) | ((('i')) <<
8) | ((68)))
:
2699 error = nvgre_add_addr(sc, (struct ifbareq *)data);
2700 break;
2701 case SIOCBRDGDADDR((unsigned long)0x80000000 | ((sizeof(struct ifbareq) & 0x1fff
) << 16) | ((('i')) << 8) | ((71)))
:
2702 error = nvgre_del_addr(sc, (struct ifbareq *)data);
2703 break;
2704
2705 case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((49)))
:
2706 case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((50)))
:
2707 break;
2708
2709 default:
2710 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
2711 break;
2712 }
2713
2714 if (error == ENETRESET52) {
2715 /* no hardware to program */
2716 error = 0;
2717 }
2718
2719 return (error);
2720}
2721
2722static int
2723eoip_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2724{
2725 struct eoip_softc *sc = ifp->if_softc;
2726 struct ifreq *ifr = (struct ifreq *)data;
2727 struct ifkalivereq *ikar = (struct ifkalivereq *)data;
2728 int error = 0;
2729
2730 switch(cmd) {
2731 case SIOCSIFADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((12)))
:
2732 break;
2733 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
2734 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) {
2735 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2736 error = eoip_up(sc);
2737 else
2738 error = 0;
2739 } else {
2740 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
2741 error = eoip_down(sc);
2742 }
2743 break;
2744
2745 case SIOCSETKALIVE((unsigned long)0x80000000 | ((sizeof(struct ifkalivereq) &
0x1fff) << 16) | ((('i')) << 8) | ((163)))
:
2746 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2747 error = EBUSY16;
2748 break;
2749 }
2750
2751 if (ikar->ikar_timeo < 0 || ikar->ikar_timeo > 86400 ||
2752 ikar->ikar_cnt < 0 || ikar->ikar_cnt > 256)
2753 return (EINVAL22);
2754
2755 if (ikar->ikar_timeo == 0 || ikar->ikar_cnt == 0) {
2756 sc->sc_ka_count = 0;
2757 sc->sc_ka_timeo = 0;
2758 sc->sc_ka_state = GRE_KA_NONE0;
2759 } else {
2760 sc->sc_ka_count = ikar->ikar_cnt;
2761 sc->sc_ka_timeo = ikar->ikar_timeo;
2762 sc->sc_ka_state = GRE_KA_DOWN1;
2763 }
2764 break;
2765
2766 case SIOCGETKALIVE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifkalivereq) & 0x1fff) << 16) | ((('i')) <<
8) | ((164)))
:
2767 ikar->ikar_cnt = sc->sc_ka_count;
2768 ikar->ikar_timeo = sc->sc_ka_timeo;
2769 break;
2770
2771 case SIOCSVNETID((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((166)))
:
2772 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2773 error = EBUSY16;
2774 break;
2775 }
2776 if (ifr->ifr_vnetidifr_ifru.ifru_vnetid < 0 || ifr->ifr_vnetidifr_ifru.ifru_vnetid > 0xffff)
2777 return (EINVAL22);
2778
2779 sc->sc_tunnel.t_key = htole16(ifr->ifr_vnetid)((__uint16_t)(ifr->ifr_ifru.ifru_vnetid)); /* for cmp */
2780 sc->sc_tunnel_id = htole16(ifr->ifr_vnetid)((__uint16_t)(ifr->ifr_ifru.ifru_vnetid));
2781 break;
2782
2783 case SIOCGVNETID(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((167)))
:
2784 ifr->ifr_vnetidifr_ifru.ifru_vnetid = letoh16(sc->sc_tunnel_id)((__uint16_t)(sc->sc_tunnel_id));
2785 break;
2786
2787 case SIOCSLIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct if_laddrreq) &
0x1fff) << 16) | ((('i')) << 8) | ((74)))
:
2788 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2789 error = EBUSY16;
2790 break;
2791 }
2792
2793 error = gre_set_tunnel(&sc->sc_tunnel,
2794 (struct if_laddrreq *)data, 1);
2795 break;
2796 case SIOCGLIFPHYADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct if_laddrreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((75)))
:
2797 error = gre_get_tunnel(&sc->sc_tunnel,
2798 (struct if_laddrreq *)data);
2799 break;
2800 case SIOCDIFPHYADDR((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((73)))
:
2801 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2802 error = EBUSY16;
2803 break;
2804 }
2805
2806 error = gre_del_tunnel(&sc->sc_tunnel);
2807 break;
2808
2809 case SIOCSLIFPHYRTABLE((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((161)))
:
2810 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2811 error = EBUSY16;
2812 break;
2813 }
2814
2815 if (ifr->ifr_rdomainidifr_ifru.ifru_metric < 0 ||
2816 ifr->ifr_rdomainidifr_ifru.ifru_metric > RT_TABLEID_MAX255 ||
2817 !rtable_exists(ifr->ifr_rdomainidifr_ifru.ifru_metric)) {
2818 error = EINVAL22;
2819 break;
2820 }
2821 sc->sc_tunnel.t_rtableid = ifr->ifr_rdomainidifr_ifru.ifru_metric;
2822 break;
2823 case SIOCGLIFPHYRTABLE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((162)))
:
2824 ifr->ifr_rdomainidifr_ifru.ifru_metric = sc->sc_tunnel.t_rtableid;
2825 break;
2826
2827 case SIOCSLIFPHYTTL((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((168)))
:
2828 if (ifr->ifr_ttlifr_ifru.ifru_metric < 1 || ifr->ifr_ttlifr_ifru.ifru_metric > 0xff) {
2829 error = EINVAL22;
2830 break;
2831 }
2832
2833 /* commit */
2834 sc->sc_tunnel.t_ttl = (uint8_t)ifr->ifr_ttlifr_ifru.ifru_metric;
2835 break;
2836 case SIOCGLIFPHYTTL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((169)))
:
2837 ifr->ifr_ttlifr_ifru.ifru_metric = (int)sc->sc_tunnel.t_ttl;
2838 break;
2839
2840 case SIOCSLIFPHYDF((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((193)))
:
2841 /* commit */
2842 sc->sc_tunnel.t_df = ifr->ifr_dfifr_ifru.ifru_metric ? htons(IP_DF)(__uint16_t)(__builtin_constant_p(0x4000) ? (__uint16_t)(((__uint16_t
)(0x4000) & 0xffU) << 8 | ((__uint16_t)(0x4000) &
0xff00U) >> 8) : __swap16md(0x4000))
: htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
;
2843 break;
2844 case SIOCGLIFPHYDF(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((194)))
:
2845 ifr->ifr_dfifr_ifru.ifru_metric = sc->sc_tunnel.t_df ? 1 : 0;
2846 break;
2847
2848 case SIOCSTXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((197)))
:
2849 error = if_txhprio_l2_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2850 if (error != 0)
2851 break;
2852
2853 sc->sc_tunnel.t_txhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2854 break;
2855 case SIOCGTXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((198)))
:
2856 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_txhprio;
2857 break;
2858
2859 case SIOCSRXHPRIO((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((219)))
:
2860 error = if_rxhprio_l2_check(ifr->ifr_hdrprioifr_ifru.ifru_metric);
2861 if (error != 0)
2862 break;
2863
2864 sc->sc_tunnel.t_rxhprio = ifr->ifr_hdrprioifr_ifru.ifru_metric;
2865 break;
2866 case SIOCGRXHPRIO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((219)))
:
2867 ifr->ifr_hdrprioifr_ifru.ifru_metric = sc->sc_tunnel.t_rxhprio;
2868 break;
2869
2870 case SIOCADDMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((49)))
:
2871 case SIOCDELMULTI((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((50)))
:
2872 break;
2873
2874 default:
2875 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
2876 break;
2877 }
2878
2879 if (error == ENETRESET52) {
2880 /* no hardware to program */
2881 error = 0;
2882 }
2883
2884 return (error);
2885}
2886
2887static int
2888gre_up(struct gre_softc *sc)
2889{
2890 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
2891 SET(sc->sc_if.if_flags, IFF_RUNNING)((sc->sc_if.if_flags) |= (0x40));
2892
2893 if (sc->sc_ka_state != GRE_KA_NONE0)
2894 gre_keepalive_send(sc);
2895
2896 return (0);
2897}
2898
2899static int
2900gre_down(struct gre_softc *sc)
2901{
2902 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
2903 CLR(sc->sc_if.if_flags, IFF_RUNNING)((sc->sc_if.if_flags) &= ~(0x40));
2904
2905 if (sc->sc_ka_state != GRE_KA_NONE0) {
2906 timeout_del_barrier(&sc->sc_ka_hold);
2907 timeout_del_barrier(&sc->sc_ka_send);
2908
2909 sc->sc_ka_state = GRE_KA_DOWN1;
2910 gre_link_state(&sc->sc_if, sc->sc_ka_state);
2911 }
2912
2913 return (0);
2914}
2915
2916static void
2917gre_link_state(struct ifnet *ifp, unsigned int state)
2918{
2919 int link_state = LINK_STATE_UNKNOWN0;
2920
2921 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
2922 switch (state) {
2923 case GRE_KA_NONE0:
2924 /* maybe up? or down? it's unknown, really */
2925 break;
2926 case GRE_KA_UP3:
2927 link_state = LINK_STATE_UP4;
2928 break;
2929 default:
2930 link_state = LINK_STATE_KALIVE_DOWN3;
2931 break;
2932 }
2933 }
2934
2935 if (ifp->if_link_stateif_data.ifi_link_state != link_state) {
2936 ifp->if_link_stateif_data.ifi_link_state = link_state;
2937 if_link_state_change(ifp);
2938 }
2939}
2940
2941static void
2942gre_keepalive_send(void *arg)
2943{
2944 struct gre_tunnel t;
2945 struct gre_softc *sc = arg;
2946 struct mbuf *m;
2947 struct gre_keepalive *gk;
2948 SIPHASH_CTX ctx;
2949 int linkhdr, len;
2950 uint16_t proto;
2951 uint8_t ttl;
2952 uint8_t tos;
2953
2954 /*
2955 * re-schedule immediately, so we deal with incomplete configuration
2956 * or temporary errors.
2957 */
2958 if (sc->sc_ka_timeo)
2959 timeout_add_sec(&sc->sc_ka_send, sc->sc_ka_timeo);
2960
2961 if (!ISSET(sc->sc_if.if_flags, IFF_RUNNING)((sc->sc_if.if_flags) & (0x40)) ||
2962 sc->sc_ka_state == GRE_KA_NONE0 ||
2963 sc->sc_tunnel.t_af == AF_UNSPEC0 ||
2964 sc->sc_tunnel.t_rtableid != sc->sc_if.if_rdomainif_data.ifi_rdomain)
2965 return;
2966
2967 /* this is really conservative */
2968#ifdef INET61
2969 linkhdr = max_linkhdr + MAX(sizeof(struct ip), sizeof(struct ip6_hdr))(((sizeof(struct ip))>(sizeof(struct ip6_hdr)))?(sizeof(struct
ip)):(sizeof(struct ip6_hdr)))
+
2970 sizeof(struct gre_header) + sizeof(struct gre_h_key);
2971#else
2972 linkhdr = max_linkhdr + sizeof(struct ip) +
2973 sizeof(struct gre_header) + sizeof(struct gre_h_key);
2974#endif
2975 len = linkhdr + sizeof(*gk);
2976
2977 MGETHDR(m, M_DONTWAIT, MT_DATA)m = m_gethdr((0x0002), (1));
2978 if (m == NULL((void *)0))
2979 return;
2980
2981 if (len > MHLEN((256 - sizeof(struct m_hdr)) - sizeof(struct pkthdr))) {
2982 MCLGETL(m, M_DONTWAIT, len)m_clget((m), (0x0002), (len));
2983 if (!ISSET(m->m_flags, M_EXT)((m->m_hdr.mh_flags) & (0x0001))) {
2984 m_freem(m);
2985 return;
2986 }
2987 }
2988
2989 m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = len;
2990 m_adj(m, linkhdr);
2991
2992 /*
2993 * build the inside packet
2994 */
2995 gk = mtod(m, struct gre_keepalive *)((struct gre_keepalive *)((m)->m_hdr.mh_data));
2996 htobem32(&gk->gk_uptime, sc->sc_ka_bias + ticks)(*(__uint32_t *)(&gk->gk_uptime) = (__uint32_t)(__builtin_constant_p
(sc->sc_ka_bias + ticks) ? (__uint32_t)(((__uint32_t)(sc->
sc_ka_bias + ticks) & 0xff) << 24 | ((__uint32_t)(sc
->sc_ka_bias + ticks) & 0xff00) << 8 | ((__uint32_t
)(sc->sc_ka_bias + ticks) & 0xff0000) >> 8 | ((__uint32_t
)(sc->sc_ka_bias + ticks) & 0xff000000) >> 24) :
__swap32md(sc->sc_ka_bias + ticks)))
;
2997 htobem32(&gk->gk_random, arc4random())(*(__uint32_t *)(&gk->gk_random) = (__uint32_t)(__builtin_constant_p
(arc4random()) ? (__uint32_t)(((__uint32_t)(arc4random()) &
0xff) << 24 | ((__uint32_t)(arc4random()) & 0xff00
) << 8 | ((__uint32_t)(arc4random()) & 0xff0000) >>
8 | ((__uint32_t)(arc4random()) & 0xff000000) >> 24
) : __swap32md(arc4random())))
;
2998
2999 SipHash24_Init(&ctx, &sc->sc_ka_key)SipHash_Init((&ctx), (&sc->sc_ka_key));
3000 SipHash24_Update(&ctx, &gk->gk_uptime, sizeof(gk->gk_uptime))SipHash_Update((&ctx), 2, 4, (&gk->gk_uptime), (sizeof
(gk->gk_uptime)))
;
3001 SipHash24_Update(&ctx, &gk->gk_random, sizeof(gk->gk_random))SipHash_Update((&ctx), 2, 4, (&gk->gk_random), (sizeof
(gk->gk_random)))
;
3002 SipHash24_Final(gk->gk_digest, &ctx)SipHash_Final((gk->gk_digest), (&ctx), 2, 4);
3003
3004 ttl = sc->sc_tunnel.t_ttl == -1 ? ip_defttl : sc->sc_tunnel.t_ttl;
3005
3006 m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio = sc->sc_if.if_llprio;
3007 tos = gre_l3_tos(&sc->sc_tunnel, m, IFQ_PRIO2TOS(m->m_pkthdr.pf.prio)((m->M_dat.MH.MH_pkthdr.pf.prio) << 5));
3008
3009 t.t_af = sc->sc_tunnel.t_af;
3010 t.t_df = sc->sc_tunnel.t_df;
3011 t.t_src = sc->sc_tunnel.t_dst;
3012 t.t_dst = sc->sc_tunnel.t_src;
3013 t.t_key = sc->sc_tunnel.t_key;
3014 t.t_key_mask = sc->sc_tunnel.t_key_mask;
3015
3016 m = gre_encap(&t, m, htons(0), ttl, tos)gre_encap_dst((&t), &(&t)->t_dst, (m), ((__uint16_t
)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t)(0) &
0xffU) << 8 | ((__uint16_t)(0) & 0xff00U) >>
8) : __swap16md(0))), (ttl), (tos))
;
3017 if (m == NULL((void *)0))
3018 return;
3019
3020 switch (sc->sc_tunnel.t_af) {
3021 case AF_INET2: {
3022 struct ip *ip;
3023
3024 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
3025 ip->ip_id = htons(ip_randomid())(__uint16_t)(__builtin_constant_p(ip_randomid()) ? (__uint16_t
)(((__uint16_t)(ip_randomid()) & 0xffU) << 8 | ((__uint16_t
)(ip_randomid()) & 0xff00U) >> 8) : __swap16md(ip_randomid
()))
;
3026 ip->ip_sum = 0;
3027 ip->ip_sum = in_cksum(m, sizeof(*ip));
3028
3029 proto = htons(ETHERTYPE_IP)(__uint16_t)(__builtin_constant_p(0x0800) ? (__uint16_t)(((__uint16_t
)(0x0800) & 0xffU) << 8 | ((__uint16_t)(0x0800) &
0xff00U) >> 8) : __swap16md(0x0800))
;
3030 break;
3031 }
3032#ifdef INET61
3033 case AF_INET624:
3034 proto = htons(ETHERTYPE_IPV6)(__uint16_t)(__builtin_constant_p(0x86DD) ? (__uint16_t)(((__uint16_t
)(0x86DD) & 0xffU) << 8 | ((__uint16_t)(0x86DD) &
0xff00U) >> 8) : __swap16md(0x86DD))
;
3035 break;
3036#endif
3037 default:
3038 m_freem(m);
3039 return;
3040 }
3041
3042 /*
3043 * put it in the tunnel
3044 */
3045 m = gre_encap(&sc->sc_tunnel, m, proto, ttl, tos)gre_encap_dst((&sc->sc_tunnel), &(&sc->sc_tunnel
)->t_dst, (m), (proto), (ttl), (tos))
;
3046 if (m == NULL((void *)0))
3047 return;
3048
3049 gre_ip_output(&sc->sc_tunnel, m);
3050}
3051
3052static void
3053gre_keepalive_hold(void *arg)
3054{
3055 struct gre_softc *sc = arg;
3056 struct ifnet *ifp = &sc->sc_if;
3057
3058 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)) ||
3059 sc->sc_ka_state == GRE_KA_NONE0)
3060 return;
3061
3062 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
3063 sc->sc_ka_state = GRE_KA_DOWN1;
3064 gre_link_state(ifp, sc->sc_ka_state);
3065 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
3066}
3067
3068static int
3069gre_set_tunnel(struct gre_tunnel *tunnel, struct if_laddrreq *req, int ucast)
3070{
3071 struct sockaddr *src = (struct sockaddr *)&req->addr;
3072 struct sockaddr *dst = (struct sockaddr *)&req->dstaddr;
3073 struct sockaddr_in *src4, *dst4;
3074#ifdef INET61
3075 struct sockaddr_in6 *src6, *dst6;
3076 int error;
3077#endif
3078
3079 /* sa_family and sa_len must be equal */
3080 if (src->sa_family != dst->sa_family || src->sa_len != dst->sa_len)
3081 return (EINVAL22);
3082
3083 /* validate */
3084 switch (dst->sa_family) {
3085 case AF_INET2:
3086 if (dst->sa_len != sizeof(*dst4))
3087 return (EINVAL22);
3088
3089 src4 = (struct sockaddr_in *)src;
3090 if (in_nullhost(src4->sin_addr)((src4->sin_addr).s_addr == ((u_int32_t) (__uint32_t)(__builtin_constant_p
((u_int32_t)(0x00000000)) ? (__uint32_t)(((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff) << 24 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff00) << 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff0000) >> 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff000000) >> 24) : __swap32md((u_int32_t
)(0x00000000)))))
||
3091 IN_MULTICAST(src4->sin_addr.s_addr)(((u_int32_t)(src4->sin_addr.s_addr) & ((u_int32_t) (__uint32_t
)(__builtin_constant_p((u_int32_t)(0xf0000000)) ? (__uint32_t
)(((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff) <<
24 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff00) <<
8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff0000) >>
8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff000000)
>> 24) : __swap32md((u_int32_t)(0xf0000000))))) == ((u_int32_t
) (__uint32_t)(__builtin_constant_p((u_int32_t)(0xe0000000)) ?
(__uint32_t)(((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff
) << 24 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff00
) << 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff000000
) >> 24) : __swap32md((u_int32_t)(0xe0000000)))))
)
3092 return (EINVAL22);
3093
3094 dst4 = (struct sockaddr_in *)dst;
3095 if (in_nullhost(dst4->sin_addr)((dst4->sin_addr).s_addr == ((u_int32_t) (__uint32_t)(__builtin_constant_p
((u_int32_t)(0x00000000)) ? (__uint32_t)(((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff) << 24 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff00) << 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff0000) >> 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff000000) >> 24) : __swap32md((u_int32_t
)(0x00000000)))))
||
3096 (IN_MULTICAST(dst4->sin_addr.s_addr)(((u_int32_t)(dst4->sin_addr.s_addr) & ((u_int32_t) (__uint32_t
)(__builtin_constant_p((u_int32_t)(0xf0000000)) ? (__uint32_t
)(((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff) <<
24 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff00) <<
8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff0000) >>
8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff000000)
>> 24) : __swap32md((u_int32_t)(0xf0000000))))) == ((u_int32_t
) (__uint32_t)(__builtin_constant_p((u_int32_t)(0xe0000000)) ?
(__uint32_t)(((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff
) << 24 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff00
) << 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff000000
) >> 24) : __swap32md((u_int32_t)(0xe0000000)))))
!= !ucast))
3097 return (EINVAL22);
3098
3099 tunnel->t_src4t_src.in4 = src4->sin_addr;
3100 tunnel->t_dst4t_dst.in4 = dst4->sin_addr;
3101
3102 break;
3103#ifdef INET61
3104 case AF_INET624:
3105 if (dst->sa_len != sizeof(*dst6))
3106 return (EINVAL22);
3107
3108 src6 = (struct sockaddr_in6 *)src;
3109 if (IN6_IS_ADDR_UNSPECIFIED(&src6->sin6_addr)((*(const u_int32_t *)(const void *)(&(&src6->sin6_addr
)->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t
*)(const void *)(&(&src6->sin6_addr)->__u6_addr
.__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void
*)(&(&src6->sin6_addr)->__u6_addr.__u6_addr8[8
]) == 0) && (*(const u_int32_t *)(const void *)(&
(&src6->sin6_addr)->__u6_addr.__u6_addr8[12]) == 0)
)
||
3110 IN6_IS_ADDR_MULTICAST(&src6->sin6_addr)((&src6->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff
)
)
3111 return (EINVAL22);
3112
3113 dst6 = (struct sockaddr_in6 *)dst;
3114 if (IN6_IS_ADDR_UNSPECIFIED(&dst6->sin6_addr)((*(const u_int32_t *)(const void *)(&(&dst6->sin6_addr
)->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t
*)(const void *)(&(&dst6->sin6_addr)->__u6_addr
.__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void
*)(&(&dst6->sin6_addr)->__u6_addr.__u6_addr8[8
]) == 0) && (*(const u_int32_t *)(const void *)(&
(&dst6->sin6_addr)->__u6_addr.__u6_addr8[12]) == 0)
)
||
3115 IN6_IS_ADDR_MULTICAST(&dst6->sin6_addr)((&dst6->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff
)
!= !ucast)
3116 return (EINVAL22);
3117
3118 if (src6->sin6_scope_id != dst6->sin6_scope_id)
3119 return (EINVAL22);
3120
3121 error = in6_embedscope(&tunnel->t_src6t_src.in6, src6, NULL((void *)0));
3122 if (error != 0)
3123 return (error);
3124
3125 error = in6_embedscope(&tunnel->t_dst6t_dst.in6, dst6, NULL((void *)0));
3126 if (error != 0)
3127 return (error);
3128
3129 break;
3130#endif
3131 default:
3132 return (EAFNOSUPPORT47);
3133 }
3134
3135 /* commit */
3136 tunnel->t_af = dst->sa_family;
3137
3138 return (0);
3139}
3140
3141static int
3142gre_get_tunnel(struct gre_tunnel *tunnel, struct if_laddrreq *req)
3143{
3144 struct sockaddr *src = (struct sockaddr *)&req->addr;
3145 struct sockaddr *dst = (struct sockaddr *)&req->dstaddr;
3146 struct sockaddr_in *sin;
3147#ifdef INET61 /* ifconfig already embeds the scopeid */
3148 struct sockaddr_in6 *sin6;
3149#endif
3150
3151 switch (tunnel->t_af) {
3152 case AF_UNSPEC0:
3153 return (EADDRNOTAVAIL49);
3154 case AF_INET2:
3155 sin = (struct sockaddr_in *)src;
3156 memset(sin, 0, sizeof(*sin))__builtin_memset((sin), (0), (sizeof(*sin)));
3157 sin->sin_family = AF_INET2;
3158 sin->sin_len = sizeof(*sin);
3159 sin->sin_addr = tunnel->t_src4t_src.in4;
3160
3161 sin = (struct sockaddr_in *)dst;
3162 memset(sin, 0, sizeof(*sin))__builtin_memset((sin), (0), (sizeof(*sin)));
3163 sin->sin_family = AF_INET2;
3164 sin->sin_len = sizeof(*sin);
3165 sin->sin_addr = tunnel->t_dst4t_dst.in4;
3166
3167 break;
3168
3169#ifdef INET61
3170 case AF_INET624:
3171 sin6 = (struct sockaddr_in6 *)src;
3172 memset(sin6, 0, sizeof(*sin6))__builtin_memset((sin6), (0), (sizeof(*sin6)));
3173 sin6->sin6_family = AF_INET624;
3174 sin6->sin6_len = sizeof(*sin6);
3175 in6_recoverscope(sin6, &tunnel->t_src6t_src.in6);
3176
3177 sin6 = (struct sockaddr_in6 *)dst;
3178 memset(sin6, 0, sizeof(*sin6))__builtin_memset((sin6), (0), (sizeof(*sin6)));
3179 sin6->sin6_family = AF_INET624;
3180 sin6->sin6_len = sizeof(*sin6);
3181 in6_recoverscope(sin6, &tunnel->t_dst6t_dst.in6);
3182
3183 break;
3184#endif
3185 default:
3186 return (EAFNOSUPPORT47);
3187 }
3188
3189 return (0);
3190}
3191
3192static int
3193gre_del_tunnel(struct gre_tunnel *tunnel)
3194{
3195 /* commit */
3196 tunnel->t_af = AF_UNSPEC0;
3197
3198 return (0);
3199}
3200
3201static int
3202gre_set_vnetid(struct gre_tunnel *tunnel, struct ifreq *ifr)
3203{
3204 uint32_t key;
3205 uint32_t min = GRE_KEY_MIN0x00000000U;
3206 uint32_t max = GRE_KEY_MAX0xffffffffU;
3207 unsigned int shift = GRE_KEY_SHIFT0;
3208 uint32_t mask = GRE_KEY_MASK(__uint32_t)(__builtin_constant_p(0xffffffffU) ? (__uint32_t)
(((__uint32_t)(0xffffffffU) & 0xff) << 24 | ((__uint32_t
)(0xffffffffU) & 0xff00) << 8 | ((__uint32_t)(0xffffffffU
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffffffU) &
0xff000000) >> 24) : __swap32md(0xffffffffU))
;
3209
3210 if (tunnel->t_key_mask == GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
) {
3211 min = GRE_KEY_ENTROPY_MIN0x00000000U;
3212 max = GRE_KEY_ENTROPY_MAX0x00ffffffU;
3213 shift = GRE_KEY_ENTROPY_SHIFT8;
3214 mask = GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
3215 }
3216
3217 if (ifr->ifr_vnetidifr_ifru.ifru_vnetid < min || ifr->ifr_vnetidifr_ifru.ifru_vnetid > max)
3218 return (EINVAL22);
3219
3220 key = htonl(ifr->ifr_vnetid << shift)(__uint32_t)(__builtin_constant_p(ifr->ifr_ifru.ifru_vnetid
<< shift) ? (__uint32_t)(((__uint32_t)(ifr->ifr_ifru
.ifru_vnetid << shift) & 0xff) << 24 | ((__uint32_t
)(ifr->ifr_ifru.ifru_vnetid << shift) & 0xff00) <<
8 | ((__uint32_t)(ifr->ifr_ifru.ifru_vnetid << shift
) & 0xff0000) >> 8 | ((__uint32_t)(ifr->ifr_ifru
.ifru_vnetid << shift) & 0xff000000) >> 24) :
__swap32md(ifr->ifr_ifru.ifru_vnetid << shift))
;
3221
3222 /* commit */
3223 tunnel->t_key_mask = mask;
3224 tunnel->t_key = key;
3225
3226 return (0);
3227}
3228
3229static int
3230gre_get_vnetid(struct gre_tunnel *tunnel, struct ifreq *ifr)
3231{
3232 int shift;
3233
3234 switch (tunnel->t_key_mask) {
3235 case GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
:
3236 return (EADDRNOTAVAIL49);
3237 case GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
:
3238 shift = GRE_KEY_ENTROPY_SHIFT8;
3239 break;
3240 case GRE_KEY_MASK(__uint32_t)(__builtin_constant_p(0xffffffffU) ? (__uint32_t)
(((__uint32_t)(0xffffffffU) & 0xff) << 24 | ((__uint32_t
)(0xffffffffU) & 0xff00) << 8 | ((__uint32_t)(0xffffffffU
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffffffU) &
0xff000000) >> 24) : __swap32md(0xffffffffU))
:
3241 shift = GRE_KEY_SHIFT0;
3242 break;
3243 }
3244
3245 ifr->ifr_vnetidifr_ifru.ifru_vnetid = ntohl(tunnel->t_key)(__uint32_t)(__builtin_constant_p(tunnel->t_key) ? (__uint32_t
)(((__uint32_t)(tunnel->t_key) & 0xff) << 24 | (
(__uint32_t)(tunnel->t_key) & 0xff00) << 8 | ((__uint32_t
)(tunnel->t_key) & 0xff0000) >> 8 | ((__uint32_t
)(tunnel->t_key) & 0xff000000) >> 24) : __swap32md
(tunnel->t_key))
>> shift;
3246
3247 return (0);
3248}
3249
3250static int
3251gre_del_vnetid(struct gre_tunnel *tunnel)
3252{
3253 tunnel->t_key_mask = GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
;
3254
3255 return (0);
3256}
3257
3258static int
3259gre_set_vnetflowid(struct gre_tunnel *tunnel, struct ifreq *ifr)
3260{
3261 uint32_t mask, key;
3262
3263 if (tunnel->t_key_mask == GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
)
3264 return (EADDRNOTAVAIL49);
3265
3266 mask = ifr->ifr_vnetidifr_ifru.ifru_vnetid ? GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
: GRE_KEY_MASK(__uint32_t)(__builtin_constant_p(0xffffffffU) ? (__uint32_t)
(((__uint32_t)(0xffffffffU) & 0xff) << 24 | ((__uint32_t
)(0xffffffffU) & 0xff00) << 8 | ((__uint32_t)(0xffffffffU
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffffffU) &
0xff000000) >> 24) : __swap32md(0xffffffffU))
;
3267 if (tunnel->t_key_mask == mask) {
3268 /* nop */
3269 return (0);
3270 }
3271
3272 key = ntohl(tunnel->t_key)(__uint32_t)(__builtin_constant_p(tunnel->t_key) ? (__uint32_t
)(((__uint32_t)(tunnel->t_key) & 0xff) << 24 | (
(__uint32_t)(tunnel->t_key) & 0xff00) << 8 | ((__uint32_t
)(tunnel->t_key) & 0xff0000) >> 8 | ((__uint32_t
)(tunnel->t_key) & 0xff000000) >> 24) : __swap32md
(tunnel->t_key))
;
3273 if (mask == GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
) {
3274 if (key > GRE_KEY_ENTROPY_MAX0x00ffffffU)
3275 return (ERANGE34);
3276
3277 key = htonl(key << GRE_KEY_ENTROPY_SHIFT)(__uint32_t)(__builtin_constant_p(key << 8) ? (__uint32_t
)(((__uint32_t)(key << 8) & 0xff) << 24 | ((__uint32_t
)(key << 8) & 0xff00) << 8 | ((__uint32_t)(key
<< 8) & 0xff0000) >> 8 | ((__uint32_t)(key <<
8) & 0xff000000) >> 24) : __swap32md(key << 8
))
;
3278 } else
3279 key = htonl(key >> GRE_KEY_ENTROPY_SHIFT)(__uint32_t)(__builtin_constant_p(key >> 8) ? (__uint32_t
)(((__uint32_t)(key >> 8) & 0xff) << 24 | ((__uint32_t
)(key >> 8) & 0xff00) << 8 | ((__uint32_t)(key
>> 8) & 0xff0000) >> 8 | ((__uint32_t)(key >>
8) & 0xff000000) >> 24) : __swap32md(key >> 8
))
;
3280
3281 /* commit */
3282 tunnel->t_key_mask = mask;
3283 tunnel->t_key = key;
3284
3285 return (0);
3286}
3287
3288static int
3289gre_get_vnetflowid(struct gre_tunnel *tunnel, struct ifreq *ifr)
3290{
3291 if (tunnel->t_key_mask == GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
)
3292 return (EADDRNOTAVAIL49);
3293
3294 ifr->ifr_vnetidifr_ifru.ifru_vnetid = tunnel->t_key_mask == GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
3295
3296 return (0);
3297}
3298
3299static int
3300mgre_up(struct mgre_softc *sc)
3301{
3302 unsigned int hlen;
3303
3304 switch (sc->sc_tunnel.t_af) {
3305 case AF_UNSPEC0:
3306 return (EDESTADDRREQ39);
3307 case AF_INET2:
3308 hlen = sizeof(struct ip);
3309 break;
3310#ifdef INET61
3311 case AF_INET624:
3312 hlen = sizeof(struct ip6_hdr);
3313 break;
3314#endif /* INET6 */
3315 default:
3316 unhandled_af(sc->sc_tunnel.t_af);
3317 }
3318
3319 hlen += sizeof(struct gre_header);
3320 if (sc->sc_tunnel.t_key_mask != GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
)
3321 hlen += sizeof(struct gre_h_key);
3322
3323 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3324
3325 if (RBT_INSERT(mgre_tree, &mgre_tree, sc)mgre_tree_RBT_INSERT(&mgre_tree, sc) != NULL((void *)0))
3326 return (EADDRINUSE48);
3327
3328 sc->sc_if.if_hdrlenif_data.ifi_hdrlen = hlen;
3329 SET(sc->sc_if.if_flags, IFF_RUNNING)((sc->sc_if.if_flags) |= (0x40));
3330
3331 return (0);
3332}
3333
3334static int
3335mgre_down(struct mgre_softc *sc)
3336{
3337 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3338
3339 CLR(sc->sc_if.if_flags, IFF_RUNNING)((sc->sc_if.if_flags) &= ~(0x40));
3340 sc->sc_if.if_hdrlenif_data.ifi_hdrlen = GRE_HDRLEN(sizeof(struct ip) + sizeof(struct gre_header)); /* symmetry */
3341
3342 RBT_REMOVE(mgre_tree, &mgre_tree, sc)mgre_tree_RBT_REMOVE(&mgre_tree, sc);
3343
3344 /* barrier? */
3345
3346 return (0);
3347}
3348
3349static int
3350egre_up(struct egre_softc *sc)
3351{
3352 if (sc->sc_tunnel.t_af == AF_UNSPEC0)
3353 return (EDESTADDRREQ39);
3354
3355 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3356
3357 if (RBT_INSERT(egre_tree, &egre_tree, sc)egre_tree_RBT_INSERT(&egre_tree, sc) != NULL((void *)0))
3358 return (EADDRINUSE48);
3359
3360 SET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING)((sc->sc_ac.ac_if.if_flags) |= (0x40));
3361
3362 return (0);
3363}
3364
3365static int
3366egre_down(struct egre_softc *sc)
3367{
3368 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3369
3370 CLR(sc->sc_ac.ac_if.if_flags, IFF_RUNNING)((sc->sc_ac.ac_if.if_flags) &= ~(0x40));
3371
3372 RBT_REMOVE(egre_tree, &egre_tree, sc)egre_tree_RBT_REMOVE(&egre_tree, sc);
3373
3374 /* barrier? */
3375
3376 return (0);
3377}
3378
3379static int
3380egre_media_change(struct ifnet *ifp)
3381{
3382 return (ENOTTY25);
3383}
3384
3385static void
3386egre_media_status(struct ifnet *ifp, struct ifmediareq *imr)
3387{
3388 imr->ifm_active = IFM_ETHER0x0000000000000100ULL | IFM_AUTO0ULL;
3389 imr->ifm_status = IFM_AVALID0x0000000000000001ULL | IFM_ACTIVE0x0000000000000002ULL;
3390}
3391
3392static int
3393nvgre_up(struct nvgre_softc *sc)
3394{
3395 struct gre_tunnel *tunnel = &sc->sc_tunnel;
3396 struct ifnet *ifp0;
3397 void *inm;
3398 int error;
3399
3400 if (tunnel->t_af == AF_UNSPEC0)
3401 return (EDESTADDRREQ39);
3402
3403 ifp0 = if_get(sc->sc_ifp0);
3404 if (ifp0 == NULL((void *)0))
3405 return (ENXIO6);
3406 if (!ISSET(ifp0->if_flags, IFF_MULTICAST)((ifp0->if_flags) & (0x8000))) {
3407 error = ENODEV19;
3408 goto put;
3409 }
3410
3411 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3412
3413 if (RBT_INSERT(nvgre_mcast_tree, &nvgre_mcast_tree, sc)nvgre_mcast_tree_RBT_INSERT(&nvgre_mcast_tree, sc) != NULL((void *)0)) {
3414 error = EADDRINUSE48;
3415 goto put;
3416 }
3417 if (RBT_INSERT(nvgre_ucast_tree, &nvgre_ucast_tree, sc)nvgre_ucast_tree_RBT_INSERT(&nvgre_ucast_tree, sc) != NULL((void *)0)) {
3418 error = EADDRINUSE48;
3419 goto remove_mcast;
3420 }
3421
3422 switch (tunnel->t_af) {
3423 case AF_INET2:
3424 inm = in_addmulti(&tunnel->t_dst4t_dst.in4, ifp0);
3425 if (inm == NULL((void *)0)) {
3426 error = ECONNABORTED53;
3427 goto remove_ucast;
3428 }
3429 break;
3430#ifdef INET61
3431 case AF_INET624:
3432 inm = in6_addmulti(&tunnel->t_dst6t_dst.in6, ifp0, &error);
3433 if (inm == NULL((void *)0)) {
3434 /* error is already set */
3435 goto remove_ucast;
3436 }
3437 break;
3438#endif /* INET6 */
3439 default:
3440 unhandled_af(tunnel->t_af);
3441 }
3442
3443 if_linkstatehook_add(ifp0, &sc->sc_ltask);
3444 if_detachhook_add(ifp0, &sc->sc_dtask);
3445
3446 if_put(ifp0);
3447
3448 sc->sc_inm = inm;
3449 SET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING)((sc->sc_ac.ac_if.if_flags) |= (0x40));
3450
3451 return (0);
3452
3453remove_ucast:
3454 RBT_REMOVE(nvgre_ucast_tree, &nvgre_ucast_tree, sc)nvgre_ucast_tree_RBT_REMOVE(&nvgre_ucast_tree, sc);
3455remove_mcast:
3456 RBT_REMOVE(nvgre_mcast_tree, &nvgre_mcast_tree, sc)nvgre_mcast_tree_RBT_REMOVE(&nvgre_mcast_tree, sc);
3457put:
3458 if_put(ifp0);
3459 return (error);
3460}
3461
3462static int
3463nvgre_down(struct nvgre_softc *sc)
3464{
3465 struct gre_tunnel *tunnel = &sc->sc_tunnel;
3466 struct ifnet *ifp = &sc->sc_ac.ac_if;
3467 struct taskq *softnet = net_tq(ifp->if_index);
3468 struct ifnet *ifp0;
3469
3470 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3471
3472 CLR(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) &= ~(0x40));
3473
3474 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
3475 ifq_barrier(&ifp->if_snd);
3476 if (!task_del(softnet, &sc->sc_send_task))
3477 taskq_barrier(softnet);
3478 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
3479
3480 mq_purge(&sc->sc_send_list);
3481
3482 ifp0 = if_get(sc->sc_ifp0);
3483 if (ifp0 != NULL((void *)0)) {
3484 if_detachhook_del(ifp0, &sc->sc_dtask);
3485 if_linkstatehook_del(ifp0, &sc->sc_ltask);
3486 }
3487 if_put(ifp0);
3488
3489 switch (tunnel->t_af) {
3490 case AF_INET2:
3491 in_delmulti(sc->sc_inm);
3492 break;
3493
3494#ifdef INET61
3495 case AF_INET624:
3496 in6_delmulti(sc->sc_inm);
3497 break;
3498#endif
3499 default:
3500 unhandled_af(tunnel->t_af);
3501 }
3502
3503 RBT_REMOVE(nvgre_ucast_tree, &nvgre_ucast_tree, sc)nvgre_ucast_tree_RBT_REMOVE(&nvgre_ucast_tree, sc);
3504 RBT_REMOVE(nvgre_mcast_tree, &nvgre_mcast_tree, sc)nvgre_mcast_tree_RBT_REMOVE(&nvgre_mcast_tree, sc);
3505
3506 return (0);
3507}
3508
/* Parent link-state changes need no action for nvgre. */
static void
nvgre_link_change(void *arg)
{
	/* nop */
}
3514
3515static void
3516nvgre_detach(void *arg)
3517{
3518 struct nvgre_softc *sc = arg;
3519 struct ifnet *ifp = &sc->sc_ac.ac_if;
3520
3521 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40))) {
3522 nvgre_down(sc);
3523 if_down(ifp);
3524 }
3525
3526 sc->sc_ifp0 = 0;
3527}
3528
3529static int
3530nvgre_set_parent(struct nvgre_softc *sc, const char *parent)
3531{
3532 struct ifnet *ifp0;
3533
3534 ifp0 = if_unit(parent);
3535 if (ifp0 == NULL((void *)0))
3536 return (EINVAL22);
3537
3538 if (!ISSET(ifp0->if_flags, IFF_MULTICAST)((ifp0->if_flags) & (0x8000))) {
3539 if_put(ifp0);
3540 return (EPROTONOSUPPORT43);
3541 }
3542
3543 /* commit */
3544 sc->sc_ifp0 = ifp0->if_index;
3545 if_put(ifp0);
3546
3547 return (0);
3548}
3549
3550static int
3551nvgre_add_addr(struct nvgre_softc *sc, const struct ifbareq *ifba)
3552{
3553 struct sockaddr_in *sin;
3554#ifdef INET61
3555 struct sockaddr_in6 *sin6;
3556 struct sockaddr_in6 src6 = {
3557 .sin6_len = sizeof(src6),
3558 .sin6_family = AF_UNSPEC0,
3559 };
3560 int error;
3561#endif
3562 union gre_addr endpoint;
3563 unsigned int type;
3564
3565 /* ignore ifba_ifsname */
3566
3567 if (ISSET(ifba->ifba_flags, ~IFBAF_TYPEMASK)((ifba->ifba_flags) & (~0x03)))
3568 return (EINVAL22);
3569 switch (ifba->ifba_flags & IFBAF_TYPEMASK0x03) {
3570 case IFBAF_DYNAMIC0x00:
3571 type = EBE_DYNAMIC0x0;
3572 break;
3573 case IFBAF_STATIC0x01:
3574 type = EBE_STATIC0x1;
3575 break;
3576 default:
3577 return (EINVAL22);
3578 }
3579
3580 memset(&endpoint, 0, sizeof(endpoint))__builtin_memset((&endpoint), (0), (sizeof(endpoint)));
3581
3582 if (ifba->ifba_dstsa.ss_family != sc->sc_tunnel.t_af)
3583 return (EAFNOSUPPORT47);
3584 switch (ifba->ifba_dstsa.ss_family) {
3585 case AF_INET2:
3586 sin = (struct sockaddr_in *)&ifba->ifba_dstsa;
3587 if (in_nullhost(sin->sin_addr)((sin->sin_addr).s_addr == ((u_int32_t) (__uint32_t)(__builtin_constant_p
((u_int32_t)(0x00000000)) ? (__uint32_t)(((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff) << 24 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff00) << 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff0000) >> 8 | ((__uint32_t)((u_int32_t
)(0x00000000)) & 0xff000000) >> 24) : __swap32md((u_int32_t
)(0x00000000)))))
||
3588 IN_MULTICAST(sin->sin_addr.s_addr)(((u_int32_t)(sin->sin_addr.s_addr) & ((u_int32_t) (__uint32_t
)(__builtin_constant_p((u_int32_t)(0xf0000000)) ? (__uint32_t
)(((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff) <<
24 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff00) <<
8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff0000) >>
8 | ((__uint32_t)((u_int32_t)(0xf0000000)) & 0xff000000)
>> 24) : __swap32md((u_int32_t)(0xf0000000))))) == ((u_int32_t
) (__uint32_t)(__builtin_constant_p((u_int32_t)(0xe0000000)) ?
(__uint32_t)(((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff
) << 24 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff00
) << 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff0000
) >> 8 | ((__uint32_t)((u_int32_t)(0xe0000000)) & 0xff000000
) >> 24) : __swap32md((u_int32_t)(0xe0000000)))))
)
3589 return (EADDRNOTAVAIL49);
3590
3591 endpoint.in4 = sin->sin_addr;
3592 break;
3593
3594#ifdef INET61
3595 case AF_INET624:
3596 sin6 = (struct sockaddr_in6 *)&ifba->ifba_dstsa;
3597 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)((*(const u_int32_t *)(const void *)(&(&sin6->sin6_addr
)->__u6_addr.__u6_addr8[0]) == 0) && (*(const u_int32_t
*)(const void *)(&(&sin6->sin6_addr)->__u6_addr
.__u6_addr8[4]) == 0) && (*(const u_int32_t *)(const void
*)(&(&sin6->sin6_addr)->__u6_addr.__u6_addr8[8
]) == 0) && (*(const u_int32_t *)(const void *)(&
(&sin6->sin6_addr)->__u6_addr.__u6_addr8[12]) == 0)
)
||
3598 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)((&sin6->sin6_addr)->__u6_addr.__u6_addr8[0] == 0xff
)
)
3599 return (EADDRNOTAVAIL49);
3600
3601 in6_recoverscope(&src6, &sc->sc_tunnel.t_src6t_src.in6);
3602
3603 if (src6.sin6_scope_id != sin6->sin6_scope_id)
3604 return (EADDRNOTAVAIL49);
3605
3606 error = in6_embedscope(&endpoint.in6, sin6, NULL((void *)0));
3607 if (error != 0)
3608 return (error);
3609
3610 break;
3611#endif
3612 default: /* AF_UNSPEC */
3613 return (EADDRNOTAVAIL49);
3614 }
3615
3616 return (etherbridge_add_addr(&sc->sc_eb, &endpoint,
3617 &ifba->ifba_dst, type));
3618}
3619
3620static int
3621nvgre_del_addr(struct nvgre_softc *sc, const struct ifbareq *ifba)
3622{
3623 return (etherbridge_del_addr(&sc->sc_eb, &ifba->ifba_dst));
3624}
3625
3626static void
3627nvgre_start(struct ifnet *ifp)
3628{
3629 struct nvgre_softc *sc = ifp->if_softc;
3630 const struct gre_tunnel *tunnel = &sc->sc_tunnel;
3631 union gre_addr gateway;
3632 struct mbuf_list ml = MBUF_LIST_INITIALIZER(){ ((void *)0), ((void *)0), 0 };
3633 struct ether_header *eh;
3634 struct mbuf *m, *m0;
3635#if NBPFILTER1 > 0
3636 caddr_t if_bpf;
3637#endif
3638
3639 if (!gre_allow) {
3640 ifq_purge(&ifp->if_snd);
3641 return;
3642 }
3643
3644 while ((m0 = ifq_dequeue(&ifp->if_snd)) != NULL((void *)0)) {
3645#if NBPFILTER1 > 0
3646 if_bpf = ifp->if_bpf;
3647 if (if_bpf)
3648 bpf_mtap_ether(if_bpf, m0, BPF_DIRECTION_OUT(1 << 1));
3649#endif
3650
3651 eh = mtod(m0, struct ether_header *)((struct ether_header *)((m0)->m_hdr.mh_data));
3652 if (ETHER_IS_BROADCAST(eh->ether_dhost)(((eh->ether_dhost)[0] & (eh->ether_dhost)[1] &
(eh->ether_dhost)[2] & (eh->ether_dhost)[3] & (
eh->ether_dhost)[4] & (eh->ether_dhost)[5]) == 0xff
)
)
3653 gateway = tunnel->t_dst;
3654 else {
3655 const union gre_addr *endpoint;
3656
3657 smr_read_enter();
3658 endpoint = etherbridge_resolve_ea(&sc->sc_eb,
3659 (struct ether_addr *)eh->ether_dhost);
3660 if (endpoint == NULL((void *)0)) {
3661 /* "flood" to unknown hosts */
3662 endpoint = &tunnel->t_dst;
3663 }
3664 gateway = *endpoint;
3665 smr_read_leave();
3666 }
3667
3668 /* force prepend mbuf because of alignment problems */
3669 m = m_get(M_DONTWAIT0x0002, m0->m_typem_hdr.mh_type);
3670 if (m == NULL((void *)0)) {
3671 m_freem(m0);
3672 continue;
3673 }
3674
3675 M_MOVE_PKTHDR(m, m0)do { (m)->m_hdr.mh_flags = ((m)->m_hdr.mh_flags & (
0x0001 | 0x0008)); (m)->m_hdr.mh_flags |= (m0)->m_hdr.mh_flags
& (0x0002|0x0004|0x0010|0x0100|0x0200|0x0400|0x4000| 0x0800
|0x0040|0x1000|0x8000|0x0020|0x2000); do { ((m))->M_dat.MH
.MH_pkthdr = ((m0))->M_dat.MH.MH_pkthdr; ((m0))->m_hdr.
mh_flags &= ~0x0002; { ((&((m0))->M_dat.MH.MH_pkthdr
.ph_tags)->slh_first) = ((void *)0); }; ((m0))->M_dat.MH
.MH_pkthdr.pf.statekey = ((void *)0); } while ( 0); if (((m)->
m_hdr.mh_flags & 0x0001) == 0) (m)->m_hdr.mh_data = (m
)->M_dat.MH.MH_dat.MH_databuf; } while ( 0)
;
3676 m->m_nextm_hdr.mh_next = m0;
3677
3678 m_align(m, 0);
3679 m->m_lenm_hdr.mh_len = 0;
3680
3681 m = gre_encap_dst(tunnel, &gateway, m,
3682 htons(ETHERTYPE_TRANSETHER)(__uint16_t)(__builtin_constant_p(0x6558) ? (__uint16_t)(((__uint16_t
)(0x6558) & 0xffU) << 8 | ((__uint16_t)(0x6558) &
0xff00U) >> 8) : __swap16md(0x6558))
,
3683 tunnel->t_ttl, gre_l2_tos(tunnel, m));
3684 if (m == NULL((void *)0))
3685 continue;
3686
3687 m->m_flagsm_hdr.mh_flags &= ~(M_BCAST0x0100|M_MCAST0x0200);
3688 m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid = tunnel->t_rtableid;
3689
3690#if NPF1 > 0
3691 pf_pkt_addr_changed(m);
3692#endif
3693
3694 ml_enqueue(&ml, m);
3695 }
3696
3697 if (!ml_empty(&ml)((&ml)->ml_len == 0)) {
3698 if (mq_enlist(&sc->sc_send_list, &ml) == 0)
3699 task_add(net_tq(ifp->if_index), &sc->sc_send_task);
3700 /* else set OACTIVE? */
3701 }
3702}
3703
3704static uint64_t
3705nvgre_send4(struct nvgre_softc *sc, struct mbuf_list *ml)
3706{
3707 struct ip_moptions imo;
3708 struct mbuf *m;
3709 uint64_t oerrors = 0;
3710
3711 imo.imo_ifidx = sc->sc_ifp0;
3712 imo.imo_ttl = sc->sc_tunnel.t_ttl;
3713 imo.imo_loop = 0;
3714
3715 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
3716 while ((m = ml_dequeue(ml)) != NULL((void *)0)) {
3717 if (ip_output(m, NULL((void *)0), NULL((void *)0), IP_RAWOUTPUT0x2, &imo, NULL((void *)0), 0) != 0)
3718 oerrors++;
3719 }
3720 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
3721
3722 return (oerrors);
3723}
3724
#ifdef INET6
/*
 * IPv6 counterpart of nvgre_send4(): transmit a packet list via
 * ip6_output() with multicast options pinned to the parent interface.
 * Returns the number of failed transmissions.
 */
static uint64_t
nvgre_send6(struct nvgre_softc *sc, struct mbuf_list *ml)
{
	struct ip6_moptions im6o;
	struct mbuf *m;
	uint64_t oerrors = 0;

	im6o.im6o_ifidx = sc->sc_ifp0;
	im6o.im6o_hlim = sc->sc_tunnel.t_ttl;
	im6o.im6o_loop = 0;

	NET_LOCK();
	while ((m = ml_dequeue(ml)) != NULL) {
		if (ip6_output(m, NULL, NULL, 0, &im6o, NULL) != 0)
			oerrors++;
	}
	NET_UNLOCK();

	return (oerrors);
}
#endif /* INET6 */
3747
3748static void
3749nvgre_send(void *arg)
3750{
3751 struct nvgre_softc *sc = arg;
3752 struct ifnet *ifp = &sc->sc_ac.ac_if;
3753 sa_family_t af = sc->sc_tunnel.t_af;
3754 struct mbuf_list ml;
3755 uint64_t oerrors;
3756
3757 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
3758 return;
3759
3760 mq_delist(&sc->sc_send_list, &ml);
3761 if (ml_empty(&ml)((&ml)->ml_len == 0))
3762 return;
3763
3764 switch (af) {
3765 case AF_INET2:
3766 oerrors = nvgre_send4(sc, &ml);
3767 break;
3768#ifdef INET61
3769 case AF_INET624:
3770 oerrors = nvgre_send6(sc, &ml);
3771 break;
3772#endif
3773 default:
3774 unhandled_af(af);
3775 /* NOTREACHED */
3776 }
3777
3778 ifp->if_oerrorsif_data.ifi_oerrors += oerrors; /* XXX should be ifq_oerrors */
3779}
3780
3781static int
3782eoip_up(struct eoip_softc *sc)
3783{
3784 if (sc->sc_tunnel.t_af == AF_UNSPEC0)
3785 return (EDESTADDRREQ39);
3786
3787 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3788
3789 if (RBT_INSERT(eoip_tree, &eoip_tree, sc)eoip_tree_RBT_INSERT(&eoip_tree, sc) != NULL((void *)0))
3790 return (EADDRINUSE48);
3791
3792 SET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING)((sc->sc_ac.ac_if.if_flags) |= (0x40));
3793
3794 if (sc->sc_ka_state != GRE_KA_NONE0) {
3795 sc->sc_ka_holdmax = sc->sc_ka_count;
3796 eoip_keepalive_send(sc);
3797 }
3798
3799 return (0);
3800}
3801
3802static int
3803eoip_down(struct eoip_softc *sc)
3804{
3805 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
3806 CLR(sc->sc_ac.ac_if.if_flags, IFF_RUNNING)((sc->sc_ac.ac_if.if_flags) &= ~(0x40));
3807
3808 if (sc->sc_ka_state != GRE_KA_NONE0) {
3809 timeout_del_barrier(&sc->sc_ka_hold);
3810 timeout_del_barrier(&sc->sc_ka_send);
3811
3812 sc->sc_ka_state = GRE_KA_DOWN1;
3813 gre_link_state(&sc->sc_ac.ac_if, sc->sc_ka_state);
3814 }
3815
3816 RBT_REMOVE(eoip_tree, &eoip_tree, sc)eoip_tree_RBT_REMOVE(&eoip_tree, sc);
3817
3818 return (0);
3819}
3820
3821static void
3822eoip_start(struct ifnet *ifp)
3823{
3824 struct eoip_softc *sc = ifp->if_softc;
3825 struct mbuf *m0, *m;
3826#if NBPFILTER1 > 0
3827 caddr_t if_bpf;
3828#endif
3829
3830 if (!gre_allow) {
3831 ifq_purge(&ifp->if_snd);
3832 return;
3833 }
3834
3835 while ((m0 = ifq_dequeue(&ifp->if_snd)) != NULL((void *)0)) {
3836#if NBPFILTER1 > 0
3837 if_bpf = ifp->if_bpf;
3838 if (if_bpf)
3839 bpf_mtap_ether(if_bpf, m0, BPF_DIRECTION_OUT(1 << 1));
3840#endif
3841
3842 /* force prepend mbuf because of alignment problems */
3843 m = m_get(M_DONTWAIT0x0002, m0->m_typem_hdr.mh_type);
3844 if (m == NULL((void *)0)) {
3845 m_freem(m0);
3846 continue;
3847 }
3848
3849 M_MOVE_PKTHDR(m, m0)do { (m)->m_hdr.mh_flags = ((m)->m_hdr.mh_flags & (
0x0001 | 0x0008)); (m)->m_hdr.mh_flags |= (m0)->m_hdr.mh_flags
& (0x0002|0x0004|0x0010|0x0100|0x0200|0x0400|0x4000| 0x0800
|0x0040|0x1000|0x8000|0x0020|0x2000); do { ((m))->M_dat.MH
.MH_pkthdr = ((m0))->M_dat.MH.MH_pkthdr; ((m0))->m_hdr.
mh_flags &= ~0x0002; { ((&((m0))->M_dat.MH.MH_pkthdr
.ph_tags)->slh_first) = ((void *)0); }; ((m0))->M_dat.MH
.MH_pkthdr.pf.statekey = ((void *)0); } while ( 0); if (((m)->
m_hdr.mh_flags & 0x0001) == 0) (m)->m_hdr.mh_data = (m
)->M_dat.MH.MH_dat.MH_databuf; } while ( 0)
;
3850 m->m_nextm_hdr.mh_next = m0;
3851
3852 m_align(m, 0);
3853 m->m_lenm_hdr.mh_len = 0;
3854
3855 m = eoip_encap(sc, m, gre_l2_tos(&sc->sc_tunnel, m));
3856 if (m == NULL((void *)0) || gre_ip_output(&sc->sc_tunnel, m) != 0) {
3857 ifp->if_oerrorsif_data.ifi_oerrors++;
3858 continue;
3859 }
3860 }
3861}
3862
3863static struct mbuf *
3864eoip_encap(struct eoip_softc *sc, struct mbuf *m, uint8_t tos)
3865{
3866 struct gre_header *gh;
3867 struct gre_h_key_eoip *eoiph;
3868 int len = m->m_pkthdrM_dat.MH.MH_pkthdr.len;
3869
3870 m = m_prepend(m, sizeof(*gh) + sizeof(*eoiph), M_DONTWAIT0x0002);
3871 if (m == NULL((void *)0))
3872 return (NULL((void *)0));
3873
3874 gh = mtod(m, struct gre_header *)((struct gre_header *)((m)->m_hdr.mh_data));
3875 gh->gre_flags = htons(GRE_VERS_1 | GRE_KP)(__uint16_t)(__builtin_constant_p(0x0001 | 0x2000) ? (__uint16_t
)(((__uint16_t)(0x0001 | 0x2000) & 0xffU) << 8 | ((
__uint16_t)(0x0001 | 0x2000) & 0xff00U) >> 8) : __swap16md
(0x0001 | 0x2000))
;
3876 gh->gre_proto = htons(GRE_EOIP)(__uint16_t)(__builtin_constant_p(0x6400) ? (__uint16_t)(((__uint16_t
)(0x6400) & 0xffU) << 8 | ((__uint16_t)(0x6400) &
0xff00U) >> 8) : __swap16md(0x6400))
;
3877
3878 eoiph = (struct gre_h_key_eoip *)(gh + 1);
3879 htobem16(&eoiph->eoip_len, len)(*(__uint16_t *)(&eoiph->eoip_len) = (__uint16_t)(__builtin_constant_p
(len) ? (__uint16_t)(((__uint16_t)(len) & 0xffU) <<
8 | ((__uint16_t)(len) & 0xff00U) >> 8) : __swap16md
(len)))
;
3880 eoiph->eoip_tunnel_id = sc->sc_tunnel_id;
3881
3882 return (gre_encap_ip(&sc->sc_tunnel, m, sc->sc_tunnel.t_ttl, tos)gre_encap_dst_ip((&sc->sc_tunnel), &(&sc->sc_tunnel
)->t_dst, (m), (sc->sc_tunnel.t_ttl), (tos))
);
3883}
3884
3885static void
3886eoip_keepalive_send(void *arg)
3887{
3888 struct eoip_softc *sc = arg;
3889 struct ifnet *ifp = &sc->sc_ac.ac_if;
3890 struct mbuf *m;
3891 int linkhdr;
3892
3893 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
3894 return;
3895
3896 /* this is really conservative */
3897#ifdef INET61
3898 linkhdr = max_linkhdr + MAX(sizeof(struct ip), sizeof(struct ip6_hdr))(((sizeof(struct ip))>(sizeof(struct ip6_hdr)))?(sizeof(struct
ip)):(sizeof(struct ip6_hdr)))
+
3899 sizeof(struct gre_header) + sizeof(struct gre_h_key_eoip);
3900#else
3901 linkhdr = max_linkhdr + sizeof(struct ip) +
3902 sizeof(struct gre_header) + sizeof(struct gre_h_key_eoip);
3903#endif
3904 MGETHDR(m, M_DONTWAIT, MT_DATA)m = m_gethdr((0x0002), (1));
3905 if (m == NULL((void *)0))
3906 return;
3907
3908 if (linkhdr > MHLEN((256 - sizeof(struct m_hdr)) - sizeof(struct pkthdr))) {
3909 MCLGETL(m, M_DONTWAIT, linkhdr)m_clget((m), (0x0002), (linkhdr));
3910 if (!ISSET(m->m_flags, M_EXT)((m->m_hdr.mh_flags) & (0x0001))) {
3911 m_freem(m);
3912 return;
3913 }
3914 }
3915
3916 m->m_pkthdrM_dat.MH.MH_pkthdr.pf.prio = ifp->if_llprio;
3917 m->m_pkthdrM_dat.MH.MH_pkthdr.len = m->m_lenm_hdr.mh_len = linkhdr;
3918 m_adj(m, linkhdr);
3919
3920 m = eoip_encap(sc, m, gre_l2_tos(&sc->sc_tunnel, m));
3921 if (m == NULL((void *)0))
3922 return;
3923
3924 gre_ip_output(&sc->sc_tunnel, m);
3925
3926 timeout_add_sec(&sc->sc_ka_send, sc->sc_ka_timeo);
3927}
3928
3929static void
3930eoip_keepalive_hold(void *arg)
3931{
3932 struct eoip_softc *sc = arg;
3933 struct ifnet *ifp = &sc->sc_ac.ac_if;
3934
3935 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
3936 return;
3937
3938 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
3939 sc->sc_ka_state = GRE_KA_DOWN1;
3940 gre_link_state(ifp, sc->sc_ka_state);
3941 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
3942}
3943
3944static void
3945eoip_keepalive_recv(struct eoip_softc *sc)
3946{
3947 switch (sc->sc_ka_state) {
3948 case GRE_KA_NONE0:
3949 return;
3950 case GRE_KA_DOWN1:
3951 sc->sc_ka_state = GRE_KA_HOLD2;
3952 sc->sc_ka_holdcnt = sc->sc_ka_holdmax;
3953 sc->sc_ka_holdmax = MIN(sc->sc_ka_holdmax * 2,(((sc->sc_ka_holdmax * 2)<(16 * sc->sc_ka_count))?(sc
->sc_ka_holdmax * 2):(16 * sc->sc_ka_count))
3954 16 * sc->sc_ka_count)(((sc->sc_ka_holdmax * 2)<(16 * sc->sc_ka_count))?(sc
->sc_ka_holdmax * 2):(16 * sc->sc_ka_count))
;
3955 break;
3956 case GRE_KA_HOLD2:
3957 if (--sc->sc_ka_holdcnt > 0)
3958 break;
3959
3960 sc->sc_ka_state = GRE_KA_UP3;
3961 gre_link_state(&sc->sc_ac.ac_if, sc->sc_ka_state);
3962 break;
3963
3964 case GRE_KA_UP3:
3965 sc->sc_ka_holdmax--;
3966 sc->sc_ka_holdmax = MAX(sc->sc_ka_holdmax, sc->sc_ka_count)(((sc->sc_ka_holdmax)>(sc->sc_ka_count))?(sc->sc_ka_holdmax
):(sc->sc_ka_count))
;
3967 break;
3968 }
3969
3970 timeout_add_sec(&sc->sc_ka_hold, sc->sc_ka_timeo * sc->sc_ka_count);
3971}
3972
3973static struct mbuf *
3974eoip_input(struct gre_tunnel *key, struct mbuf *m,
3975 const struct gre_header *gh, uint8_t otos, int iphlen)
3976{
3977 struct eoip_softc *sc;
3978 struct gre_h_key_eoip *eoiph;
3979 int hlen, len;
3980 caddr_t buf;
3981
3982 if (gh->gre_flags != htons(GRE_KP | GRE_VERS_1)(__uint16_t)(__builtin_constant_p(0x2000 | 0x0001) ? (__uint16_t
)(((__uint16_t)(0x2000 | 0x0001) & 0xffU) << 8 | ((
__uint16_t)(0x2000 | 0x0001) & 0xff00U) >> 8) : __swap16md
(0x2000 | 0x0001))
)
3983 goto decline;
3984
3985 hlen = iphlen + sizeof(*gh) + sizeof(*eoiph);
3986 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < hlen)
3987 goto decline;
3988
3989 m = m_pullup(m, hlen);
3990 if (m == NULL((void *)0))
3991 return (NULL((void *)0));
3992
3993 buf = mtod(m, caddr_t)((caddr_t)((m)->m_hdr.mh_data));
3994 gh = (struct gre_header *)(buf + iphlen);
3995 eoiph = (struct gre_h_key_eoip *)(gh + 1);
3996
3997 key->t_key = eoiph->eoip_tunnel_id;
3998
3999 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
4000 sc = RBT_FIND(eoip_tree, &eoip_tree, (const struct eoip_softc *)key)eoip_tree_RBT_FIND(&eoip_tree, (const struct eoip_softc *
)key)
;
4001 if (sc == NULL((void *)0))
4002 goto decline;
4003
4004 /* it's ours now */
4005 len = bemtoh16(&eoiph->eoip_len)(__uint16_t)(__builtin_constant_p(*(__uint16_t *)(&eoiph->
eoip_len)) ? (__uint16_t)(((__uint16_t)(*(__uint16_t *)(&
eoiph->eoip_len)) & 0xffU) << 8 | ((__uint16_t)(
*(__uint16_t *)(&eoiph->eoip_len)) & 0xff00U) >>
8) : __swap16md(*(__uint16_t *)(&eoiph->eoip_len)))
;
4006 if (len == 0) {
4007 eoip_keepalive_recv(sc);
4008 goto drop;
4009 }
4010
4011 m = gre_ether_align(m, hlen);
4012 if (m == NULL((void *)0))
4013 return (NULL((void *)0));
4014
4015 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < len)
4016 goto drop;
4017 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len != len)
4018 m_adj(m, len - m->m_pkthdrM_dat.MH.MH_pkthdr.len);
4019
4020 m->m_flagsm_hdr.mh_flags &= ~(M_MCAST0x0200|M_BCAST0x0100);
4021
4022 gre_l2_prio(&sc->sc_tunnel, m, otos)do { int rxprio = (&sc->sc_tunnel)->t_rxhprio; switch
(rxprio) { case -1: break; case -3: (m)->M_dat.MH.MH_pkthdr
.pf.prio = (((otos)) >> 5); break; default: (m)->M_dat
.MH.MH_pkthdr.pf.prio = rxprio; break; } } while (0)
;
4023
4024 if_vinput(&sc->sc_ac.ac_if, m);
4025
4026 return (NULL((void *)0));
4027
4028decline:
4029 return (m);
4030drop:
4031 m_freem(m);
4032 return (NULL((void *)0));
4033}
4034
4035const struct sysctl_bounded_args gre_vars[] = {
4036 { GRECTL_ALLOW1, &gre_allow, 0, 1 },
4037 { GRECTL_WCCP2, &gre_wccp, 0, 1 },
4038};
4039
4040int
4041gre_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
4042 size_t newlen)
4043{
4044 int error;
4045
4046 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
4047 error = sysctl_bounded_arr(gre_vars, nitems(gre_vars)(sizeof((gre_vars)) / sizeof((gre_vars)[0])), name,
4048 namelen, oldp, oldlenp, newp, newlen);
4049 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
4050 return error;
4051}
4052
4053static inline int
4054gre_ip_cmp(int af, const union gre_addr *a, const union gre_addr *b)
4055{
4056 switch (af) {
4057#ifdef INET61
4058 case AF_INET624:
4059 return (memcmp(&a->in6, &b->in6, sizeof(a->in6))__builtin_memcmp((&a->in6), (&b->in6), (sizeof(
a->in6)))
);
4060#endif /* INET6 */
4061 case AF_INET2:
4062 return (memcmp(&a->in4, &b->in4, sizeof(a->in4))__builtin_memcmp((&a->in4), (&b->in4), (sizeof(
a->in4)))
);
4063 default:
4064 unhandled_af(af);
4065 }
4066
4067 return (0);
4068}
4069
4070static int
4071gre_cmp_src(const struct gre_tunnel *a, const struct gre_tunnel *b)
4072{
4073 uint32_t ka, kb;
4074 uint32_t mask;
4075 int rv;
4076
4077 /* is K set at all? */
4078 ka = a->t_key_mask & GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
4079 kb = b->t_key_mask & GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
4080
4081 /* sort by whether K is set */
4082 if (ka > kb)
4083 return (1);
4084 if (ka < kb)
4085 return (-1);
4086
4087 /* is K set on both? */
4088 if (ka != GRE_KEY_NONE(__uint32_t)(__builtin_constant_p(0x00000000U) ? (__uint32_t)
(((__uint32_t)(0x00000000U) & 0xff) << 24 | ((__uint32_t
)(0x00000000U) & 0xff00) << 8 | ((__uint32_t)(0x00000000U
) & 0xff0000) >> 8 | ((__uint32_t)(0x00000000U) &
0xff000000) >> 24) : __swap32md(0x00000000U))
) {
4089 /* get common prefix */
4090 mask = a->t_key_mask & b->t_key_mask;
4091
4092 ka = a->t_key & mask;
4093 kb = b->t_key & mask;
4094
4095 /* sort by common prefix */
4096 if (ka > kb)
4097 return (1);
4098 if (ka < kb)
4099 return (-1);
4100 }
4101
4102 /* sort by routing table */
4103 if (a->t_rtableid > b->t_rtableid)
4104 return (1);
4105 if (a->t_rtableid < b->t_rtableid)
4106 return (-1);
4107
4108 /* sort by address */
4109 if (a->t_af > b->t_af)
4110 return (1);
4111 if (a->t_af < b->t_af)
4112 return (-1);
4113
4114 rv = gre_ip_cmp(a->t_af, &a->t_src, &b->t_src);
4115 if (rv != 0)
4116 return (rv);
4117
4118 return (0);
4119}
4120
4121static int
4122gre_cmp(const struct gre_tunnel *a, const struct gre_tunnel *b)
4123{
4124 int rv;
4125
4126 rv = gre_cmp_src(a, b);
4127 if (rv != 0)
4128 return (rv);
4129
4130 return (gre_ip_cmp(a->t_af, &a->t_dst, &b->t_dst));
4131}
4132
4133static inline int
4134mgre_cmp(const struct mgre_softc *a, const struct mgre_softc *b)
4135{
4136 return (gre_cmp_src(&a->sc_tunnel, &b->sc_tunnel));
4137}
4138
RBT_GENERATE(mgre_tree, mgre_softc, sc_entry, mgre_cmp);
4140
4141static inline int
4142egre_cmp(const struct egre_softc *a, const struct egre_softc *b)
4143{
4144 return (gre_cmp(&a->sc_tunnel, &b->sc_tunnel));
4145}
4146
RBT_GENERATE(egre_tree, egre_softc, sc_entry, egre_cmp);
4148
4149static int
4150nvgre_cmp_tunnel(const struct gre_tunnel *a, const struct gre_tunnel *b)
4151{
4152 uint32_t ka, kb;
4153
4154 ka = a->t_key & GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
4155 kb = b->t_key & GRE_KEY_ENTROPY(__uint32_t)(__builtin_constant_p(0xffffff00U) ? (__uint32_t)
(((__uint32_t)(0xffffff00U) & 0xff) << 24 | ((__uint32_t
)(0xffffff00U) & 0xff00) << 8 | ((__uint32_t)(0xffffff00U
) & 0xff0000) >> 8 | ((__uint32_t)(0xffffff00U) &
0xff000000) >> 24) : __swap32md(0xffffff00U))
;
4156
4157 /* sort by common prefix */
4158 if (ka > kb)
4159 return (1);
4160 if (ka < kb)
4161 return (-1);
4162
4163 /* sort by routing table */
4164 if (a->t_rtableid > b->t_rtableid)
4165 return (1);
4166 if (a->t_rtableid < b->t_rtableid)
4167 return (-1);
4168
4169 /* sort by address */
4170 if (a->t_af > b->t_af)
4171 return (1);
4172 if (a->t_af < b->t_af)
4173 return (-1);
4174
4175 return (0);
4176}
4177
4178static inline int
4179nvgre_cmp_ucast(const struct nvgre_softc *na, const struct nvgre_softc *nb)
4180{
4181 const struct gre_tunnel *a = &na->sc_tunnel;
4182 const struct gre_tunnel *b = &nb->sc_tunnel;
4183 int rv;
4184
4185 rv = nvgre_cmp_tunnel(a, b);
4186 if (rv != 0)
4187 return (rv);
4188
4189 rv = gre_ip_cmp(a->t_af, &a->t_src, &b->t_src);
4190 if (rv != 0)
4191 return (rv);
4192
4193 return (0);
4194}
4195
4196static int
4197nvgre_cmp_mcast(const struct gre_tunnel *a, const union gre_addr *aa,
4198 unsigned int if0idxa, const struct gre_tunnel *b,
4199 const union gre_addr *ab,unsigned int if0idxb)
4200{
4201 int rv;
4202
4203 rv = nvgre_cmp_tunnel(a, b);
4204 if (rv != 0)
4205 return (rv);
4206
4207 rv = gre_ip_cmp(a->t_af, aa, ab);
4208 if (rv != 0)
4209 return (rv);
4210
4211 if (if0idxa > if0idxb)
4212 return (1);
4213 if (if0idxa < if0idxb)
4214 return (-1);
4215
4216 return (0);
4217}
4218
4219static inline int
4220nvgre_cmp_mcast_sc(const struct nvgre_softc *na, const struct nvgre_softc *nb)
4221{
4222 const struct gre_tunnel *a = &na->sc_tunnel;
4223 const struct gre_tunnel *b = &nb->sc_tunnel;
4224
4225 return (nvgre_cmp_mcast(a, &a->t_dst, na->sc_ifp0,
4226 b, &b->t_dst, nb->sc_ifp0));
4227}
4228
RBT_GENERATE(nvgre_ucast_tree, nvgre_softc, sc_uentry, nvgre_cmp_ucast);
RBT_GENERATE(nvgre_mcast_tree, nvgre_softc, sc_mentry, nvgre_cmp_mcast_sc);
4231
4232static inline int
4233eoip_cmp(const struct eoip_softc *ea, const struct eoip_softc *eb)
4234{
4235 const struct gre_tunnel *a = &ea->sc_tunnel;
4236 const struct gre_tunnel *b = &eb->sc_tunnel;
4237 int rv;
4238
4239 if (a->t_key > b->t_key)
4240 return (1);
4241 if (a->t_key < b->t_key)
4242 return (-1);
4243
4244 /* sort by routing table */
4245 if (a->t_rtableid > b->t_rtableid)
4246 return (1);
4247 if (a->t_rtableid < b->t_rtableid)
4248 return (-1);
4249
4250 /* sort by address */
4251 if (a->t_af > b->t_af)
4252 return (1);
4253 if (a->t_af < b->t_af)
4254 return (-1);
4255
4256 rv = gre_ip_cmp(a->t_af, &a->t_src, &b->t_src);
4257 if (rv != 0)
4258 return (rv);
4259
4260 rv = gre_ip_cmp(a->t_af, &a->t_dst, &b->t_dst);
4261 if (rv != 0)
4262 return (rv);
4263
4264 return (0);
4265}
4266
RBT_GENERATE(eoip_tree, eoip_softc, sc_entry, eoip_cmp);
4268
4269static int
4270nvgre_eb_port_eq(void *arg, void *a, void *b)
4271{
4272 struct nvgre_softc *sc = arg;
4273
4274 return (gre_ip_cmp(sc->sc_tunnel.t_af, a, b) == 0);
4275}
4276
4277static void *
4278nvgre_eb_port_take(void *arg, void *port)
4279{
4280 union gre_addr *ea = port;
4281 union gre_addr *endpoint;
4282
4283 endpoint = pool_get(&nvgre_endpoint_pool, PR_NOWAIT0x0002);
4284 if (endpoint == NULL((void *)0))
4285 return (NULL((void *)0));
4286
4287 *endpoint = *ea;
4288
4289 return (endpoint);
4290}
4291
4292static void
4293nvgre_eb_port_rele(void *arg, void *port)
4294{
4295 union gre_addr *endpoint = port;
4296
4297 pool_put(&nvgre_endpoint_pool, endpoint);
4298}
4299
4300static size_t
4301nvgre_eb_port_ifname(void *arg, char *dst, size_t len, void *port)
4302{
4303 struct nvgre_softc *sc = arg;
4304
4305 return (strlcpy(dst, sc->sc_ac.ac_if.if_xname, len));
4306}
4307
4308static void
4309nvgre_eb_port_sa(void *arg, struct sockaddr_storage *ss, void *port)
4310{
4311 struct nvgre_softc *sc = arg;
4312 union gre_addr *endpoint = port;
4313
4314 switch (sc->sc_tunnel.t_af) {
4315 case AF_INET2: {
4316 struct sockaddr_in *sin = (struct sockaddr_in *)ss;
4317
4318 sin->sin_len = sizeof(*sin);
4319 sin->sin_family = AF_INET2;
4320 sin->sin_addr = endpoint->in4;
4321 break;
4322 }
4323#ifdef INET61
4324 case AF_INET624: {
4325 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)ss;
4326
4327 sin6->sin6_len = sizeof(*sin6);
4328 sin6->sin6_family = AF_INET624;
4329 in6_recoverscope(sin6, &endpoint->in6);
4330
4331 break;
4332 }
4333#endif /* INET6 */
4334 default:
4335 unhandled_af(sc->sc_tunnel.t_af);
4336 }
4337}