Bug Summary

File: net/if_veb.c
Warning: line 1158, column 2
Value stored to 'eh' is never read
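
The flagged statement is in veb_port_input() in the listing below: line 1158 reloads 'eh' via mtod(m, struct ether_header *) after the packet filter hook may have replaced 'm', but nothing later in the function reads 'eh' again (learning and forwarding only use 'm', 'src' and 'dst'), so the store is dead. The usual remedies are to drop the reassignment or to actually use the refreshed header. The snippet below is a hypothetical, self-contained illustration of the same diagnostic class, not code from if_veb.c; running "clang --analyze" on it should report the same kind of "Value stored to 'p' is never read" dead store on the second assignment to 'p'.

/* deadstore.c - contrived example (assumed names, unrelated to the OpenBSD tree) */
#include <stdio.h>
#include <string.h>

static size_t
after_colon(char *buf, size_t len)
{
	char *p;

	p = memchr(buf, ':', len);	/* this store is read just below, so it is fine */
	if (p == NULL)
		return (0);

	len -= (size_t)(p - buf) + 1;	/* bytes after the ':' */
	p = p + 1;			/* dead store: 'p' is never read after this */

	return (len);
}

int
main(void)
{
	char msg[] = "hdr:payload";

	printf("%zu\n", after_colon(msg, strlen(msg)));	/* prints 7 */
	return (0);
}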

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name if_veb.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/net/if_veb.c
1/* $OpenBSD: if_veb.c,v 1.34 2023/12/23 10:52:54 bluhm Exp $ */
2
3/*
4 * Copyright (c) 2021 David Gwynne <dlg@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include "bpfilter.h"
20#include "pf.h"
21#include "vlan.h"
22
23#include <sys/param.h>
24#include <sys/kernel.h>
25#include <sys/malloc.h>
26#include <sys/mbuf.h>
27#include <sys/queue.h>
28#include <sys/socket.h>
29#include <sys/sockio.h>
30#include <sys/systm.h>
31#include <sys/syslog.h>
32#include <sys/rwlock.h>
33#include <sys/percpu.h>
34#include <sys/smr.h>
35#include <sys/task.h>
36#include <sys/pool.h>
37
38#include <net/if.h>
39#include <net/if_dl.h>
40#include <net/if_types.h>
41
42#include <netinet/in.h>
43#include <netinet/ip.h>
44#include <netinet/if_ether.h>
45
46#ifdef INET6
47#include <netinet6/in6_var.h>
48#include <netinet/ip6.h>
49#include <netinet6/ip6_var.h>
50#endif
51
52#if 0 && defined(IPSEC)
53/*
54 * IPsec handling is disabled in veb until getting and using tdbs is mpsafe.
55 */
56#include <netinet/ip_ipsp.h>
57#include <net/if_enc.h>
58#endif
59
60#include <net/if_bridge.h>
61#include <net/if_etherbridge.h>
62
63#if NBPFILTER > 0
64#include <net/bpf.h>
65#endif
66
67#if NPF > 0
68#include <net/pfvar.h>
69#endif
70
71#if NVLAN > 0
72#include <net/if_vlan_var.h>
73#endif
74
75/* SIOCBRDGIFFLGS, SIOCBRDGIFFLGS */
76#define VEB_IFBIF_FLAGS (IFBIF_LEARNING|IFBIF_DISCOVER|IFBIF_BLOCKNONIP)
77
78struct veb_rule {
79 TAILQ_ENTRY(veb_rule) vr_entry;
80 SMR_TAILQ_ENTRY(veb_rule) vr_lentry[2];
81
82 uint16_t vr_flags;
83#define VEB_R_F_IN (1U << 0)
84#define VEB_R_F_OUT (1U << 1)
85#define VEB_R_F_SRC (1U << 2)
86#define VEB_R_F_DST (1U << 3)
87
88#define VEB_R_F_ARP (1U << 4)
89#define VEB_R_F_RARP (1U << 5)
90#define VEB_R_F_SHA (1U << 6)
91#define VEB_R_F_SPA (1U << 7)
92#define VEB_R_F_THA (1U << 8)
93#define VEB_R_F_TPA (1U << 9)
94 uint16_t vr_arp_op;
95
96 uint64_t vr_src;
97 uint64_t vr_dst;
98 struct ether_addr vr_arp_sha;
99 struct ether_addr vr_arp_tha;
100 struct in_addr vr_arp_spa;
101 struct in_addr vr_arp_tpa;
102
103 unsigned int vr_action;
104#define VEB_R_MATCH 0
105#define VEB_R_PASS 1
106#define VEB_R_BLOCK 2
107
108 int vr_pftag;
109};
110
111TAILQ_HEAD(veb_rules, veb_rule);
112SMR_TAILQ_HEAD(veb_rule_list, veb_rule);
113
114struct veb_softc;
115
116struct veb_port {
117 struct ifnet *p_ifp0;
118 struct refcnt p_refs;
119
120 int (*p_enqueue)(struct ifnet *, struct mbuf *);
121
122 int (*p_ioctl)(struct ifnet *, u_long, caddr_t);
123 int (*p_output)(struct ifnet *, struct mbuf *, struct sockaddr *,
124 struct rtentry *);
125
126 struct task p_ltask;
127 struct task p_dtask;
128
129 struct veb_softc *p_veb;
130
131 struct ether_brport p_brport;
132
133 unsigned int p_link_state;
134 unsigned int p_bif_flags;
135 uint32_t p_protected;
136
137 struct veb_rules p_vrl;
138 unsigned int p_nvrl;
139 struct veb_rule_list p_vr_list[2];
140#define VEB_RULE_LIST_OUT 0
141#define VEB_RULE_LIST_IN 1
142};
143
144struct veb_ports {
145 struct refcnt m_refs;
146 unsigned int m_count;
147
148 /* followed by an array of veb_port pointers */
149};
150
151struct veb_softc {
152 struct ifnet sc_if;
153 unsigned int sc_dead;
154
155 struct etherbridge sc_eb;
156
157 struct rwlock sc_rule_lock;
158 struct veb_ports *sc_ports;
159 struct veb_ports *sc_spans;
160};
161
162#define DPRINTF(_sc, fmt...) do { \
163 if (ISSET((_sc)->sc_if.if_flags, IFF_DEBUG)) \
164 printf(fmt); \
165} while (0)
166
167static int veb_clone_create(struct if_clone *, int);
168static int veb_clone_destroy(struct ifnet *);
169
170static int veb_ioctl(struct ifnet *, u_long, caddr_t);
171static void veb_input(struct ifnet *, struct mbuf *);
172static int veb_enqueue(struct ifnet *, struct mbuf *);
173static int veb_output(struct ifnet *, struct mbuf *, struct sockaddr *,
174 struct rtentry *);
175static void veb_start(struct ifqueue *);
176
177static int veb_up(struct veb_softc *);
178static int veb_down(struct veb_softc *);
179static int veb_iff(struct veb_softc *);
180
181static void veb_p_linkch(void *);
182static void veb_p_detach(void *);
183static int veb_p_ioctl(struct ifnet *, u_long, caddr_t);
184static int veb_p_output(struct ifnet *, struct mbuf *,
185 struct sockaddr *, struct rtentry *);
186
187static inline size_t
188veb_ports_size(unsigned int n)
189{
190 /* use of _ALIGN is inspired by CMSGs */
191 return _ALIGN(sizeof(struct veb_ports)) +
192 n * sizeof(struct veb_port *);
193}
194
195static inline struct veb_port **
196veb_ports_array(struct veb_ports *m)
197{
198 return (struct veb_port **)((caddr_t)m + _ALIGN(sizeof(*m)));
199}
200
201static inline void veb_ports_free(struct veb_ports *);
202
203static void veb_p_unlink(struct veb_softc *, struct veb_port *);
204static void veb_p_fini(struct veb_port *);
205static void veb_p_dtor(struct veb_softc *, struct veb_port *);
206static int veb_add_port(struct veb_softc *,
207 const struct ifbreq *, unsigned int);
208static int veb_del_port(struct veb_softc *,
209 const struct ifbreq *, unsigned int);
210static int veb_port_list(struct veb_softc *, struct ifbifconf *);
211static int veb_port_set_flags(struct veb_softc *, struct ifbreq *);
212static int veb_port_get_flags(struct veb_softc *, struct ifbreq *);
213static int veb_port_set_protected(struct veb_softc *,
214 const struct ifbreq *);
215static int veb_add_addr(struct veb_softc *, const struct ifbareq *);
216static int veb_del_addr(struct veb_softc *, const struct ifbareq *);
217
218static int veb_rule_add(struct veb_softc *, const struct ifbrlreq *);
219static int veb_rule_list_flush(struct veb_softc *,
220 const struct ifbrlreq *);
221static void veb_rule_list_free(struct veb_rule *);
222static int veb_rule_list_get(struct veb_softc *, struct ifbrlconf *);
223
224static int veb_eb_port_cmp(void *, void *, void *);
225static void *veb_eb_port_take(void *, void *);
226static void veb_eb_port_rele(void *, void *);
227static size_t veb_eb_port_ifname(void *, char *, size_t, void *);
228static void veb_eb_port_sa(void *, struct sockaddr_storage *, void *);
229
230static void veb_eb_brport_take(void *);
231static void veb_eb_brport_rele(void *);
232
233static const struct etherbridge_ops veb_etherbridge_ops = {
234 veb_eb_port_cmp,
235 veb_eb_port_take,
236 veb_eb_port_rele,
237 veb_eb_port_ifname,
238 veb_eb_port_sa,
239};
240
241static struct if_clone veb_cloner =
242 IF_CLONE_INITIALIZER("veb", veb_clone_create, veb_clone_destroy);
243
244static struct pool veb_rule_pool;
245
246static int vport_clone_create(struct if_clone *, int);
247static int vport_clone_destroy(struct ifnet *);
248
249struct vport_softc {
250 struct arpcom sc_ac;
251 unsigned int sc_dead;
252};
253
254static int vport_if_enqueue(struct ifnet *, struct mbuf *);
255
256static int vport_ioctl(struct ifnet *, u_long, caddr_t);
257static int vport_enqueue(struct ifnet *, struct mbuf *);
258static void vport_start(struct ifqueue *);
259
260static int vport_up(struct vport_softc *);
261static int vport_down(struct vport_softc *);
262static int vport_iff(struct vport_softc *);
263
264static struct if_clone vport_cloner =
265 IF_CLONE_INITIALIZER("vport", vport_clone_create, vport_clone_destroy);
266
267void
268vebattach(int count)
269{
270 if_clone_attach(&veb_cloner);
271 if_clone_attach(&vport_cloner);
272}
273
274static int
275veb_clone_create(struct if_clone *ifc, int unit)
276{
277 struct veb_softc *sc;
278 struct ifnet *ifp;
279 int error;
280
281 if (veb_rule_pool.pr_size == 0) {
282 pool_init(&veb_rule_pool, sizeof(struct veb_rule),
283 0, IPL_SOFTNET0x2, 0, "vebrpl", NULL((void *)0));
284 }
285
286 sc = malloc(sizeof(*sc), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008|M_CANFAIL0x0004);
287 if (sc == NULL((void *)0))
288 return (ENOMEM12);
289
290 rw_init(&sc->sc_rule_lock, "vebrlk")_rw_init_flags(&sc->sc_rule_lock, "vebrlk", 0, ((void *
)0))
;
291 sc->sc_ports = NULL((void *)0);
292 sc->sc_spans = NULL((void *)0);
293
294 ifp = &sc->sc_if;
295
296 snprintf(ifp->if_xname, IFNAMSIZ16, "%s%d", ifc->ifc_name, unit);
297
298 error = etherbridge_init(&sc->sc_eb, ifp->if_xname,
299 &veb_etherbridge_ops, sc);
300 if (error != 0) {
301 free(sc, M_DEVBUF2, sizeof(*sc));
302 return (error);
303 }
304
305 ifp->if_softc = sc;
306 ifp->if_typeif_data.ifi_type = IFT_BRIDGE0xd1;
307 ifp->if_hdrlenif_data.ifi_hdrlen = ETHER_HDR_LEN((6 * 2) + 2);
308 ifp->if_hardmtu = ETHER_MAX_HARDMTU_LEN65435;
309 ifp->if_ioctl = veb_ioctl;
310 ifp->if_input = veb_input;
311 ifp->if_output = veb_output;
312 ifp->if_enqueue = veb_enqueue;
313 ifp->if_qstart = veb_start;
314 ifp->if_flags = IFF_BROADCAST0x2 | IFF_SIMPLEX0x800 | IFF_MULTICAST0x8000;
315 ifp->if_xflags = IFXF_CLONED0x2 | IFXF_MPSAFE0x1;
316
317 if_counters_alloc(ifp);
318 if_attach(ifp);
319
320 if_alloc_sadl(ifp);
321
322#if NBPFILTER > 0
323 bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB1, ETHER_HDR_LEN((6 * 2) + 2));
324#endif
325
326 return (0);
327}
328
329static int
330veb_clone_destroy(struct ifnet *ifp)
331{
332 struct veb_softc *sc = ifp->if_softc;
333 struct veb_ports *mp, *ms;
334 struct veb_port **ps;
335 struct veb_port *p;
336 unsigned int i;
337
338 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
339 sc->sc_dead = 1;
340
341 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
342 veb_down(sc);
343 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
344
345 if_detach(ifp);
346
347 NET_LOCK()do { rw_enter_write(&netlock); } while (0);
348
349 /*
350 * this is an upside down version of veb_p_dtor() and
351 * veb_ports_destroy() to avoid a lot of malloc/free and
352 * smr_barrier calls if we remove ports one by one.
353 */
354
355 mp = SMR_PTR_GET_LOCKED(&sc->sc_ports)(*(&sc->sc_ports));
356 SMR_PTR_SET_LOCKED(&sc->sc_ports, NULL)do { do { __asm volatile("" ::: "memory"); } while (0); ({ typeof
(*&sc->sc_ports) __tmp = (((void *)0)); *(volatile typeof
(*&sc->sc_ports) *)&(*&sc->sc_ports) = __tmp
; __tmp; }); } while (0)
;
357 if (mp != NULL((void *)0)) {
358 ps = veb_ports_array(mp);
359 for (i = 0; i < mp->m_count; i++)
360 veb_p_unlink(sc, ps[i]);
361 }
362
363 ms = SMR_PTR_GET_LOCKED(&sc->sc_spans)(*(&sc->sc_spans));
364 SMR_PTR_SET_LOCKED(&sc->sc_spans, NULL)do { do { __asm volatile("" ::: "memory"); } while (0); ({ typeof
(*&sc->sc_spans) __tmp = (((void *)0)); *(volatile typeof
(*&sc->sc_spans) *)&(*&sc->sc_spans) = __tmp
; __tmp; }); } while (0)
;
365 if (ms != NULL((void *)0)) {
366 ps = veb_ports_array(ms);
367 for (i = 0; i < ms->m_count; i++)
368 veb_p_unlink(sc, ps[i]);
369 }
370
371 if (mp != NULL((void *)0) || ms != NULL((void *)0)) {
372 smr_barrier()smr_barrier_impl(0); /* everything everywhere all at once */
373
374 if (mp != NULL((void *)0)) {
375 refcnt_finalize(&mp->m_refs, "vebdtor");
376
377 ps = veb_ports_array(mp);
378 for (i = 0; i < mp->m_count; i++) {
379 p = ps[i];
380 /* the ports map holds a port ref */
381 refcnt_rele(&p->p_refs);
382 /* now we can finalize the port */
383 veb_p_fini(p);
384 }
385
386 veb_ports_free(mp);
387 }
388 if (ms != NULL((void *)0)) {
389 refcnt_finalize(&ms->m_refs, "vebdtor");
390
391 ps = veb_ports_array(ms);
392 for (i = 0; i < ms->m_count; i++) {
393 p = ps[i];
394 /* the ports map holds a port ref */
395 refcnt_rele(&p->p_refs);
396 /* now we can finalize the port */
397 veb_p_fini(p);
398 }
399
400 veb_ports_free(ms);
401 }
402 }
403 NET_UNLOCK()do { rw_exit_write(&netlock); } while (0);
404
405 etherbridge_destroy(&sc->sc_eb);
406
407 free(sc, M_DEVBUF2, sizeof(*sc));
408
409 return (0);
410}
411
412static struct mbuf *
413veb_span_input(struct ifnet *ifp0, struct mbuf *m, uint64_t dst, void *brport)
414{
415 m_freem(m);
416 return (NULL((void *)0));
417}
418
419static void
420veb_span(struct veb_softc *sc, struct mbuf *m0)
421{
422 struct veb_ports *sm;
423 struct veb_port **ps;
424 struct veb_port *p;
425 struct ifnet *ifp0;
426 struct mbuf *m;
427 unsigned int i;
428
429 smr_read_enter();
430 sm = SMR_PTR_GET(&sc->sc_spans)({ typeof(*&sc->sc_spans) __tmp = *(volatile typeof(*&
sc->sc_spans) *)&(*&sc->sc_spans); membar_datadep_consumer
(); __tmp; })
;
431 if (sm != NULL((void *)0))
432 refcnt_take(&sm->m_refs);
433 smr_read_leave();
434 if (sm == NULL((void *)0))
435 return;
436
437 ps = veb_ports_array(sm);
438 for (i = 0; i < sm->m_count; i++) {
439 p = ps[i];
440
441 ifp0 = p->p_ifp0;
442 if (!ISSET(ifp0->if_flags, IFF_RUNNING)((ifp0->if_flags) & (0x40)))
443 continue;
444
445 m = m_dup_pkt(m0, max_linkhdr + ETHER_ALIGN2, M_NOWAIT0x0002);
446 if (m == NULL((void *)0)) {
447 /* XXX count error */
448 continue;
449 }
450
451 if_enqueue(ifp0, m); /* XXX count error */
452 }
453 refcnt_rele_wake(&sm->m_refs);
454}
455
456static int
457veb_ip_filter(const struct mbuf *m)
458{
459 const struct ether_header *eh;
460
461 eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data));
462 switch (ntohs(eh->ether_type)(__uint16_t)(__builtin_constant_p(eh->ether_type) ? (__uint16_t
)(((__uint16_t)(eh->ether_type) & 0xffU) << 8 | (
(__uint16_t)(eh->ether_type) & 0xff00U) >> 8) : __swap16md
(eh->ether_type))
) {
463 case ETHERTYPE_IP0x0800:
464 case ETHERTYPE_ARP0x0806:
465 case ETHERTYPE_REVARP0x8035:
466 case ETHERTYPE_IPV60x86DD:
467 return (0);
468 default:
469 break;
470 }
471
472 return (1);
473}
474
475static int
476veb_vlan_filter(const struct mbuf *m)
477{
478 const struct ether_header *eh;
479
480 eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data));
481 switch (ntohs(eh->ether_type)(__uint16_t)(__builtin_constant_p(eh->ether_type) ? (__uint16_t
)(((__uint16_t)(eh->ether_type) & 0xffU) << 8 | (
(__uint16_t)(eh->ether_type) & 0xff00U) >> 8) : __swap16md
(eh->ether_type))
) {
482 case ETHERTYPE_VLAN0x8100:
483 case ETHERTYPE_QINQ0x88A8:
484 return (1);
485 default:
486 break;
487 }
488
489 return (0);
490}
491
492static int
493veb_rule_arp_match(const struct veb_rule *vr, struct mbuf *m)
494{
495 struct ether_header *eh;
496 struct ether_arp ea;
497
498 eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data));
499
500 if (eh->ether_type != htons(ETHERTYPE_ARP)(__uint16_t)(__builtin_constant_p(0x0806) ? (__uint16_t)(((__uint16_t
)(0x0806) & 0xffU) << 8 | ((__uint16_t)(0x0806) &
0xff00U) >> 8) : __swap16md(0x0806))
)
501 return (0);
502 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < sizeof(*eh) + sizeof(ea))
503 return (0);
504
505 m_copydata(m, sizeof(*eh), sizeof(ea), (caddr_t)&ea);
506
507 if (ea.arp_hrdea_hdr.ar_hrd != htons(ARPHRD_ETHER)(__uint16_t)(__builtin_constant_p(1) ? (__uint16_t)(((__uint16_t
)(1) & 0xffU) << 8 | ((__uint16_t)(1) & 0xff00U
) >> 8) : __swap16md(1))
||
508 ea.arp_proea_hdr.ar_pro != htons(ETHERTYPE_IP)(__uint16_t)(__builtin_constant_p(0x0800) ? (__uint16_t)(((__uint16_t
)(0x0800) & 0xffU) << 8 | ((__uint16_t)(0x0800) &
0xff00U) >> 8) : __swap16md(0x0800))
||
509 ea.arp_hlnea_hdr.ar_hln != ETHER_ADDR_LEN6 ||
510 ea.arp_plnea_hdr.ar_pln != sizeof(struct in_addr))
511 return (0);
512
513 if (ISSET(vr->vr_flags, VEB_R_F_ARP)((vr->vr_flags) & ((1U << 4)))) {
514 if (ea.arp_opea_hdr.ar_op != htons(ARPOP_REQUEST)(__uint16_t)(__builtin_constant_p(1) ? (__uint16_t)(((__uint16_t
)(1) & 0xffU) << 8 | ((__uint16_t)(1) & 0xff00U
) >> 8) : __swap16md(1))
&&
515 ea.arp_opea_hdr.ar_op != htons(ARPOP_REPLY)(__uint16_t)(__builtin_constant_p(2) ? (__uint16_t)(((__uint16_t
)(2) & 0xffU) << 8 | ((__uint16_t)(2) & 0xff00U
) >> 8) : __swap16md(2))
)
516 return (0);
517 }
518 if (ISSET(vr->vr_flags, VEB_R_F_RARP)((vr->vr_flags) & ((1U << 5)))) {
519 if (ea.arp_opea_hdr.ar_op != htons(ARPOP_REVREQUEST)(__uint16_t)(__builtin_constant_p(3) ? (__uint16_t)(((__uint16_t
)(3) & 0xffU) << 8 | ((__uint16_t)(3) & 0xff00U
) >> 8) : __swap16md(3))
&&
520 ea.arp_opea_hdr.ar_op != htons(ARPOP_REVREPLY)(__uint16_t)(__builtin_constant_p(4) ? (__uint16_t)(((__uint16_t
)(4) & 0xffU) << 8 | ((__uint16_t)(4) & 0xff00U
) >> 8) : __swap16md(4))
)
521 return (0);
522 }
523
524 if (vr->vr_arp_op != htons(0)(__uint16_t)(__builtin_constant_p(0) ? (__uint16_t)(((__uint16_t
)(0) & 0xffU) << 8 | ((__uint16_t)(0) & 0xff00U
) >> 8) : __swap16md(0))
&& vr->vr_arp_op != ea.arp_opea_hdr.ar_op)
525 return (0);
526
527 if (ISSET(vr->vr_flags, VEB_R_F_SHA)((vr->vr_flags) & ((1U << 6))) &&
528 !ETHER_IS_EQ(&vr->vr_arp_sha, ea.arp_sha)(__builtin_memcmp(((&vr->vr_arp_sha)), ((ea.arp_sha)),
(6)) == 0)
)
529 return (0);
530 if (ISSET(vr->vr_flags, VEB_R_F_THA)((vr->vr_flags) & ((1U << 8))) &&
531 !ETHER_IS_EQ(&vr->vr_arp_tha, ea.arp_tha)(__builtin_memcmp(((&vr->vr_arp_tha)), ((ea.arp_tha)),
(6)) == 0)
)
532 return (0);
533 if (ISSET(vr->vr_flags, VEB_R_F_SPA)((vr->vr_flags) & ((1U << 7))) &&
534 memcmp(&vr->vr_arp_spa, ea.arp_spa, sizeof(vr->vr_arp_spa))__builtin_memcmp((&vr->vr_arp_spa), (ea.arp_spa), (sizeof
(vr->vr_arp_spa)))
!= 0)
535 return (0);
536 if (ISSET(vr->vr_flags, VEB_R_F_TPA)((vr->vr_flags) & ((1U << 9))) &&
537 memcmp(&vr->vr_arp_tpa, ea.arp_tpa, sizeof(vr->vr_arp_tpa))__builtin_memcmp((&vr->vr_arp_tpa), (ea.arp_tpa), (sizeof
(vr->vr_arp_tpa)))
!= 0)
538 return (0);
539
540 return (1);
541}
542
543static int
544veb_rule_list_test(struct veb_rule *vr, int dir, struct mbuf *m,
545 uint64_t src, uint64_t dst)
546{
547 SMR_ASSERT_CRITICAL()do { if (panicstr == ((void *)0) && !db_active) ((({struct
cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci
) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;
})->ci_schedstate.spc_smrdepth > 0) ? (void)0 : __assert
("diagnostic ", "/usr/src/sys/net/if_veb.c", 547, "curcpu()->ci_schedstate.spc_smrdepth > 0"
)); } while (0)
;
548
549 do {
550 if (ISSET(vr->vr_flags, VEB_R_F_ARP|VEB_R_F_RARP)((vr->vr_flags) & ((1U << 4)|(1U << 5))) &&
551 !veb_rule_arp_match(vr, m))
552 continue;
553
554 if (ISSET(vr->vr_flags, VEB_R_F_SRC)((vr->vr_flags) & ((1U << 2))) &&
555 vr->vr_src != src)
556 continue;
557 if (ISSET(vr->vr_flags, VEB_R_F_DST)((vr->vr_flags) & ((1U << 3))) &&
558 vr->vr_dst != dst)
559 continue;
560
561 if (vr->vr_action == VEB_R_BLOCK2)
562 return (VEB_R_BLOCK2);
563#if NPF > 0
564 pf_tag_packet(m, vr->vr_pftag, -1);
565#endif
566 if (vr->vr_action == VEB_R_PASS1)
567 return (VEB_R_PASS1);
568 } while ((vr = SMR_TAILQ_NEXT(vr, vr_lentry[dir])({ typeof(*&(vr)->vr_lentry[dir].smr_tqe_next) __tmp =
*(volatile typeof(*&(vr)->vr_lentry[dir].smr_tqe_next
) *)&(*&(vr)->vr_lentry[dir].smr_tqe_next); membar_datadep_consumer
(); __tmp; })
) != NULL((void *)0));
569
570 return (VEB_R_PASS1);
571}
572
573static inline int
574veb_rule_filter(struct veb_port *p, int dir, struct mbuf *m,
575 uint64_t src, uint64_t dst)
576{
577 struct veb_rule *vr;
578 int filter = VEB_R_PASS1;
579
580 smr_read_enter();
581 vr = SMR_TAILQ_FIRST(&p->p_vr_list[dir])({ typeof(*&(&p->p_vr_list[dir])->smr_tqh_first
) __tmp = *(volatile typeof(*&(&p->p_vr_list[dir])
->smr_tqh_first) *)&(*&(&p->p_vr_list[dir])
->smr_tqh_first); membar_datadep_consumer(); __tmp; })
;
582 if (vr != NULL((void *)0))
583 filter = veb_rule_list_test(vr, dir, m, src, dst);
584 smr_read_leave();
585
586 return (filter == VEB_R_BLOCK2);
587}
588
589#if NPF > 0
590struct veb_pf_ip_family {
591 sa_family_t af;
592 struct mbuf *(*ip_check)(struct ifnet *, struct mbuf *);
593 void (*ip_input)(struct ifnet *, struct mbuf *);
594};
595
596static const struct veb_pf_ip_family veb_pf_ipv4 = {
597 .af = AF_INET2,
598 .ip_check = ipv4_check,
599 .ip_input = ipv4_input,
600};
601
602#ifdef INET6
603static const struct veb_pf_ip_family veb_pf_ipv6 = {
604 .af = AF_INET624,
605 .ip_check = ipv6_check,
606 .ip_input = ipv6_input,
607};
608#endif
609
610static struct mbuf *
611veb_pf(struct ifnet *ifp0, int dir, struct mbuf *m)
612{
613 struct ether_header *eh, copy;
614 const struct veb_pf_ip_family *fam;
615 int hlen;
616
617 /*
618 * pf runs on vport interfaces when they enter or leave the
619 * l3 stack, so don't confuse things (even more) by running
620 * pf again here. note that because of this exception the
621 * pf direction on vport interfaces is reversed compared to
622 * other veb ports.
623 */
624 if (ifp0->if_enqueue == vport_enqueue)
625 return (m);
626
627 eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data));
628 switch (ntohs(eh->ether_type)(__uint16_t)(__builtin_constant_p(eh->ether_type) ? (__uint16_t
)(((__uint16_t)(eh->ether_type) & 0xffU) << 8 | (
(__uint16_t)(eh->ether_type) & 0xff00U) >> 8) : __swap16md
(eh->ether_type))
) {
629 case ETHERTYPE_IP0x0800:
630 fam = &veb_pf_ipv4;
631 break;
632#ifdef INET6
633 case ETHERTYPE_IPV60x86DD:
634 fam = &veb_pf_ipv6;
635 break;
636#endif
637 default:
638 return (m);
639 }
640
641 copy = *eh;
642 m_adj(m, sizeof(*eh));
643
644 m = (*fam->ip_check)(ifp0, m);
645 if (m == NULL((void *)0))
646 return (NULL((void *)0));
647
648 if (pf_test(fam->af, dir, ifp0, &m) != PF_PASS) {
649 m_freem(m);
650 return (NULL((void *)0));
651 }
652 if (m == NULL((void *)0))
653 return (NULL((void *)0));
654
655 if (dir == PF_IN && ISSET(m->m_pkthdr.pf.flags, PF_TAG_DIVERTED)((m->M_dat.MH.MH_pkthdr.pf.flags) & (0x08))) {
656 pf_mbuf_unlink_state_key(m);
657 pf_mbuf_unlink_inpcb(m);
658 (*fam->ip_input)(ifp0, m);
659 return (NULL((void *)0));
660 }
661
662 hlen = roundup(sizeof(*eh), sizeof(long))((((sizeof(*eh))+((sizeof(long))-1))/(sizeof(long)))*(sizeof(
long)))
;
663 m = m_prepend(m, hlen, M_DONTWAIT0x0002);
664 if (m == NULL((void *)0))
665 return (NULL((void *)0));
666
667 /* checksum? */
668
669 m_adj(m, hlen - sizeof(*eh));
670 eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data));
671 *eh = copy;
672
673 return (m);
674}
675#endif /* NPF > 0 */
676
677#if 0 && defined(IPSEC)
678static struct mbuf *
679veb_ipsec_proto_in(struct ifnet *ifp0, struct mbuf *m, int iphlen,
680 /* const */ union sockaddr_union *dst, int poff)
681{
682 struct tdb *tdb;
683 uint16_t cpi;
684 uint32_t spi;
685 uint8_t proto;
686
687 /* ipsec_common_input checks for 8 bytes of input, so we do too */
688 if (m->m_pkthdrM_dat.MH.MH_pkthdr.len < iphlen + 2 * sizeof(u_int32_t))
689 return (m); /* decline */
690
691 proto = *(mtod(m, uint8_t *)((uint8_t *)((m)->m_hdr.mh_data)) + poff);
692 /* i'm not a huge fan of how these headers get picked at */
693 switch (proto) {
694 case IPPROTO_ESP50:
695 m_copydata(m, iphlen, sizeof(spi), &spi);
696 break;
697 case IPPROTO_AH51:
698 m_copydata(m, iphlen + sizeof(uint32_t), sizeof(spi), &spi);
699 break;
700 case IPPROTO_IPCOMP108:
701 m_copydata(m, iphlen + sizeof(uint16_t), sizeof(cpi), &cpi);
702 spi = htonl(ntohs(cpi))(__uint32_t)(__builtin_constant_p((__uint16_t)(__builtin_constant_p
(cpi) ? (__uint16_t)(((__uint16_t)(cpi) & 0xffU) <<
8 | ((__uint16_t)(cpi) & 0xff00U) >> 8) : __swap16md
(cpi))) ? (__uint32_t)(((__uint32_t)((__uint16_t)(__builtin_constant_p
(cpi) ? (__uint16_t)(((__uint16_t)(cpi) & 0xffU) <<
8 | ((__uint16_t)(cpi) & 0xff00U) >> 8) : __swap16md
(cpi))) & 0xff) << 24 | ((__uint32_t)((__uint16_t)(
__builtin_constant_p(cpi) ? (__uint16_t)(((__uint16_t)(cpi) &
0xffU) << 8 | ((__uint16_t)(cpi) & 0xff00U) >>
8) : __swap16md(cpi))) & 0xff00) << 8 | ((__uint32_t
)((__uint16_t)(__builtin_constant_p(cpi) ? (__uint16_t)(((__uint16_t
)(cpi) & 0xffU) << 8 | ((__uint16_t)(cpi) & 0xff00U
) >> 8) : __swap16md(cpi))) & 0xff0000) >> 8 |
((__uint32_t)((__uint16_t)(__builtin_constant_p(cpi) ? (__uint16_t
)(((__uint16_t)(cpi) & 0xffU) << 8 | ((__uint16_t)(
cpi) & 0xff00U) >> 8) : __swap16md(cpi))) & 0xff000000
) >> 24) : __swap32md((__uint16_t)(__builtin_constant_p
(cpi) ? (__uint16_t)(((__uint16_t)(cpi) & 0xffU) <<
8 | ((__uint16_t)(cpi) & 0xff00U) >> 8) : __swap16md
(cpi))))
;
703 break;
704 default:
705 return (m); /* decline */
706 }
707
708 tdb = gettdb(m->m_pkthdrM_dat.MH.MH_pkthdr.ph_rtableid, spi, dst, proto);
709 if (tdb != NULL((void *)0) && !ISSET(tdb->tdb_flags, TDBF_INVALID)((tdb->tdb_flags) & (TDBF_INVALID)) &&
710 tdb->tdb_xform != NULL((void *)0)) {
711 if (tdb->tdb_first_use == 0) {
712 tdb->tdb_first_use = gettime();
713 if (ISSET(tdb->tdb_flags, TDBF_FIRSTUSE)((tdb->tdb_flags) & (TDBF_FIRSTUSE))) {
714 timeout_add_sec(&tdb->tdb_first_tmo,
715 tdb->tdb_exp_first_use);
716 }
717 if (ISSET(tdb->tdb_flags, TDBF_SOFT_FIRSTUSE)((tdb->tdb_flags) & (TDBF_SOFT_FIRSTUSE))) {
718 timeout_add_sec(&tdb->tdb_sfirst_tmo,
719 tdb->tdb_soft_first_use);
720 }
721 }
722
723 (*(tdb->tdb_xform->xf_input))(m, tdb, iphlen, poff);
724 return (NULL((void *)0));
725 }
726
727 return (m);
728}
729
730static struct mbuf *
731veb_ipsec_ipv4_in(struct ifnet *ifp0, struct mbuf *m)
732{
733 union sockaddr_union su = {
734 .sin.sin_len = sizeof(su.sin),
735 .sin.sin_family = AF_INET2,
736 };
737 struct ip *ip;
738 int iphlen;
739
740 if (m->m_lenm_hdr.mh_len < sizeof(*ip)) {
741 m = m_pullup(m, sizeof(*ip));
742 if (m == NULL((void *)0))
743 return (NULL((void *)0));
744 }
745
746 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
747 iphlen = ip->ip_hl << 2;
748 if (iphlen < sizeof(*ip)) {
749 /* this is a weird packet, decline */
750 return (m);
751 }
752
753 su.sin.sin_addr = ip->ip_dst;
754
755 return (veb_ipsec_proto_in(ifp0, m, iphlen, &su,
756 offsetof(struct ip, ip_p)__builtin_offsetof(struct ip, ip_p)));
757}
758
759#ifdef INET6
760static struct mbuf *
761veb_ipsec_ipv6_in(struct ifnet *ifp0, struct mbuf *m)
762{
763 union sockaddr_union su = {
764 .sin6.sin6_len = sizeof(su.sin6),
765 .sin6.sin6_family = AF_INET624,
766 };
767 struct ip6_hdr *ip6;
768
769 if (m->m_lenm_hdr.mh_len < sizeof(*ip6)) {
770 m = m_pullup(m, sizeof(*ip6));
771 if (m == NULL((void *)0))
772 return (NULL((void *)0));
773 }
774
775 ip6 = mtod(m, struct ip6_hdr *)((struct ip6_hdr *)((m)->m_hdr.mh_data));
776
777 su.sin6.sin6_addr = ip6->ip6_dst;
778
779 /* XXX scope? */
780
781 return (veb_ipsec_proto_in(ifp0, m, sizeof(*ip6), &su,
782 offsetof(struct ip6_hdr, ip6_nxt)__builtin_offsetof(struct ip6_hdr, ip6_ctlun.ip6_un1.ip6_un1_nxt
)
));
783}
784#endif /* INET6 */
785
786static struct mbuf *
787veb_ipsec_in(struct ifnet *ifp0, struct mbuf *m)
788{
789 struct mbuf *(*ipsec_ip_in)(struct ifnet *, struct mbuf *);
790 struct ether_header *eh, copy;
791
792 if (ifp0->if_enqueue == vport_enqueue)
793 return (m);
794
795 eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data));
796 switch (ntohs(eh->ether_type)(__uint16_t)(__builtin_constant_p(eh->ether_type) ? (__uint16_t
)(((__uint16_t)(eh->ether_type) & 0xffU) << 8 | (
(__uint16_t)(eh->ether_type) & 0xff00U) >> 8) : __swap16md
(eh->ether_type))
) {
797 case ETHERTYPE_IP0x0800:
798 ipsec_ip_in = veb_ipsec_ipv4_in;
799 break;
800#ifdef INET6
801 case ETHERTYPE_IPV60x86DD:
802 ipsec_ip_in = veb_ipsec_ipv6_in;
803 break;
804#endif /* INET6 */
805 default:
806 return (m);
807 }
808
809 copy = *eh;
810 m_adj(m, sizeof(*eh));
811
812 m = (*ipsec_ip_in)(ifp0, m);
813 if (m == NULL((void *)0))
814 return (NULL((void *)0));
815
816 m = m_prepend(m, sizeof(*eh), M_DONTWAIT0x0002);
817 if (m == NULL((void *)0))
818 return (NULL((void *)0));
819
820 eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data));
821 *eh = copy;
822
823 return (m);
824}
825
826static struct mbuf *
827veb_ipsec_proto_out(struct mbuf *m, sa_family_t af, int iphlen)
828{
829 struct tdb *tdb;
830 int error;
831#if NPF > 0
832 struct ifnet *encifp;
833#endif
834
835 tdb = ipsp_spd_lookup(m, af, iphlen, &error, IPSP_DIRECTION_OUT,
836 NULL((void *)0), NULL((void *)0), NULL((void *)0));
837 if (tdb == NULL((void *)0))
838 return (m);
839
840#if NPF > 0
841 encifp = enc_getif(tdb->tdb_rdomain, tdb->tdb_tap);
842 if (encifp != NULL((void *)0)) {
843 if (pf_test(af, PF_OUT, encifp, &m) != PF_PASS) {
844 m_freem(m);
845 return (NULL((void *)0));
846 }
847 if (m == NULL((void *)0))
848 return (NULL((void *)0));
849 }
850#endif /* NPF > 0 */
851
852 /* XXX mtu checks */
853
854 (void)ipsp_process_packet(m, tdb, af, 0);
855 return (NULL((void *)0));
856}
857
858static struct mbuf *
859veb_ipsec_ipv4_out(struct mbuf *m)
860{
861 struct ip *ip;
862 int iphlen;
863
864 if (m->m_lenm_hdr.mh_len < sizeof(*ip)) {
865 m = m_pullup(m, sizeof(*ip));
866 if (m == NULL((void *)0))
867 return (NULL((void *)0));
868 }
869
870 ip = mtod(m, struct ip *)((struct ip *)((m)->m_hdr.mh_data));
871 iphlen = ip->ip_hl << 2;
872 if (iphlen < sizeof(*ip)) {
873 /* this is a weird packet, decline */
874 return (m);
875 }
876
877 return (veb_ipsec_proto_out(m, AF_INET2, iphlen));
878}
879
880#ifdef INET6
881static struct mbuf *
882veb_ipsec_ipv6_out(struct mbuf *m)
883{
884 return (veb_ipsec_proto_out(m, AF_INET624, sizeof(struct ip6_hdr)));
885}
886#endif /* INET6 */
887
888static struct mbuf *
889veb_ipsec_out(struct ifnet *ifp0, struct mbuf *m)
890{
891 struct mbuf *(*ipsec_ip_out)(struct mbuf *);
892 struct ether_header *eh, copy;
893
894 if (ifp0->if_enqueue == vport_enqueue)
895 return (m);
896
897 eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data));
898 switch (ntohs(eh->ether_type)(__uint16_t)(__builtin_constant_p(eh->ether_type) ? (__uint16_t
)(((__uint16_t)(eh->ether_type) & 0xffU) << 8 | (
(__uint16_t)(eh->ether_type) & 0xff00U) >> 8) : __swap16md
(eh->ether_type))
) {
899 case ETHERTYPE_IP0x0800:
900 ipsec_ip_out = veb_ipsec_ipv4_out;
901 break;
902#ifdef INET6
903 case ETHERTYPE_IPV60x86DD:
904 ipsec_ip_out = veb_ipsec_ipv6_out;
905 break;
906#endif /* INET6 */
907 default:
908 return (m);
909 }
910
911 copy = *eh;
912 m_adj(m, sizeof(*eh));
913
914 m = (*ipsec_ip_out)(m);
915 if (m == NULL((void *)0))
916 return (NULL((void *)0));
917
918 m = m_prepend(m, sizeof(*eh), M_DONTWAIT0x0002);
919 if (m == NULL((void *)0))
920 return (NULL((void *)0));
921
922 eh = mtod(m, struct ether_header *)((struct ether_header *)((m)->m_hdr.mh_data));
923 *eh = copy;
924
925 return (m);
926}
927#endif /* IPSEC */
928
929static void
930veb_broadcast(struct veb_softc *sc, struct veb_port *rp, struct mbuf *m0,
931 uint64_t src, uint64_t dst)
932{
933 struct ifnet *ifp = &sc->sc_if;
934 struct veb_ports *pm;
935 struct veb_port **ps;
936 struct veb_port *tp;
937 struct ifnet *ifp0;
938 struct mbuf *m;
939 unsigned int i;
940
941#if NPF > 0
942 /*
943 * we couldn't find a specific port to send this packet to,
944 * but pf should still have a chance to apply policy to it.
945 * let pf look at it, but use the veb interface as a proxy.
946 */
947 if (ISSET(ifp->if_flags, IFF_LINK1)((ifp->if_flags) & (0x2000)) &&
948 (m0 = veb_pf(ifp, PF_OUT, m0)) == NULL((void *)0))
949 return;
950#endif
951
952#if 0 && defined(IPSEC)
953 /* same goes for ipsec */
954 if (ISSET(ifp->if_flags, IFF_LINK2)((ifp->if_flags) & (0x4000)) &&
955 (m0 = veb_ipsec_out(ifp, m0)) == NULL((void *)0))
956 return;
957#endif
958
959 counters_pkt(ifp->if_counters, ifc_opackets, ifc_obytes,
960 m0->m_pkthdrM_dat.MH.MH_pkthdr.len);
961
962 smr_read_enter();
963 pm = SMR_PTR_GET(&sc->sc_ports)({ typeof(*&sc->sc_ports) __tmp = *(volatile typeof(*&
sc->sc_ports) *)&(*&sc->sc_ports); membar_datadep_consumer
(); __tmp; })
;
964 if (__predict_true(pm != NULL)__builtin_expect(((pm != ((void *)0)) != 0), 1))
965 refcnt_take(&pm->m_refs);
966 smr_read_leave();
967 if (__predict_false(pm == NULL)__builtin_expect(((pm == ((void *)0)) != 0), 0))
968 goto done;
969
970 ps = veb_ports_array(pm);
971 for (i = 0; i < pm->m_count; i++) {
972 tp = ps[i];
973
974 if (rp == tp || (rp->p_protected & tp->p_protected)) {
975 /*
976 * don't let Ethernet packets hairpin or
977 * move between ports in the same protected
978 * domain(s).
979 */
980 continue;
981 }
982
983 ifp0 = tp->p_ifp0;
984 if (!ISSET(ifp0->if_flags, IFF_RUNNING)((ifp0->if_flags) & (0x40))) {
985 /* don't waste time */
986 continue;
987 }
988
989 if (!ISSET(tp->p_bif_flags, IFBIF_DISCOVER)((tp->p_bif_flags) & (0x0002)) &&
990 !ISSET(m0->m_flags, M_BCAST | M_MCAST)((m0->m_hdr.mh_flags) & (0x0100 | 0x0200))) {
991 /* don't flood unknown unicast */
992 continue;
993 }
994
995 if (veb_rule_filter(tp, VEB_RULE_LIST_OUT0, m0, src, dst))
996 continue;
997
998 m = m_dup_pkt(m0, max_linkhdr + ETHER_ALIGN2, M_NOWAIT0x0002);
999 if (m == NULL((void *)0)) {
1000 /* XXX count error? */
1001 continue;
1002 }
1003
1004 (*tp->p_enqueue)(ifp0, m); /* XXX count error */
1005 }
1006 refcnt_rele_wake(&pm->m_refs);
1007
1008done:
1009 m_freem(m0);
1010}
1011
1012static struct mbuf *
1013veb_transmit(struct veb_softc *sc, struct veb_port *rp, struct veb_port *tp,
1014 struct mbuf *m, uint64_t src, uint64_t dst)
1015{
1016 struct ifnet *ifp = &sc->sc_if;
1017 struct ifnet *ifp0;
1018
1019 if (tp == NULL)
1020 return (m);
1021
1022 if (rp == tp || (rp->p_protected & tp->p_protected)) {
1023 /*
1024 * don't let Ethernet packets hairpin or move between
1025 * ports in the same protected domain(s).
1026 */
1027 goto drop;
1028 }
1029
1030 if (veb_rule_filter(tp, VEB_RULE_LIST_OUT, m, src, dst))
1031 goto drop;
1032
1033 ifp0 = tp->p_ifp0;
1034
1035#if 0 && defined(IPSEC)
1036 if (ISSET(ifp->if_flags, IFF_LINK2) &&
1037 (m = veb_ipsec_out(ifp0, m0)) == NULL)
1038 return;
1039#endif
1040
1041#if NPF > 0
1042 if (ISSET(ifp->if_flags, IFF_LINK1) &&
1043 (m = veb_pf(ifp0, PF_OUT, m)) == NULL)
1044 return (NULL);
1045#endif
1046
1047 counters_pkt(ifp->if_counters, ifc_opackets, ifc_obytes,
1048 m->m_pkthdr.len);
1049
1050 (*tp->p_enqueue)(ifp0, m); /* XXX count error */
1051
1052 return (NULL);
1053drop:
1054 m_freem(m);
1055 return (NULL);
1056}
1057
1058static struct mbuf *
1059veb_vport_input(struct ifnet *ifp0, struct mbuf *m, uint64_t dst, void *brport)
1060{
1061 return (m);
1062}
1063
1064static struct mbuf *
1065veb_port_input(struct ifnet *ifp0, struct mbuf *m, uint64_t dst, void *brport)
1066{
1067 struct veb_port *p = brport;
1068 struct veb_softc *sc = p->p_veb;
1069 struct ifnet *ifp = &sc->sc_if;
1070 struct ether_header *eh;
1071 uint64_t src;
1072#if NBPFILTER > 0
1073 caddr_t if_bpf;
1074#endif
1075
1076 if (!ISSET(ifp->if_flags, IFF_RUNNING))
1077 return (m);
1078
1079 eh = mtod(m, struct ether_header *);
1080 src = ether_addr_to_e64((struct ether_addr *)eh->ether_shost);
1081
1082 /* Is this a MAC Bridge component Reserved address? */
1083 if (ETH64_IS_8021_RSVD(dst)) {
1084 if (!ISSET(ifp->if_flags, IFF_LINK0)) {
1085 /*
1086 * letting vlans through implies this is
1087 * an s-vlan component.
1088 */
1089 goto drop;
1090 }
1091
1092 /* look at the last nibble of the 802.1 reserved address */
1093 switch (dst & 0xf) {
1094 case 0x0: /* Nearest Customer Bridge Group Address */
1095 case 0xb: /* EDE-SS PEP (IEEE Std 802.1AEcg) */
1096 case 0xc: /* reserved */
1097 case 0xd: /* Provider Bridge MVRP Address */
1098 case 0xf: /* reserved */
1099 break;
1100 default:
1101 goto drop;
1102 }
1103 }
1104
1105#if NVLAN > 0
1106 /*
1107 * If the underlying interface removed the VLAN header itself,
1108 * add it back.
1109 */
1110 if (ISSET(m->m_flags, M_VLANTAG)) {
1111 m = vlan_inject(m, ETHERTYPE_VLAN, m->m_pkthdr.ether_vtag);
1112 if (m == NULL) {
1113 counters_inc(ifp->if_counters, ifc_ierrors);
1114 goto drop;
1115 }
1116 }
1117#endif
1118
1119 counters_pkt(ifp->if_counters, ifc_ipackets, ifc_ibytes,
1120 m->m_pkthdr.len);
1121
1122 /* force packets into the one routing domain for pf */
1123 m->m_pkthdr.ph_rtableid = ifp->if_rdomain;
1124
1125#if NBPFILTER > 0
1126 if_bpf = READ_ONCE(ifp->if_bpf);
1127 if (if_bpf != NULL) {
1128 if (bpf_mtap_ether(if_bpf, m, 0) != 0)
1129 goto drop;
1130 }
1131#endif
1132
1133 veb_span(sc, m);
1134
1135 if (ISSET(p->p_bif_flags, IFBIF_BLOCKNONIP) &&
1136 veb_ip_filter(m))
1137 goto drop;
1138
1139 if (!ISSET(ifp->if_flags, IFF_LINK0) &&
1140 veb_vlan_filter(m))
1141 goto drop;
1142
1143 if (veb_rule_filter(p, VEB_RULE_LIST_IN, m, src, dst))
1144 goto drop;
1145
1146#if NPF > 0
1147 if (ISSET(ifp->if_flags, IFF_LINK1) &&
1148 (m = veb_pf(ifp0, PF_IN, m)) == NULL)
1149 return (NULL);
1150#endif
1151
1152#if 0 && defined(IPSEC)
1153 if (ISSET(ifp->if_flags, IFF_LINK2) &&
1154 (m = veb_ipsec_in(ifp0, m)) == NULL)
1155 return (NULL);
1156#endif
1157
1158 eh = mtod(m, struct ether_header *);
Value stored to 'eh' is never read
1159
1160 if (ISSET(p->p_bif_flags, IFBIF_LEARNING))
1161 etherbridge_map(&sc->sc_eb, p, src);
1162
1163 CLR(m->m_flags, M_BCAST|M_MCAST);
1164
1165 if (!ETH64_IS_MULTICAST(dst)) {
1166 struct veb_port *tp = NULL;
1167
1168 smr_read_enter();
1169 tp = etherbridge_resolve(&sc->sc_eb, dst);
1170 if (tp != NULL)
1171 veb_eb_port_take(NULL, tp);
1172 smr_read_leave();
1173 if (tp != NULL) {
1174 m = veb_transmit(sc, p, tp, m, src, dst);
1175 veb_eb_port_rele(NULL, tp);
1176 }
1177
1178 if (m == NULL)
1179 return (NULL);
1180
1181 /* unknown unicast address */
1182 } else {
1183 SET(m->m_flags, ETH64_IS_BROADCAST(dst) ? M_BCAST : M_MCAST);
1184 }
1185
1186 veb_broadcast(sc, p, m, src, dst);
1187 return (NULL);
1188
1189drop:
1190 m_freem(m);
1191 return (NULL);
1192}
1193
1194static void
1195veb_input(struct ifnet *ifp, struct mbuf *m)
1196{
1197 m_freem(m);
1198}
1199
1200static int
1201veb_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
1202 struct rtentry *rt)
1203{
1204 m_freem(m);
1205 return (ENODEV19);
1206}
1207
1208static int
1209veb_enqueue(struct ifnet *ifp, struct mbuf *m)
1210{
1211 m_freem(m);
1212 return (ENODEV19);
1213}
1214
1215static void
1216veb_start(struct ifqueue *ifq)
1217{
1218 ifq_purge(ifq);
1219}
1220
1221static int
1222veb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1223{
1224 struct veb_softc *sc = ifp->if_softc;
1225 struct ifbrparam *bparam = (struct ifbrparam *)data;
1226 int error = 0;
1227
1228 if (sc->sc_dead)
1229 return (ENXIO6);
1230
1231 switch (cmd) {
1232 case SIOCSIFFLAGS((unsigned long)0x80000000 | ((sizeof(struct ifreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((16)))
:
1233 if (ISSET(ifp->if_flags, IFF_UP)((ifp->if_flags) & (0x1))) {
1234 if (!ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
1235 error = veb_up(sc);
1236 } else {
1237 if (ISSET(ifp->if_flags, IFF_RUNNING)((ifp->if_flags) & (0x40)))
1238 error = veb_down(sc);
1239 }
1240 break;
1241
1242 case SIOCBRDGADD((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((60)))
:
1243 error = suser(curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
);
1244 if (error != 0)
1245 break;
1246
1247 error = veb_add_port(sc, (struct ifbreq *)data, 0);
1248 break;
1249 case SIOCBRDGADDS((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((65)))
:
1250 error = suser(curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
);
1251 if (error != 0)
1252 break;
1253
1254 error = veb_add_port(sc, (struct ifbreq *)data, 1);
1255 break;
1256 case SIOCBRDGDEL((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((61)))
:
1257 error = suser(curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
);
1258 if (error != 0)
1259 break;
1260
1261 error = veb_del_port(sc, (struct ifbreq *)data, 0);
1262 break;
1263 case SIOCBRDGDELS((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((66)))
:
1264 error = suser(curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
);
1265 if (error != 0)
1266 break;
1267
1268 error = veb_del_port(sc, (struct ifbreq *)data, 1);
1269 break;
1270
1271 case SIOCBRDGSCACHE((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((64)))
:
1272 error = suser(curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
);
1273 if (error != 0)
1274 break;
1275
1276 error = etherbridge_set_max(&sc->sc_eb, bparam);
1277 break;
1278 case SIOCBRDGGCACHE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbrparam) & 0x1fff) << 16) | ((('i')) <<
8) | ((65)))
:
1279 error = etherbridge_get_max(&sc->sc_eb, bparam);
1280 break;
1281
1282 case SIOCBRDGSTO((unsigned long)0x80000000 | ((sizeof(struct ifbrparam) &
0x1fff) << 16) | ((('i')) << 8) | ((69)))
:
1283 error = suser(curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc
);
1284 if (error != 0)
1285 break;
1286
1287 error = etherbridge_set_tmo(&sc->sc_eb, bparam);
1288 break;
1289 case SIOCBRDGGTO(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbrparam) & 0x1fff) << 16) | ((('i')) <<
8) | ((70)))
:
1290 error = etherbridge_get_tmo(&sc->sc_eb, bparam);
1291 break;
1292
1293 case SIOCBRDGRTS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbaconf) & 0x1fff) << 16) | ((('i')) <<
8) | ((67)))
:
1294 error = etherbridge_rtfind(&sc->sc_eb, (struct ifbaconf *)data);
1295 break;
1296 case SIOCBRDGIFS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((66)))
:
1297 error = veb_port_list(sc, (struct ifbifconf *)data);
1298 break;
1299 case SIOCBRDGFLUSH((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((72)))
:
1300 etherbridge_flush(&sc->sc_eb,
1301 ((struct ifbreq *)data)->ifbr_ifsflags);
1302 break;
1303 case SIOCBRDGSADDR(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbareq) & 0x1fff) << 16) | ((('i')) <<
8) | ((68)))
:
1304 error = veb_add_addr(sc, (struct ifbareq *)data);
1305 break;
1306 case SIOCBRDGDADDR((unsigned long)0x80000000 | ((sizeof(struct ifbareq) & 0x1fff
) << 16) | ((('i')) << 8) | ((71)))
:
1307 error = veb_del_addr(sc, (struct ifbareq *)data);
1308 break;
1309
1310 case SIOCBRDGSIFPROT((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((74)))
:
1311 error = veb_port_set_protected(sc, (struct ifbreq *)data);
1312 break;
1313
1314 case SIOCBRDGSIFFLGS((unsigned long)0x80000000 | ((sizeof(struct ifbreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((63)))
:
1315 error = veb_port_set_flags(sc, (struct ifbreq *)data);
1316 break;
1317 case SIOCBRDGGIFFLGS(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbreq) & 0x1fff) << 16) | ((('i')) <<
8) | ((62)))
:
1318 error = veb_port_get_flags(sc, (struct ifbreq *)data);
1319 break;
1320
1321 case SIOCBRDGARL((unsigned long)0x80000000 | ((sizeof(struct ifbrlreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((77)))
:
1322 error = veb_rule_add(sc, (struct ifbrlreq *)data);
1323 break;
1324 case SIOCBRDGFRL((unsigned long)0x80000000 | ((sizeof(struct ifbrlreq) & 0x1fff
) << 16) | ((('i')) << 8) | ((78)))
:
1325 error = veb_rule_list_flush(sc, (struct ifbrlreq *)data);
1326 break;
1327 case SIOCBRDGGRL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct ifbrlconf) & 0x1fff) << 16) | ((('i')) <<
8) | ((79)))
:
1328 error = veb_rule_list_get(sc, (struct ifbrlconf *)data);
1329 break;
1330
1331 default:
1332 error = ENOTTY25;
1333 break;
1334 }
1335
1336 if (error == ENETRESET52)
1337 error = veb_iff(sc);
1338
1339 return (error);
1340}
1341
1342static struct veb_ports *
1343veb_ports_insert(struct veb_ports *om, struct veb_port *p)
1344{
1345 struct veb_ports *nm;
1346 struct veb_port **nps, **ops;
1347 unsigned int ocount = om != NULL((void *)0) ? om->m_count : 0;
1348 unsigned int ncount = ocount + 1;
1349 unsigned int i;
1350
1351 nm = malloc(veb_ports_size(ncount), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
1352
1353 refcnt_init(&nm->m_refs);
1354 nm->m_count = ncount;
1355
1356 nps = veb_ports_array(nm);
1357
1358 if (om != NULL((void *)0)) {
1359 ops = veb_ports_array(om);
1360 for (i = 0; i < ocount; i++) {
1361 struct veb_port *op = ops[i];
1362 refcnt_take(&op->p_refs);
1363 nps[i] = op;
1364 }
1365 } else
1366 i = 0;
1367
1368 refcnt_take(&p->p_refs);
1369 nps[i] = p;
1370
1371 return (nm);
1372}
1373
1374static struct veb_ports *
1375veb_ports_remove(struct veb_ports *om, struct veb_port *p)
1376{
1377 struct veb_ports *nm;
1378 struct veb_port **nps, **ops;
1379 unsigned int ocount = om->m_count;
1380 unsigned int ncount = ocount - 1;
1381 unsigned int i, j;
1382
1383 if (ncount == 0)
1384 return (NULL((void *)0));
1385
1386 nm = malloc(veb_ports_size(ncount), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
1387
1388 refcnt_init(&nm->m_refs);
1389 nm->m_count = ncount;
1390
1391 nps = veb_ports_array(nm);
1392 j = 0;
1393
1394 ops = veb_ports_array(om);
1395 for (i = 0; i < ocount; i++) {
1396 struct veb_port *op = ops[i];
1397 if (op == p)
1398 continue;
1399
1400 refcnt_take(&op->p_refs);
1401 nps[j++] = op;
1402 }
1403 KASSERT(j == ncount)((j == ncount) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/net/if_veb.c"
, 1403, "j == ncount"))
;
1404
1405 return (nm);
1406}
1407
1408static inline void
1409veb_ports_free(struct veb_ports *m)
1410{
1411 free(m, M_DEVBUF2, veb_ports_size(m->m_count));
1412}
1413
1414static void
1415veb_ports_destroy(struct veb_ports *m)
1416{
1417 struct veb_port **ps = veb_ports_array(m);
1418 unsigned int i;
1419
1420 for (i = 0; i < m->m_count; i++) {
1421 struct veb_port *p = ps[i];
1422 refcnt_rele_wake(&p->p_refs);
1423 }
1424
1425 veb_ports_free(m);
1426}
1427
1428static int
1429veb_add_port(struct veb_softc *sc, const struct ifbreq *req, unsigned int span)
1430{
1431 struct ifnet *ifp = &sc->sc_if;
1432 struct ifnet *ifp0;
1433 struct veb_ports **ports_ptr;
1434 struct veb_ports *om, *nm;
1435 struct veb_port *p;
1436 int isvport;
1437 int error;
1438
1439 NET_ASSERT_LOCKED()do { int _s = rw_status(&netlock); if ((splassert_ctl >
0) && (_s != 0x0001UL && _s != 0x0002UL)) splassert_fail
(0x0002UL, _s, __func__); } while (0)
;
1440
1441 ifp0 = if_unit(req->ifbr_ifsname);
1442 if (ifp0 == NULL((void *)0))
1443 return (EINVAL22);
1444
1445 if (ifp0->if_typeif_data.ifi_type != IFT_ETHER0x06) {
1446 error = EPROTONOSUPPORT43;
1447 goto put;
1448 }
1449
1450 if (ifp0 == ifp) {
1451 error = EPROTONOSUPPORT43;
1452 goto put;
1453 }
1454
1455 isvport = (ifp0->if_enqueue == vport_enqueue);
1456
1457 error = ether_brport_isset(ifp0);
1458 if (error != 0)
1459 goto put;
1460
1461 /* let's try */
1462
1463 p = malloc(sizeof(*p), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008|M_CANFAIL0x0004);
1464 if (p == NULL((void *)0)) {
1465 error = ENOMEM12;
1466 goto put;
1467 }
1468
1469 ifsetlro(ifp0, 0);
1470
1471 p->p_ifp0 = ifp0;
1472 p->p_veb = sc;
1473
1474 refcnt_init(&p->p_refs);
1475 TAILQ_INIT(&p->p_vrl)do { (&p->p_vrl)->tqh_first = ((void *)0); (&p->
p_vrl)->tqh_last = &(&p->p_vrl)->tqh_first; }
while (0)
;
1476 SMR_TAILQ_INIT(&p->p_vr_list[0])do { (&p->p_vr_list[0])->smr_tqh_first = ((void *)0
); (&p->p_vr_list[0])->smr_tqh_last = &(&p->
p_vr_list[0])->smr_tqh_first; } while (0)
;
1477 SMR_TAILQ_INIT(&p->p_vr_list[1])do { (&p->p_vr_list[1])->smr_tqh_first = ((void *)0
); (&p->p_vr_list[1])->smr_tqh_last = &(&p->
p_vr_list[1])->smr_tqh_first; } while (0)
;
1478
1479 p->p_enqueue = isvport ? vport_if_enqueue : if_enqueue;
1480 p->p_ioctl = ifp0->if_ioctl;
1481 p->p_output = ifp0->if_output;
1482
1483 if (span) {
1484 ports_ptr = &sc->sc_spans;
1485
1486 if (isvport) {
1487 error = EPROTONOSUPPORT43;
1488 goto free;
1489 }
1490
1491 p->p_brport.eb_input = veb_span_input;
1492 p->p_bif_flags = IFBIF_SPAN0x0100;
1493 } else {
1494 ports_ptr = &sc->sc_ports;
1495
1496 error = ifpromisc(ifp0, 1);
1497 if (error != 0)
1498 goto free;
1499
1500 p->p_bif_flags = IFBIF_LEARNING0x0001 | IFBIF_DISCOVER0x0002;
1501 p->p_brport.eb_input = isvport ?
1502 veb_vport_input : veb_port_input;
1503 }
1504
1505 p->p_brport.eb_port_take = veb_eb_brport_take;
1506 p->p_brport.eb_port_rele = veb_eb_brport_rele;
1507
1508 om = SMR_PTR_GET_LOCKED(ports_ptr)(*(ports_ptr));
1509 nm = veb_ports_insert(om, p);
1510
1511 /* this might have changed if we slept for malloc or ifpromisc */
1512 error = ether_brport_isset(ifp0);
1513 if (error != 0)
1514 goto unpromisc;
1515
1516 task_set(&p->p_ltask, veb_p_linkch, p);
1517 if_linkstatehook_add(ifp0, &p->p_ltask);
1518
1519 task_set(&p->p_dtask, veb_p_detach, p);
1520 if_detachhook_add(ifp0, &p->p_dtask);
1521
1522 p->p_brport.eb_port = p;
1523
1524 /* commit */
1525 SMR_PTR_SET_LOCKED(ports_ptr, nm)do { do { __asm volatile("" ::: "memory"); } while (0); ({ typeof
(*ports_ptr) __tmp = (nm); *(volatile typeof(*ports_ptr) *)&
(*ports_ptr) = __tmp; __tmp; }); } while (0)
;
1526
1527 ether_brport_set(ifp0, &p->p_brport);
1528 if (!isvport) { /* vport is special */
1529 ifp0->if_ioctl = veb_p_ioctl;
1530 ifp0->if_output = veb_p_output;
1531 }
1532
1533 veb_p_linkch(p);
1534
1535 /* clean up the old veb_ports map */
1536 smr_barrier()smr_barrier_impl(0);
1537 if (om != NULL((void *)0)) {
1538 refcnt_finalize(&om->m_refs, "vebports");
1539 veb_ports_destroy(om);
1540 }
1541
1542 return (0);
1543
1544unpromisc:
1545 if (!span)
1546 ifpromisc(ifp0, 0);
1547free:
1548 free(p, M_DEVBUF2, sizeof(*p));
1549put:
1550 if_put(ifp0);
1551 return (error);
1552}
1553
1554static struct veb_port *
1555veb_trunkport(struct veb_softc *sc, const char *name, unsigned int span)
1556{
1557 struct veb_ports *m;
1558 struct veb_port **ps;
1559 struct veb_port *p;
1560 unsigned int i;
1561
1562 m = SMR_PTR_GET_LOCKED(span ? &sc->sc_spans : &sc->sc_ports)(*(span ? &sc->sc_spans : &sc->sc_ports));
1563 if (m == NULL((void *)0))
1564 return (NULL((void *)0));
1565
1566 ps = veb_ports_array(m);
1567 for (i = 0; i < m->m_count; i++) {
1568 p = ps[i];
1569
1570 if (strncmp(p->p_ifp0->if_xname, name, IFNAMSIZ16) == 0)
1571 return (p);
1572 }
1573
1574 return (NULL((void *)0));
1575}
1576
1577static int
1578veb_del_port(struct veb_softc *sc, const struct ifbreq *req, unsigned int span)
1579{
1580 struct veb_port *p;
1581
1582 NET_ASSERT_LOCKED();
1583 p = veb_trunkport(sc, req->ifbr_ifsname, span);
1584 if (p == NULL)
1585 return (EINVAL);
1586
1587 veb_p_dtor(sc, p);
1588
1589 return (0);
1590}
1591
1592static struct veb_port *
1593veb_port_get(struct veb_softc *sc, const char *name)
1594{
1595 struct veb_ports *m;
1596 struct veb_port **ps;
1597 struct veb_port *p;
1598 unsigned int i;
1599
1600 NET_ASSERT_LOCKED();
1601
1602 m = SMR_PTR_GET_LOCKED(&sc->sc_ports);
1603 if (m == NULL)
1604 return (NULL);
1605
1606 ps = veb_ports_array(m);
1607 for (i = 0; i < m->m_count; i++) {
1608 p = ps[i];
1609
1610 if (strncmp(p->p_ifp0->if_xname, name, IFNAMSIZ) == 0) {
1611 refcnt_take(&p->p_refs);
1612 return (p);
1613 }
1614 }
1615
1616 return (NULL);
1617}
1618
1619static void
1620veb_port_put(struct veb_softc *sc, struct veb_port *p)
1621{
1622 refcnt_rele_wake(&p->p_refs);
1623}
1624
1625static int
1626veb_port_set_protected(struct veb_softc *sc, const struct ifbreq *ifbr)
1627{
1628 struct veb_port *p;
1629
1630 p = veb_port_get(sc, ifbr->ifbr_ifsname);
1631 if (p == NULL)
1632 return (ESRCH);
1633
1634 p->p_protected = ifbr->ifbr_protected;
1635 veb_port_put(sc, p);
1636
1637 return (0);
1638}
1639
1640static int
1641veb_rule_add(struct veb_softc *sc, const struct ifbrlreq *ifbr)
1642{
1643 const struct ifbrarpf *brla = &ifbr->ifbr_arpf;
1644 struct veb_rule vr, *vrp;
1645 struct veb_port *p;
1646 int error;
1647
1648 memset(&vr, 0, sizeof(vr));
1649
1650 switch (ifbr->ifbr_action) {
1651 case BRL_ACTION_BLOCK:
1652 vr.vr_action = VEB_R_BLOCK;
1653 break;
1654 case BRL_ACTION_PASS:
1655 vr.vr_action = VEB_R_PASS;
1656 break;
1657 /* XXX VEB_R_MATCH */
1658 default:
1659 return (EINVAL);
1660 }
1661
1662 if (!ISSET(ifbr->ifbr_flags, BRL_FLAG_IN|BRL_FLAG_OUT))
1663 return (EINVAL);
1664 if (ISSET(ifbr->ifbr_flags, BRL_FLAG_IN))
1665 SET(vr.vr_flags, VEB_R_F_IN);
1666 if (ISSET(ifbr->ifbr_flags, BRL_FLAG_OUT))
1667 SET(vr.vr_flags, VEB_R_F_OUT);
1668
1669 if (ISSET(ifbr->ifbr_flags, BRL_FLAG_SRCVALID)) {
1670 SET(vr.vr_flags, VEB_R_F_SRC);
1671 vr.vr_src = ether_addr_to_e64(&ifbr->ifbr_src);
1672 }
1673 if (ISSET(ifbr->ifbr_flags, BRL_FLAG_DSTVALID)) {
1674 SET(vr.vr_flags, VEB_R_F_DST);
1675 vr.vr_dst = ether_addr_to_e64(&ifbr->ifbr_dst);
1676 }
1677
1678 /* ARP rule */
1679 if (ISSET(brla->brla_flags, BRLA_ARP|BRLA_RARP)) {
1680 if (ISSET(brla->brla_flags, BRLA_ARP))
1681 SET(vr.vr_flags, VEB_R_F_ARP);
1682 if (ISSET(brla->brla_flags, BRLA_RARP))
1683 SET(vr.vr_flags, VEB_R_F_RARP);
1684
1685 if (ISSET(brla->brla_flags, BRLA_SHA)) {
1686 SET(vr.vr_flags, VEB_R_F_SHA);
1687 vr.vr_arp_sha = brla->brla_sha;
1688 }
1689 if (ISSET(brla->brla_flags, BRLA_THA)) {
1690 SET(vr.vr_flags, VEB_R_F_THA);
1691 vr.vr_arp_tha = brla->brla_tha;
1692 }
1693 if (ISSET(brla->brla_flags, BRLA_SPA)) {
1694 SET(vr.vr_flags, VEB_R_F_SPA);
1695 vr.vr_arp_spa = brla->brla_spa;
1696 }
1697 if (ISSET(brla->brla_flags, BRLA_TPA)) {
1698 SET(vr.vr_flags, VEB_R_F_TPA);
1699 vr.vr_arp_tpa = brla->brla_tpa;
1700 }
1701 vr.vr_arp_op = htons(brla->brla_op);
1702 }
1703
1704 if (ifbr->ifbr_tagname[0] != '\0') {
1705#if NPF > 0
1706 vr.vr_pftag = pf_tagname2tag((char *)ifbr->ifbr_tagname, 1);
1707 if (vr.vr_pftag == 0)
1708 return (ENOMEM);
1709#else
1710 return (EINVAL);
1711#endif
1712 }
1713
1714 p = veb_port_get(sc, ifbr->ifbr_ifsname);
1715 if (p == NULL) {
1716 error = ESRCH;
1717 goto error;
1718 }
1719
1720 vrp = pool_get(&veb_rule_pool, PR_WAITOK|PR_LIMITFAIL|PR_ZERO);
1721 if (vrp == NULL) {
1722 error = ENOMEM;
1723 goto port_put;
1724 }
1725
1726 *vrp = vr;
1727
1728 /* there's one big lock on a veb for all ports */
1729 error = rw_enter(&sc->sc_rule_lock, RW_WRITE|RW_INTR);
1730 if (error != 0)
1731 goto rule_put;
1732
1733 TAILQ_INSERT_TAIL(&p->p_vrl, vrp, vr_entry);
1734 p->p_nvrl++;
1735 if (ISSET(vr.vr_flags, VEB_R_F_OUT)) {
1736 SMR_TAILQ_INSERT_TAIL_LOCKED(&p->p_vr_list[0],
1737 vrp, vr_lentry[0]);
1738 }
1739 if (ISSET(vr.vr_flags, VEB_R_F_IN)) {
1740 SMR_TAILQ_INSERT_TAIL_LOCKED(&p->p_vr_list[1],
1741 vrp, vr_lentry[1]);
1742 }
1743
1744 rw_exit(&sc->sc_rule_lock);
1745 veb_port_put(sc, p);
1746
1747 return (0);
1748
1749rule_put:
1750 pool_put(&veb_rule_pool, vrp);
1751port_put:
1752 veb_port_put(sc, p);
1753error:
1754#if NPF > 0
1755 pf_tag_unref(vr.vr_pftag);
1756#endif
1757 return (error);
1758}
1759
1760static void
1761veb_rule_list_free(struct veb_rule *nvr)
1762{
1763 struct veb_rule *vr;
1764
1765 while ((vr = nvr) != NULL) {
1766 nvr = TAILQ_NEXT(vr, vr_entry);
1767 pool_put(&veb_rule_pool, vr);
1768 }
1769}
1770
1771static int
1772veb_rule_list_flush(struct veb_softc *sc, const struct ifbrlreq *ifbr)
1773{
1774 struct veb_port *p;
1775 struct veb_rule *vr;
1776 int error;
1777
1778 p = veb_port_get(sc, ifbr->ifbr_ifsname);
1779 if (p == NULL)
1780 return (ESRCH);
1781
1782 error = rw_enter(&sc->sc_rule_lock, RW_WRITE|RW_INTR);
1783 if (error != 0) {
1784 veb_port_put(sc, p);
1785 return (error);
1786 }
1787
1788 /* take all the rules away */
1789 vr = TAILQ_FIRST(&p->p_vrl);
1790
1791 /* reset the lists and counts of rules */
1792 TAILQ_INIT(&p->p_vrl);
1793 p->p_nvrl = 0;
1794 SMR_TAILQ_INIT(&p->p_vr_list[0]);
1795 SMR_TAILQ_INIT(&p->p_vr_list[1]);
1796
1797 rw_exit(&sc->sc_rule_lock);
1798 veb_port_put(sc, p);
1799
1800 smr_barrier();
1801 veb_rule_list_free(vr);
1802
1803 return (0);
1804}
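
Note: veb_rule_list_flush() above detaches the whole rule list while holding sc_rule_lock, resets the per-direction SMR lists and the count, drops the lock, and only frees the detached chain after smr_barrier(), reusing the walk in veb_rule_list_free(). Below is a small userland sketch of that detach-then-free shape with <sys/queue.h>; struct rule and the list head are hypothetical names, not the kernel's.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct rule {
	int			r_id;
	TAILQ_ENTRY(rule)	r_entry;
};
TAILQ_HEAD(rule_list, rule);

int
main(void)
{
	struct rule_list rl = TAILQ_HEAD_INITIALIZER(rl);
	struct rule *r, *nr;
	int i;

	for (i = 0; i < 3; i++) {
		r = calloc(1, sizeof(*r));
		if (r == NULL)
			abort();
		r->r_id = i;
		TAILQ_INSERT_TAIL(&rl, r, r_entry);
	}

	/* "take all the rules away": remember the first element... */
	r = TAILQ_FIRST(&rl);

	/* ...then reset the list head, as the flush does under its lock */
	TAILQ_INIT(&rl);

	/* the kernel waits for smr_barrier() before this walk and free */
	while ((nr = r) != NULL) {
		r = TAILQ_NEXT(nr, r_entry);
		printf("freeing rule %d\n", nr->r_id);
		free(nr);
	}

	return (0);
}
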
1805
1806static void
1807veb_rule2ifbr(struct ifbrlreq *ifbr, const struct veb_rule *vr)
1808{
1809 switch (vr->vr_action) {
1810 case VEB_R_PASS:
1811 ifbr->ifbr_action = BRL_ACTION_PASS;
1812 break;
1813 case VEB_R_BLOCK:
1814 ifbr->ifbr_action = BRL_ACTION_BLOCK;
1815 break;
1816 }
1817
1818 if (ISSET(vr->vr_flags, VEB_R_F_IN))
1819 SET(ifbr->ifbr_flags, BRL_FLAG_IN);
1820 if (ISSET(vr->vr_flags, VEB_R_F_OUT))
1821 SET(ifbr->ifbr_flags, BRL_FLAG_OUT);
1822
1823 if (ISSET(vr->vr_flags, VEB_R_F_SRC)) {
1824 SET(ifbr->ifbr_flags, BRL_FLAG_SRCVALID);
1825 ether_e64_to_addr(&ifbr->ifbr_src, vr->vr_src);
1826 }
1827 if (ISSET(vr->vr_flags, VEB_R_F_DST)) {
1828 SET(ifbr->ifbr_flags, BRL_FLAG_DSTVALID);
1829 ether_e64_to_addr(&ifbr->ifbr_dst, vr->vr_dst);
1830 }
1831
1832 /* ARP rule */
1833 if (ISSET(vr->vr_flags, VEB_R_F_ARP|VEB_R_F_RARP)) {
1834 struct ifbrarpf *brla = &ifbr->ifbr_arpf;
1835
1836 if (ISSET(vr->vr_flags, VEB_R_F_ARP))
1837 SET(brla->brla_flags, BRLA_ARP);
1838 if (ISSET(vr->vr_flags, VEB_R_F_RARP))
1839 SET(brla->brla_flags, BRLA_RARP);
1840
1841 if (ISSET(vr->vr_flags, VEB_R_F_SHA)) {
1842 SET(brla->brla_flags, BRLA_SHA);
1843 brla->brla_sha = vr->vr_arp_sha;
1844 }
1845 if (ISSET(vr->vr_flags, VEB_R_F_THA)) {
1846 SET(brla->brla_flags, BRLA_THA);
1847 brla->brla_tha = vr->vr_arp_tha;
1848 }
1849
1850 if (ISSET(vr->vr_flags, VEB_R_F_SPA)) {
1851 SET(brla->brla_flags, BRLA_SPA);
1852 brla->brla_spa = vr->vr_arp_spa;
1853 }
1854 if (ISSET(vr->vr_flags, VEB_R_F_TPA)) {
1855 SET(brla->brla_flags, BRLA_TPA);
1856 brla->brla_tpa = vr->vr_arp_tpa;
1857 }
1858
1859 brla->brla_op = ntohs(vr->vr_arp_op);
1860 }
1861
1862#if NPF > 0
1863 if (vr->vr_pftag != 0)
1864 pf_tag2tagname(vr->vr_pftag, ifbr->ifbr_tagname);
1865#endif
1866}
1867
1868static int
1869veb_rule_list_get(struct veb_softc *sc, struct ifbrlconf *ifbrl)
1870{
1871 struct veb_port *p;
1872 struct veb_rule *vr;
1873 struct ifbrlreq *ifbr, *ifbrs;
1874 int error = 0;
1875 size_t len;
1876
1877 p = veb_port_get(sc, ifbrl->ifbrl_ifsname);
1878 if (p == NULL)
1879 return (ESRCH);
1880
1881 len = p->p_nvrl; /* estimate */
1882 if (ifbrl->ifbrl_len == 0 || len == 0) {
1883 ifbrl->ifbrl_len = len * sizeof(*ifbrs);
1884 goto port_put;
1885 }
1886
1887 error = rw_enter(&sc->sc_rule_lock, RW_READ|RW_INTR);
1888 if (error != 0)
1889 goto port_put;
1890
1891 ifbrs = mallocarray(p->p_nvrl, sizeof(*ifbrs), M_TEMP,
1892 M_WAITOK|M_CANFAIL|M_ZERO);
1893 if (ifbrs == NULL) {
1894 rw_exit(&sc->sc_rule_lock);
1895 goto port_put;
1896 }
1897 len = p->p_nvrl * sizeof(*ifbrs);
1898
1899 ifbr = ifbrs;
1900 TAILQ_FOREACH(vr, &p->p_vrl, vr_entry) {
1901 strlcpy(ifbr->ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
1902 strlcpy(ifbr->ifbr_ifsname, p->p_ifp0->if_xname, IFNAMSIZ);
1903 veb_rule2ifbr(ifbr, vr);
1904
1905 ifbr++;
1906 }
1907
1908 rw_exit(&sc->sc_rule_lock);
1909
1910 error = copyout(ifbrs, ifbrl->ifbrl_buf, min(len, ifbrl->ifbrl_len));
1911 if (error == 0)
1912 ifbrl->ifbrl_len = len;
1913 free(ifbrs, M_TEMP, len);
1914
1915port_put:
1916 veb_port_put(sc, p);
1917 return (error);
1918}
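
Note: veb_rule_list_get() follows the usual two-step ioctl contract: when the caller passes ifbrl_len == 0 it only reports the size it needs, otherwise it snapshots the rules under sc_rule_lock into a temporary buffer, copies out at most the caller's length, and reports the full length so userland can detect truncation. Below is a hedged userland sketch of that probe-then-fetch pattern; fetch_rules() is a made-up stand-in for the ioctl handler, not the real interface.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for the handler: report the needed size, or copy out */
static int
fetch_rules(char *buf, size_t *lenp)
{
	static const char rules[] = "rule0 rule1 rule2";
	size_t len = sizeof(rules);

	if (*lenp == 0 || buf == NULL) {
		*lenp = len;		/* tell the caller how much to allocate */
		return (0);
	}
	memcpy(buf, rules, len < *lenp ? len : *lenp);
	*lenp = len;			/* report the full size, even if truncated */
	return (0);
}

int
main(void)
{
	size_t len = 0;
	char *buf;

	fetch_rules(NULL, &len);	/* first call: probe the size */
	buf = malloc(len);
	if (buf == NULL)
		return (1);
	fetch_rules(buf, &len);		/* second call: fetch the data */
	printf("got %zu bytes: %s\n", len, buf);
	free(buf);
	return (0);
}
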
1919
1920static int
1921veb_port_list(struct veb_softc *sc, struct ifbifconf *bifc)
1922{
1923 struct ifnet *ifp = &sc->sc_if;
1924 struct veb_ports *m;
1925 struct veb_port **ps;
1926 struct veb_port *p;
1927 struct ifnet *ifp0;
1928 struct ifbreq breq;
1929 int n = 0, error = 0;
1930 unsigned int i;
1931
1932 NET_ASSERT_LOCKED();
1933
1934 if (bifc->ifbic_len == 0) {
1935 m = SMR_PTR_GET_LOCKED(&sc->sc_ports);
1936 if (m != NULL)
1937 n += m->m_count;
1938 m = SMR_PTR_GET_LOCKED(&sc->sc_spans);
1939 if (m != NULL)
1940 n += m->m_count;
1941 goto done;
1942 }
1943
1944 m = SMR_PTR_GET_LOCKED(&sc->sc_ports);
1945 if (m != NULL) {
1946 ps = veb_ports_array(m);
1947 for (i = 0; i < m->m_count; i++) {
1948 if (bifc->ifbic_len < sizeof(breq))
1949 break;
1950
1951 p = ps[i];
1952
1953 memset(&breq, 0, sizeof(breq));
1954
1955 ifp0 = p->p_ifp0;
1956
1957 strlcpy(breq.ifbr_name, ifp->if_xname, IFNAMSIZ);
1958 strlcpy(breq.ifbr_ifsname, ifp0->if_xname, IFNAMSIZ);
1959
1960 breq.ifbr_ifsflags = p->p_bif_flags;
1961 breq.ifbr_portno = ifp0->if_index;
1962 breq.ifbr_protected = p->p_protected;
1963 if ((error = copyout(&breq, bifc->ifbic_req + n,
1964 sizeof(breq))) != 0)
1965 goto done;
1966
1967 bifc->ifbic_len -= sizeof(breq);
1968 n++;
1969 }
1970 }
1971
1972 m = SMR_PTR_GET_LOCKED(&sc->sc_spans);
1973 if (m != NULL) {
1974 ps = veb_ports_array(m);
1975 for (i = 0; i < m->m_count; i++) {
1976 if (bifc->ifbic_len < sizeof(breq))
1977 break;
1978
1979 p = ps[i];
1980
1981 memset(&breq, 0, sizeof(breq));
1982
1983 strlcpy(breq.ifbr_name, ifp->if_xname, IFNAMSIZ);
1984 strlcpy(breq.ifbr_ifsname, p->p_ifp0->if_xname,
1985 IFNAMSIZ);
1986
1987 breq.ifbr_ifsflags = p->p_bif_flags;
1988 if ((error = copyout(&breq, bifc->ifbic_req + n,
1989 sizeof(breq))) != 0)
1990 goto done;
1991
1992 bifc->ifbic_len -= sizeof(breq);
1993 n++;
1994 }
1995 }
1996
1997done:
1998 bifc->ifbic_len = n * sizeof(breq);
1999 return (error);
2000}
2001
2002static int
2003veb_port_set_flags(struct veb_softc *sc, struct ifbreq *ifbr)
2004{
2005 struct veb_port *p;
2006
2007 if (ISSET(ifbr->ifbr_ifsflags, ~VEB_IFBIF_FLAGS))
2008 return (EINVAL);
2009
2010 p = veb_port_get(sc, ifbr->ifbr_ifsname);
2011 if (p == NULL)
2012 return (ESRCH);
2013
2014 p->p_bif_flags = ifbr->ifbr_ifsflags;
2015
2016 veb_port_put(sc, p);
2017 return (0);
2018}
2019
2020static int
2021veb_port_get_flags(struct veb_softc *sc, struct ifbreq *ifbr)
2022{
2023 struct veb_port *p;
2024
2025 p = veb_port_get(sc, ifbr->ifbr_ifsname);
2026 if (p == NULL)
2027 return (ESRCH);
2028
2029 ifbr->ifbr_ifsflags = p->p_bif_flags;
2030 ifbr->ifbr_portno = p->p_ifp0->if_index;
2031 ifbr->ifbr_protected = p->p_protected;
2032
2033 veb_port_put(sc, p);
2034 return (0);
2035}
2036
2037static int
2038veb_add_addr(struct veb_softc *sc, const struct ifbareq *ifba)
2039{
2040 struct veb_port *p;
2041 int error = 0;
2042 unsigned int type;
2043
2044 if (ISSET(ifba->ifba_flags, ~IFBAF_TYPEMASK))
2045 return (EINVAL);
2046 switch (ifba->ifba_flags & IFBAF_TYPEMASK) {
2047 case IFBAF_DYNAMIC:
2048 type = EBE_DYNAMIC;
2049 break;
2050 case IFBAF_STATIC:
2051 type = EBE_STATIC;
2052 break;
2053 default:
2054 return (EINVAL);
2055 }
2056
2057 if (ifba->ifba_dstsa.ss_family != AF_UNSPEC)
2058 return (EAFNOSUPPORT);
2059
2060 p = veb_port_get(sc, ifba->ifba_ifsname);
2061 if (p == NULL)
2062 return (ESRCH);
2063
2064 error = etherbridge_add_addr(&sc->sc_eb, p, &ifba->ifba_dst, type);
2065
2066 veb_port_put(sc, p);
2067
2068 return (error);
2069}
2070
2071static int
2072veb_del_addr(struct veb_softc *sc, const struct ifbareq *ifba)
2073{
2074 return (etherbridge_del_addr(&sc->sc_eb, &ifba->ifba_dst));
2075}
2076
2077static int
2078veb_p_ioctl(struct ifnet *ifp0, u_long cmd, caddr_t data)
2079{
2080 const struct ether_brport *eb = ether_brport_get_locked(ifp0);
2081 struct veb_port *p;
2082 int error = 0;
2083
2084 KASSERTMSG(eb != NULL,
2085 "%s: %s called without an ether_brport set",
2086 ifp0->if_xname, __func__);
2087 KASSERTMSG((eb->eb_input == veb_port_input) ||
2088 (eb->eb_input == veb_span_input),
2089 "%s called %s, but eb_input (%p) seems wrong",
2090 ifp0->if_xname, __func__, eb->eb_input);
2091
2092 p = eb->eb_port;
2093
2094 switch (cmd) {
2095 case SIOCSIFADDR:
2096 error = EBUSY;
2097 break;
2098
2099 default:
2100 error = (*p->p_ioctl)(ifp0, cmd, data);
2101 break;
2102 }
2103
2104 return (error);
2105}
2106
2107static int
2108veb_p_output(struct ifnet *ifp0, struct mbuf *m, struct sockaddr *dst,
2109 struct rtentry *rt)
2110{
2111 int (*p_output)(struct ifnet *, struct mbuf *, struct sockaddr *,
2112 struct rtentry *) = NULL;
2113 const struct ether_brport *eb;
2114
2115 /* restrict transmission to bpf only */
2116 if ((m_tag_find(m, PACKET_TAG_DLT, NULL) == NULL)) {
2117 m_freem(m);
2118 return (EBUSY);
2119 }
2120
2121 smr_read_enter();
2122 eb = ether_brport_get(ifp0);
2123 if (eb != NULL && eb->eb_input == veb_port_input) {
2124 struct veb_port *p = eb->eb_port;
2125 p_output = p->p_output; /* code doesn't go away */
2126 }
2127 smr_read_leave();
2128
2129 if (p_output == NULL) {
2130 m_freem(m);
2131 return (ENXIO);
2132 }
2133
2134 return ((*p_output)(ifp0, m, dst, rt));
2135}
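
Note: once an interface is a veb member its if_output points at veb_p_output() above, so the stack cannot transmit around the bridge: only mbufs tagged PACKET_TAG_DLT (i.e. injected via bpf) are passed on to the saved p_output, everything else is dropped with EBUSY, and the saved pointer is fetched under smr_read_enter() so a concurrent port teardown stays safe.
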
2136
2137/*
2138 * there must be an smr_barrier after ether_brport_clr() and before
2139 * veb_port is freed in veb_p_fini()
2140 */
2141
2142static void
2143veb_p_unlink(struct veb_softc *sc, struct veb_port *p)
2144{
2145 struct ifnet *ifp = &sc->sc_if;
2146 struct ifnet *ifp0 = p->p_ifp0;
2147
2148 ifp0->if_ioctl = p->p_ioctl;
2149 ifp0->if_output = p->p_output;
2150
2151 ether_brport_clr(ifp0); /* needs an smr_barrier */
2152
2153 if_detachhook_del(ifp0, &p->p_dtask);
2154 if_linkstatehook_del(ifp0, &p->p_ltask);
2155
2156 if (!ISSET(p->p_bif_flags, IFBIF_SPAN)) {
2157 if (ifpromisc(ifp0, 0) != 0) {
2158 log(LOG_WARNING, "%s %s: unable to disable promisc\n",
2159 ifp->if_xname, ifp0->if_xname);
2160 }
2161
2162 etherbridge_detach_port(&sc->sc_eb, p);
2163 }
2164}
2165
2166static void
2167veb_p_fini(struct veb_port *p)
2168{
2169 struct ifnet *ifp0 = p->p_ifp0;
2170
2171 refcnt_finalize(&p->p_refs, "vebpdtor");
2172 veb_rule_list_free(TAILQ_FIRST(&p->p_vrl));
2173
2174 if_put(ifp0);
2175 free(p, M_DEVBUF, sizeof(*p)); /* hope you didn't forget smr_barrier */
2176}
2177
2178static void
2179veb_p_dtor(struct veb_softc *sc, struct veb_port *p)
2180{
2181 struct veb_ports **ports_ptr;
2182 struct veb_ports *om, *nm;
2183
2184 ports_ptr = ISSET(p->p_bif_flags, IFBIF_SPAN) ?
2185 &sc->sc_spans : &sc->sc_ports;
2186
2187 om = SMR_PTR_GET_LOCKED(ports_ptr);
2188 nm = veb_ports_remove(om, p);
2189 SMR_PTR_SET_LOCKED(ports_ptr, nm);
2190
2191 veb_p_unlink(sc, p);
2192
2193 smr_barrier();
2194 refcnt_finalize(&om->m_refs, "vebports");
2195 veb_ports_destroy(om);
2196
2197 veb_p_fini(p);
2198}
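
Note: veb_p_dtor() above is the common teardown path used by veb_del_port() and by the detach hook via veb_p_detach(). The ordering matches the comment before veb_p_unlink(): remove the port from the published ports map, unhook the interface (restore if_ioctl/if_output, ether_brport_clr(), drop promisc, detach from the etherbridge), then run smr_barrier() before the old map is destroyed and before veb_p_fini() finalizes the port's refcount and frees it.
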
2199
2200static void
2201veb_p_detach(void *arg)
2202{
2203 struct veb_port *p = arg;
2204 struct veb_softc *sc = p->p_veb;
2205
2206 NET_ASSERT_LOCKED();
2207
2208 veb_p_dtor(sc, p);
2209}
2210
2211static int
2212veb_p_active(struct veb_port *p)
2213{
2214 struct ifnet *ifp0 = p->p_ifp0;
2215
2216 return (ISSET(ifp0->if_flags, IFF_RUNNING) &&
2217 LINK_STATE_IS_UP(ifp0->if_link_state));
2218}
2219
2220static void
2221veb_p_linkch(void *arg)
2222{
2223 struct veb_port *p = arg;
2224 u_char link_state = LINK_STATE_FULL_DUPLEX;
2225
2226 NET_ASSERT_LOCKED();
2227
2228 if (!veb_p_active(p))
2229 link_state = LINK_STATE_DOWN;
2230
2231 p->p_link_state = link_state;
2232}
2233
2234static int
2235veb_up(struct veb_softc *sc)
2236{
2237 struct ifnet *ifp = &sc->sc_if;
2238 int error;
2239
2240 error = etherbridge_up(&sc->sc_eb);
2241 if (error != 0)
2242 return (error);
2243
2244 NET_ASSERT_LOCKED();
2245 SET(ifp->if_flags, IFF_RUNNING);
2246
2247 return (0);
2248}
2249
2250static int
2251veb_iff(struct veb_softc *sc)
2252{
2253 return (0);
2254}
2255
2256static int
2257veb_down(struct veb_softc *sc)
2258{
2259 struct ifnet *ifp = &sc->sc_if;
2260 int error;
2261
2262 error = etherbridge_down(&sc->sc_eb);
2263 if (error != 0)
2264 return (0);
2265
2266 NET_ASSERT_LOCKED();
2267 CLR(ifp->if_flags, IFF_RUNNING);
2268
2269 return (0);
2270}
2271
2272static int
2273veb_eb_port_cmp(void *arg, void *a, void *b)
2274{
2275 struct veb_port *pa = a, *pb = b;
2276 return (pa == pb);
2277}
2278
2279static void *
2280veb_eb_port_take(void *arg, void *port)
2281{
2282 struct veb_port *p = port;
2283
2284 refcnt_take(&p->p_refs);
2285
2286 return (p);
2287}
2288
2289static void
2290veb_eb_port_rele(void *arg, void *port)
2291{
2292 struct veb_port *p = port;
2293
2294 refcnt_rele_wake(&p->p_refs);
2295}
2296
2297static void
2298veb_eb_brport_take(void *port)
2299{
2300 veb_eb_port_take(NULL, port);
2301}
2302
2303static void
2304veb_eb_brport_rele(void *port)
2305{
2306 veb_eb_port_rele(NULL, port);
2307}
2308
2309static size_t
2310veb_eb_port_ifname(void *arg, char *dst, size_t len, void *port)
2311{
2312 struct veb_port *p = port;
2313
2314 return (strlcpy(dst, p->p_ifp0->if_xname, len));
2315}
2316
2317static void
2318veb_eb_port_sa(void *arg, struct sockaddr_storage *ss, void *port)
2319{
2320 ss->ss_family = AF_UNSPEC;
2321}
2322
2323/*
2324 * virtual ethernet bridge port
2325 */
2326
2327static int
2328vport_clone_create(struct if_clone *ifc, int unit)
2329{
2330 struct vport_softc *sc;
2331 struct ifnet *ifp;
2332
2333 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO|M_CANFAIL);
2334 if (sc == NULL)
2335 return (ENOMEM);
2336
2337 ifp = &sc->sc_ac.ac_if;
2338
2339 snprintf(ifp->if_xname, IFNAMSIZ, "%s%d", ifc->ifc_name, unit);
2340
2341 ifp->if_softc = sc;
2342 ifp->if_type = IFT_ETHER;
2343 ifp->if_hardmtu = ETHER_MAX_HARDMTU_LEN;
2344 ifp->if_ioctl = vport_ioctl;
2345 ifp->if_enqueue = vport_enqueue;
2346 ifp->if_qstart = vport_start;
2347 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2348 ifp->if_xflags = IFXF_CLONED | IFXF_MPSAFE;
2349 ether_fakeaddr(ifp);
2350
2351 if_counters_alloc(ifp);
2352 if_attach(ifp);
2353 ether_ifattach(ifp);
2354
2355 return (0);
2356}
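
Note: to the network stack a vport is an ordinary Ethernet interface; what makes it a bridge port is that vport_enqueue() (below) hands transmitted packets to the veb it belongs to instead of to hardware, while vport_start() merely purges the queue. Pairing it with a bridge is done from userland in the usual way (create the vport, add it to the veb like any other member, configure addresses on the vport); see the veb(4) manual page for the exact ifconfig invocations.
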
2357
2358static int
2359vport_clone_destroy(struct ifnet *ifp)
2360{
2361 struct vport_softc *sc = ifp->if_softc;
2362
2363 NET_LOCK();
2364 sc->sc_dead = 1;
2365
2366 if (ISSET(ifp->if_flags, IFF_RUNNING))
2367 vport_down(sc);
2368 NET_UNLOCK();
2369
2370 ether_ifdetach(ifp);
2371 if_detach(ifp);
2372
2373 free(sc, M_DEVBUF, sizeof(*sc));
2374
2375 return (0);
2376}
2377
2378static int
2379vport_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2380{
2381 struct vport_softc *sc = ifp->if_softc;
2382 int error = 0;
2383
2384 if (sc->sc_dead)
2385 return (ENXIO);
2386
2387 switch (cmd) {
2388 case SIOCSIFFLAGS:
2389 if (ISSET(ifp->if_flags, IFF_UP)) {
2390 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2391 error = vport_up(sc);
2392 } else {
2393 if (ISSET(ifp->if_flags, IFF_RUNNING))
2394 error = vport_down(sc);
2395 }
2396 break;
2397
2398 case SIOCADDMULTI:
2399 case SIOCDELMULTI:
2400 break;
2401
2402 default:
2403 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
2404 break;
2405 }
2406
2407 if (error == ENETRESET)
2408 error = vport_iff(sc);
2409
2410 return (error);
2411}
2412
2413static int
2414vport_up(struct vport_softc *sc)
2415{
2416 struct ifnet *ifp = &sc->sc_ac.ac_if;
2417
2418 NET_ASSERT_LOCKED();
2419 SET(ifp->if_flags, IFF_RUNNING);
2420
2421 return (0);
2422}
2423
2424static int
2425vport_iff(struct vport_softc *sc)
2426{
2427 return (0);
2428}
2429
2430static int
2431vport_down(struct vport_softc *sc)
2432{
2433 struct ifnet *ifp = &sc->sc_ac.ac_if;
2434
2435 NET_ASSERT_LOCKED();
2436 CLR(ifp->if_flags, IFF_RUNNING);
2437
2438 return (0);
2439}
2440
2441static int
2442vport_if_enqueue(struct ifnet *ifp, struct mbuf *m)
2443{
2444 /*
2445 * switching an l2 packet toward a vport means pushing it
2446 * into the network stack. this function exists to make
2447 * if_vinput compat with veb calling if_enqueue.
2448 */
2449
2450 if_vinput(ifp, m);
2451
2452 return (0);
2453}
2454
2455static int
2456vport_enqueue(struct ifnet *ifp, struct mbuf *m)
2457{
2458 struct arpcom *ac;
2459 const struct ether_brport *eb;
2460 int error = ENETDOWN;
2461#if NBPFILTER > 0
2462 caddr_t if_bpf;
2463#endif
2464
2465 /*
2466 * a packet sent from the l3 stack out a vport goes into
2467 * veb for switching out another port.
2468 */
2469
2470#if NPF > 0
2471 /*
2472 * there's no relationship between pf states in the l3 stack
2473 * and the l2 bridge.
2474 */
2475 pf_pkt_addr_changed(m);
2476#endif
2477
2478 ac = (struct arpcom *)ifp;
2479
2480 smr_read_enter();
2481 eb = SMR_PTR_GET(&ac->ac_brport);
2482 if (eb != NULL)
2483 eb->eb_port_take(eb->eb_port);
2484 smr_read_leave();
2485 if (eb != NULL) {
2486 struct mbuf *(*input)(struct ifnet *, struct mbuf *,
2487 uint64_t, void *) = eb->eb_input;
2488 struct ether_header *eh;
2489 uint64_t dst;
2490
2491 counters_pkt(ifp->if_counters, ifc_opackets, ifc_obytes,
2492 m->m_pkthdr.len);
2493
2494#if NBPFILTER > 0
2495 if_bpf = READ_ONCE(ifp->if_bpf);
2496 if (if_bpf != NULL)
2497 bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
2498#endif
2499
2500 eh = mtod(m, struct ether_header *);
2501 dst = ether_addr_to_e64((struct ether_addr *)eh->ether_dhost);
2502
2503 if (input == veb_vport_input)
2504 input = veb_port_input;
2505 m = (*input)(ifp, m, dst, eb->eb_port);
2506
2507 error = 0;
2508
2509 eb->eb_port_rele(eb->eb_port);
2510 }
2511
2512 m_freem(m);
2513
2514 return (error);
2515}
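
Note: vport_enqueue() is the vport's if_enqueue, so a packet sent by the l3 stack re-enters the bridge here: the brport is read under smr_read_enter(), pinned with eb_port_take(), and the input handler is called directly (veb_vport_input is swapped for veb_port_input so the frame is processed as if it arrived on the port rather than looping straight back up). The handler is expected to return NULL once it owns the mbuf, so the trailing m_freem() only frees what was not consumed. Below is a tiny userland sketch of the pin-while-using pattern; port_take/port_rele are hypothetical stand-ins for eb_port_take/eb_port_rele, and a bare atomic counter stands in for the kernel's refcnt API.

#include <stdatomic.h>
#include <stdio.h>

struct port {
	atomic_int	 p_refs;
	const char	*p_name;
};

static void
port_take(struct port *p)
{
	atomic_fetch_add(&p->p_refs, 1);
}

static void
port_rele(struct port *p)
{
	/* the kernel's refcnt_rele_wake() also wakes a finalizing waiter */
	atomic_fetch_sub(&p->p_refs, 1);
}

int
main(void)
{
	struct port p = { .p_refs = 1, .p_name = "vport0" };

	port_take(&p);			/* pin the port while it is in use */
	printf("forwarding via %s\n", p.p_name);
	port_rele(&p);			/* teardown may proceed once released */

	return (0);
}
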
2516
2517static void
2518vport_start(struct ifqueue *ifq)
2519{
2520 ifq_purge(ifq);
2521}